#!/usr/bin/python
# Copyright (C) 2017 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
#
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
"""Compare results of string functions
Given a string benchmark result file, print a table with comparisons with a
baseline. The baseline is the first function, which typically is the builtin
function.
"""
import sys
import os
import json
import pylab

try:
    import jsonschema as validator
except ImportError:
    print('Could not find jsonschema module.')
    raise
def parse_file(filename, schema_filename):
    """Parse the benchmark results file and validate it against the schema."""
    with open(schema_filename, 'r') as schemafile:
        schema = json.load(schemafile)
    with open(filename, 'r') as benchfile:
        bench = json.load(benchfile)
    validator.validate(bench, schema)
    return bench
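# For reference, the parsed benchmark JSON is expected to look roughly like
# the sketch below.  This is inferred from how process_results() accesses it,
# not an authoritative description of the schema (the function, ifunc and
# attribute names are illustrative):
#
#   {"functions":
#       {"memcpy":
#           {"bench-variant": "default",
#            "ifuncs": ["__memcpy_generic", "__memcpy_avx_unaligned"],
#            "results": [{"length": 1, "align1": 0, "align2": 0,
#                         "timings": [10.5, 9.8]}]}}}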
def draw_graph(f, v, ifuncs, results):
    """Plot graphs for functions

    Plot line graphs for each of the ifuncs

    Args:
        f: Function name
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names
        results: Dictionary of results for each test criterion
    """
    xkeys = list(results.keys())

    pylab.clf()
    fig = pylab.figure(frameon=False)
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    X = range(len(xkeys))
    pylab.xticks(X, xkeys)

    for i, ifunc in enumerate(ifuncs):
        Y = [results[k][i] for k in xkeys]
        pylab.plot(X, Y, label=':' + ifunc)

    pylab.legend()
    pylab.grid()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
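# Each call to draw_graph() writes one PNG into the current directory, named
# "<function>-<variant>.png" (e.g. "memcpy-default.png"; the example name is
# illustrative and depends on the contents of the benchmark file).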
def process_results(results, attrs):
    """Process results and print them

    Args:
        results: JSON dictionary of results
        attrs: Attributes that form the test criteria
    """
    for f in results['functions'].keys():
        print('Function: %s' % f)
        print('\t'.join(results['functions'][f]['ifuncs']))
        v = results['functions'][f]['bench-variant']
        print('Variant: %s' % v)
        print('=' * 80)

        graph_res = {}
        for res in results['functions'][f]['results']:
            attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
            key = ','.join(attr_list)
            sys.stdout.write('%s: \t' % key)
            graph_res[key] = res['timings']

            # The first timing is the baseline; every subsequent timing is
            # also printed with its percentage improvement over the baseline.
            first = True
            for t in res['timings']:
                sys.stdout.write('%.2f' % t)
                if first:
                    first = False
                else:
                    diff = (res['timings'][0] - t) * 100 / res['timings'][0]
                    sys.stdout.write(' (%.2f%%)' % diff)
                sys.stdout.write('\t')
            print('')

        draw_graph(f, v, results['functions'][f]['ifuncs'], graph_res)
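# As a rough illustration of the table printed above (the function name,
# ifunc names, attributes and timings are made up; each timing after the
# first is followed by its improvement over the baseline in parentheses):
#
#   Function: memcpy
#   __memcpy_generic	__memcpy_avx_unaligned
#   Variant: default
#   ================================================================  (80 '=')
#   length=1,align1=0,align2=0: 	10.21	8.47 (17.04%)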
def main(args):
    """Program Entry Point

    Take a string benchmark output file and compare timings.
    """
    if len(args) < 3:
        print('Usage: %s <input file> <schema file> attr1 [attr2 ...]' % sys.argv[0])
        sys.exit(os.EX_USAGE)

    filename = args[0]
    schema_filename = args[1]
    attrs = args[2:]

    results = parse_file(filename, schema_filename)
    process_results(results, attrs)
if __name__ == '__main__':
    main(sys.argv[1:])