2 # Copyright (C) 2017-2019 Free Software Foundation, Inc.
3 # This file is part of the GNU C Library.
5 # The GNU C Library is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU Lesser General Public
7 # License as published by the Free Software Foundation; either
8 # version 2.1 of the License, or (at your option) any later version.
10 # The GNU C Library is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 # Lesser General Public License for more details.
15 # You should have received a copy of the GNU Lesser General Public
16 # License along with the GNU C Library; if not, see
17 # <https://www.gnu.org/licenses/>.
18 """Compare results of string functions
20 Given a string benchmark result file, print a table with comparisons with a
21 baseline. The baseline is the first function, which typically is the builtin
import matplotlib as mpl
# Select a non-interactive backend so graphs can be written without a display.
mpl.use('Agg')

import argparse
import json
import os
import sys
import traceback

import pylab

try:
    import jsonschema as validator
except ImportError:
    print('Could not find jsonschema module.')
    raise
def parse_file(filename, schema_filename):
    """Parse a benchmark result file and validate it against a schema.

    Args:
        filename: Path of the JSON benchmark result file.
        schema_filename: Path of the JSON schema file to validate against.

    Returns:
        The parsed benchmark results as a dictionary.

    Exits:
        os.EX_NOINPUT if the files cannot be read, parsed or validated.
    """
    try:
        with open(schema_filename, 'r') as schemafile:
            schema = json.load(schemafile)
        with open(filename, 'r') as benchfile:
            bench = json.load(benchfile)
            validator.validate(bench, schema)
        # Hand the validated data back to the caller for processing.
        return bench
    except Exception:
        # Print a short traceback for diagnosis and exit with the
        # conventional "cannot open input" status.
        print(traceback.format_exc(limit=1))
        sys.exit(os.EX_NOINPUT)
def draw_graph(f, v, ifuncs, results):
    """Plot graphs for functions

    Plot one line graph per ifunc and save it as '<function>-<variant>.png'
    in the current directory.

    Args:
        f: Function name.
        v: Benchmark variant for the function.
        ifuncs: List of ifunc names.
        results: Dictionary of results for each test criterion.
    """
    print('Generating graph for %s, variant \'%s\'' % (f, v))
    xkeys = list(results.keys())

    fig = pylab.figure(frameon=False)
    # A large canvas keeps long attribute labels on the X axis readable.
    fig.set_size_inches(32, 18)
    pylab.ylabel('Performance improvement from base')
    X = range(len(xkeys))
    pylab.xticks(X, xkeys)

    # One line per ifunc; column i of each result row belongs to ifuncs[i].
    for i, ifunc in enumerate(ifuncs):
        Y = [results[k][i] for k in xkeys]
        pylab.plot(X, Y, label=':' + ifunc)

    pylab.legend()
    pylab.savefig('%s-%s.png' % (f, v), bbox_inches='tight')
def process_results(results, attrs, funcs, base_func, graph, no_diff, no_header):
    """ Process results and print them

    Print one comparison table per benchmarked function, optionally
    followed by a graph of the same data.

    Args:
        results: JSON dictionary of results.
        attrs: Attributes that form the test criteria.
        funcs: Ifunc names selected for output; None selects all of them.
        base_func: Name of the baseline ifunc; None uses the first column.
        graph: Generate a graph from the results when True.
        no_diff: Do not print the difference from baseline when True.
        no_header: Do not print the per-function header when True.
    """
    for f in results['functions'].keys():
        v = results['functions'][f]['bench-variant']
        all_ifuncs = results['functions'][f]['ifuncs']

        # Restrict output to the selected ifuncs, remembering which timing
        # columns they correspond to.
        if funcs:
            indices = [i for i in range(len(all_ifuncs)) if all_ifuncs[i] in funcs]
        else:
            indices = list(range(len(all_ifuncs)))
        ifuncs = [all_ifuncs[i] for i in indices]

        # The baseline defaults to the first selected ifunc.
        base_index = 0
        if base_func:
            try:
                base_index = ifuncs.index(base_func)
            except ValueError:
                sys.stderr.write('Invalid -b "%s" parameter. Options: %s.\n' %
                                 (base_func, ', '.join(all_ifuncs)))
                sys.exit(os.EX_DATAERR)

        if not no_header:
            print('Function: %s' % f)
            print('Variant: %s' % v)
            print("%36s%s" % (' ', '\t'.join(ifuncs)))

        graph_res = {}
        for res in results['functions'][f]['results']:
            try:
                attr_list = ['%s=%s' % (a, res[a]) for a in attrs]
            except KeyError as ke:
                sys.stderr.write('Invalid -a %s parameter. Options: %s.\n'
                                 % (ke, ', '.join([a for a in res.keys() if a != 'timings'])))
                sys.exit(os.EX_DATAERR)
            key = ', '.join(attr_list)
            sys.stdout.write('%36s: ' % key)
            timings = [res['timings'][i] for i in indices]
            graph_res[key] = timings
            base = timings[base_index]
            for t in timings:
                sys.stdout.write('%12.2f' % t)
                if not no_diff:
                    # Positive percentage means faster than the baseline.
                    diff = (base - t) * 100 / base
                    sys.stdout.write(' (%6.2f%%)' % diff)
                sys.stdout.write('\t')
            print('')

        if graph:
            draw_graph(f, v, ifuncs, graph_res)
164 """Program Entry Point
166 Take a string benchmark output file and compare timings.
170 filename = args.input
171 schema_filename = args.schema
172 base_func = args.base
173 attrs = args.attributes.split(',')
175 funcs = args.functions.split(',')
176 if base_func and not base_func in funcs:
177 print('Baseline function (%s) not found.' % base_func)
178 sys.exit(os.EX_DATAERR)
182 results = parse_file(args.input, args.schema)
183 process_results(results, attrs, funcs, base_func, args.graph, args.no_diff, args.no_header)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # The required arguments.
    req = parser.add_argument_group(title='required arguments')
    req.add_argument('-a', '--attributes', required=True,
                     help='Comma separated list of benchmark attributes.')
    req.add_argument('-i', '--input', required=True,
                     help='Input JSON benchmark result file.')
    req.add_argument('-s', '--schema', required=True,
                     help='Schema file to validate the result file.')

    # Optional arguments.
    parser.add_argument('-f', '--functions',
                        help='Comma separated list of functions.')
    parser.add_argument('-b', '--base',
                        help='IFUNC variant to set as baseline.')
    parser.add_argument('-g', '--graph', action='store_true',
                        help='Generate a graph from results.')
    parser.add_argument('--no-diff', action='store_true',
                        help='Do not print the difference from baseline.')
    parser.add_argument('--no-header', action='store_true',
                        help='Do not print the header.')

    args = parser.parse_args()
    # Run the comparison; previously the parsed args were never used.
    main(args)