3 """%prog [options] shellpath dirpath
5 Pulls performance data on parsing via the js shell.
6 Displays the average number of milliseconds it took to parse each file.
8 For comparison, something apparently approximating a t-test is performed:
11 t_baseline_goodrun = (t_baseline_avg - t_baseline_stddev)
12 t_current_badrun = (t_current_avg + t_current_stddev)
13 t_current_badrun < t_baseline_goodrun
15 Effectively, a bad run from the current data is better than a good run from the
16 baseline data, we're probably faster. A similar computation is used for
17 determining the "slower" designation.
20 shellpath executable JavaScript shell
21 dirpath directory filled with parsilicious js files

import math
import optparse
import os
import subprocess as subp
import sys
from string import Template

# compare_bench is optional; without it we cannot diff against a baseline.
try:
    import compare_bench
except ImportError:
    compare_bench = None


_DIR = os.path.dirname(__file__)
JS_CODE_TEMPLATE = Template("""
// Some shells expose the file reader as snarf rather than read.
if (typeof snarf !== 'undefined') read = snarf
var contents = read("$filepath");
// Warmup runs: prime the parser so the timed runs are more stable.
for (var i = 0; i < $warmup_run_count; i++)
    parse(contents);
// Timed runs: record each parse's duration in milliseconds.
var results = [];
for (var i = 0; i < $real_run_count; i++) {
    var start = new Date();
    parse(contents);
    var end = new Date();
    results.push(end - start);
}
print(results);
""")


def gen_filepaths(dirpath, target_ext='.js'):
    for filename in os.listdir(dirpath):
        if filename.endswith(target_ext):
            yield os.path.join(dirpath, filename)


def avg(seq):
    return sum(seq) / len(seq)


def stddev(seq, mean):
    diffs = ((float(item) - mean) ** 2 for item in seq)
    return math.sqrt(sum(diffs) / len(seq))
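

# Illustrative only -- this script itself does not make the faster/slower
# call; compare_bench presumably does. A minimal sketch of the docstring's
# heuristic, assuming (avg, stddev) pairs; name and signature are
# hypothetical.
def _probably_faster(current, baseline):
    """current/baseline are (avg_ms, stddev_ms); True if probably faster."""
    current_avg, current_stddev = current
    baseline_avg, baseline_stddev = baseline
    t_baseline_goodrun = baseline_avg - baseline_stddev
    t_current_badrun = current_avg + current_stddev
    # A bad (slow) current run still beats a good (fast) baseline run.
    return t_current_badrun < t_baseline_goodrun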


def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
    """Return the mean and stddev of milliseconds for the counted runs."""
    assert '"' not in filepath
    code = JS_CODE_TEMPLATE.substitute(filepath=filepath,
                                       warmup_run_count=warmup_runs,
                                       real_run_count=counted_runs)
    proc = subp.Popen([shellpath, '-e', code], stdout=subp.PIPE)
    stdout, _ = proc.communicate()
    # The shell prints the results array as comma-separated millisecond values.
    milliseconds = [float(val) for val in stdout.split(',')]
    mean = avg(milliseconds)
    sigma = stddev(milliseconds, mean)
    if not stfu:
        print 'Runs:', [int(ms) for ms in milliseconds]
        print 'Mean:', mean
        print 'Stddev: %.2f (%.2f%% of mean)' % (sigma, sigma / mean * 100)
    return mean, sigma
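
# Hypothetical use (path and numbers are illustrative, not real output):
#   mean_ms, sigma_ms = bench('/usr/local/bin/js', 'tests/jquery.js', 5, 50)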


def parsemark(filepaths, fbench, stfu=False):
    """:param fbench: fbench(filename) -> (average_ms, stddev_ms)"""
    bench_map = {}  # {filename: (avg, stddev)}
    for filepath in filepaths:
        filename = os.path.split(filepath)[-1]
        if not stfu:
            print 'Parsemarking %s...' % filename
        bench_map[filename] = fbench(filepath)
    # Dump the results to stdout as a JSON object, one entry per file.
    print '{'
    for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
        assert '"' not in filename
        fmt = '    %30s: {"average_ms": %6.2f, "stddev_ms": %6.2f}'
        if i != len(bench_map) - 1:  # All entries but the last need a comma.
            fmt += ','
        filename_str = '"%s"' % filename
        print fmt % (filename_str, avg, stddev)
    print '}'
    return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
                for filename, (avg, stddev) in bench_map.iteritems())
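
# Example of the JSON this emits (filenames and values are illustrative):
# {
#                  "jquery.js": {"average_ms":  12.34, "stddev_ms":   0.56},
#               "prototype.js": {"average_ms":   8.76, "stddev_ms":   0.43}
# }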


def main():
    parser = optparse.OptionParser(usage=__doc__.strip())
    parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
                      default=5, help='used to minimize test instability '
                      '[%default]')
    parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
                      default=50, help='timed data runs that count towards '
                      'the average [%default]')
    parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
                      'location; when omitted, will look in likely places')
    parser.add_option('-b', '--baseline', metavar='JSON_PATH',
                      dest='baseline_path', help='json file with baseline '
                      'values to compare against')
    parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
                      default=False, help='only print JSON to stdout '
                      '[%default]')
    options, args = parser.parse_args()
    try:
        shellpath = args.pop(0)
    except IndexError:
        parser.print_help()
        print >> sys.stderr, 'error: shellpath required'
        return -1
    try:
        dirpath = args.pop(0)
    except IndexError:
        parser.print_help()
        print >> sys.stderr, 'error: dirpath required'
        return -1
    if not shellpath or not os.path.exists(shellpath):
        print >> sys.stderr, 'error: could not find shell:', shellpath
        return -1
    if options.baseline_path:
        if not os.path.isfile(options.baseline_path):
            print >> sys.stderr, 'error: baseline file does not exist'
            return -1
        if not compare_bench:
            print >> sys.stderr, ('error: JSON support is missing, '
                                  'cannot compare benchmarks')
            return -1
    benchfile = lambda filepath: bench(shellpath, filepath,
                                       options.warmup_runs,
                                       options.counted_runs,
                                       stfu=options.stfu)
    bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
    if options.baseline_path:
        compare_bench.compare_immediate(bench_map, options.baseline_path)
    return 0


if __name__ == '__main__':
    sys.exit(main())
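
# Hypothetical invocation (paths are illustrative):
#   python parsemark.py ~/mozilla/js/src/shell/js /tmp/parse-tests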