#!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cgi
import multiprocessing
import os
import subprocess
import sys
import argparse

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server

sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
                 'microbenchmarks', 'bm_diff'))
import bm_constants

flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

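# Run from the repository root so that relative paths such as 'bins/...' and
# 'reports/...' resolve correctly.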
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')

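# Benchmarks that need listening ports allocate them via the shared port
# server; make sure it is running before any benchmark binaries start.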
start_port_server.start_port_server()


def fnize(s):
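    """turn a benchmark name into a filesystem-safe name, collapsing runs of
    the characters '<', '>', ',', ' ' and '/' into single underscores"""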
    out = ''
    for c in s:
        if c in '<>, /':
            if len(out) and out[-1] == '_': continue
            out += '_'
        else:
            out += c
    return out


# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""


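# The helpers below accumulate HTML fragments in index_html; the finished page
# is written to reports/index.html in the 'finally' block at the bottom.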
def heading(name):
    global index_html
    index_html += "<h1>%s</h1>\n" % name


def link(txt, tgt):
    global index_html
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
        tgt, quote=True), cgi.escape(txt))


def text(txt):
    global index_html
    index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)


def collect_latency(bm_name, args):
    """generate latency profiles"""
    benchmarks = []
    profile_analysis = []
    cleanup = []

    heading('Latency Profiles: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=basicprof', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
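    # Ask the benchmark binary for its list of tests, then profile each test
    # in its own run (--benchmark_filter pins a run to a single benchmark).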
    for line in subprocess.check_output(
        ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.txt' % fnize(line))
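        # Run under the basicprof build with GRPC_LATENCY_TRACE pointing at a
        # per-benchmark trace file, then post-process that trace into a text
        # report under reports/.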
        benchmarks.append(
            jobset.JobSpec([
                'bins/basicprof/%s' % bm_name,
                '--benchmark_filter=^%s$' % line, '--benchmark_min_time=0.05'
            ],
                           environ={
                               'GRPC_LATENCY_TRACE': '%s.trace' % fnize(line)
                           },
                           shortname='profile-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec([
                sys.executable,
                'tools/profiling/latency_profile/profile_analyzer.py',
                '--source',
                '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
                'reports/%s.txt' % fnize(line)
            ],
                           timeout_seconds=20 * 60,
                           shortname='analyze-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
        # periodically flush out the list of jobs: some profile_analysis jobs
        # consume upwards of five gigabytes of RAM, so analysing hundreds of
        # them at once is impractical -- but we still want some concurrency or
        # the work takes too long
        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
            # run up to half the cpu count: each benchmark can use up to two cores
            # (one for the microbenchmark, one for the data flush)
            jobset.run(benchmarks,
                       maxjobs=max(1,
                                   multiprocessing.cpu_count() / 2))
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())


def collect_perf(bm_name, args):
    """generate flamegraphs"""
    heading('Flamegraphs: %s' % bm_name)
    subprocess.check_call([
        'make', bm_name, 'CONFIG=mutrace', '-j',
        '%d' % multiprocessing.cpu_count()
    ])
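    # Each benchmark is run under 'perf record'; the resulting profile is then
    # turned into an SVG flamegraph under reports/ by
    # process_local_perf_flamegraphs.sh.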
    benchmarks = []
    profile_analysis = []
    cleanup = []
    for line in subprocess.check_output(
        ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
        link(line, '%s.svg' % fnize(line))
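        # 'perf record -g -F 997' samples call stacks at 997 Hz while the
        # benchmark runs for at least 10 seconds.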
        benchmarks.append(
            jobset.JobSpec([
                'perf', 'record', '-o',
                '%s-perf.data' % fnize(line), '-g', '-F', '997',
                'bins/mutrace/%s' % bm_name,
                '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
            ],
                           shortname='perf-%s' % fnize(line)))
        profile_analysis.append(
            jobset.JobSpec(
                [
                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
                ],
                environ={
                    'PERF_BASE_NAME': fnize(line),
                    'OUTPUT_DIR': 'reports',
                    'OUTPUT_FILENAME': fnize(line),
                },
                shortname='flame-%s' % fnize(line)))
        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
        # periodically flush out the list of jobs: temporary space required for this
        # processing is large
        if len(benchmarks) >= 20:
            # run the benchmarks one at a time (maxjobs=1) so that each
            # 'perf record' profile is collected in isolation
            jobset.run(benchmarks, maxjobs=1)
            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            profile_analysis = []
            cleanup = []
    # run the remaining benchmarks that weren't flushed
    if len(benchmarks):
        jobset.run(benchmarks, maxjobs=1)
        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())


def run_summary(bm_name, cfg, base_json_name):
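    """build bm_name under the given config, run it with JSON output written
    to <base_json_name>.<cfg>.json, and return the benchmark's stdout"""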
    subprocess.check_call([
        'make', bm_name,
        'CONFIG=%s' % cfg, '-j',
        '%d' % multiprocessing.cpu_count()
    ])
    cmd = [
        'bins/%s/%s' % (cfg, bm_name),
        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
        '--benchmark_out_format=json'
    ]
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd)


def collect_summary(bm_name, args):
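    """generate textual summaries (and optionally upload them to BigQuery)"""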
    heading('Summary: %s [no counters]' % bm_name)
    text(run_summary(bm_name, 'opt', bm_name))
    heading('Summary: %s [with counters]' % bm_name)
    text(run_summary(bm_name, 'counters', bm_name))
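    # bm2bq.py converts the counters/opt JSON output into a CSV, which is then
    # loaded into the microbenchmarks.microbenchmarks BigQuery table.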
    if args.bigquery_upload:
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
                subprocess.check_output([
                    'tools/profiling/microbenchmarks/bm2bq.py',
                    '%s.counters.json' % bm_name,
                    '%s.opt.json' % bm_name
                ]))
        subprocess.check_call([
            'bq', 'load', 'microbenchmarks.microbenchmarks',
            '%s.csv' % bm_name
        ])


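# Maps each value accepted by --collect to the function that implements it.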
collectors = {
    'latency': collect_latency,
    'perf': collect_perf,
    'summary': collect_summary,
}

argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c',
                  '--collect',
                  choices=sorted(collectors.keys()),
                  nargs='*',
                  default=sorted(collectors.keys()),
                  help='Which collectors should be run against each benchmark')
argp.add_argument('-b',
                  '--benchmarks',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  nargs='+',
                  type=str,
                  help='Which microbenchmarks should be run')
argp.add_argument('--bigquery_upload',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Upload results from summary collection to bigquery')
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()

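# Run every requested collector against every requested benchmark; the HTML
# index is written out even if a collector fails part-way through.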
try:
    for collect in args.collect:
        for bm_name in args.benchmarks:
            collectors[collect](bm_name, args)
finally:
    if not os.path.exists('reports'):
        os.makedirs('reports')
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
        f.write(index_html)