2 # Copyright 2015 gRPC authors.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run test matrix."""
17 from __future__ import print_function
20 import multiprocessing
24 from python_utils.filter_pull_request_tests import filter_tests
25 import python_utils.jobset as jobset
26 import python_utils.report_utils as report_utils
# Root of the gRPC repository, computed relative to this script's location
# (the script lives two directories below the repo root).
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))

# Default timeout (seconds) for one run_tests.py invocation: 1 hour.
_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60

# C/C++ tests can take long time
_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60

# Set timeout high for ObjC for Cocoapods to install pods
_OBJC_RUNTESTS_TIMEOUT = 90 * 60

# Number of jobs assigned to each run_tests.py instance
_DEFAULT_INNER_JOBS = 2

# Name of the top-level umbrella report that includes all the run_tests.py invocations
# Note that the starting letter 't' matters so that the targets are listed AFTER
# the per-test breakdown items that start with 'run_tests/' (it is more readable that way)
_MATRIX_REPORT_NAME = 'toplevel_run_tests_invocations'
48 def _safe_report_name(name):
49 """Reports with '+' in target name won't show correctly in ResultStore"""
50 return name.replace('+', 'p')
53 def _report_filename(name):
54 """Generates report file name with directory structure that leads to better presentation by internal CI"""
55 # 'sponge_log.xml' suffix must be there for results to get recognized by kokoro.
56 return '%s/%s' % (_safe_report_name(name), 'sponge_log.xml')
def _matrix_job_logfilename(shortname_for_multi_target):
    """Generate location for log file that will match the sponge_log.xml from the top-level matrix report."""
    # 'sponge_log.log' suffix must be there for log to get recognized as "target log"
    # for the corresponding 'sponge_log.xml' report.
    # the shortname_for_multi_target component must be set to match the sponge_log.xml location
    # because the top-level render_junit_xml_report is called with multi_target=True
    # Fix: the return statement was truncated in this excerpt (the format string
    # expects three values); the final component is the mandatory log leaf name.
    return '%s/%s/%s' % (_MATRIX_REPORT_NAME, shortname_for_multi_target,
                         'sponge_log.log')
def _docker_jobspec(name,
                    inner_jobs=_DEFAULT_INNER_JOBS,
                    timeout_seconds=None):
    """Run a single instance of run_tests.py in a docker container"""
    # NOTE(review): this excerpt appears to be missing source lines — the
    # 'runtests_args'/'runtests_envs' parameters referenced below, the close
    # of the cmdline list, the shortname= kwarg and the final return of
    # test_job are not visible. Verify against the full file.
    if not timeout_seconds:
        # Fall back to the default (1h) run_tests.py timeout.
        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
    shortname = 'run_tests_%s' % name
    test_job = jobset.JobSpec(cmdline=[
        'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t', '-j',
        str(inner_jobs), '-x',
        'run_tests/%s' % _report_filename(name), '--report_suite_name',
        '%s' % _safe_report_name(name)
        environ=runtests_envs,  # extra env vars forwarded to the docker run
        timeout_seconds=timeout_seconds,
        # Log path must mirror the sponge_log.xml location (see
        # _matrix_job_logfilename).
        logfilename=_matrix_job_logfilename(shortname))
def _workspace_jobspec(name,
                       inner_jobs=_DEFAULT_INNER_JOBS,
                       timeout_seconds=None):
    """Run a single instance of run_tests.py in a separate workspace"""
    # NOTE(review): this excerpt appears to be missing source lines — the
    # 'workspace_name'/'runtests_args'/'runtests_envs' parameters referenced
    # below, the close of the cmdline list, the environ=env kwarg and the
    # final return of test_job are not visible. Verify against the full file.
    if not workspace_name:
        # Derive a workspace name from the job name when none was supplied.
        workspace_name = 'workspace_%s' % name
    if not timeout_seconds:
        # Fall back to the default (1h) run_tests.py timeout.
        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
    shortname = 'run_tests_%s' % name
    # WORKSPACE_NAME tells the helper script which workspace copy to use;
    # caller-provided env vars are layered on top.
    env = {'WORKSPACE_NAME': workspace_name}
    env.update(runtests_envs)
    test_job = jobset.JobSpec(cmdline=[
        'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
        str(inner_jobs), '-x',
        # Report path is relative to the sub-workspace, hence the '../'.
        '../run_tests/%s' % _report_filename(name), '--report_suite_name',
        '%s' % _safe_report_name(name)
        timeout_seconds=timeout_seconds,
        # Log path must mirror the sponge_log.xml location (see
        # _matrix_job_logfilename).
        logfilename=_matrix_job_logfilename(shortname))
def _generate_jobs(languages,
                   iomgr_platforms=['native'],
                   inner_jobs=_DEFAULT_INNER_JOBS,
                   timeout_seconds=None):
    # Builds one job per (language, platform, iomgr_platform, config)
    # combination, dispatching to docker on linux and to a workspace
    # otherwise.
    # NOTE(review): many source lines are missing from this excerpt — the
    # 'platforms'/'configs'/'arch'/'compiler'/'labels'/'extra_args'/
    # 'extra_envs' parameters implied by the usages below, the start of the
    # runtests_args list, the 'else:' branch header before
    # _workspace_jobspec, and the statement that collects/yields each job.
    # Verify against the full file. Also note the mutable default argument
    # iomgr_platforms=['native'] — benign only if never mutated; confirm.
    for language in languages:
        for platform in platforms:
            for iomgr_platform in iomgr_platforms:
                for config in configs:
                    # Job name encodes the full matrix coordinate.
                    name = '%s_%s_%s_%s' % (language, platform, config,
                    '-l', language, '-c', config, '--iomgr_platform',
                    name += '_%s_%s' % (arch, compiler)
                    '--arch', arch, '--compiler', compiler
                    if '--build_only' in extra_args:
                    # Extra env vars are folded into the job name so each
                    # variant gets a distinct report target.
                    for extra_env in extra_envs:
                        name += '_%s_%s' % (extra_env, extra_envs[extra_env])
                    runtests_args += extra_args
                    if platform == 'linux':
                        job = _docker_jobspec(name=name,
                                              runtests_args=runtests_args,
                                              runtests_envs=extra_envs,
                                              inner_jobs=inner_jobs,
                                              timeout_seconds=timeout_seconds)
                        job = _workspace_jobspec(
                            runtests_args=runtests_args,
                            runtests_envs=extra_envs,
                            inner_jobs=inner_jobs,
                            timeout_seconds=timeout_seconds)
                    # Labels drive the -f/--exclude filtering in __main__.
                    job.labels = [platform, config, language, iomgr_platform
def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
    # Builds the standard (non-portability) test matrix, tagged 'basictests'.
    # NOTE(review): this excerpt is missing source lines — the
    # 'test_jobs = []' initialization, several keyword arguments
    # (languages=, configs=, platforms= on some calls) and the final
    # 'return test_jobs' are not visible. Verify against the full file.
    # NOTE(review): mutable default extra_args=[] — only read/concatenated
    # below, so benign as written; confirm nothing mutates it.
    test_jobs += _generate_jobs(languages=['sanity'],
                                labels=['basictests'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs)
    # supported on all platforms.
    test_jobs += _generate_jobs(
        configs=['dbg', 'opt'],
        platforms=['linux', 'macos', 'windows'],
        labels=['basictests', 'corelang'],
        extra_args,  # don't use multi_target report because C has too many test cases
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    # C# tests on .NET desktop/mono
    test_jobs += _generate_jobs(languages=['csharp'],
                                configs=['dbg', 'opt'],
                                platforms=['linux', 'macos', 'windows'],
                                labels=['basictests', 'multilang'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs)
    # C# tests on .NET core
    test_jobs += _generate_jobs(languages=['csharp'],
                                configs=['dbg', 'opt'],
                                platforms=['linux', 'macos', 'windows'],
                                labels=['basictests', 'multilang'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs)
    test_jobs += _generate_jobs(languages=['python'],
                                platforms=['linux', 'macos', 'windows'],
                                iomgr_platforms=['native', 'gevent', 'asyncio'],
                                labels=['basictests', 'multilang'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs)
    # supported on linux and mac.
    test_jobs += _generate_jobs(
        configs=['dbg', 'opt'],
        platforms=['linux', 'macos'],
        labels=['basictests', 'corelang'],
        extra_args,  # don't use multi_target report because C++ has too many test cases
        inner_jobs=inner_jobs,
        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php7'],
                                configs=['dbg', 'opt'],
                                platforms=['linux', 'macos'],
                                labels=['basictests', 'multilang'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs)
    # supported on mac only.
    test_jobs += _generate_jobs(languages=['objc'],
                                labels=['basictests', 'multilang'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs,
                                # ObjC needs extra time for Cocoapods.
                                timeout_seconds=_OBJC_RUNTESTS_TIMEOUT)
def _create_portability_test_jobs(extra_args=[],
                                  inner_jobs=_DEFAULT_INNER_JOBS):
    # Builds the portability matrix (alternate compilers, archs, resolvers,
    # iomgr backends), tagged 'portability'.
    # NOTE(review): this excerpt is missing source lines — the
    # 'test_jobs = []' initialization, the assignment that holds the
    # portability compiler list fragment below, several keyword arguments
    # (platforms=, arch=, compiler=, configs=) and the final
    # 'return test_jobs' are not visible. Verify against the full file.
    # NOTE(review): mutable default extra_args=[] — only read/concatenated
    # below, so benign as written; confirm nothing mutates it.
    test_jobs += _generate_jobs(languages=['c'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args,
                                inner_jobs=inner_jobs)
    # portability C and C++ on x64
        'gcc4.9', 'gcc5.3', 'gcc7.4', 'gcc8.3', 'gcc8.3_openssl102',
        'gcc_musl', 'clang4.0', 'clang5.0'
    test_jobs += _generate_jobs(languages=['c', 'c++'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args,
                                inner_jobs=inner_jobs,
                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    # portability C on Windows 64-bit (x86 is the default)
    test_jobs += _generate_jobs(languages=['c'],
                                platforms=['windows'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args,
                                inner_jobs=inner_jobs)
    # portability C++ on Windows
    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
    test_jobs += _generate_jobs(languages=['c++'],
                                platforms=['windows'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args + ['--build_only'],
                                inner_jobs=inner_jobs,
                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    # portability C and C++ on Windows using VS2017 (build only)
    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
    test_jobs += _generate_jobs(languages=['c', 'c++'],
                                platforms=['windows'],
                                compiler='cmake_vs2017',
                                labels=['portability', 'corelang'],
                                extra_args=extra_args + ['--build_only'],
                                inner_jobs=inner_jobs,
                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    # C and C++ with the c-ares DNS resolver on Linux
    test_jobs += _generate_jobs(languages=['c', 'c++'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args,
                                extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    # C and C++ with no-exceptions on Linux
    test_jobs += _generate_jobs(languages=['c', 'c++'],
                                configs=['noexcept'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args,
                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
    test_jobs += _generate_jobs(languages=['python'],
                                compiler='python_alpine',
                                labels=['portability', 'multilang'],
                                extra_args=extra_args +
                                ['--report_multi_target'],
                                inner_jobs=inner_jobs)
    # TODO(jtattermusch): a large portion of the libuv tests is failing,
    # which can end up killing the kokoro job due to gigabytes of error logs
    # generated. Remove the --build_only flag
    # once https://github.com/grpc/grpc/issues/17556 is fixed.
    test_jobs += _generate_jobs(languages=['c'],
                                iomgr_platforms=['uv'],
                                labels=['portability', 'corelang'],
                                extra_args=extra_args + ['--build_only'],
                                inner_jobs=inner_jobs,
                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
def _allowed_labels():
    """Returns a list of existing job labels."""
    # Fix: the accumulator initialization was missing from this excerpt;
    # a set is required by the .add() calls and deduplicates labels before
    # the sorted() below.
    all_labels = set()
    # Collect the label sets from every job the two factories can produce.
    for job in _create_test_jobs() + _create_portability_test_jobs():
        for label in job.labels:
            all_labels.add(label)
    return sorted(all_labels)
368 def _runs_per_test_type(arg_str):
369 """Auxiliary function to parse the "runs_per_test" flag."""
376 msg = '\'{}\' is not a positive integer'.format(arg_str)
377 raise argparse.ArgumentTypeError(msg)
if __name__ == "__main__":
    # Entry point: parse flags, build the job matrix, filter it, then run it
    # via jobset and render a top-level junit report.
    # NOTE(review): this excerpt is missing many source lines — long-option
    # names, const=/default=/dest=/type= keywords on several add_argument
    # calls, the 'extra_args = []' initialization and its guards, the loop
    # that filters all_jobs into 'jobs', the dry-run sys.exit path, and
    # trailing arguments of several calls. Verify against the full file.
    argp = argparse.ArgumentParser(
        description='Run a matrix of run_tests.py tests.')
    argp.add_argument('-j',
                      # NOTE(review): '/' is true division under Python 3, so
                      # this default is a float — confirm intended (// would
                      # give an int).
                      default=multiprocessing.cpu_count() / _DEFAULT_INNER_JOBS,
                      help='Number of concurrent run_tests.py instances.')
    argp.add_argument('-f',
                      choices=_allowed_labels(),
                      help='Filter targets to run by label with AND semantics.')
    argp.add_argument('--exclude',
                      choices=_allowed_labels(),
                      help='Exclude targets with any of given labels.')
    argp.add_argument('--build_only',
                      action='store_const',
                      help='Pass --build_only flag to run_tests.py instances.')
                      '--force_default_poller',
                      action='store_const',
                      help='Pass --force_default_poller to run_tests.py instances.')
    argp.add_argument('--dry_run',
                      action='store_const',
                      help='Only print what would be run.')
                      action='store_const',
                      help='Filters out tests irrelevant to pull request changes.')
                      default='origin/master',
                      help='Branch that pull request is requesting to merge into')
    argp.add_argument('--inner_jobs',
                      default=_DEFAULT_INNER_JOBS,
                      help='Number of jobs in each run_tests.py instance')
                      type=_runs_per_test_type,
                      help='How many times to run each tests. >1 runs implies ' +
                      'omitting passing test from the output & reports.')
    argp.add_argument('--max_time',
                      help='Maximum amount of time to run tests for' +
                      '(other tests will be skipped)')
                      action='store_const',
                      '(Deprecated, has no effect) Put reports into subdirectories to improve presentation of '
                      'results by Kokoro.')
    argp.add_argument('--bq_result_table',
                      help='Upload test results to a specified BQ table.')
    argp.add_argument('--extra_args',
                      nargs=argparse.REMAINDER,
                      help='Extra test args passed to each sub-script.')
    args = argp.parse_args()

    # Translate parsed flags into extra_args forwarded to every
    # run_tests.py invocation.
    extra_args.append('--build_only')
    if args.force_default_poller:
        extra_args.append('--force_default_poller')
    if args.runs_per_test > 1:
        extra_args.append('-n')
        extra_args.append('%s' % args.runs_per_test)
        extra_args.append('--quiet_success')
    if args.max_time > 0:
        extra_args.extend(('--max_time', '%d' % args.max_time))
    if args.bq_result_table:
        extra_args.append('--bq_result_table')
        extra_args.append('%s' % args.bq_result_table)
        extra_args.append('--measure_cpu_costs')
    extra_args.extend(args.extra_args)

    # Full matrix: basic tests plus portability tests.
    all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
               _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)

    # Keep a job only if it carries ALL -f labels and NONE of the
    # --exclude labels.
    if not args.filter or all(
            filter in job.labels for filter in args.filter):
        if not any(exclude_label in job.labels
                   for exclude_label in args.exclude):
    jobset.message('FAILED',
                   'No test suites match given criteria.',
    print('IMPORTANT: The changes you are testing need to be locally committed')
    print('because only the committed changes in the current branch will be')
    print('copied to the docker environment or into subworkspaces.')

    if args.filter_pr_tests:
        print('Looking for irrelevant tests to skip...')
        relevant_jobs = filter_tests(jobs, args.base_branch)
        if len(relevant_jobs) == len(jobs):
            print('No tests will be skipped.')
            print('These tests will be skipped:')
            skipped_jobs = list(set(jobs) - set(relevant_jobs))
            # Sort by shortnames to make printing of skipped tests consistent
            skipped_jobs.sort(key=lambda job: job.shortname)
            for job in list(skipped_jobs):
                print(' %s' % job.shortname)

    print('Will run these tests:')
        print(' %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
        print('--dry_run was used, exiting')

    jobset.message('START', 'Running test matrix.', do_newline=True)
    num_failures, resultset = jobset.run(jobs,
                                         newline_on_success=True,
    # Merge skipped tests into results to show skipped tests on report.xml
    ignored_num_skipped_failures, skipped_results = jobset.run(
        skipped_jobs, skip_jobs=True)
    resultset.update(skipped_results)
    report_utils.render_junit_xml_report(resultset,
                                         _report_filename(_MATRIX_REPORT_NAME),
                                         suite_name=_MATRIX_REPORT_NAME,
    if num_failures == 0:
        jobset.message('SUCCESS',
                       'All run_tests.py instances finished successfully.',
        jobset.message('FAILED',
                       'Some run_tests.py instances have failed.',