1c4d20ef6daeffa3c6b8b6ab5af02462a3e48b0c
[platform/upstream/grpc.git] / tools / run_tests / run_tests.py
1 #!/usr/bin/env python
2 # Copyright 2015 gRPC authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run tests in parallel."""
16
17 from __future__ import print_function
18
19 import argparse
20 import ast
21 import collections
22 import glob
23 import itertools
24 import json
25 import logging
26 import multiprocessing
27 import os
28 import os.path
29 import pipes
30 import platform
31 import random
32 import re
33 import socket
34 import subprocess
35 import sys
36 import tempfile
37 import traceback
38 import time
39 from six.moves import urllib
40 import uuid
41 import six
42
43 import python_utils.jobset as jobset
44 import python_utils.report_utils as report_utils
45 import python_utils.watch_dirs as watch_dirs
46 import python_utils.start_port_server as start_port_server
47 try:
48     from python_utils.upload_test_results import upload_results_to_bq
49 except (ImportError):
50     pass  # It's ok to not import because this is only necessary to upload results to BQ.
51
# Make ../gcp/utils importable (big_query_utils, used by get_bqtest_data).
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# Run everything from the repository root so all relative paths below resolve.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
58
# Environment forced onto language test wrapper scripts.
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Poll engines exercised per platform; platforms not listed fall back to 'all'.
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll'],
    'mac': ['poll'],
}

# One row of the BigQuery flakiness/cpu query (see get_bqtest_data).
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
69
70
def get_bqtest_data(limit=None):
    """Fetch per-test flakiness and measured cpu from BigQuery.

    Aggregates the last week of results for the current platform
    (portability jobs excluded).

    Args:
      limit: optional row cap appended as a LIMIT clause.

    Returns:
      A list of BigQueryTestData(name, flaky, cpu) rows.
    """
    import big_query_utils

    client = big_query_utils.create_big_query()
    query = """
SELECT
  filtered_test_name,
  SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
  MAX(cpu_measured) + 0.01 as cpu
  FROM (
  SELECT
    REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
    result, cpu_measured
  FROM
    [grpc-testing:jenkins_test_results.aggregate_results]
  WHERE
    timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
    AND platform = '""" + platform_string() + """'
    AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
GROUP BY
  filtered_test_name"""
    if limit:
        query += " limit {}".format(limit)
    query_job = big_query_utils.sync_query_job(client, 'grpc-testing', query)
    page = client.jobs().getQueryResults(
        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
    results = []
    for row in page['rows']:
        fields = row['f']
        results.append(
            BigQueryTestData(fields[0]['v'], fields[1]['v'] == 'true',
                             float(fields[2]['v'])))
    return results
102
103
def platform_string():
    """Return jobset's platform id (e.g. 'linux', 'mac', 'windows')."""
    return jobset.platform_string()
106
107
# Default per-test timeout, and the longer timeout allowed for pre-build steps.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
110
111
def run_shell_command(cmd, env=None, cwd=None):
    """Run `cmd` through the shell, returning its captured stdout.

    Previously the captured output was discarded; returning it is a
    backward-compatible generalization (callers that ignore the return
    value are unaffected).

    Args:
      cmd: shell command line string (executed with shell=True).
      env: optional environment mapping for the child process.
      cwd: optional working directory for the child process.

    Returns:
      The command's stdout (bytes).

    Raises:
      subprocess.CalledProcessError: if the command exits non-zero; the
        failure is logged (with output) before re-raising.
    """
    try:
        return subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as e:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            e.cmd, e.returncode, e.output)
        raise
120
121
def max_parallel_tests_for_current_platform():
    """Upper bound on concurrently running test jobs for this platform."""
    # Excessive test parallelism has so far only caused problems on windows.
    return 64 if jobset.platform_string() == 'windows' else 1024
128
129
130 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """Build/run configuration: compile with CONFIG=<config>, run binaries.

    Note: job_spec() consults the module-level ``args``, ``flaky_tests`` and
    ``shortname_to_cpu`` globals, so it is only usable after command-line
    parsing has populated them.
    """

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        # `tool_prefix` previously defaulted to a shared mutable [] — every
        # Config instance aliased the same list, so a mutation in one would
        # leak into all others. Use a None sentinel instead.
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = [] if tool_prefix is None else tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline: a list of strings specifying the command line the test
            would like to run.
          timeout_seconds: base timeout, scaled by this config's multiplier;
            None disables the timeout entirely.
          shortname: human-readable test id; also keys the flaky/cpu
            overrides loaded from BigQuery.
          environ: extra environment variables layered over this config's
            own environment (mutable-default fixed: was `environ={}`).
          cpu_cost: relative CPU reservation for the job scheduler.
          flaky: mark the test flaky to enable retries.
        """
        if environ is None:
            environ = {}
        actual_environ = self.environ.copy()
        for k, v in environ.items():
            actual_environ[k] = v
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier * timeout_seconds
                             if timeout_seconds else None),
            # flaky tests get retried; --allow_flakes extends that to all tests
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
177
178
def get_c_tests(travis, test_lang):
    """Load C/C++ test targets from the generated tests.json manifest.

    Args:
      travis: if True, filter against 'ci_platforms' and drop flaky targets.
      test_lang: value matched against each target's 'language' field.

    Returns:
      List of target dicts runnable on the current platform.
    """
    # (dead local `out = []` removed — it was never used)
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
189
190
191 def _check_compiler(compiler, supported_compilers):
192     if compiler not in supported_compilers:
193         raise Exception(
194             'Compiler %s not supported (on this platform).' % compiler)
195
196
197 def _check_arch(arch, supported_archs):
198     if arch not in supported_archs:
199         raise Exception('Architecture %s not supported.' % arch)
200
201
202 def _is_use_docker_child():
203     """Returns True if running running as a --use_docker child."""
204     return True if os.getenv('RUN_TESTS_COMMAND') else False
205
206
207 _PythonConfigVars = collections.namedtuple('_ConfigVars', [
208     'shell',
209     'builder',
210     'builder_prefix_arguments',
211     'venv_relative_python',
212     'toolchain',
213     'runner',
214     'test_name',
215     'iomgr_platform',
216 ])
217
218
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build the PythonConfig (build + run commands) for one CPython venv."""
    name += '_' + config_vars.iomgr_platform
    interpreter = _python_pattern_function(major=major, minor=minor, bits=bits)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(name, config_vars.venv_relative_python[0]),
        config_vars.test_name
    ]
    return PythonConfig(name, build_cmd, run_cmd)
230
231
def _pypy_config_generator(name, major, config_vars):
    """Build the PythonConfig (build + run commands) for one PyPy venv."""
    interpreter = _pypy_pattern_function(major=major)
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments + [interpreter] +
                 [name] + config_vars.venv_relative_python +
                 config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(name, config_vars.venv_relative_python[0])
    ]
    return PythonConfig(name, build_cmd, run_cmd)
241
242
243 def _python_pattern_function(major, minor, bits):
244     # Bit-ness is handled by the test machine's environment
245     if os.name == "nt":
246         if bits == "64":
247             return '/c/Python{major}{minor}/python.exe'.format(
248                 major=major, minor=minor, bits=bits)
249         else:
250             return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
251                 major=major, minor=minor, bits=bits)
252     else:
253         return 'python{major}.{minor}'.format(major=major, minor=minor)
254
255
256 def _pypy_pattern_function(major):
257     if major == '2':
258         return 'pypy'
259     elif major == '3':
260         return 'pypy3'
261     else:
262         raise ValueError("Unknown PyPy major version")
263
264
class CLanguage(object):
    """Test language for the C/C++ core targets listed in generated/tests.json."""

    def __init__(self, make_target, test_lang):
        # make_target: suffix for make targets (e.g. 'buildtests_<target>');
        # test_lang: value matched against each target's 'language' field.
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Select build system (cmake vs make), compiler options, docker distro."""
        self.config = config
        self.args = args
        if self.platform == 'windows':
            # windows always builds with cmake + MSBuild
            _check_compiler(
                self.args.compiler,
                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._use_cmake = True
            self._make_options = []
        elif self.args.compiler == 'cmake':
            _check_arch(self.args.arch, ['default'])
            self._use_cmake = True
            self._docker_distro = 'jessie'
            self._make_options = []
        else:
            # classic Makefile build; compiler choice decides distro + CC/CXX
            self._use_cmake = False
            self._docker_distro, self._make_options = self._compiler_options(
                self.args.use_docker, self.args.compiler)
        if args.iomgr_platform == "uv":
            # libuv iomgr: prefer pkg-config flags, fall back to plain -luv
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand each test target into JobSpecs.

        One job is created per (target, poll strategy) pair; gtest and
        benchmark binaries are further split into one job per test case by
        querying the binary's own --*_list_tests flag.
        """
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            # non-polling targets run once with strategy 'none'; others run
            # once per platform poll engine (or 'all' if platform not listed)
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                    polling_strategy,
                    'GRPC_VERBOSITY':
                    'DEBUG'
                }
                # propagate an externally-chosen DNS resolver, if any
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                # binary location differs per build system / platform
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (
                        _MSBUILD_CONFIG[self.config.build_config],
                        target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    # 'capacity' reserves one job slot per available core
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    list_test_command = None
                    filter_test_command = None

                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            # '$' anchors the filter so e.g. 'BM_Foo' does not
                            # also match 'BM_FooBar'
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' % (' '.join(cmdline),
                                                         shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # drop trailing '#'-annotations (e.g. GetParam())
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                # unindented line: a new test suite prefix
                                base = line.strip()
                            else:
                                # indented line: a test case within `base`
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' % (' '.join(cmdline),
                                                             shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # plain test binary: a single job runs the whole binary
                        cmdline = [binary] + target['args']
                        shortname = target.get('shortname', ' '.join(
                            pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
                                * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        return self._make_options

    def pre_build_steps(self):
        # cmake builds need an explicit generation step before make/msbuild
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
                self._cmake_generator_option, self._cmake_arch_option
            ]]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
        else:
            return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_make_options(self, version_suffix=''):
        # ubsan links with clang++ so the C++ runtime needed by the
        # sanitizer is available at link time
        if self.args.config == 'ubsan':
            return [
                'CC=clang%s' % version_suffix,
                'CXX=clang++%s' % version_suffix,
                'LD=clang++%s' % version_suffix,
                'LDXX=clang++%s' % version_suffix
            ]

        return [
            'CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix
        ]

    def _gcc_make_options(self, version_suffix):
        return [
            'CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and make options to use for given compiler."""
        # non-default compilers are only installed in the docker images
        if not use_docker and not _is_use_docker_child():
            _check_compiler(compiler, ['default'])

        if compiler == 'gcc4.9' or compiler == 'default':
            return ('jessie', [])
        elif compiler == 'gcc4.8':
            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.2':
            return ('ubuntu1710', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.4':
            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
            return ('ubuntu1404', self._clang_make_options())
        elif compiler == 'clang3.5':
            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_make_options(version_suffix='-3.7'))
        elif compiler == 'clang7.0':
            # clang++-7.0 alias doesn't exist and there are no other clang versions
            # installed.
            return ('sanitizers_jessie', self._clang_make_options())
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
547
548
549 # This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Runs the Node tests hosted in the grpc/grpc-node repository."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        compiler = self.args.compiler
        if compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        elif compiler.startswith('electron'):
            self.runtime = 'electron'
            self.node_version = compiler[len('electron'):]
        else:
            # strip the leading "node" to obtain the version number
            self.runtime = 'node'
            self.node_version = compiler[len('node'):]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        if self.platform != 'windows':
            return [
                self.config.job_spec(
                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                    None,
                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            ]
        return [
            self.config.job_spec(
                ['tools\\run_tests\\helper_scripts\\run_node.bat'])
        ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
615
616
class PhpLanguage(object):
    """PHP tests, built against the embedded C core."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        # bundle OpenSSL/zlib so the built extension is self-contained
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # the extension build needs both static and shared core libraries
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        arch_suffix = _docker_arch_suffix(self.args.arch)
        return 'tools/dockerfile/test/php_jessie_%s' % arch_suffix

    def __str__(self):
        return 'php'
656
657
class Php7Language(object):
    """PHP 7 tests, built against the embedded C core."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        # bundle OpenSSL/zlib so the built extension is self-contained
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        spec = self.config.job_spec(
            ['src/php/bin/run_tests.sh'],
            environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        # the extension build needs both static and shared core libraries
        return ['static_c', 'shared_c']

    def make_options(self):
        return self._make_options

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        arch_suffix = _docker_arch_suffix(self.args.arch)
        return 'tools/dockerfile/test/php7_jessie_%s' % arch_suffix

    def __str__(self):
        return 'php7'
697
698
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of commands (named s.t. 'what it says on the tin' applies)

    name:  label of this python runtime (e.g. 'py27_native')
    build: command list that creates and populates the venv
    run:   command list that executes the tests inside it
    """
702
703
704 class PythonLanguage(object):
705
    def configure(self, config, args):
        # Resolve the set of python runtimes (PythonConfigs) to build and test.
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)
710
    def test_specs(self):
        """Create one JobSpec per (python runtime, test suite) pair."""
        # load list of known test suites
        with open(
                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
            tests_json = json.load(tests_json_file)
        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
        return [
            self.config.job_spec(
                config.run,
                timeout_seconds=5 * 60,
                # the runner filters down to a single suite via this env var
                environ=dict(
                    list(environment.items()) + [(
                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
                shortname='%s.test.%s' % (config.name, suite_name),
            ) for suite_name in tests_json for config in self.pythons
        ]
727
    def pre_build_steps(self):
        # No pre-build needed: build_steps() creates the venvs directly.
        return []
730
    def make_targets(self):
        # Nothing built via make; the helper scripts drive the build.
        return []
733
    def make_options(self):
        # No make options since there are no make targets.
        return []
736
    def build_steps(self):
        # One build command per python runtime (venv) under test.
        return [config.build for config in self.pythons]
739
    def post_tests_steps(self):
        # Only coverage ('gcov') builds need post-processing of results.
        if self.config.build_config != 'gcov':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
745
    def makefile_name(self):
        # Unused for python builds, but part of the common language interface.
        return 'Makefile'
748
    def dockerfile_dir(self):
        # Image is keyed on the python-version manager and the arch suffix.
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))
752
    def _python_manager_name(self):
        """Choose the docker image to use based on python version."""
        if self.args.compiler in [
                'python2.7', 'python3.5', 'python3.6', 'python3.7'
        ]:
            return 'stretch_' + self.args.compiler[len('python'):]
        elif self.args.compiler == 'python_alpine':
            return 'alpine'
        elif self.args.compiler == 'python3.4':
            # 3.4 is only available in the older jessie-based image
            return 'jessie'
        else:
            # default (covers 'default', pypy and anything else)
            return 'stretch_3.7'
765
766     def _get_pythons(self, args):
767         """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
768         if args.arch == 'x86':
769             bits = '32'
770         else:
771             bits = '64'
772
773         if os.name == 'nt':
774             shell = ['bash']
775             builder = [
776                 os.path.abspath(
777                     'tools/run_tests/helper_scripts/build_python_msys2.sh')
778             ]
779             builder_prefix_arguments = ['MINGW{}'.format(bits)]
780             venv_relative_python = ['Scripts/python.exe']
781             toolchain = ['mingw32']
782         else:
783             shell = []
784             builder = [
785                 os.path.abspath(
786                     'tools/run_tests/helper_scripts/build_python.sh')
787             ]
788             builder_prefix_arguments = []
789             venv_relative_python = ['bin/python']
790             toolchain = ['unix']
791
792         test_command = 'test_lite'
793         if args.iomgr_platform == 'gevent':
794             test_command = 'test_gevent'
795         runner = [
796             os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
797         ]
798
799         config_vars = _PythonConfigVars(
800             shell, builder, builder_prefix_arguments, venv_relative_python,
801             toolchain, runner, test_command, args.iomgr_platform)
802         python27_config = _python_config_generator(
803             name='py27',
804             major='2',
805             minor='7',
806             bits=bits,
807             config_vars=config_vars)
808         python34_config = _python_config_generator(
809             name='py34',
810             major='3',
811             minor='4',
812             bits=bits,
813             config_vars=config_vars)
814         python35_config = _python_config_generator(
815             name='py35',
816             major='3',
817             minor='5',
818             bits=bits,
819             config_vars=config_vars)
820         python36_config = _python_config_generator(
821             name='py36',
822             major='3',
823             minor='6',
824             bits=bits,
825             config_vars=config_vars)
826         python37_config = _python_config_generator(
827             name='py37',
828             major='3',
829             minor='7',
830             bits=bits,
831             config_vars=config_vars)
832         pypy27_config = _pypy_config_generator(
833             name='pypy', major='2', config_vars=config_vars)
834         pypy32_config = _pypy_config_generator(
835             name='pypy3', major='3', config_vars=config_vars)
836
837         if args.compiler == 'default':
838             if os.name == 'nt':
839                 return (python35_config,)
840             else:
841                 return (
842                     python27_config,
843                     python37_config,
844                 )
845         elif args.compiler == 'python2.7':
846             return (python27_config,)
847         elif args.compiler == 'python3.4':
848             return (python34_config,)
849         elif args.compiler == 'python3.5':
850             return (python35_config,)
851         elif args.compiler == 'python3.6':
852             return (python36_config,)
853         elif args.compiler == 'python3.7':
854             return (python37_config,)
855         elif args.compiler == 'pypy':
856             return (pypy27_config,)
857         elif args.compiler == 'pypy3':
858             return (pypy32_config,)
859         elif args.compiler == 'python_alpine':
860             return (python27_config,)
861         elif args.compiler == 'all_the_cpythons':
862             return (
863                 python27_config,
864                 python34_config,
865                 python35_config,
866                 python36_config,
867                 python37_config,
868             )
869         else:
870             raise Exception('Compiler %s not supported.' % args.compiler)
871
872     def __str__(self):
873         return 'python'
874
875
class RubyLanguage(object):
    """Driver for building and running the Ruby test suite."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        # Ruby only supports the default compiler.
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        # The regular specs plus the end2end suite, which is slower and
        # therefore gets a longer timeout.
        return [
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby.sh'],
                timeout_seconds=10 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
                timeout_seconds=20 * 60,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        # Nothing built via make; the real work happens in build_steps.
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
921
922
class CSharpLanguage(object):
    """Driver for building and running the C# test suite."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default', 'coreclr'])
        if self.platform == 'windows':
            # Windows builds always target x64 through cmake.
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            self._docker_distro = 'stretch'

    def test_specs(self):
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']

        # Work out where the built assemblies live and how to launch them.
        if self.args.compiler == 'coreclr':
            assembly_subdir = 'bin/%s/netcoreapp2.1' % msbuild_config
            assembly_extension = '.dll'
            runtime_cmd = ['dotnet', 'exec']
        else:
            assembly_subdir = 'bin/%s/net45' % msbuild_config
            assembly_extension = '.exe'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (
                assembly, assembly_subdir, assembly, assembly_extension)
            if self.config.build_config != 'gcov' or self.platform != 'windows':
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [
                        assembly_file, '--test=%s' % test
                    ] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                run_exclusive = 1000000
                specs.append(
                    self.config.job_spec(
                        cmdline,
                        shortname='csharp.coverage.%s' % assembly,
                        cpu_cost=run_exclusive,
                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        # no need to set x86 specific flags as run_tests.py
        # currently forbids x86 C# builds on both Linux and MacOS.
        return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
1041
1042
class ObjCLanguage(object):
    """Driver for building and running the Objective-C tests."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        # The two real test suites come first; they get a long timeout and an
        # exclusive cpu_cost because they drive the iOS simulator.
        out = [
            self.config.job_spec(
                ['src/objective-c/tests/run_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='objc-plugin-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
        ]
        # Build-only smoke tests of the example apps:
        # (shortname suffix, xcode scheme, example path, extra environment).
        # NOTE: 'switftsample' reproduces the historical shortname exactly.
        examples = [
            ('helloworld', 'HelloWorld', 'examples/objective-c/helloworld',
             {}),
            ('routeguide', 'RouteGuideClient',
             'examples/objective-c/route_guide', {}),
            ('authsample', 'AuthSample', 'examples/objective-c/auth_sample',
             {}),
            ('sample', 'Sample', 'src/objective-c/examples/Sample', {}),
            ('sample-frameworks', 'Sample', 'src/objective-c/examples/Sample',
             {'FRAMEWORKS': 'YES'}),
            ('switftsample', 'SwiftSample',
             'src/objective-c/examples/SwiftSample', {}),
        ]
        for suffix, scheme, example_path, extra_env in examples:
            environ = {'SCHEME': scheme, 'EXAMPLE_PATH': example_path}
            environ.update(extra_env)
            out.append(
                self.config.job_spec(
                    ['src/objective-c/tests/build_one_example.sh'],
                    timeout_seconds=10 * 60,
                    shortname='objc-build-example-%s' % suffix,
                    cpu_cost=1e6,
                    environ=environ))
        out.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
                timeout_seconds=20 * 60,
                shortname='cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return out

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['interop_server']

    def make_options(self):
        return []

    def build_steps(self):
        return [
            ['src/objective-c/tests/build_tests.sh'],
            ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
        ]

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjC tests run directly on MacOS workers, never under docker.
        return None

    def __str__(self):
        return 'objc'
1153
1154
class Sanity(object):
    """Pseudo-language that runs the repository sanity checks."""

    def configure(self, config, args):
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """One job per entry in sanity_tests.yaml."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                # Already inside a docker container: the clang checks must not
                # try to spawn nested docker containers.
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
            # safe_load suffices for this plain-data config and avoids
            # PyYAML's unsafe full loader (yaml.load without a Loader is
            # deprecated and can construct arbitrary objects).
            return [
                self.config.job_spec(
                    cmd['script'].split(),
                    timeout_seconds=30 * 60,
                    environ=environ,
                    cpu_cost=cmd.get('cpu_cost', 1))
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
1200
1201
# different configurations we can run under
# configs.json is a generated file; each entry becomes a Config keyed by its
# config name (e.g. 'opt', 'dbg', 'gcov').
# NOTE(review): it is parsed with ast.literal_eval rather than json.load, so
# the generator is presumably guaranteed to emit Python-literal-compatible
# data (no true/false/null) — confirm before changing the parser.
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = dict(
        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# Languages selectable via --language, keyed by their command-line name.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# Maps a --config name to the msbuild/cmake build configuration to use on
# Windows (gcov still builds Debug there).
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
1225
1226
1227 def _windows_arch_option(arch):
1228     """Returns msbuild cmdline option for selected architecture."""
1229     if arch == 'default' or arch == 'x86':
1230         return '/p:Platform=Win32'
1231     elif arch == 'x64':
1232         return '/p:Platform=x64'
1233     else:
1234         print('Architecture %s not supported.' % arch)
1235         sys.exit(1)
1236
1237
def _check_arch_option(arch):
    """Checks that the requested architecture is usable on this platform.

    Exits the process with an error message when it is not; returns None
    otherwise.
    """
    if platform_string() == 'windows':
        # Reuses the msbuild option mapping purely for validation; it exits
        # on an unsupported value.
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print('Architecture %s does not match current runtime architecture.'
                  % arch)
            sys.exit(1)
    else:
        # Fixed: this branch previously read the global 'args.arch' instead of
        # the 'arch' parameter it was given.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
1260
1261
1262 def _docker_arch_suffix(arch):
1263     """Returns suffix to dockerfile dir to use."""
1264     if arch == 'default' or arch == 'x64':
1265         return 'x64'
1266     elif arch == 'x86':
1267         return 'x86'
1268     else:
1269         print('Architecture %s not supported with current settings.' % arch)
1270         sys.exit(1)
1271
1272
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

       Returns:
           A positive integer or 0, the latter indicating an infinite number of
           runs.

       Raises:
           argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    except ValueError:
        # Only ValueError (bad literal or non-positive value) becomes an
        # argparse error; a bare except here would also swallow
        # KeyboardInterrupt/SystemExit.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
1292
1293
def percent_type(arg_str):
    """argparse type function validating a percentage in the [0, 100] range."""
    pct = float(arg_str)
    if pct < 0 or pct > 100:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    return pct
1300
1301
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """True when a and b differ by at most the relative/absolute tolerance."""
    allowed_gap = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= allowed_gap
1305
1306
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# -- test selection and iteration flags --
argp.add_argument(
    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument(
    '-p',
    '--sample_percent',
    default=100.0,
    type=percent_type,
    help='Run a random sample with that percentage of tests')
argp.add_argument(
    '-f', '--forever', default=False, action='store_const', const=True)
argp.add_argument(
    '-t', '--travis', default=False, action='store_const', const=True)
argp.add_argument(
    '--newline_on_success', default=False, action='store_const', const=True)
argp.add_argument(
    '-l',
    '--language',
    choices=sorted(_LANGUAGES.keys()),
    nargs='+',
    required=True)
argp.add_argument(
    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
# -- environment / toolchain flags --
argp.add_argument(
    '--use_docker',
    default=False,
    action='store_const',
    const=True,
    help='Run all the tests under docker. That provides ' +
    'additional isolation and prevents the need to install ' +
    'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
        'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
        'python2.7', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'pypy',
        'pypy3', 'python_alpine', 'all_the_cpythons', 'electron1.3',
        'electron1.6', 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument(
    '--iomgr_platform',
    choices=['native', 'uv', 'gevent'],
    default='native',
    help='Selects iomgr platform to build on')
argp.add_argument(
    '--build_only',
    default=False,
    action='store_const',
    const=True,
    help='Perform all the build steps but don\'t run any tests.')
argp.add_argument(
    '--measure_cpu_costs',
    default=False,
    action='store_const',
    const=True,
    help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
# -- reporting flags --
argp.add_argument(
    '-x',
    '--xml_report',
    default=None,
    type=str,
    help='Generates a JUnit-compatible XML report')
argp.add_argument(
    '--report_suite_name',
    default='tests',
    type=str,
    help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
# -- polling-engine flags --
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument(
    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    nargs='?',
    help='Upload test results to a specified BQ table.')
argp.add_argument(
    '--auto_set_flakes',
    default=False,
    const=True,
    action='store_const',
    help=
    'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
)
args = argp.parse_args()
1454
# Historical flakiness / cpu-cost data, filled in only with --auto_set_flakes.
flaky_tests = set()
shortname_to_cpu = {}
if args.auto_set_flakes:
    try:
        # Pull per-test history from BigQuery so known-flaky tests get retries
        # and measured cpu costs can be reused.
        for test in get_bqtest_data():
            if test.flaky: flaky_tests.add(test.name)
            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
    except:
        # Best effort only: fall back to defaults when BQ is unreachable.
        print(
            "Unexpected error getting flaky tests: %s" % traceback.format_exc())

if args.force_default_poller:
    # An empty map means no GRPC_POLL_STRATEGY variations are attempted.
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    # Restrict polling engines to the user-supplied comma-separated list.
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
1470
jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # Each spec is SUBMODULE_NAME[:BRANCH]; BRANCH defaults to master.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    def git(cmd, cwd=cwd):
        # cwd is bound as a default argument so each loop iteration keeps its
        # own submodule directory (avoids the late-binding closure pitfall).
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')
1502
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    # On CI, trade the DEBUG verbosity for api tracing.
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

# Instantiate and configure each requested language driver.
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C & C++ to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option
                for lang in languages
                for make_option in lang.make_options()
            ]))
1530
if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        # Give the user a moment to read the warning before work starts.
        time.sleep(5)

    # All selected languages must share a single docker image.
    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        print('Languages to be tested require running under different docker '
              'images.')
        sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # Re-invoke this script inside the container, minus the --use_docker flag.
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    # The docker child did all the work; nothing further to do here.
    sys.exit(0)

_check_arch_option(args.arch)
1571
1572
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Builds the list of jobs that compile the given targets for config cfg."""
    if platform_string() == 'windows':
        # On windows, cmake drives msbuild; one job per target.
        return [
            jobset.JobSpec(
                [
                    'cmake', '--build', '.', '--target',
                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
                ],
                cwd=os.path.dirname(makefile),
                timeout_seconds=None) for target in targets
        ]
    if not targets:
        return []
    if makefile.startswith('cmake/build/'):
        # With cmake, we've passed all the build configuration in the pre-build step already
        return [
            jobset.JobSpec(
                [os.getenv('MAKE', 'make'), '-j',
                 '%d' % args.jobs] + targets,
                cwd='cmake/build',
                timeout_seconds=None)
        ]
    # Plain Makefile build.
    cmdline = [
        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
        '%d' % args.jobs,
        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
        'CONFIG=%s' % cfg, 'Q='
    ] + language_make_options
    if args.travis:
        cmdline += ['JENKINS_BUILD=1']
    return [jobset.JobSpec(cmdline + targets, timeout_seconds=None)]
1609
1610
# Aggregate the make targets requested by all selected languages, grouped by
# the makefile that builds them.
make_targets = {}
for l in languages:
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))
1616
1617
def build_step_environ(cfg):
    """Returns the environment dict for build steps under config cfg.

    CONFIG is always present; MSBUILD_CONFIG is added only when cfg maps to
    an msbuild build configuration.
    """
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
    if msbuild_cfg is None:
        return {'CONFIG': cfg}
    return {'CONFIG': cfg, 'MSBUILD_CONFIG': msbuild_cfg}
1624
1625
# Pre-build steps for every selected language, deduplicated; these can be
# flaky (network fetches etc.) so they get a couple of retries.
build_steps = list(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
            flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    # One make/cmake job per makefile, covering all its targets.
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(
            cmdline,
            environ=build_step_environ(build_config),
            timeout_seconds=None)
        for l in languages
        for cmdline in l.build_steps()))

# Cleanup/reporting steps to run after the tests themselves finished.
post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
1656
1657
def _shut_down_legacy_server(legacy_server_port):
    """Best-effort shutdown of a legacy port server on *legacy_server_port*.

    Probes /version_number first (10s timeout); only when the probe returns
    a parseable integer is the server asked to /quitquitquit. Any probe
    failure means no healthy server is running, so there is nothing to do.
    """
    try:
        # int() validates the response; a non-numeric body aborts shutdown.
        int(
            urllib.request.urlopen(
                'http://localhost:%d/version_number' % legacy_server_port,
                timeout=10).read())
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; anything else means the server isn't reachable.
        pass
    else:
        urllib.request.urlopen(
            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
1669
1670
1671 def _calculate_num_runs_failures(list_of_results):
1672     """Caculate number of runs and failures for a particular test.
1673
1674   Args:
1675     list_of_results: (List) of JobResult object.
1676   Returns:
1677     A tuple of total number of runs and failures.
1678   """
1679     num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
1680     num_failures = 0
1681     for jobresult in list_of_results:
1682         if jobresult.retries > 0:
1683             num_runs += jobresult.retries
1684         if jobresult.num_failures > 0:
1685             num_failures += jobresult.num_failures
1686     return num_runs, num_failures
1687
1688
1689 # _build_and_run results
class BuildAndRunError(object):
    """Error categories returned by _build_and_run().

    Each attribute is a unique sentinel compared by identity; callers test
    e.g. `BuildAndRunError.BUILD in errors` on the returned list.
    """

    BUILD, TEST, POST_TEST = object(), object(), object()
1695
1696
def _has_epollexclusive():
    """Report whether the EPOLLEXCLUSIVE probe binary runs successfully.

    Executes bins/<config>/check_epollexclusive and returns True on a zero
    exit. A missing or non-runnable binary (e.g. languages other than C, or
    Windows) yields False.
    """
    probe = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(probe):
        return False
    try:
        subprocess.check_call(probe)
    except (subprocess.CalledProcessError, OSError):
        # Non-zero exit, or the binary cannot be executed on this platform.
        return False
    return True
1709
1710
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Args:
      check_cancelled: zero-arg callable polled by jobset.run; a True
        return aborts the test run.
      newline_on_success: (bool) output-formatting flag passed to jobset.run.
      xml_report: optional path for a JUnit-style XML report.
      build_only: if True, stop after the build phase (optionally writing
        the XML report for the build results).
    Returns:
      A list of BuildAndRunError sentinels; empty on full success.
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps,
        maxjobs=1,
        stop_on_failure=True,
        newline_on_success=newline_on_success,
        travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the 'epollex' polling strategy when the probe binary reports the
    # kernel lacks EPOLLEXCLUSIVE support (skipped on travis).
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
    )]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists (CPU-load processes run alongside the tests)
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        # runs_per_test == 0 requests an endless run.
        infinite_runs = runs_per_test == 0
        # Collect test specs matching --regex and not matching --regex_exclude.
        one_run = set(
            spec for language in languages for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want our test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                      ) > 0, 'Must have at least one test for a -n inf run'
        # Repeat the run set forever (for -n inf) or runs_per_test times,
        # then flatten into one stream of job specs.
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # Summarize per-test outcomes: all-runs-failed => FAILED,
            # some-runs-failed => FLAKE.
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message(
                            'FLAKE',
                            '%s [%d/%d runs flaked]' % (k, num_failures,
                                                        num_runs),
                            do_newline=True)
    finally:
        # Always reap antagonists and emit BQ/XML reports, even on abort.
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)

    # Post-test steps run to completion even if some fail.
    number_failures, _ = jobset.run(
        post_tests_steps,
        maxjobs=1,
        stop_on_failure=False,
        newline_on_success=newline_on_success,
        travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
1840
1841
if forever:
    # Watch mode: rebuild & rerun whenever the source tree changes.
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: previously `errors = _build_and_run(...) == 0` compared
        # the returned *list* to 0 (always False) and `success` was never
        # updated, so the recovery message below could never fire.
        errors = _build_and_run(
            check_cancelled=have_files_changed,
            newline_on_success=False,
            build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message(
                'SUCCESS',
                'All tests are now passing properly',
                do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Poll for file changes before kicking off the next pass.
        while not have_files_changed():
            time.sleep(1)
else:
    # Single pass: run once and exit with a bitmask describing what failed.
    errors = _build_and_run(
        check_cancelled=lambda: False,
        newline_on_success=args.newline_on_success,
        xml_report=args.xml_report,
        build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code bits: 1 = build failure, 2 = test failure, 4 = post-test
    # step failure (combinable).
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)