Imported Upstream version 1.33.1
[platform/upstream/grpc.git] / tools / run_tests / run_tests.py
1 #!/usr/bin/env python
2 # Copyright 2015 gRPC authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 #     http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run tests in parallel."""
16
17 from __future__ import print_function
18
19 import argparse
20 import ast
21 import collections
22 import glob
23 import itertools
24 import json
25 import logging
26 import multiprocessing
27 import os
28 import os.path
29 import pipes
30 import platform
31 import random
32 import re
33 import socket
34 import subprocess
35 import sys
36 import tempfile
37 import traceback
38 import time
39 from six.moves import urllib
40 import uuid
41 import six
42
43 import python_utils.jobset as jobset
44 import python_utils.report_utils as report_utils
45 import python_utils.watch_dirs as watch_dirs
46 import python_utils.start_port_server as start_port_server
# Optional dependency: uploading results to BigQuery is not needed for a
# plain local test run, so a missing module is tolerated.
try:
    from python_utils.upload_test_results import upload_results_to_bq
except ImportError:  # no parens needed around a single exception type
    pass  # It's ok to not import because this is only necessary to upload results to BQ.
51
# Make tools/gcp/utils importable (helper modules live there).
gcp_utils_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
sys.path.append(gcp_utils_dir)

# Absolute path of the repository root; everything below runs from there.
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(_ROOT)
58
# Environment forced onto wrapper-script test jobs (see job_spec callers).
_FORCE_ENVIRON_FOR_WRAPPERS = {
    'GRPC_VERBOSITY': 'DEBUG',
}

# Poll engines exercised per platform; platforms not listed fall back to
# ['all'] (see CLanguage.test_specs).
_POLLING_STRATEGIES = {
    'linux': ['epollex', 'epoll1', 'poll'],
    'mac': ['poll'],
}
67
68
def platform_string():
    """Return jobset's platform name (compared against 'linux'/'mac'/'windows' in this file)."""
    return jobset.platform_string()
71
72
# Default per-test timeout (may be scaled by Config.timeout_multiplier).
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
# Timeout for the cmake-configure style pre-build steps.
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
75
76
def run_shell_command(cmd, env=None, cwd=None):
    """Execute *cmd* through the shell; log and re-raise on a non-zero exit.

    Args:
      cmd: command line as a single string (run with shell=True).
      env: optional environment mapping for the child process.
      cwd: optional working directory for the child process.

    Raises:
      subprocess.CalledProcessError: when the command exits non-zero.
    """
    try:
        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
    except subprocess.CalledProcessError as error:
        logging.exception(
            "Error while running command '%s'. Exit status %d. Output:\n%s",
            error.cmd, error.returncode, error.output)
        raise
85
86
def max_parallel_tests_for_current_platform():
    """Return the cap on concurrently running test jobs for this platform.

    Too much test parallelization has only been seen to be a problem
    so far on windows, hence the much lower cap there.
    """
    return 64 if jobset.platform_string() == 'windows' else 1024
93
94
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration (stored into environ['CONFIG']) plus per-test job settings."""

    def __init__(self,
                 config,
                 environ=None,
                 timeout_multiplier=1,
                 tool_prefix=None,
                 iomgr_platform='native'):
        """Args:
          config: build configuration name; also written to environ['CONFIG'].
          environ: extra environment variables associated with this config.
          timeout_multiplier: factor applied to every job's timeout.
          tool_prefix: command-line prefix prepended to each test command line.
          iomgr_platform: IO manager flavor this config targets.
        """
        if environ is None:
            environ = {}
        # None default instead of a shared mutable [] default argument.
        if tool_prefix is None:
            tool_prefix = []
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix
        self.timeout_multiplier = timeout_multiplier
        self.iomgr_platform = iomgr_platform

    def job_spec(self,
                 cmdline,
                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None,
                 environ=None,
                 cpu_cost=1.0,
                 flaky=False):
        """Construct a jobset.JobSpec for a test under this config.

        Args:
          cmdline: a list of strings specifying the command line the test
            would like to run.
          timeout_seconds: base timeout, scaled by self.timeout_multiplier;
            None disables the timeout.
          shortname: short test name; also keys into the module-level
            flaky_tests / shortname_to_cpu overrides.
          environ: extra environment overlaid on this config's environ.
          cpu_cost: relative CPU cost (may be overridden via shortname_to_cpu).
          flaky: enables flake/timeout retries (also triggered by flaky_tests
            membership or the --allow_flakes flag).
        """
        actual_environ = self.environ.copy()
        # None default instead of a shared mutable {} default argument.
        for k, v in (environ or {}).items():
            actual_environ[k] = v
        if not flaky and shortname and shortname in flaky_tests:
            flaky = True
        if shortname in shortname_to_cpu:
            cpu_cost = shortname_to_cpu[shortname]
        return jobset.JobSpec(
            cmdline=self.tool_prefix + cmdline,
            shortname=shortname,
            environ=actual_environ,
            cpu_cost=cpu_cost,
            timeout_seconds=(self.timeout_multiplier *
                             timeout_seconds if timeout_seconds else None),
            flake_retries=4 if flaky or args.allow_flakes else 0,
            timeout_retries=1 if flaky or args.allow_flakes else 0)
142
143
def get_c_tests(travis, test_lang):
    """Load tests.json and return the C/C++ targets runnable here.

    Args:
      travis: when True, match against each target's 'ci_platforms' list and
        drop targets marked flaky; otherwise match against 'platforms'.
      test_lang: value compared with each target's 'language' field.
    """
    # (removed an unused `out = []` local from the original)
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/generated/tests.json') as f:
        js = json.load(f)
        return [
            tgt for tgt in js
            if tgt['language'] == test_lang and platform_string() in
            tgt[platforms_str] and not (travis and tgt['flaky'])
        ]
154
155
156 def _check_compiler(compiler, supported_compilers):
157     if compiler not in supported_compilers:
158         raise Exception('Compiler %s not supported (on this platform).' %
159                         compiler)
160
161
162 def _check_arch(arch, supported_archs):
163     if arch not in supported_archs:
164         raise Exception('Architecture %s not supported.' % arch)
165
166
167 def _is_use_docker_child():
168     """Returns True if running running as a --use_docker child."""
169     return True if os.getenv('RUN_TESTS_COMMAND') else False
170
171
172 _PythonConfigVars = collections.namedtuple('_ConfigVars', [
173     'shell',
174     'builder',
175     'builder_prefix_arguments',
176     'venv_relative_python',
177     'toolchain',
178     'runner',
179     'test_name',
180     'iomgr_platform',
181 ])
182
183
def _python_config_generator(name, major, minor, bits, config_vars):
    """Return a PythonConfig for the given CPython version/bitness.

    The build command line runs config_vars.builder with the interpreter
    pattern, the environment name and toolchain; the run command line invokes
    the per-environment python (presumably a venv created by the builder
    script — TODO confirm against build_python.sh) on config_vars.test_name.
    """
    # Environment name is suffixed with the IO manager so the same version
    # can be tested under multiple iomgr platforms side by side.
    name += '_' + config_vars.iomgr_platform
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments +
        [_python_pattern_function(major=major, minor=minor, bits=bits)] +
        [name] + config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner + [
            os.path.join(name, config_vars.venv_relative_python[0]),
            config_vars.test_name
        ])
195
196
def _pypy_config_generator(name, major, config_vars):
    """Return a PythonConfig for a PyPy of the given major version.

    Unlike _python_config_generator, the run command passes no test_name and
    the environment name carries no iomgr suffix.
    """
    return PythonConfig(
        name, config_vars.shell + config_vars.builder +
        config_vars.builder_prefix_arguments +
        [_pypy_pattern_function(major=major)] + [name] +
        config_vars.venv_relative_python + config_vars.toolchain,
        config_vars.shell + config_vars.runner +
        [os.path.join(name, config_vars.venv_relative_python[0])])
205
206
207 def _python_pattern_function(major, minor, bits):
208     # Bit-ness is handled by the test machine's environment
209     if os.name == "nt":
210         if bits == "64":
211             return '/c/Python{major}{minor}/python.exe'.format(major=major,
212                                                                minor=minor,
213                                                                bits=bits)
214         else:
215             return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
216                 major=major, minor=minor, bits=bits)
217     else:
218         return 'python{major}.{minor}'.format(major=major, minor=minor)
219
220
221 def _pypy_pattern_function(major):
222     if major == '2':
223         return 'pypy'
224     elif major == '3':
225         return 'pypy3'
226     else:
227         raise ValueError("Unknown PyPy major version")
228
229
class CLanguage(object):
    """Language driver for the C/C++ core tests described in tests.json."""

    def __init__(self, make_target, test_lang):
        """Args:
          make_target: suffix used to form make targets (buildtests_<target>).
          test_lang: value matched against tests.json entries' 'language'.
        """
        self.make_target = make_target
        self.platform = platform_string()
        self.test_lang = test_lang

    def configure(self, config, args):
        """Validate compiler/arch choices for this platform and precompute
        cmake/make options used by the other methods."""
        self.config = config
        self.args = args
        self._make_options = []
        self._use_cmake = True
        if self.platform == 'windows':
            _check_compiler(self.args.compiler, [
                'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
                'cmake_vs2019'
            ])
            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            # Pick the Visual Studio generator matching the requested compiler.
            if self.args.compiler == 'cmake_vs2019':
                cmake_generator_option = 'Visual Studio 16 2019'
            elif self.args.compiler == 'cmake_vs2017':
                cmake_generator_option = 'Visual Studio 15 2017'
            else:
                cmake_generator_option = 'Visual Studio 14 2015'
            cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
            self._cmake_configure_extra_args = [
                '-G', cmake_generator_option, '-A', cmake_arch_option
            ]
        else:
            if self.platform == 'linux':
                # Allow all the known architectures. _check_arch_option has already checked that we're not doing
                # something illegal when not running under docker.
                _check_arch(self.args.arch, ['default', 'x64', 'x86'])
            else:
                _check_arch(self.args.arch, ['default'])

            self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
                self.args.use_docker, self.args.compiler)

            if self.args.arch == 'x86':
                # disable boringssl asm optimizations when on x86
                # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
                self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')

        if args.iomgr_platform == "uv":
            # libuv IO manager: pull compile/link flags from pkg-config, with
            # a -luv fallback when pkg-config is unavailable.
            cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
            try:
                # NOTE(review): check_output returns bytes on Python 3; this
                # str concatenation assumes Python 2 — confirm interpreter.
                cflags += subprocess.check_output(
                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                pass
            try:
                ldflags = subprocess.check_output(
                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
            except (subprocess.CalledProcessError, OSError):
                ldflags = '-luv '
            self._make_options += [
                'EXTRA_CPPFLAGS={}'.format(cflags),
                'EXTRA_LDLIBS={}'.format(ldflags)
            ]

    def test_specs(self):
        """Expand tests.json targets into a sorted list of JobSpecs.

        Each target is crossed with the platform's polling strategies; gtest
        and benchmark binaries are further split into one job per individual
        test by querying the binary's --*_list_tests flag.
        """
        out = []
        binaries = get_c_tests(self.args.travis, self.test_lang)
        for target in binaries:
            if self._use_cmake and target.get('boringssl', False):
                # cmake doesn't build boringssl tests
                continue
            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
            # Targets that don't poll run once under the 'none' strategy.
            polling_strategies = (_POLLING_STRATEGIES.get(
                self.platform, ['all']) if target.get('uses_polling', True) else
                                  ['none'])
            if self.args.iomgr_platform == 'uv':
                polling_strategies = ['all']
            for polling_strategy in polling_strategies:
                env = {
                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                        _ROOT + '/src/core/tsi/test_creds/ca.pem',
                    'GRPC_POLL_STRATEGY':
                        polling_strategy,
                    'GRPC_VERBOSITY':
                        'DEBUG'
                }
                # Propagate an externally-set resolver choice into the job env.
                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
                if resolver:
                    env['GRPC_DNS_RESOLVER'] = resolver
                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
                if polling_strategy in target.get('excluded_poll_engines', []):
                    continue

                timeout_scaling = 1
                if auto_timeout_scaling:
                    config = self.args.config
                    if ('asan' in config or config == 'msan' or
                            config == 'tsan' or config == 'ubsan' or
                            config == 'helgrind' or config == 'memcheck'):
                        # Scale overall test timeout if running under various sanitizers.
                        # scaling value is based on historical data analysis
                        timeout_scaling *= 3

                if self.config.build_config in target['exclude_configs']:
                    continue
                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
                    continue
                # Locate the built binary; its path depends on platform and
                # build system (cmake vs. Makefile).
                if self.platform == 'windows':
                    binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
                        self.config.build_config], target['name'])
                else:
                    if self._use_cmake:
                        binary = 'cmake/build/%s' % target['name']
                    else:
                        binary = 'bins/%s/%s' % (self.config.build_config,
                                                 target['name'])
                cpu_cost = target['cpu_cost']
                if cpu_cost == 'capacity':
                    cpu_cost = multiprocessing.cpu_count()
                if os.path.isfile(binary):
                    # NOTE(review): these two locals appear unused below —
                    # candidates for removal.
                    list_test_command = None
                    filter_test_command = None

                    # these are the flag defined by gtest and benchmark framework to list
                    # and filter test runs. We use them to split each individual test
                    # into its own JobSpec, and thus into its own process.
                    if 'benchmark' in target and target['benchmark']:
                        with open(os.devnull, 'w') as fnull:
                            # NOTE(review): check_output returns bytes on
                            # Python 3; the str split below assumes Python 2.
                            tests = subprocess.check_output(
                                [binary, '--benchmark_list_tests'],
                                stderr=fnull)
                        for line in tests.split('\n'):
                            test = line.strip()
                            if not test: continue
                            cmdline = [binary,
                                       '--benchmark_filter=%s$' % test
                                      ] + target['args']
                            out.append(
                                self.config.job_spec(
                                    cmdline,
                                    shortname='%s %s' %
                                    (' '.join(cmdline), shortname_ext),
                                    cpu_cost=cpu_cost,
                                    timeout_seconds=target.get(
                                        'timeout_seconds',
                                        _DEFAULT_TIMEOUT_SECONDS) *
                                    timeout_scaling,
                                    environ=env))
                    elif 'gtest' in target and target['gtest']:
                        # here we parse the output of --gtest_list_tests to build up a complete
                        # list of the tests contained in a binary for each test, we then
                        # add a job to run, filtering for just that test.
                        with open(os.devnull, 'w') as fnull:
                            tests = subprocess.check_output(
                                [binary, '--gtest_list_tests'], stderr=fnull)
                        base = None
                        for line in tests.split('\n'):
                            # gtest list output: suite names flush-left,
                            # test names indented; '#' starts a comment.
                            i = line.find('#')
                            if i >= 0: line = line[:i]
                            if not line: continue
                            if line[0] != ' ':
                                base = line.strip()
                            else:
                                assert base is not None
                                assert line[1] == ' '
                                test = base + line.strip()
                                cmdline = [binary,
                                           '--gtest_filter=%s' % test
                                          ] + target['args']
                                out.append(
                                    self.config.job_spec(
                                        cmdline,
                                        shortname='%s %s' %
                                        (' '.join(cmdline), shortname_ext),
                                        cpu_cost=cpu_cost,
                                        timeout_seconds=target.get(
                                            'timeout_seconds',
                                            _DEFAULT_TIMEOUT_SECONDS) *
                                        timeout_scaling,
                                        environ=env))
                    else:
                        # Plain binary: one job for the whole executable.
                        cmdline = [binary] + target['args']
                        shortname = target.get(
                            'shortname',
                            ' '.join(pipes.quote(arg) for arg in cmdline))
                        shortname += shortname_ext
                        out.append(
                            self.config.job_spec(
                                cmdline,
                                shortname=shortname,
                                cpu_cost=cpu_cost,
                                flaky=target.get('flaky', False),
                                timeout_seconds=target.get(
                                    'timeout_seconds',
                                    _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
                                environ=env))
                elif self.args.regex == '.*' or self.platform == 'windows':
                    print('\nWARNING: binary not found, skipping', binary)
        return sorted(out)

    def make_targets(self):
        """Make targets to build for this language."""
        if self.platform == 'windows':
            # don't build tools on windows just yet
            return ['buildtests_%s' % self.make_target]
        return [
            'buildtests_%s' % self.make_target,
            'tools_%s' % self.make_target, 'check_epollexclusive'
        ]

    def make_options(self):
        """Extra make options computed by configure()."""
        return self._make_options

    def pre_build_steps(self):
        """cmake configure step (platform-specific script), if cmake is used."""
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
                    self._cmake_configure_extra_args]
        elif self._use_cmake:
            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
                    self._cmake_configure_extra_args]
        else:
            return []

    def build_steps(self):
        """No build steps beyond the make targets."""
        return []

    def post_tests_steps(self):
        """Post-run script (non-Windows only)."""
        if self.platform == 'windows':
            return []
        else:
            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]

    def makefile_name(self):
        """Path of the Makefile to invoke (cmake-generated when using cmake)."""
        if self._use_cmake:
            return 'cmake/build/Makefile'
        else:
            return 'Makefile'

    def _clang_cmake_configure_extra_args(self, version_suffix=''):
        """cmake args selecting a (possibly version-suffixed) clang toolchain."""
        return [
            '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
            '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
        ]

    def _compiler_options(self, use_docker, compiler):
        """Returns docker distro and cmake configure args to use for given compiler."""
        if not use_docker and not _is_use_docker_child():
            # if not running under docker, we cannot ensure the right compiler version will be used,
            # so we only allow the non-specific choices.
            _check_compiler(compiler, ['default', 'cmake'])

        if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
            return ('jessie', [])
        elif compiler == 'gcc5.3':
            return ('ubuntu1604', [])
        elif compiler == 'gcc7.4':
            return ('ubuntu1804', [])
        elif compiler == 'gcc8.3':
            return ('buster', [])
        elif compiler == 'gcc_musl':
            return ('alpine', [])
        elif compiler == 'clang3.6':
            return ('ubuntu1604',
                    self._clang_cmake_configure_extra_args(
                        version_suffix='-3.6'))
        elif compiler == 'clang3.7':
            return ('ubuntu1604',
                    self._clang_cmake_configure_extra_args(
                        version_suffix='-3.7'))
        else:
            raise Exception('Compiler %s not supported.' % compiler)

    def dockerfile_dir(self):
        """Dockerfile directory matching the chosen distro and architecture."""
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return self.make_target
504
505
# This tests Node on grpc/grpc-node and will become the standard for Node testing
class RemoteNodeLanguage(object):
    """Delegates Node testing to the scripts from the grpc/grpc-node repo."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Record config/args and derive the runtime kind and node version."""
        self.config = config
        self.args = args
        # Note: electron ABI only depends on major and minor version, so that's all
        # we should specify in the compiler argument
        _check_compiler(self.args.compiler, [
            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
            'electron1.3', 'electron1.6'
        ])
        compiler = self.args.compiler
        if compiler == 'default':
            self.runtime = 'node'
            self.node_version = '8'
        elif compiler.startswith('electron'):
            self.runtime = 'electron'
            self.node_version = compiler[len('electron'):]
        else:
            # Strip the leading "node" to get the bare version number.
            self.runtime = 'node'
            self.node_version = compiler[len('node'):]

    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
    def test_specs(self):
        """Single job running the grpc-node helper script for this platform."""
        if self.platform == 'windows':
            spec = self.config.job_spec(
                ['tools\\run_tests\\helper_scripts\\run_node.bat'])
        else:
            spec = self.config.job_spec(
                ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
                None,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        return [spec]

    def pre_build_steps(self):
        """No pre-build steps."""
        return []

    def make_targets(self):
        """Nothing to build via make."""
        return []

    def make_options(self):
        """No extra make options."""
        return []

    def build_steps(self):
        """No build steps."""
        return []

    def post_tests_steps(self):
        """No post-test steps."""
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'grpc-node'
572
573
class Php7Language(object):
    """Builds the C core and runs the PHP7 extension's test script."""

    def configure(self, config, args):
        """Record config/args; only the default compiler is supported."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

    def test_specs(self):
        """Single job running the PHP test runner script."""
        return [
            self.config.job_spec(['src/php/bin/run_tests.sh'],
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]

    def pre_build_steps(self):
        """No pre-build steps."""
        return []

    def make_targets(self):
        """Build the static and shared C core libraries."""
        return ['static_c', 'shared_c']

    def make_options(self):
        """Make options set by configure() (embedded OpenSSL/zlib)."""
        return self._make_options

    def build_steps(self):
        """Build the PHP extension after the C core is built."""
        return [['tools/run_tests/helper_scripts/build_php.sh']]

    def post_tests_steps(self):
        """PHP-specific post-test script."""
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'php7'
612
613
class PythonConfig(
        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
    """Tuple of commands (named s.t. 'what it says on the tin' applies)

    name:  label of this interpreter config (used in job shortnames).
    build: command line that prepares the interpreter environment.
    run:   command line that executes tests under that environment.
    """
617
618
class PythonLanguage(object):

    # tests.json manifest to load, keyed by IO manager platform.
    _TEST_SPECS_FILE = {
        'native': 'src/python/grpcio_tests/tests/tests.json',
        'gevent': 'src/python/grpcio_tests/tests/tests.json',
        'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
    }
    # Test folder label used in job shortnames, keyed by IO manager platform.
    _TEST_FOLDER = {
        'native': 'test',
        'gevent': 'test',
        'asyncio': 'test_aio',
    }
631
    def configure(self, config, args):
        """Record config/args and compute the interpreter configs to test."""
        self.config = config
        self.args = args
        self.pythons = self._get_pythons(self.args)
636
637     def test_specs(self):
638         # load list of known test suites
639         with open(self._TEST_SPECS_FILE[
640                 self.args.iomgr_platform]) as tests_json_file:
641             tests_json = json.load(tests_json_file)
642         environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
643         # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
644         # designed for non-native IO manager. It has a side-effect that
645         # overrides threading settings in C-Core.
646         if args.iomgr_platform != 'native':
647             environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
648         return [
649             self.config.job_spec(
650                 config.run,
651                 timeout_seconds=5 * 60,
652                 environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
653                              **environment),
654                 shortname='%s.%s.%s' %
655                 (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
656                  suite_name),
657             ) for suite_name in tests_json for config in self.pythons
658         ]
659
    def pre_build_steps(self):
        """No pre-build steps."""
        return []
662
    def make_targets(self):
        """Nothing to build via make."""
        return []
665
    def make_options(self):
        """No extra make options."""
        return []
668
    def build_steps(self):
        """One build command per interpreter config (from configure())."""
        return [config.build for config in self.pythons]
671
672     def post_tests_steps(self):
673         if self.config.build_config != 'gcov':
674             return []
675         else:
676             return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
677
    def makefile_name(self):
        """Top-level Makefile (unused for the actual Python build)."""
        return 'Makefile'
680
    def dockerfile_dir(self):
        """Dockerfile directory matching the python manager and architecture."""
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))
684
685     def _python_manager_name(self):
686         """Choose the docker image to use based on python version."""
687         if self.args.compiler in [
688                 'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
689         ]:
690             return 'stretch_' + self.args.compiler[len('python'):]
691         elif self.args.compiler == 'python_alpine':
692             return 'alpine'
693         else:
694             return 'stretch_default'
695
696     def _get_pythons(self, args):
697         """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
698         if args.arch == 'x86':
699             bits = '32'
700         else:
701             bits = '64'
702
703         if os.name == 'nt':
704             shell = ['bash']
705             builder = [
706                 os.path.abspath(
707                     'tools/run_tests/helper_scripts/build_python_msys2.sh')
708             ]
709             builder_prefix_arguments = ['MINGW{}'.format(bits)]
710             venv_relative_python = ['Scripts/python.exe']
711             toolchain = ['mingw32']
712         else:
713             shell = []
714             builder = [
715                 os.path.abspath(
716                     'tools/run_tests/helper_scripts/build_python.sh')
717             ]
718             builder_prefix_arguments = []
719             venv_relative_python = ['bin/python']
720             toolchain = ['unix']
721
722         # Selects the corresponding testing mode.
723         # See src/python/grpcio_tests/commands.py for implementation details.
724         if args.iomgr_platform == 'native':
725             test_command = 'test_lite'
726         elif args.iomgr_platform == 'gevent':
727             test_command = 'test_gevent'
728         elif args.iomgr_platform == 'asyncio':
729             test_command = 'test_aio'
730         else:
731             raise ValueError('Unsupported IO Manager platform: %s' %
732                              args.iomgr_platform)
733         runner = [
734             os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
735         ]
736
737         config_vars = _PythonConfigVars(shell, builder,
738                                         builder_prefix_arguments,
739                                         venv_relative_python, toolchain, runner,
740                                         test_command, args.iomgr_platform)
741         python27_config = _python_config_generator(name='py27',
742                                                    major='2',
743                                                    minor='7',
744                                                    bits=bits,
745                                                    config_vars=config_vars)
746         python35_config = _python_config_generator(name='py35',
747                                                    major='3',
748                                                    minor='5',
749                                                    bits=bits,
750                                                    config_vars=config_vars)
751         python36_config = _python_config_generator(name='py36',
752                                                    major='3',
753                                                    minor='6',
754                                                    bits=bits,
755                                                    config_vars=config_vars)
756         python37_config = _python_config_generator(name='py37',
757                                                    major='3',
758                                                    minor='7',
759                                                    bits=bits,
760                                                    config_vars=config_vars)
761         python38_config = _python_config_generator(name='py38',
762                                                    major='3',
763                                                    minor='8',
764                                                    bits=bits,
765                                                    config_vars=config_vars)
766         pypy27_config = _pypy_config_generator(name='pypy',
767                                                major='2',
768                                                config_vars=config_vars)
769         pypy32_config = _pypy_config_generator(name='pypy3',
770                                                major='3',
771                                                config_vars=config_vars)
772
773         if args.iomgr_platform == 'asyncio':
774             if args.compiler not in ('default', 'python3.6', 'python3.7',
775                                      'python3.8'):
776                 raise Exception(
777                     'Compiler %s not supported with IO Manager platform: %s' %
778                     (args.compiler, args.iomgr_platform))
779
780         if args.compiler == 'default':
781             if os.name == 'nt':
782                 if args.iomgr_platform == 'gevent':
783                     # TODO(https://github.com/grpc/grpc/issues/23784) allow
784                     # gevent to run on later version once issue solved.
785                     return (python36_config,)
786                 else:
787                     return (python38_config,)
788             else:
789                 if args.iomgr_platform == 'asyncio':
790                     return (python36_config, python38_config)
791                 elif os.uname()[0] == 'Darwin':
792                     # NOTE(rbellevi): Testing takes significantly longer on
793                     # MacOS, so we restrict the number of interpreter versions
794                     # tested.
795                     return (
796                         python27_config,
797                         python38_config,
798                     )
799                 else:
800                     return (
801                         python27_config,
802                         python35_config,
803                         python37_config,
804                         python38_config,
805                     )
806         elif args.compiler == 'python2.7':
807             return (python27_config,)
808         elif args.compiler == 'python3.5':
809             return (python35_config,)
810         elif args.compiler == 'python3.6':
811             return (python36_config,)
812         elif args.compiler == 'python3.7':
813             return (python37_config,)
814         elif args.compiler == 'python3.8':
815             return (python38_config,)
816         elif args.compiler == 'pypy':
817             return (pypy27_config,)
818         elif args.compiler == 'pypy3':
819             return (pypy32_config,)
820         elif args.compiler == 'python_alpine':
821             return (python27_config,)
822         elif args.compiler == 'all_the_cpythons':
823             return (
824                 python27_config,
825                 python35_config,
826                 python36_config,
827                 python37_config,
828                 python38_config,
829             )
830         else:
831             raise Exception('Compiler %s not supported.' % args.compiler)
832
833     def __str__(self):
834         return 'python'
835
836
class RubyLanguage(object):
    """Runs the main Ruby test suite plus a set of standalone end2end scripts."""

    def configure(self, config, args):
        """Stores run configuration; Ruby only supports the default compiler."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Returns the main ruby test run followed by one job per end2end script."""
        specs = [
            self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
                                 timeout_seconds=10 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
        ]
        # Each end2end scenario runs as its own ruby process with a longer
        # timeout, since several of them exercise signal handling and forking.
        end2end_scripts = (
            'src/ruby/end2end/sig_handling_test.rb',
            'src/ruby/end2end/channel_state_test.rb',
            'src/ruby/end2end/channel_closing_test.rb',
            'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
            'src/ruby/end2end/killed_client_thread_test.rb',
            'src/ruby/end2end/forking_client_test.rb',
            'src/ruby/end2end/grpc_class_init_test.rb',
            'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
            'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
            'src/ruby/end2end/client_memory_usage_test.rb',
            'src/ruby/end2end/package_with_underscore_test.rb',
            'src/ruby/end2end/graceful_sig_handling_test.rb',
            'src/ruby/end2end/graceful_sig_stop_test.rb',
            'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
            'src/ruby/end2end/call_credentials_timeout_test.rb',
            'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
        )
        specs.extend(
            self.config.job_spec(['ruby', script],
                                 shortname=script,
                                 timeout_seconds=20 * 60,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
            for script in end2end_scripts)
        return specs

    def pre_build_steps(self):
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]

    def post_tests_steps(self):
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
            self.args.arch)

    def __str__(self):
        return 'ruby'
901
902
class CSharpLanguage(object):
    """Runs the C# test suite on .NET Core (coreclr) or mono (net45)."""

    def __init__(self):
        self.platform = platform_string()

    def configure(self, config, args):
        """Stores run configuration and validates compiler/arch choices."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default', 'coreclr'])
        if self.platform == 'windows':
            _check_arch(self.args.arch, ['default'])
            self._cmake_arch_option = 'x64'
        else:
            self._docker_distro = 'stretch'

    def test_specs(self):
        """Builds one job per test, or one OpenCover job per assembly for gcov."""
        with open('src/csharp/tests.json') as f:
            tests_by_assembly = json.load(f)

        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
        nunit_args = ['--labels=All', '--noresult', '--workers=1']

        if self.args.compiler == 'coreclr':
            assembly_subdir = 'bin/%s/netcoreapp2.1' % msbuild_config
            assembly_extension = '.dll'
            runtime_cmd = ['dotnet', 'exec']
        else:
            assembly_subdir = 'bin/%s/net45' % msbuild_config
            assembly_extension = '.exe'
            if self.platform == 'windows':
                runtime_cmd = []
            elif self.platform == 'mac':
                # mono before version 5.2 on MacOS defaults to 32bit runtime
                runtime_cmd = ['mono', '--arch=64']
            else:
                runtime_cmd = ['mono']

        # Coverage collection via OpenCover only works on Windows.
        use_opencover = (self.config.build_config == 'gcov' and
                         self.platform == 'windows')

        specs = []
        for assembly in six.iterkeys(tests_by_assembly):
            assembly_file = 'src/csharp/%s/%s/%s%s' % (
                assembly, assembly_subdir, assembly, assembly_extension)
            if not use_opencover:
                # normally, run each test as a separate process
                for test in tests_by_assembly[assembly]:
                    cmdline = runtime_cmd + [assembly_file,
                                             '--test=%s' % test] + nunit_args
                    specs.append(
                        self.config.job_spec(
                            cmdline,
                            shortname='csharp.%s' % test,
                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
            else:
                # For C# test coverage, run all tests from the same assembly at once
                # using OpenCover.Console (only works on Windows).
                cmdline = [
                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
                    '-targetargs:%s' % ' '.join(nunit_args),
                    '-filter:+[Grpc.Core]*', '-register:user',
                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
                ]

                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
                # to prevent problems with registering the profiler.
                specs.append(
                    self.config.job_spec(cmdline,
                                         shortname='csharp.coverage.%s' %
                                         assembly,
                                         cpu_cost=1000000,
                                         environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        return specs

    def pre_build_steps(self):
        if self.platform == 'windows':
            return [[
                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
                self._cmake_arch_option
            ]]
        return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]

    def make_targets(self):
        return ['grpc_csharp_ext']

    def make_options(self):
        return []

    def build_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
        return [['tools/run_tests/helper_scripts/build_csharp.sh']]

    def post_tests_steps(self):
        if self.platform == 'windows':
            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
        return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]

    def makefile_name(self):
        if self.platform == 'windows':
            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
        # no need to set x86 specific flags as run_tests.py
        # currently forbids x86 C# builds on both Linux and MacOS.
        return 'cmake/build/Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))

    def __str__(self):
        return 'csharp'
1019
1020
class ObjCLanguage(object):
    """Builds example apps and runs the Objective-C test schemes (iOS/macOS/tvOS)."""

    def configure(self, config, args):
        """Stores run configuration; ObjC only supports the default compiler."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Returns build-example jobs plus one job per Xcode test scheme.

        Each spec gets cpu_cost=1e6 so jobs run exclusively: the Xcode/Bazel
        builds already parallelize internally and would contend otherwise.
        """
        out = []
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example_bazel.sh'],
                timeout_seconds=10 * 60,
                shortname='ios-buildtest-example-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'NO'
                }))
        # Currently not supporting compiling as frameworks in Bazel
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-sample-frameworks',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'Sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
                    'FRAMEWORKS': 'YES'
                }))
        # NOTE(review): shortname has a typo ('switft') — kept as-is since
        # changing it would break report/history continuity.
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-buildtest-example-switftsample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'SwiftSample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
                }))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/build_one_example_bazel.sh'],
                timeout_seconds=10 * 60,
                shortname='ios-buildtest-example-tvOS-sample',
                cpu_cost=1e6,
                environ={
                    'SCHEME': 'tvOS-sample',
                    'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
                    'FRAMEWORKS': 'NO'
                }))
        # Disabled due to #20258
        # TODO (mxyan): Reenable this test when #20258 is resolved.
        # out.append(
        #     self.config.job_spec(
        #         ['src/objective-c/tests/build_one_example_bazel.sh'],
        #         timeout_seconds=20 * 60,
        #         shortname='ios-buildtest-example-watchOS-sample',
        #         cpu_cost=1e6,
        #         environ={
        #             'SCHEME': 'watchOS-sample-WatchKit-App',
        #             'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
        #             'FRAMEWORKS': 'NO'
        #         }))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-plugintest',
                                 cpu_cost=1e6,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(
                ['src/objective-c/tests/run_plugin_option_tests.sh'],
                timeout_seconds=60 * 60,
                shortname='ios-test-plugin-option-test',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(
                ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
                timeout_seconds=20 * 60,
                shortname='ios-test-cfstream-tests',
                cpu_cost=1e6,
                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-unittests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'UnitTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-interoptests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'InteropTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='ios-test-cronettests',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'CronetTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-perf-test',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'PerfTests'}))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-perf-test-posix',
                                 cpu_cost=1e6,
                                 environ={'SCHEME': 'PerfTestsPosix'}))
        out.append(
            self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='ios-cpp-test-cronet',
                                 cpu_cost=1e6,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=60 * 60,
                                 shortname='mac-test-basictests',
                                 cpu_cost=1e6,
                                 environ={
                                     'SCHEME': 'MacTests',
                                     'PLATFORM': 'macos'
                                 }))
        out.append(
            self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
                                 timeout_seconds=30 * 60,
                                 shortname='tvos-test-basictests',
                                 cpu_cost=1e6,
                                 environ={
                                     'SCHEME': 'TvTests',
                                     'PLATFORM': 'tvos'
                                 }))

        return sorted(out)

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return []

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        # ObjC tests run directly on a Mac host, never inside docker.
        return None

    def __str__(self):
        return 'objc'
1188
1189
class Sanity(object):
    """Runs the repository sanity checks listed in sanity_tests.yaml."""

    def configure(self, config, args):
        """Stores run configuration; sanity only supports the default compiler."""
        self.config = config
        self.args = args
        _check_compiler(self.args.compiler, ['default'])

    def test_specs(self):
        """Creates one job per sanity script from sanity_tests.yaml."""
        import yaml
        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
            environ = {'TEST': 'true'}
            if _is_use_docker_child():
                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
                environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
                # sanity tests run tools/bazel wrapper concurrently
                # and that can result in a download/run race in the wrapper.
                # under docker we already have the right version of bazel
                # so we can just disable the wrapper.
                environ['DISABLE_BAZEL_WRAPPER'] = 'true'
            return [
                self.config.job_spec(cmd['script'].split(),
                                     timeout_seconds=30 * 60,
                                     environ=environ,
                                     cpu_cost=cmd.get('cpu_cost', 1))
                # safe_load: plain yaml.load without an explicit Loader is
                # deprecated (and unsafe) since PyYAML 5.1; the sanity config
                # only contains plain mappings, so safe_load suffices.
                for cmd in yaml.safe_load(f)
            ]

    def pre_build_steps(self):
        return []

    def make_targets(self):
        return ['run_dep_checks']

    def make_options(self):
        return []

    def build_steps(self):
        return []

    def post_tests_steps(self):
        return []

    def makefile_name(self):
        return 'Makefile'

    def dockerfile_dir(self):
        return 'tools/dockerfile/test/sanity'

    def __str__(self):
        return 'sanity'
1240
1241
# different configurations we can run under
with open('tools/run_tests/generated/configs.json') as f:
    _CONFIGS = {
        cfg['config']: Config(**cfg) for cfg in ast.literal_eval(f.read())
    }

# name accepted by --language -> runner object for that language
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'grpc-node': RemoteNodeLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc': ObjCLanguage(),
    'sanity': Sanity()
}

# build config name -> MSBuild configuration used for C# builds
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
}
1264
1265
1266 def _windows_arch_option(arch):
1267     """Returns msbuild cmdline option for selected architecture."""
1268     if arch == 'default' or arch == 'x86':
1269         return '/p:Platform=Win32'
1270     elif arch == 'x64':
1271         return '/p:Platform=x64'
1272     else:
1273         print('Architecture %s not supported.' % arch)
1274         sys.exit(1)
1275
1276
def _check_arch_option(arch):
    """Checks that architecture option is valid; exits the process if not."""
    if platform_string() == 'windows':
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print(
                'Architecture %s does not match current runtime architecture.' %
                arch)
            sys.exit(1)
    else:
        # Fix: use the 'arch' parameter rather than the module-global
        # 'args.arch' that the original referenced — the global made this
        # function silently depend on command-line parsing having run and
        # could report a different value than the one actually checked.
        if arch != 'default':
            print('Architecture %s not supported on current platform.' % arch)
            sys.exit(1)
1300
1301
1302 def _docker_arch_suffix(arch):
1303     """Returns suffix to dockerfile dir to use."""
1304     if arch == 'default' or arch == 'x64':
1305         return 'x64'
1306     elif arch == 'x86':
1307         return 'x86'
1308     else:
1309         print('Architecture %s not supported with current settings.' % arch)
1310         sys.exit(1)
1311
1312
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

       Returns:
           A positive integer or 0, the latter indicating an infinite number of
           runs.

       Raises:
           argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    except ValueError:
        # Only catch ValueError (raised by int() and the positivity check
        # above). The original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt, which must propagate.
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
1332
1333
def percent_type(arg_str):
    """Argparse type: parses a percentage and checks it lies in [0, 100]."""
    value = float(arg_str)
    if value < 0 or value > 100:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % value)
    return value
1340
1341
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Returns True when a and b are equal within relative/absolute tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
1345
1346
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# --- test selection and repetition ---
argp.add_argument('-c',
                  '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument(
    '-n',
    '--runs_per_test',
    default=1,
    type=runs_per_test_type,
    help='A positive integer or "inf". If "inf", all tests will run in an '
    'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p',
                  '--sample_percent',
                  default=100.0,
                  type=percent_type,
                  help='Run a random sample with that percentage of tests')
# --- run-loop behavior flags ---
argp.add_argument('-f',
                  '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t',
                  '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l',
                  '--language',
                  choices=sorted(_LANGUAGES.keys()),
                  nargs='+',
                  required=True)
argp.add_argument('-S',
                  '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
argp.add_argument(
    '--allow_flakes',
    default=False,
    action='store_const',
    const=True,
    help=
    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
)
# --- build environment selection (arch/compiler/iomgr) ---
argp.add_argument(
    '--arch',
    choices=['default', 'x86', 'x64'],
    default='default',
    help=
    'Selects architecture to target. For some platforms "default" is the only supported choice.'
)
argp.add_argument(
    '--compiler',
    choices=[
        'default',
        'gcc4.9',
        'gcc5.3',
        'gcc7.4',
        'gcc8.3',
        'gcc_musl',
        'clang3.6',
        'clang3.7',
        'python2.7',
        'python3.5',
        'python3.6',
        'python3.7',
        'python3.8',
        'pypy',
        'pypy3',
        'python_alpine',
        'all_the_cpythons',
        'electron1.3',
        'electron1.6',
        'coreclr',
        'cmake',
        'cmake_vs2015',
        'cmake_vs2017',
        'cmake_vs2019',
    ],
    default='default',
    help=
    'Selects compiler to use. Allowed values depend on the platform and language.'
)
argp.add_argument('--iomgr_platform',
                  choices=['native', 'uv', 'gevent', 'asyncio'],
                  default='native',
                  help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument(
    '--update_submodules',
    default=[],
    nargs='*',
    help=
    'Update some submodules before building. If any are updated, also run generate_projects. '
    +
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
)
argp.add_argument('-a', '--antagonists', default=0, type=int)
# --- reporting options ---
argp.add_argument('-x',
                  '--xml_report',
                  default=None,
                  type=str,
                  help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name',
                  default='tests',
                  type=str,
                  help='Test suite name to use in generated JUnit XML report')
argp.add_argument(
    '--report_multi_target',
    default=False,
    const=True,
    action='store_const',
    help='Generate separate XML report for each test job (Looks better in UIs).'
)
argp.add_argument(
    '--quiet_success',
    default=False,
    action='store_const',
    const=True,
    help=
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
# --- polling engine selection ---
argp.add_argument(
    '--force_default_poller',
    default=False,
    action='store_const',
    const=True,
    help='Don\'t try to iterate over many polling strategies when they exist')
argp.add_argument(
    '--force_use_pollers',
    default=None,
    type=str,
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument('--max_time',
                  default=-1,
                  type=int,
                  help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
                  default='',
                  type=str,
                  nargs='?',
                  help='Upload test results to a specified BQ table.')
args = argp.parse_args()
1518
# Set of test shortnames considered flaky, and per-test cpu overrides;
# populated later (outside this section) and consumed when building job specs.
flaky_tests = set()
shortname_to_cpu = {}

# Honor poller-selection flags: --force_default_poller disables the
# per-strategy iteration entirely; --force_use_pollers restricts it to an
# explicit comma-separated list for the current platform.
if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # Each spec is SUBMODULE_NAME[:BRANCH]; split(':', 1) yields 1 or 2 parts,
    # so the two branches below cover all cases.
    spec = spec.split(':', 1)
    if len(spec) == 1:
        submodule = spec[0]
        branch = 'master'
    elif len(spec) == 2:
        submodule = spec[0]
        branch = spec[1]
    cwd = 'third_party/%s' % submodule

    # cwd=cwd binds the current loop value as a default argument, so each
    # iteration's git() helper operates on that iteration's submodule dir.
    def git(cmd, cwd=cwd):
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('fetch')
    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
    else:
        print(
            'WARNING: may need to regenerate projects, but since we are not on')
        print(
            '         Linux this step is being skipped. Compilation MAY fail.')
1558
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
    # On CI, replace the default wrapper environment with API tracing only.
    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

# Instantiate and configure the selected language runners.
languages = set(_LANGUAGES[l] for l in args.language)
for l in languages:
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    # Custom make options only compose safely for a single language
    # (or under the special-cased gcov config).
    if not 'gcov' in args.config and len(languages) != 1:
        print(
            'languages with custom make options cannot be built simultaneously with other languages'
        )
        sys.exit(1)
    else:
        # Combining make options is not clean and just happens to work. It allows C & C++ to build
        # together, and is only used under gcov. All other configs should build languages individually.
        language_make_options = list(
            set([
                make_option for lang in languages
                for make_option in lang.make_options()
            ]))
1585
# When --use_docker is given, re-run this same script inside a docker
# container (minus the --use_docker flag) and exit with its status.
if args.use_docker:
    if not args.travis:
        print('Seen --use_docker flag, will run tests under docker.')
        print('')
        print(
            'IMPORTANT: The changes you are testing need to be locally committed'
        )
        print(
            'because only the committed changes in the current branch will be')
        print('copied to the docker environment.')
        # Give the user a moment to read the warning / abort with Ctrl-C.
        time.sleep(5)

    # All selected languages must agree on a single docker image.
    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        print('Languages to be tested require running under different docker '
              'images.')
        sys.exit(1)
    else:
        dockerfile_dir = next(iter(dockerfile_dirs))

    # Rebuild the command line without --use_docker so the inner invocation
    # runs the tests directly.
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
        child_argv[1:])

    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    if args.xml_report:
        env['XML_REPORT'] = args.xml_report
    if not args.travis:
        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
        shell=True,
        env=env)
    sys.exit(0)

_check_arch_option(args.arch)
1626
1627
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Return the JobSpecs that build `targets` for configuration `cfg`.

    On Windows the build is driven through cmake's --build wrapper (one job
    per target); elsewhere a single make invocation builds all targets.
    Returns an empty list when there is nothing to build.
    """
    if platform_string() == 'windows':
        return [
            jobset.JobSpec([
                'cmake', '--build', '.', '--target',
                '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
            ],
                           cwd=os.path.dirname(makefile),
                           timeout_seconds=None) for target in targets
        ]
    if not targets:
        return []
    if makefile.startswith('cmake/build/'):
        # With cmake, we've passed all the build configuration in the
        # pre-build step already; a plain make in the build tree suffices.
        return [
            jobset.JobSpec([os.getenv('MAKE', 'make'), '-j',
                            '%d' % args.jobs] + targets,
                           cwd='cmake/build',
                           timeout_seconds=None)
        ]
    # Legacy Makefile build: pass config and slowdown factor explicitly.
    cmdline = [
        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
        '%d' % args.jobs,
        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
        'CONFIG=%s' % cfg, 'Q='
    ] + language_make_options
    if args.travis:
        cmdline += ['JENKINS_BUILD=1']
    return [jobset.JobSpec(cmdline + targets, timeout_seconds=None)]
1663
1664
# Union of build targets per makefile, across every selected language.
make_targets = {}
for lang in languages:
    mf = lang.makefile_name()
    make_targets.setdefault(mf, set()).update(lang.make_targets())
1670
1671
def build_step_environ(cfg):
    """Return the environment dict for a build step running config `cfg`.

    Includes MSBUILD_CONFIG only when `cfg` maps to an MSBuild configuration.
    """
    env = {'CONFIG': cfg}
    msbuild = _MSBUILD_CONFIG.get(cfg)
    if msbuild:
        env['MSBUILD_CONFIG'] = msbuild
    return env
1678
1679
# Assemble the ordered build pipeline: pre-build steps (deduplicated, with
# retries), then make/cmake jobs, then per-language build steps. Post-test
# steps run after the test pass.
build_steps = list(
    set(
        jobset.JobSpec(cmdline,
                       environ=build_step_environ(build_config),
                       timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
                       flake_retries=2)
        for l in languages
        for cmdline in l.pre_build_steps()))
if make_targets:
    make_commands = itertools.chain.from_iterable(
        make_jobspec(build_config, list(targets), makefile)
        for (makefile, targets) in make_targets.items())
    build_steps.extend(set(make_commands))
build_steps.extend(
    set(
        jobset.JobSpec(cmdline,
                       environ=build_step_environ(build_config),
                       timeout_seconds=None)  # build steps have no timeout
        for l in languages
        for cmdline in l.build_steps()))

post_tests_steps = list(
    set(
        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
        for l in languages
        for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test  # 0 means "run forever" (-n inf)
forever = args.forever
1708
1709
def _shut_down_legacy_server(legacy_server_port):
    """Ask an old port server listening on `legacy_server_port` to quit.

    Best-effort: if nothing is listening (or the version probe fails or
    times out), this silently does nothing.

    Args:
      legacy_server_port: (int) TCP port of the legacy port server.
    """
    try:
        # Probe the server first; int() validates a numeric version response.
        int(
            urllib.request.urlopen('http://localhost:%d/version_number' %
                                   legacy_server_port,
                                   timeout=10).read())
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; any probe failure means there is nothing to stop.
        pass
    else:
        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
                               legacy_server_port).read()
1721
1722
1723 def _calculate_num_runs_failures(list_of_results):
1724     """Calculate number of runs and failures for a particular test.
1725
1726   Args:
1727     list_of_results: (List) of JobResult object.
1728   Returns:
1729     A tuple of total number of runs and failures.
1730   """
1731     num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
1732     num_failures = 0
1733     for jobresult in list_of_results:
1734         if jobresult.retries > 0:
1735             num_runs += jobresult.retries
1736         if jobresult.num_failures > 0:
1737             num_failures += jobresult.num_failures
1738     return num_runs, num_failures
1739
1740
# _build_and_run results
class BuildAndRunError(object):
    """Sentinel values naming which phase of _build_and_run failed.

    Unique object identities; callers test membership in the returned list
    (e.g. `BuildAndRunError.BUILD in errors`).
    """

    BUILD = object()
    TEST = object()
    POST_TEST = object()
1747
1748
def _has_epollexclusive():
    """Return True iff the EPOLLEXCLUSIVE-support probe binary succeeds.

    The probe only exists for C-core builds on Linux; a missing binary or a
    failing/unlaunchable probe means the feature is unavailable.
    """
    probe = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(probe):
        # For languages other than C and Windows the binary won't exist.
        return False
    try:
        subprocess.check_call(probe)
    except (subprocess.CalledProcessError, OSError):
        return False
    return True
1761
1762
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
                   newline_on_success,
                   xml_report=None,
                   build_only=False):
    """Do one pass of building & running tests.

    Args:
      check_cancelled: callable returning True when the run should stop early.
      newline_on_success: (bool) jobset output formatting flag.
      xml_report: optional path for a JUnit XML report.
      build_only: when True, stop after the build phase.
    Returns:
      A list of BuildAndRunError values for the phases that failed
      (empty list on full success).
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(build_steps,
                                         maxjobs=1,
                                         stop_on_failure=True,
                                         newline_on_success=newline_on_success,
                                         travis=args.travis)
    if num_failures:
        return [BuildAndRunError.BUILD]

    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(
                resultset, xml_report, suite_name=args.report_suite_name)
        return []

    # Drop the 'epollex' polling strategy when the kernel lacks
    # EPOLLEXCLUSIVE support (checked via a probe binary).
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
            platform_string()]:
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

    # start antagonists (CPU-load processes that stress-test scheduling)
    antagonists = [
        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    start_port_server.start_port_server()
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # Collect every test spec matching --regex and not --regex_exclude.
        one_run = set(spec for language in languages
                      for spec in language.test_specs()
                      if (re.search(args.regex, spec.shortname) and
                          (args.regex_exclude == '' or
                           not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want our test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
            # jobs.
            sample_size = int(num_jobs * args.sample_percent / 100.0)
            massaged_one_run = random.sample(massaged_one_run, sample_size)
            if not isclose(args.sample_percent, 100.0):
                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                print("Running %d tests out of %d (~%d%%)" %
                      (sample_size, num_jobs, args.sample_percent))
        if infinite_runs:
            assert len(massaged_one_run
                      ) > 0, 'Must have at least one test for a -n inf run'
        # Lazily repeat the run set: forever for -n inf, else runs_per_test
        # times.
        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                         else itertools.repeat(massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
            jobset.message(
                'START',
                'Running tests quietly, only failing tests will be reported',
                do_newline=True)
        num_test_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=args.travis,
            maxjobs=args.jobs,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
        if resultset:
            # Summarize per-test outcomes: all-failed => FAILED, some-failed
            # => FLAKE.
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                    else:
                        jobset.message('FLAKE',
                                       '%s [%d/%d runs flaked]' %
                                       (k, num_failures, num_runs),
                                       do_newline=True)
    finally:
        # Always reap antagonists and flush reports, even on cancellation.
        for antagonist in antagonists:
            antagonist.kill()
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                    0
                ],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            }
            try:
                upload_results_to_bq(resultset, args.bq_result_table,
                                     upload_extra_fields)
            except NameError as e:
                # upload_results_to_bq is undefined when its import failed.
                logging.warning(
                    e)  # It's fine to ignore since this is not critical
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                resultset,
                xml_report,
                suite_name=args.report_suite_name,
                multi_target=args.report_multi_target)

    number_failures, _ = jobset.run(post_tests_steps,
                                    maxjobs=1,
                                    stop_on_failure=False,
                                    newline_on_success=newline_on_success,
                                    travis=args.travis)

    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)

    return out
1896
1897
# Main driver: either watch-and-rerun forever, or do a single pass and exit
# with a bitmask describing which phases failed.
if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()
        have_files_changed = lambda: dw.most_recent_change() != initial_time
        previous_success = success
        # BUG FIX: _build_and_run returns a *list* of failures; the old code
        # compared it with `== 0`, which is always False for a list, so
        # `success` was never tracked and the recovery message below could
        # never be printed.
        errors = _build_and_run(check_cancelled=have_files_changed,
                                newline_on_success=False,
                                build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            jobset.message('SUCCESS',
                           'All tests are now passing properly',
                           do_newline=True)
        jobset.message('IDLE', 'No change detected')
        # Poll for file changes before kicking off the next pass.
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(check_cancelled=lambda: False,
                            newline_on_success=args.newline_on_success,
                            xml_report=args.xml_report,
                            build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Encode failed phases as exit-code bits: 1=build, 2=test, 4=post-test.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)