2 # Copyright 2015 gRPC authors.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run tests in parallel."""
17 from __future__ import print_function
26 import multiprocessing
39 from six.moves import urllib
43 import python_utils.jobset as jobset
44 import python_utils.report_utils as report_utils
45 import python_utils.watch_dirs as watch_dirs
46 import python_utils.start_port_server as start_port_server
48 from python_utils.upload_test_results import upload_results_to_bq
50 pass # It's ok to not import because this is only necessary to upload results to BQ.
52 gcp_utils_dir = os.path.abspath(
53 os.path.join(os.path.dirname(__file__), '../gcp/utils'))
54 sys.path.append(gcp_utils_dir)
56 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
59 _FORCE_ENVIRON_FOR_WRAPPERS = {
60 'GRPC_VERBOSITY': 'DEBUG',
63 _POLLING_STRATEGIES = {
64 'linux': ['epollex', 'epoll1', 'poll'],
def platform_string():
    """Return the current platform name as reported by jobset.

    Callers in this file compare the result against 'windows', 'linux'
    and 'mac'.
    """
    return jobset.platform_string()
73 _DEFAULT_TIMEOUT_SECONDS = 5 * 60
74 _PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
77 def run_shell_command(cmd, env=None, cwd=None):
79 subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
80 except subprocess.CalledProcessError as e:
82 "Error while running command '%s'. Exit status %d. Output:\n%s",
83 e.cmd, e.returncode, e.output)
87 def max_parallel_tests_for_current_platform():
88 # Too much test parallelization has only been seen to be a problem
90 if jobset.platform_string() == 'windows':
95 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
101 timeout_multiplier=1,
103 iomgr_platform='native'):
106 self.build_config = config
107 self.environ = environ
108 self.environ['CONFIG'] = config
109 self.tool_prefix = tool_prefix
110 self.timeout_multiplier = timeout_multiplier
111 self.iomgr_platform = iomgr_platform
115 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
120 """Construct a jobset.JobSpec for a test under this config
123 cmdline: a list of strings specifying the command line the test
126 actual_environ = self.environ.copy()
127 for k, v in environ.items():
128 actual_environ[k] = v
129 if not flaky and shortname and shortname in flaky_tests:
131 if shortname in shortname_to_cpu:
132 cpu_cost = shortname_to_cpu[shortname]
133 return jobset.JobSpec(
134 cmdline=self.tool_prefix + cmdline,
136 environ=actual_environ,
138 timeout_seconds=(self.timeout_multiplier *
139 timeout_seconds if timeout_seconds else None),
140 flake_retries=4 if flaky or args.allow_flakes else 0,
141 timeout_retries=1 if flaky or args.allow_flakes else 0)
144 def get_c_tests(travis, test_lang):
146 platforms_str = 'ci_platforms' if travis else 'platforms'
147 with open('tools/run_tests/generated/tests.json') as f:
151 if tgt['language'] == test_lang and platform_string() in
152 tgt[platforms_str] and not (travis and tgt['flaky'])
156 def _check_compiler(compiler, supported_compilers):
157 if compiler not in supported_compilers:
158 raise Exception('Compiler %s not supported (on this platform).' %
162 def _check_arch(arch, supported_archs):
163 if arch not in supported_archs:
164 raise Exception('Architecture %s not supported.' % arch)
167 def _is_use_docker_child():
168 """Returns True if running running as a --use_docker child."""
169 return True if os.getenv('RUN_TESTS_COMMAND') else False
172 _PythonConfigVars = collections.namedtuple('_ConfigVars', [
175 'builder_prefix_arguments',
176 'venv_relative_python',
184 def _python_config_generator(name, major, minor, bits, config_vars):
185 name += '_' + config_vars.iomgr_platform
187 name, config_vars.shell + config_vars.builder +
188 config_vars.builder_prefix_arguments +
189 [_python_pattern_function(major=major, minor=minor, bits=bits)] +
190 [name] + config_vars.venv_relative_python + config_vars.toolchain,
191 config_vars.shell + config_vars.runner + [
192 os.path.join(name, config_vars.venv_relative_python[0]),
193 config_vars.test_name
197 def _pypy_config_generator(name, major, config_vars):
199 name, config_vars.shell + config_vars.builder +
200 config_vars.builder_prefix_arguments +
201 [_pypy_pattern_function(major=major)] + [name] +
202 config_vars.venv_relative_python + config_vars.toolchain,
203 config_vars.shell + config_vars.runner +
204 [os.path.join(name, config_vars.venv_relative_python[0])])
207 def _python_pattern_function(major, minor, bits):
208 # Bit-ness is handled by the test machine's environment
211 return '/c/Python{major}{minor}/python.exe'.format(major=major,
215 return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
216 major=major, minor=minor, bits=bits)
218 return 'python{major}.{minor}'.format(major=major, minor=minor)
221 def _pypy_pattern_function(major):
227 raise ValueError("Unknown PyPy major version")
230 class CLanguage(object):
    def __init__(self, make_target, test_lang):
        # make_target: make-target suffix ('c' / 'cxx') used to form
        # targets such as 'buildtests_%s' in make_targets().
        self.make_target = make_target
        # Cached platform name ('windows', 'linux', 'mac', ...).
        self.platform = platform_string()
        # Language key matched against tests.json entries (see get_c_tests).
        self.test_lang = test_lang
237 def configure(self, config, args):
240 self._make_options = []
241 self._use_cmake = True
242 if self.platform == 'windows':
243 _check_compiler(self.args.compiler, [
244 'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
247 _check_arch(self.args.arch, ['default', 'x64', 'x86'])
248 if self.args.compiler == 'cmake_vs2019':
249 cmake_generator_option = 'Visual Studio 16 2019'
250 elif self.args.compiler == 'cmake_vs2017':
251 cmake_generator_option = 'Visual Studio 15 2017'
253 cmake_generator_option = 'Visual Studio 14 2015'
254 cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
255 self._cmake_configure_extra_args = [
256 '-G', cmake_generator_option, '-A', cmake_arch_option
259 if self.platform == 'linux':
260 # Allow all the known architectures. _check_arch_option has already checked that we're not doing
261 # something illegal when not running under docker.
262 _check_arch(self.args.arch, ['default', 'x64', 'x86'])
264 _check_arch(self.args.arch, ['default'])
266 self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
267 self.args.use_docker, self.args.compiler)
269 if self.args.arch == 'x86':
270 # disable boringssl asm optimizations when on x86
271 # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
272 self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')
274 if args.iomgr_platform == "uv":
275 cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
277 cflags += subprocess.check_output(
278 ['pkg-config', '--cflags', 'libuv']).strip() + ' '
279 except (subprocess.CalledProcessError, OSError):
282 ldflags = subprocess.check_output(
283 ['pkg-config', '--libs', 'libuv']).strip() + ' '
284 except (subprocess.CalledProcessError, OSError):
286 self._make_options += [
287 'EXTRA_CPPFLAGS={}'.format(cflags),
288 'EXTRA_LDLIBS={}'.format(ldflags)
291 def test_specs(self):
293 binaries = get_c_tests(self.args.travis, self.test_lang)
294 for target in binaries:
295 if self._use_cmake and target.get('boringssl', False):
296 # cmake doesn't build boringssl tests
298 auto_timeout_scaling = target.get('auto_timeout_scaling', True)
299 polling_strategies = (_POLLING_STRATEGIES.get(
300 self.platform, ['all']) if target.get('uses_polling', True) else
302 if self.args.iomgr_platform == 'uv':
303 polling_strategies = ['all']
304 for polling_strategy in polling_strategies:
306 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
307 _ROOT + '/src/core/tsi/test_creds/ca.pem',
308 'GRPC_POLL_STRATEGY':
313 resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
315 env['GRPC_DNS_RESOLVER'] = resolver
316 shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
317 if polling_strategy in target.get('excluded_poll_engines', []):
321 if auto_timeout_scaling:
322 config = self.args.config
323 if ('asan' in config or config == 'msan' or
324 config == 'tsan' or config == 'ubsan' or
325 config == 'helgrind' or config == 'memcheck'):
326 # Scale overall test timeout if running under various sanitizers.
327 # scaling value is based on historical data analysis
330 if self.config.build_config in target['exclude_configs']:
332 if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
334 if self.platform == 'windows':
335 binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
336 self.config.build_config], target['name'])
339 binary = 'cmake/build/%s' % target['name']
341 binary = 'bins/%s/%s' % (self.config.build_config,
343 cpu_cost = target['cpu_cost']
344 if cpu_cost == 'capacity':
345 cpu_cost = multiprocessing.cpu_count()
346 if os.path.isfile(binary):
347 list_test_command = None
348 filter_test_command = None
350 # these are the flag defined by gtest and benchmark framework to list
351 # and filter test runs. We use them to split each individual test
352 # into its own JobSpec, and thus into its own process.
353 if 'benchmark' in target and target['benchmark']:
354 with open(os.devnull, 'w') as fnull:
355 tests = subprocess.check_output(
356 [binary, '--benchmark_list_tests'],
358 for line in tests.split('\n'):
360 if not test: continue
362 '--benchmark_filter=%s$' % test
365 self.config.job_spec(
368 (' '.join(cmdline), shortname_ext),
370 timeout_seconds=target.get(
372 _DEFAULT_TIMEOUT_SECONDS) *
375 elif 'gtest' in target and target['gtest']:
376 # here we parse the output of --gtest_list_tests to build up a complete
377 # list of the tests contained in a binary for each test, we then
378 # add a job to run, filtering for just that test.
379 with open(os.devnull, 'w') as fnull:
380 tests = subprocess.check_output(
381 [binary, '--gtest_list_tests'], stderr=fnull)
383 for line in tests.split('\n'):
385 if i >= 0: line = line[:i]
386 if not line: continue
390 assert base is not None
391 assert line[1] == ' '
392 test = base + line.strip()
394 '--gtest_filter=%s' % test
397 self.config.job_spec(
400 (' '.join(cmdline), shortname_ext),
402 timeout_seconds=target.get(
404 _DEFAULT_TIMEOUT_SECONDS) *
408 cmdline = [binary] + target['args']
409 shortname = target.get(
411 ' '.join(pipes.quote(arg) for arg in cmdline))
412 shortname += shortname_ext
414 self.config.job_spec(
418 flaky=target.get('flaky', False),
419 timeout_seconds=target.get(
421 _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
423 elif self.args.regex == '.*' or self.platform == 'windows':
424 print('\nWARNING: binary not found, skipping', binary)
427 def make_targets(self):
428 if self.platform == 'windows':
429 # don't build tools on windows just yet
430 return ['buildtests_%s' % self.make_target]
432 'buildtests_%s' % self.make_target,
433 'tools_%s' % self.make_target, 'check_epollexclusive'
436 def make_options(self):
437 return self._make_options
439 def pre_build_steps(self):
440 if self.platform == 'windows':
441 return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
442 self._cmake_configure_extra_args]
443 elif self._use_cmake:
444 return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
445 self._cmake_configure_extra_args]
449 def build_steps(self):
452 def post_tests_steps(self):
453 if self.platform == 'windows':
456 return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
458 def makefile_name(self):
460 return 'cmake/build/Makefile'
464 def _clang_cmake_configure_extra_args(self, version_suffix=''):
466 '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
467 '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
470 def _compiler_options(self, use_docker, compiler):
471 """Returns docker distro and cmake configure args to use for given compiler."""
472 if not use_docker and not _is_use_docker_child():
473 # if not running under docker, we cannot ensure the right compiler version will be used,
474 # so we only allow the non-specific choices.
475 _check_compiler(compiler, ['default', 'cmake'])
477 if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
478 return ('jessie', [])
479 elif compiler == 'gcc5.3':
480 return ('ubuntu1604', [])
481 elif compiler == 'gcc7.4':
482 return ('ubuntu1804', [])
483 elif compiler == 'gcc8.3':
484 return ('buster', [])
485 elif compiler == 'gcc_musl':
486 return ('alpine', [])
487 elif compiler == 'clang3.6':
488 return ('ubuntu1604',
489 self._clang_cmake_configure_extra_args(
490 version_suffix='-3.6'))
491 elif compiler == 'clang3.7':
492 return ('ubuntu1604',
493 self._clang_cmake_configure_extra_args(
494 version_suffix='-3.7'))
496 raise Exception('Compiler %s not supported.' % compiler)
498 def dockerfile_dir(self):
499 return 'tools/dockerfile/test/cxx_%s_%s' % (
500 self._docker_distro, _docker_arch_suffix(self.args.arch))
503 return self.make_target
506 # This tests Node on grpc/grpc-node and will become the standard for Node testing
507 class RemoteNodeLanguage(object):
510 self.platform = platform_string()
512 def configure(self, config, args):
515 # Note: electron ABI only depends on major and minor version, so that's all
516 # we should specify in the compiler argument
517 _check_compiler(self.args.compiler, [
518 'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
519 'electron1.3', 'electron1.6'
521 if self.args.compiler == 'default':
522 self.runtime = 'node'
523 self.node_version = '8'
525 if self.args.compiler.startswith('electron'):
526 self.runtime = 'electron'
527 self.node_version = self.args.compiler[8:]
529 self.runtime = 'node'
530 # Take off the word "node"
531 self.node_version = self.args.compiler[4:]
533 # TODO: update with Windows/electron scripts when available for grpc/grpc-node
534 def test_specs(self):
535 if self.platform == 'windows':
537 self.config.job_spec(
538 ['tools\\run_tests\\helper_scripts\\run_node.bat'])
542 self.config.job_spec(
543 ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
545 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
548 def pre_build_steps(self):
551 def make_targets(self):
554 def make_options(self):
557 def build_steps(self):
560 def post_tests_steps(self):
563 def makefile_name(self):
566 def dockerfile_dir(self):
567 return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
574 class Php7Language(object):
576 def configure(self, config, args):
579 _check_compiler(self.args.compiler, ['default'])
580 self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
582 def test_specs(self):
584 self.config.job_spec(['src/php/bin/run_tests.sh'],
585 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
588 def pre_build_steps(self):
591 def make_targets(self):
592 return ['static_c', 'shared_c']
    def make_options(self):
        # Extra make flags recorded in configure()
        # (EMBED_OPENSSL=true, EMBED_ZLIB=true).
        return self._make_options
597 def build_steps(self):
598 return [['tools/run_tests/helper_scripts/build_php.sh']]
600 def post_tests_steps(self):
601 return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
603 def makefile_name(self):
606 def dockerfile_dir(self):
607 return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
615 collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
616 """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
619 class PythonLanguage(object):
622 'native': 'src/python/grpcio_tests/tests/tests.json',
623 'gevent': 'src/python/grpcio_tests/tests/tests.json',
624 'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
629 'asyncio': 'test_aio',
632 def configure(self, config, args):
635 self.pythons = self._get_pythons(self.args)
637 def test_specs(self):
638 # load list of known test suites
639 with open(self._TEST_SPECS_FILE[
640 self.args.iomgr_platform]) as tests_json_file:
641 tests_json = json.load(tests_json_file)
642 environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
643 # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
644 # designed for non-native IO manager. It has a side-effect that
645 # overrides threading settings in C-Core.
646 if args.iomgr_platform != 'native':
647 environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
649 self.config.job_spec(
651 timeout_seconds=5 * 60,
652 environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
654 shortname='%s.%s.%s' %
655 (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
657 ) for suite_name in tests_json for config in self.pythons
660 def pre_build_steps(self):
663 def make_targets(self):
666 def make_options(self):
    def build_steps(self):
        # One build command per python runtime selected in configure()
        # (each entry is a PythonConfig with a `build` command).
        return [config.build for config in self.pythons]
672 def post_tests_steps(self):
673 if self.config.build_config != 'gcov':
676 return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
678 def makefile_name(self):
681 def dockerfile_dir(self):
682 return 'tools/dockerfile/test/python_%s_%s' % (
683 self._python_manager_name(), _docker_arch_suffix(self.args.arch))
685 def _python_manager_name(self):
686 """Choose the docker image to use based on python version."""
687 if self.args.compiler in [
688 'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
690 return 'stretch_' + self.args.compiler[len('python'):]
691 elif self.args.compiler == 'python_alpine':
694 return 'stretch_default'
696 def _get_pythons(self, args):
697 """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
698 if args.arch == 'x86':
707 'tools/run_tests/helper_scripts/build_python_msys2.sh')
709 builder_prefix_arguments = ['MINGW{}'.format(bits)]
710 venv_relative_python = ['Scripts/python.exe']
711 toolchain = ['mingw32']
716 'tools/run_tests/helper_scripts/build_python.sh')
718 builder_prefix_arguments = []
719 venv_relative_python = ['bin/python']
722 # Selects the corresponding testing mode.
723 # See src/python/grpcio_tests/commands.py for implementation details.
724 if args.iomgr_platform == 'native':
725 test_command = 'test_lite'
726 elif args.iomgr_platform == 'gevent':
727 test_command = 'test_gevent'
728 elif args.iomgr_platform == 'asyncio':
729 test_command = 'test_aio'
731 raise ValueError('Unsupported IO Manager platform: %s' %
734 os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
737 config_vars = _PythonConfigVars(shell, builder,
738 builder_prefix_arguments,
739 venv_relative_python, toolchain, runner,
740 test_command, args.iomgr_platform)
741 python27_config = _python_config_generator(name='py27',
745 config_vars=config_vars)
746 python35_config = _python_config_generator(name='py35',
750 config_vars=config_vars)
751 python36_config = _python_config_generator(name='py36',
755 config_vars=config_vars)
756 python37_config = _python_config_generator(name='py37',
760 config_vars=config_vars)
761 python38_config = _python_config_generator(name='py38',
765 config_vars=config_vars)
766 pypy27_config = _pypy_config_generator(name='pypy',
768 config_vars=config_vars)
769 pypy32_config = _pypy_config_generator(name='pypy3',
771 config_vars=config_vars)
773 if args.iomgr_platform == 'asyncio':
774 if args.compiler not in ('default', 'python3.6', 'python3.7',
777 'Compiler %s not supported with IO Manager platform: %s' %
778 (args.compiler, args.iomgr_platform))
780 if args.compiler == 'default':
782 if args.iomgr_platform == 'gevent':
783 # TODO(https://github.com/grpc/grpc/issues/23784) allow
784 # gevent to run on later version once issue solved.
785 return (python36_config,)
787 return (python38_config,)
789 if args.iomgr_platform == 'asyncio':
790 return (python36_config, python38_config)
791 elif os.uname()[0] == 'Darwin':
792 # NOTE(rbellevi): Testing takes significantly longer on
793 # MacOS, so we restrict the number of interpreter versions
806 elif args.compiler == 'python2.7':
807 return (python27_config,)
808 elif args.compiler == 'python3.5':
809 return (python35_config,)
810 elif args.compiler == 'python3.6':
811 return (python36_config,)
812 elif args.compiler == 'python3.7':
813 return (python37_config,)
814 elif args.compiler == 'python3.8':
815 return (python38_config,)
816 elif args.compiler == 'pypy':
817 return (pypy27_config,)
818 elif args.compiler == 'pypy3':
819 return (pypy32_config,)
820 elif args.compiler == 'python_alpine':
821 return (python27_config,)
822 elif args.compiler == 'all_the_cpythons':
831 raise Exception('Compiler %s not supported.' % args.compiler)
837 class RubyLanguage(object):
839 def configure(self, config, args):
842 _check_compiler(self.args.compiler, ['default'])
844 def test_specs(self):
846 self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
847 timeout_seconds=10 * 60,
848 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
851 'src/ruby/end2end/sig_handling_test.rb',
852 'src/ruby/end2end/channel_state_test.rb',
853 'src/ruby/end2end/channel_closing_test.rb',
854 'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
855 'src/ruby/end2end/killed_client_thread_test.rb',
856 'src/ruby/end2end/forking_client_test.rb',
857 'src/ruby/end2end/grpc_class_init_test.rb',
858 'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
859 'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
860 'src/ruby/end2end/client_memory_usage_test.rb',
861 'src/ruby/end2end/package_with_underscore_test.rb',
862 'src/ruby/end2end/graceful_sig_handling_test.rb',
863 'src/ruby/end2end/graceful_sig_stop_test.rb',
864 'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
865 'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
866 'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
867 'src/ruby/end2end/call_credentials_timeout_test.rb',
868 'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
871 self.config.job_spec(['ruby', test],
873 timeout_seconds=20 * 60,
874 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
877 def pre_build_steps(self):
878 return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
880 def make_targets(self):
883 def make_options(self):
886 def build_steps(self):
887 return [['tools/run_tests/helper_scripts/build_ruby.sh']]
889 def post_tests_steps(self):
890 return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
892 def makefile_name(self):
895 def dockerfile_dir(self):
896 return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
903 class CSharpLanguage(object):
906 self.platform = platform_string()
908 def configure(self, config, args):
911 if self.platform == 'windows':
912 _check_compiler(self.args.compiler, ['default', 'coreclr'])
913 _check_arch(self.args.arch, ['default'])
914 self._cmake_arch_option = 'x64'
916 _check_compiler(self.args.compiler, ['default', 'coreclr'])
917 self._docker_distro = 'stretch'
919 def test_specs(self):
920 with open('src/csharp/tests.json') as f:
921 tests_by_assembly = json.load(f)
923 msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
924 nunit_args = ['--labels=All', '--noresult', '--workers=1']
925 assembly_subdir = 'bin/%s' % msbuild_config
926 assembly_extension = '.exe'
928 if self.args.compiler == 'coreclr':
929 assembly_subdir += '/netcoreapp2.1'
930 runtime_cmd = ['dotnet', 'exec']
931 assembly_extension = '.dll'
933 assembly_subdir += '/net45'
934 if self.platform == 'windows':
936 elif self.platform == 'mac':
937 # mono before version 5.2 on MacOS defaults to 32bit runtime
938 runtime_cmd = ['mono', '--arch=64']
940 runtime_cmd = ['mono']
943 for assembly in six.iterkeys(tests_by_assembly):
944 assembly_file = 'src/csharp/%s/%s/%s%s' % (
945 assembly, assembly_subdir, assembly, assembly_extension)
946 if self.config.build_config != 'gcov' or self.platform != 'windows':
947 # normally, run each test as a separate process
948 for test in tests_by_assembly[assembly]:
949 cmdline = runtime_cmd + [assembly_file,
950 '--test=%s' % test] + nunit_args
952 self.config.job_spec(
954 shortname='csharp.%s' % test,
955 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
957 # For C# test coverage, run all tests from the same assembly at once
958 # using OpenCover.Console (only works on Windows).
960 'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
961 '-target:%s' % assembly_file, '-targetdir:src\\csharp',
962 '-targetargs:%s' % ' '.join(nunit_args),
963 '-filter:+[Grpc.Core]*', '-register:user',
964 '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
967 # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
968 # to prevent problems with registering the profiler.
969 run_exclusive = 1000000
971 self.config.job_spec(cmdline,
972 shortname='csharp.coverage.%s' %
974 cpu_cost=run_exclusive,
975 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
978 def pre_build_steps(self):
979 if self.platform == 'windows':
981 'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
982 self._cmake_arch_option
985 return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
987 def make_targets(self):
988 return ['grpc_csharp_ext']
990 def make_options(self):
993 def build_steps(self):
994 if self.platform == 'windows':
995 return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
997 return [['tools/run_tests/helper_scripts/build_csharp.sh']]
999 def post_tests_steps(self):
1000 if self.platform == 'windows':
1001 return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
1003 return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
1005 def makefile_name(self):
1006 if self.platform == 'windows':
1007 return 'cmake/build/%s/Makefile' % self._cmake_arch_option
1009 # no need to set x86 specific flags as run_tests.py
1010 # currently forbids x86 C# builds on both Linux and MacOS.
1011 return 'cmake/build/Makefile'
1013 def dockerfile_dir(self):
1014 return 'tools/dockerfile/test/csharp_%s_%s' % (
1015 self._docker_distro, _docker_arch_suffix(self.args.arch))
1021 class ObjCLanguage(object):
1023 def configure(self, config, args):
1024 self.config = config
1026 _check_compiler(self.args.compiler, ['default'])
1028 def test_specs(self):
1031 self.config.job_spec(
1032 ['src/objective-c/tests/build_one_example_bazel.sh'],
1033 timeout_seconds=10 * 60,
1034 shortname='ios-buildtest-example-sample',
1038 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1041 # Currently not supporting compiling as frameworks in Bazel
1043 self.config.job_spec(
1044 ['src/objective-c/tests/build_one_example.sh'],
1045 timeout_seconds=20 * 60,
1046 shortname='ios-buildtest-example-sample-frameworks',
1050 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1054 self.config.job_spec(
1055 ['src/objective-c/tests/build_one_example.sh'],
1056 timeout_seconds=20 * 60,
1057 shortname='ios-buildtest-example-switftsample',
1060 'SCHEME': 'SwiftSample',
1061 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
1064 self.config.job_spec(
1065 ['src/objective-c/tests/build_one_example_bazel.sh'],
1066 timeout_seconds=10 * 60,
1067 shortname='ios-buildtest-example-tvOS-sample',
1070 'SCHEME': 'tvOS-sample',
1071 'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
1074 # Disabled due to #20258
1075 # TODO (mxyan): Reenable this test when #20258 is resolved.
1077 # self.config.job_spec(
1078 # ['src/objective-c/tests/build_one_example_bazel.sh'],
1079 # timeout_seconds=20 * 60,
1080 # shortname='ios-buildtest-example-watchOS-sample',
1083 # 'SCHEME': 'watchOS-sample-WatchKit-App',
1084 # 'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
1085 # 'FRAMEWORKS': 'NO'
1088 self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
1089 timeout_seconds=60 * 60,
1090 shortname='ios-test-plugintest',
1092 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1094 self.config.job_spec(
1095 ['src/objective-c/tests/run_plugin_option_tests.sh'],
1096 timeout_seconds=60 * 60,
1097 shortname='ios-test-plugin-option-test',
1099 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1101 self.config.job_spec(
1102 ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
1103 timeout_seconds=20 * 60,
1104 shortname='ios-test-cfstream-tests',
1106 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1107 # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
1109 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1110 timeout_seconds=60 * 60,
1111 shortname='ios-test-unittests',
1113 environ={'SCHEME': 'UnitTests'}))
1115 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1116 timeout_seconds=60 * 60,
1117 shortname='ios-test-interoptests',
1119 environ={'SCHEME': 'InteropTests'}))
1121 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1122 timeout_seconds=60 * 60,
1123 shortname='ios-test-cronettests',
1125 environ={'SCHEME': 'CronetTests'}))
1127 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1128 timeout_seconds=30 * 60,
1129 shortname='ios-perf-test',
1131 environ={'SCHEME': 'PerfTests'}))
1133 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1134 timeout_seconds=30 * 60,
1135 shortname='ios-perf-test-posix',
1137 environ={'SCHEME': 'PerfTestsPosix'}))
1139 self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
1140 timeout_seconds=30 * 60,
1141 shortname='ios-cpp-test-cronet',
1143 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1145 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1146 timeout_seconds=60 * 60,
1147 shortname='mac-test-basictests',
1150 'SCHEME': 'MacTests',
1154 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1155 timeout_seconds=30 * 60,
1156 shortname='tvos-test-basictests',
1159 'SCHEME': 'TvTests',
1165 def pre_build_steps(self):
1168 def make_targets(self):
1171 def make_options(self):
1174 def build_steps(self):
1177 def post_tests_steps(self):
1180 def makefile_name(self):
1183 def dockerfile_dir(self):
1190 class Sanity(object):
1192 def configure(self, config, args):
1193 self.config = config
1195 _check_compiler(self.args.compiler, ['default'])
1197 def test_specs(self):
1199 with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
1200 environ = {'TEST': 'true'}
1201 if _is_use_docker_child():
1202 environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
1203 environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
1204 # sanity tests run tools/bazel wrapper concurrently
1205 # and that can result in a download/run race in the wrapper.
1206 # under docker we already have the right version of bazel
1207 # so we can just disable the wrapper.
1208 environ['DISABLE_BAZEL_WRAPPER'] = 'true'
1210 self.config.job_spec(cmd['script'].split(),
1211 timeout_seconds=30 * 60,
1213 cpu_cost=cmd.get('cpu_cost', 1))
1214 for cmd in yaml.load(f)
1217 def pre_build_steps(self):
1220 def make_targets(self):
1221 return ['run_dep_checks']
1223 def make_options(self):
1226 def build_steps(self):
1229 def post_tests_steps(self):
1232 def makefile_name(self):
1235 def dockerfile_dir(self):
1236 return 'tools/dockerfile/test/sanity'
1242 # different configurations we can run under
1243 with open('tools/run_tests/generated/configs.json') as f:
1245 (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
1248 'c++': CLanguage('cxx', 'c++'),
1249 'c': CLanguage('c', 'c'),
1250 'grpc-node': RemoteNodeLanguage(),
1251 'php7': Php7Language(),
1252 'python': PythonLanguage(),
1253 'ruby': RubyLanguage(),
1254 'csharp': CSharpLanguage(),
1255 'objc': ObjCLanguage(),
1266 def _windows_arch_option(arch):
1267 """Returns msbuild cmdline option for selected architecture."""
1268 if arch == 'default' or arch == 'x86':
1269 return '/p:Platform=Win32'
1271 return '/p:Platform=x64'
1273 print('Architecture %s not supported.' % arch)
def _check_arch_option(arch):
    """Checks that architecture option is valid; exits the process if not.

    NOTE(review): the early returns and both error branches were lost in
    extraction; restored below -- verify against the canonical file.
    """
    if platform_string() == 'windows':
        # helper exits the process itself on an unsupported arch
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            return
        elif runtime_arch == '64bit' and arch == 'x64':
            return
        elif runtime_arch == '32bit' and arch == 'x86':
            return
        else:
            print(
                'Architecture %s does not match current runtime architecture.'
                % arch)
            sys.exit(1)
    else:
        if args.arch != 'default':
            print('Architecture %s not supported on current platform.' %
                  args.arch)
            sys.exit(1)
1302 def _docker_arch_suffix(arch):
1303 """Returns suffix to dockerfile dir to use."""
1304 if arch == 'default' or arch == 'x64':
1309 print('Architecture %s not supported with current settings.' % arch)
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

    Returns:
        A positive integer or 0, the latter indicating an infinite number of
        runs.

    Raises:
        argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            raise ValueError
        return n
    # int() raises ValueError on non-numeric input; catching it narrowly
    # instead of a bare except so unrelated errors are not swallowed.
    except ValueError:
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """Parse a percentage flag value.

    Returns:
        The value as a float in the [0, 100] range.

    Raises:
        argparse.ArgumentTypeError: if the value is outside [0, 100].
        ValueError: if arg_str is not parseable as a float.
    """
    pct = float(arg_str)
    if pct > 100 or pct < 0:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    # NOTE(review): the return statement was lost in extraction; without it
    # the parser would store None for every -p value. Restored.
    return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when a and b differ by at most the given tolerance."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# ---------------------------------------------------------------------------
# Command-line interface definition for the test driver.
# NOTE(review): this listing is truncated by extraction -- many
# add_argument() calls below are missing lines (long flag names, defaults,
# 'const=' values, the full --compiler choices list), so the fragment is not
# runnable as-is. Compare against the canonical file before editing.
# ---------------------------------------------------------------------------
1347 # parse command line
1348 argp = argparse.ArgumentParser(description='Run grpc tests.')
# -c selects one of the build/run configurations loaded into _CONFIGS
1349 argp.add_argument('-c',
1351 choices=sorted(_CONFIGS.keys()),
# -n / runs_per_test: parsed by runs_per_test_type ('inf' -> 0)
1357 type=runs_per_test_type,
1358 help='A positive integer or "inf". If "inf", all tests will run in an '
1359 'infinite loop. Especially useful in combination with "-f"')
1360 argp.add_argument('-r', '--regex', default='.*', type=str)
1361 argp.add_argument('--regex_exclude', default='', type=str)
1362 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
1363 argp.add_argument('-s', '--slowdown', default=1.0, type=float)
# -p / sample_percent: validated by percent_type ([0, 100])
1364 argp.add_argument('-p',
1368 help='Run a random sample with that percentage of tests')
1369 argp.add_argument('-f',
1372 action='store_const',
1374 argp.add_argument('-t',
1377 action='store_const',
1379 argp.add_argument('--newline_on_success',
1381 action='store_const',
# -l: one or more language keys from _LANGUAGES
1383 argp.add_argument('-l',
1385 choices=sorted(_LANGUAGES.keys()),
1388 argp.add_argument('-S',
1389 '--stop_on_failure',
1391 action='store_const',
1393 argp.add_argument('--use_docker',
1395 action='store_const',
1397 help='Run all the tests under docker. That provides ' +
1398 'additional isolation and prevents the need to install ' +
1399 'language specific prerequisites. Only available on Linux.')
1403 action='store_const',
1406 'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
1410 choices=['default', 'x86', 'x64'],
1413 'Selects architecture to target. For some platforms "default" is the only supported choice.'
1445 'Selects compiler to use. Allowed values depend on the platform and language.'
1447 argp.add_argument('--iomgr_platform',
1448 choices=['native', 'uv', 'gevent', 'asyncio'],
1450 help='Selects iomgr platform to build on')
1451 argp.add_argument('--build_only',
1453 action='store_const',
1455 help='Perform all the build steps but don\'t run any tests.')
1456 argp.add_argument('--measure_cpu_costs',
1458 action='store_const',
1460 help='Measure the cpu costs of tests')
1462 '--update_submodules',
1466 'Update some submodules before building. If any are updated, also run generate_projects. '
1468 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
1470 argp.add_argument('-a', '--antagonists', default=0, type=int)
1471 argp.add_argument('-x',
1475 help='Generates a JUnit-compatible XML report')
1476 argp.add_argument('--report_suite_name',
1479 help='Test suite name to use in generated JUnit XML report')
1481 '--report_multi_target',
1484 action='store_const',
1485 help='Generate separate XML report for each test job (Looks better in UIs).'
1490 action='store_const',
1493 'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
1494 + 'Useful when running many iterations of each test (argument -n).')
1496 '--force_default_poller',
1498 action='store_const',
1500 help='Don\'t try to iterate over many polling strategies when they exist')
1502 '--force_use_pollers',
1505 help='Only use the specified comma-delimited list of polling engines. '
1506 'Example: --force_use_pollers epoll1,poll '
1507 ' (This flag has no effect if --force_default_poller flag is also used)')
1508 argp.add_argument('--max_time',
1511 help='Maximum test runtime in seconds')
1512 argp.add_argument('--bq_result_table',
1516 help='Upload test results to a specified BQ table.')
1517 args = argp.parse_args()
# Global mutable state derived from the parsed flags.
1520 shortname_to_cpu = {}
# Polling-strategy overrides: --force_default_poller empties the strategy
# table entirely; --force_use_pollers installs an explicit list for the
# current platform (the former wins when both are given).
1522 if args.force_default_poller:
1523 _POLLING_STRATEGIES = {}
1524 elif args.force_use_pollers:
1525 _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
# Propagate the flag into the jobset module, which reads it when running jobs.
1527 jobset.measure_cpu_costs = args.measure_cpu_costs
# ---------------------------------------------------------------------------
# Optional submodule update requested via --update_submodules.
# NOTE(review): several lines (the len(spec)==1 branch and the unpacking of
# submodule/branch) were lost in extraction; this fragment is indicative only.
# ---------------------------------------------------------------------------
1529 # update submodules if necessary
1530 need_to_regenerate_projects = False
1531 for spec in args.update_submodules:
# each spec is SUBMODULE_NAME[:BRANCH]
1532 spec = spec.split(':', 1)
1536 elif len(spec) == 2:
1539 cwd = 'third_party/%s' % submodule
# small closure so each git invocation runs inside the submodule checkout
1541 def git(cmd, cwd=cwd):
1542 print('in %s: git %s' % (cwd, cmd))
1543 run_shell_command('git %s' % cmd, cwd=cwd)
1546 git('checkout %s' % branch)
1547 git('pull origin %s' % branch)
# updating a submodule that carries a build-yaml generator forces a
# project regeneration below
1548 if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
1549 need_to_regenerate_projects = True
1550 if need_to_regenerate_projects:
1551 if jobset.platform_string() == 'linux':
1552 run_shell_command('tools/buildgen/generate_projects.sh')
1555 'WARNING: may need to regenerate projects, but since we are not on')
1557 ' Linux this step is being skipped. Compilation MAY fail.')
# ---------------------------------------------------------------------------
# Resolve the chosen configuration and languages; optionally re-exec the whole
# run inside docker. NOTE(review): truncated by extraction -- conditions and
# loop headers around several of these lines are missing.
# ---------------------------------------------------------------------------
1560 run_config = _CONFIGS[args.config]
1561 build_config = run_config.build_config
# NOTE(review): the guarding condition for this override was lost in
# extraction -- confirm when GRPC_TRACE is forced before relying on it.
1564 _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
1566 languages = set(_LANGUAGES[l] for l in args.language)
1568 l.configure(run_config, args)
1570 language_make_options = []
1571 if any(language.make_options() for language in languages):
1572 if not 'gcov' in args.config and len(languages) != 1:
1574 'languages with custom make options cannot be built simultaneously with other languages'
1578 # Combining make options is not clean and just happens to work. It allows C & C++ to build
1579 # together, and is only used under gcov. All other configs should build languages individually.
1580 language_make_options = list(
1582 make_option for lang in languages
1583 for make_option in lang.make_options()
# Under --use_docker the script re-invokes itself inside a container; the
# env vars set below are consumed by build_docker_and_run_tests.sh.
1588 print('Seen --use_docker flag, will run tests under docker.')
1591 'IMPORTANT: The changes you are testing need to be locally committed'
1594 'because only the committed changes in the current branch will be')
1595 print('copied to the docker environment.')
# all selected languages must share one docker image
1598 dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
1599 if len(dockerfile_dirs) > 1:
1600 print('Languages to be tested require running under different docker '
1604 dockerfile_dir = next(iter(dockerfile_dirs))
# strip --use_docker so the child invocation runs the tests directly
1606 child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
1607 run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
1610 env = os.environ.copy()
1611 env['RUN_TESTS_COMMAND'] = run_tests_cmd
1612 env['DOCKERFILE_DIR'] = dockerfile_dir
1613 env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
1615 env['XML_REPORT'] = args.xml_report
1617 env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
1619 subprocess.check_call(
1620 'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
1625 _check_arch_option(args.arch)
# Builds the JobSpec list that performs the actual compilation for one
# makefile + target set. Three paths are visible: cmake on windows, cmake on
# other platforms, and the legacy top-level Makefile.
# NOTE(review): the return statements and JobSpec constructors were lost in
# extraction; the surviving lines below are indicative only.
1628 def make_jobspec(cfg, targets, makefile='Makefile'):
1629 if platform_string() == 'windows':
# one cmake --build invocation per target, using the msbuild config name
1632 'cmake', '--build', '.', '--target',
1633 '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
1635 cwd=os.path.dirname(makefile),
1636 timeout_seconds=None) for target in targets
1639 if targets and makefile.startswith('cmake/build/'):
1640 # With cmake, we've passed all the build configuration in the pre-build step already
1643 [os.getenv('MAKE', 'make'), '-j',
1644 '%d' % args.jobs] + targets,
1646 timeout_seconds=None)
# legacy Makefile path: passes config/slowdown through make variables
1652 os.getenv('MAKE', 'make'), '-f', makefile, '-j',
1654 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
1656 'CONFIG=%s' % cfg, 'Q='
1657 ] + language_make_options +
1658 ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
1659 timeout_seconds=None)
# Accumulates per-makefile target sets across languages.
# NOTE(review): the enclosing 'for l in languages:' header and the
# make_targets dict initialization were lost in extraction.
1667 makefile = l.makefile_name()
1668 make_targets[makefile] = make_targets.get(makefile, set()).union(
1669 set(l.make_targets()))
def build_step_environ(cfg):
    """Builds the environment dict passed to build-step JobSpecs.

    Args:
        cfg: build config name (a key of _MSBUILD_CONFIG on windows configs).

    Returns:
        Dict with 'CONFIG' always set and 'MSBUILD_CONFIG' added when the
        config has an msbuild mapping.
    """
    environ = {'CONFIG': cfg}
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
    # NOTE(review): the guard and return were lost in extraction; restored.
    if msbuild_cfg:
        environ['MSBUILD_CONFIG'] = msbuild_cfg
    return environ
# ---------------------------------------------------------------------------
# Assemble the ordered build pipeline: pre-build steps, make/cmake commands,
# per-language build steps, then post-test steps. NOTE(review): the
# build_steps list initialization and the enclosing 'for l in languages'
# headers were lost in extraction.
# ---------------------------------------------------------------------------
1682 jobset.JobSpec(cmdline,
1683 environ=build_step_environ(build_config),
1684 timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
1687 for cmdline in l.pre_build_steps()))
1689 make_commands = itertools.chain.from_iterable(
1690 make_jobspec(build_config, list(targets), makefile)
1691 for (makefile, targets) in make_targets.items())
# set() de-duplicates identical make invocations across languages
1692 build_steps.extend(set(make_commands))
1695 jobset.JobSpec(cmdline,
1696 environ=build_step_environ(build_config),
1697 timeout_seconds=None)
1699 for cmdline in l.build_steps()))
1701 post_tests_steps = list(
1703 jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
1705 for cmdline in l.post_tests_steps()))
# convenience aliases for the flags used by the run loop below
1706 runs_per_test = args.runs_per_test
1707 forever = args.forever
# Best-effort shutdown of a previously running (legacy) port server via its
# HTTP endpoints. NOTE(review): the try/except scaffolding and the version
# comparison between the two requests were lost in extraction; only the two
# HTTP calls survive here.
1710 def _shut_down_legacy_server(legacy_server_port):
1713 urllib.request.urlopen('http://localhost:%d/version_number' %
1719 urllib.request.urlopen('http://localhost:%d/quitquitquit' %
1720 legacy_server_port).read()
1723 def _calculate_num_runs_failures(list_of_results):
1724 """Calculate number of runs and failures for a particular test.
1727 list_of_results: (List) of JobResult object.
1729 A tuple of total number of runs and failures.
1731 num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
1733 for jobresult in list_of_results:
1734 if jobresult.retries > 0:
1735 num_runs += jobresult.retries
1736 if jobresult.num_failures > 0:
1737 num_failures += jobresult.num_failures
1738 return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Sentinel categories of failure reported by _build_and_run.

    Each attribute is a distinct object() used only for identity comparison.
    NOTE(review): BUILD and TEST were lost in extraction; restored to match
    the BuildAndRunError.BUILD / .TEST references later in this file.
    """

    BUILD = object()
    TEST = object()
    POST_TEST = object()
def _has_epollexclusive():
    """Probes whether the runtime supports EPOLLEXCLUSIVE.

    Runs the check_epollexclusive helper binary built for the current config.

    Returns:
        True only when the binary exists and exits successfully.
    """
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        return False
    # NOTE(review): the return statements were lost in extraction; restored.
    try:
        subprocess.check_call(binary)
        return True
    except subprocess.CalledProcessError as e:
        return False
    except OSError as e:
        # For languages other than C and Windows the binary won't exist
        return False
# ---------------------------------------------------------------------------
# One full build+test pass. NOTE(review): heavily truncated by extraction --
# the signature's remaining parameters, several branches (build_only,
# infinite-run loop, finally block) and the final return are missing; the
# surviving lines are indicative only.
# ---------------------------------------------------------------------------
1763 # returns a list of things that failed (or an empty list on success)
1764 def _build_and_run(check_cancelled,
1768 """Do one pass of building & running tests."""
1769 # build latest sequentially
1770 num_failures, resultset = jobset.run(build_steps,
1772 stop_on_failure=True,
1773 newline_on_success=newline_on_success,
# a build failure aborts the pass immediately
1776 return [BuildAndRunError.BUILD]
1780 report_utils.render_junit_xml_report(
1781 resultset, xml_report, suite_name=args.report_suite_name)
# drop the 'epollex' polling strategy when the runtime lacks EPOLLEXCLUSIVE
1784 if not args.travis and not _has_epollexclusive() and platform_string(
1785 ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
1787 print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
1788 _POLLING_STRATEGIES[platform_string()].remove('epollex')
# optional CPU-load antagonist processes, one per --antagonists
1792 subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
1793 for _ in range(0, args.antagonists)
1795 start_port_server.start_port_server()
1797 num_test_failures = 0
1799 infinite_runs = runs_per_test == 0
# collect every test spec matching --regex and not --regex_exclude
1800 one_run = set(spec for language in languages
1801 for spec in language.test_specs()
1802 if (re.search(args.regex, spec.shortname) and
1803 (args.regex_exclude == '' or
1804 not re.search(args.regex_exclude, spec.shortname))))
1805 # When running on travis, we want out test runs to be as similar as possible
1806 # for reproducibility purposes.
1807 if args.travis and args.max_time <= 0:
1808 massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
1810 # whereas otherwise, we want to shuffle things up to give all tests a
1812 massaged_one_run = list(
1813 one_run) # random.sample needs an indexable seq.
1814 num_jobs = len(massaged_one_run)
1815 # for a random sample, get as many as indicated by the 'sample_percent'
1816 # argument. By default this arg is 100, resulting in a shuffle of all
1818 sample_size = int(num_jobs * args.sample_percent / 100.0)
1819 massaged_one_run = random.sample(massaged_one_run, sample_size)
1820 if not isclose(args.sample_percent, 100.0):
1821 assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
1822 print("Running %d tests out of %d (~%d%%)" %
1823 (sample_size, num_jobs, args.sample_percent))
1825 assert len(massaged_one_run
1826 ) > 0, 'Must have at least one test for a -n inf run'
# repeat the run set forever (-n inf) or runs_per_test times
1827 runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
1828 else itertools.repeat(massaged_one_run, runs_per_test))
1829 all_runs = itertools.chain.from_iterable(runs_sequence)
1831 if args.quiet_success:
1834 'Running tests quietly, only failing tests will be reported',
1836 num_test_failures, resultset = jobset.run(
1839 newline_on_success=newline_on_success,
1842 maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
1843 stop_on_failure=args.stop_on_failure,
1844 quiet_success=args.quiet_success,
1845 max_time=args.max_time)
# classify each test: all runs failed -> FAILED, some -> FLAKE
1847 for k, v in sorted(resultset.items()):
1848 num_runs, num_failures = _calculate_num_runs_failures(v)
1849 if num_failures > 0:
1850 if num_failures == num_runs: # what about infinite_runs???
1851 jobset.message('FAILED', k, do_newline=True)
1853 jobset.message('FLAKE',
1854 '%s [%d/%d runs flaked]' %
1855 (k, num_failures, num_runs),
1858 for antagonist in antagonists:
# optional upload of results to BigQuery
1860 if args.bq_result_table and resultset:
1861 upload_extra_fields = {
1862 'compiler': args.compiler,
1863 'config': args.config,
1864 'iomgr_platform': args.iomgr_platform,
1865 'language': args.language[
1867 ], # args.language is a list but will always have one element when uploading to BQ is enabled.
1868 'platform': platform_string()
1871 upload_results_to_bq(resultset, args.bq_result_table,
1872 upload_extra_fields)
# NameError covers the case where the BQ uploader failed to import (see the
# guarded import at the top of the file)
1873 except NameError as e:
1875 e) # It's fine to ignore since this is not critical
1876 if xml_report and resultset:
1877 report_utils.render_junit_xml_report(
1880 suite_name=args.report_suite_name,
1881 multi_target=args.report_multi_target)
# post-test steps run even on failure; their own failures are recorded
1883 number_failures, _ = jobset.run(post_tests_steps,
1885 stop_on_failure=False,
1886 newline_on_success=newline_on_success,
1891 out.append(BuildAndRunError.POST_TEST)
1892 if num_test_failures:
1893 out.append(BuildAndRunError.TEST)
1901 dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
1902 initial_time = dw.most_recent_change()
1903 have_files_changed = lambda: dw.most_recent_change() != initial_time
1904 previous_success = success
1905 errors = _build_and_run(check_cancelled=have_files_changed,
1906 newline_on_success=False,
1907 build_only=args.build_only) == 0
1908 if not previous_success and not errors:
1909 jobset.message('SUCCESS',
1910 'All tests are now passing properly',
1912 jobset.message('IDLE', 'No change detected')
1913 while not have_files_changed():
1916 errors = _build_and_run(check_cancelled=lambda: False,
1917 newline_on_success=args.newline_on_success,
1918 xml_report=args.xml_report,
1919 build_only=args.build_only)
1921 jobset.message('SUCCESS', 'All tests passed', do_newline=True)
1923 jobset.message('FAILED', 'Some tests failed', do_newline=True)
1925 if BuildAndRunError.BUILD in errors:
1927 if BuildAndRunError.TEST in errors:
1929 if BuildAndRunError.POST_TEST in errors: