2 # Copyright 2015 gRPC authors.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run tests in parallel."""
17 from __future__ import print_function
26 import multiprocessing
39 from six.moves import urllib
43 import python_utils.jobset as jobset
44 import python_utils.report_utils as report_utils
45 import python_utils.watch_dirs as watch_dirs
46 import python_utils.start_port_server as start_port_server
48 from python_utils.upload_test_results import upload_results_to_bq
50 pass # It's ok to not import because this is only necessary to upload results to BQ.
52 gcp_utils_dir = os.path.abspath(
53 os.path.join(os.path.dirname(__file__), '../gcp/utils'))
54 sys.path.append(gcp_utils_dir)
56 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
59 _FORCE_ENVIRON_FOR_WRAPPERS = {
60 'GRPC_VERBOSITY': 'DEBUG',
63 _POLLING_STRATEGIES = {
64 'linux': ['epollex', 'epoll1', 'poll'],
def platform_string():
    """Return the current platform name as reported by jobset.

    Values used elsewhere in this file include 'linux', 'windows' and 'mac'.
    """
    return jobset.platform_string()
73 _DEFAULT_TIMEOUT_SECONDS = 5 * 60
74 _PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
77 def run_shell_command(cmd, env=None, cwd=None):
79 subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
80 except subprocess.CalledProcessError as e:
82 "Error while running command '%s'. Exit status %d. Output:\n%s",
83 e.cmd, e.returncode, e.output)
87 def max_parallel_tests_for_current_platform():
88 # Too much test parallelization has only been seen to be a problem
90 if jobset.platform_string() == 'windows':
95 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
101 timeout_multiplier=1,
103 iomgr_platform='native'):
106 self.build_config = config
107 self.environ = environ
108 self.environ['CONFIG'] = config
109 self.tool_prefix = tool_prefix
110 self.timeout_multiplier = timeout_multiplier
111 self.iomgr_platform = iomgr_platform
115 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
120 """Construct a jobset.JobSpec for a test under this config
123 cmdline: a list of strings specifying the command line the test
126 actual_environ = self.environ.copy()
127 for k, v in environ.items():
128 actual_environ[k] = v
129 if not flaky and shortname and shortname in flaky_tests:
131 if shortname in shortname_to_cpu:
132 cpu_cost = shortname_to_cpu[shortname]
133 return jobset.JobSpec(
134 cmdline=self.tool_prefix + cmdline,
136 environ=actual_environ,
138 timeout_seconds=(self.timeout_multiplier *
139 timeout_seconds if timeout_seconds else None),
140 flake_retries=4 if flaky or args.allow_flakes else 0,
141 timeout_retries=1 if flaky or args.allow_flakes else 0)
144 def get_c_tests(travis, test_lang):
146 platforms_str = 'ci_platforms' if travis else 'platforms'
147 with open('tools/run_tests/generated/tests.json') as f:
151 if tgt['language'] == test_lang and platform_string() in
152 tgt[platforms_str] and not (travis and tgt['flaky'])
156 def _check_compiler(compiler, supported_compilers):
157 if compiler not in supported_compilers:
158 raise Exception('Compiler %s not supported (on this platform).' %
162 def _check_arch(arch, supported_archs):
163 if arch not in supported_archs:
164 raise Exception('Architecture %s not supported.' % arch)
167 def _is_use_docker_child():
168 """Returns True if running running as a --use_docker child."""
169 return True if os.getenv('RUN_TESTS_COMMAND') else False
172 _PythonConfigVars = collections.namedtuple('_ConfigVars', [
175 'builder_prefix_arguments',
176 'venv_relative_python',
184 def _python_config_generator(name, major, minor, bits, config_vars):
185 name += '_' + config_vars.iomgr_platform
187 name, config_vars.shell + config_vars.builder +
188 config_vars.builder_prefix_arguments +
189 [_python_pattern_function(major=major, minor=minor, bits=bits)] +
190 [name] + config_vars.venv_relative_python + config_vars.toolchain,
191 config_vars.shell + config_vars.runner + [
192 os.path.join(name, config_vars.venv_relative_python[0]),
193 config_vars.test_name
197 def _pypy_config_generator(name, major, config_vars):
199 name, config_vars.shell + config_vars.builder +
200 config_vars.builder_prefix_arguments +
201 [_pypy_pattern_function(major=major)] + [name] +
202 config_vars.venv_relative_python + config_vars.toolchain,
203 config_vars.shell + config_vars.runner +
204 [os.path.join(name, config_vars.venv_relative_python[0])])
207 def _python_pattern_function(major, minor, bits):
208 # Bit-ness is handled by the test machine's environment
211 return '/c/Python{major}{minor}/python.exe'.format(major=major,
215 return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
216 major=major, minor=minor, bits=bits)
218 return 'python{major}.{minor}'.format(major=major, minor=minor)
221 def _pypy_pattern_function(major):
227 raise ValueError("Unknown PyPy major version")
230 class CLanguage(object):
    def __init__(self, make_target, test_lang):
        """Args:
            make_target: base name used to derive make targets
                (e.g. 'buildtests_%s' % make_target in make_targets()).
            test_lang: value matched against the 'language' field of
                entries in tools/run_tests/generated/tests.json.
        """
        self.make_target = make_target
        # Platform name from jobset, e.g. 'linux'/'windows'/'mac'.
        self.platform = platform_string()
        self.test_lang = test_lang
237 def configure(self, config, args):
240 self._make_options = []
241 self._use_cmake = True
242 if self.platform == 'windows':
243 _check_compiler(self.args.compiler, [
244 'default', 'cmake', 'cmake_vs2015', 'cmake_vs2017',
247 _check_arch(self.args.arch, ['default', 'x64', 'x86'])
248 if self.args.compiler == 'cmake_vs2019':
249 cmake_generator_option = 'Visual Studio 16 2019'
250 elif self.args.compiler == 'cmake_vs2017':
251 cmake_generator_option = 'Visual Studio 15 2017'
253 cmake_generator_option = 'Visual Studio 14 2015'
254 cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
255 self._cmake_configure_extra_args = [
256 '-G', cmake_generator_option, '-A', cmake_arch_option
259 if self.platform == 'linux':
260 # Allow all the known architectures. _check_arch_option has already checked that we're not doing
261 # something illegal when not running under docker.
262 _check_arch(self.args.arch, ['default', 'x64', 'x86'])
264 _check_arch(self.args.arch, ['default'])
266 self._docker_distro, self._cmake_configure_extra_args = self._compiler_options(
267 self.args.use_docker, self.args.compiler)
269 if self.args.arch == 'x86':
270 # disable boringssl asm optimizations when on x86
271 # see https://github.com/grpc/grpc/blob/b5b8578b3f8b4a9ce61ed6677e19d546e43c5c68/tools/run_tests/artifacts/artifact_targets.py#L253
272 self._cmake_configure_extra_args.append('-DOPENSSL_NO_ASM=ON')
274 if args.iomgr_platform == "uv":
275 cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
277 cflags += subprocess.check_output(
278 ['pkg-config', '--cflags', 'libuv']).strip() + ' '
279 except (subprocess.CalledProcessError, OSError):
282 ldflags = subprocess.check_output(
283 ['pkg-config', '--libs', 'libuv']).strip() + ' '
284 except (subprocess.CalledProcessError, OSError):
286 self._make_options += [
287 'EXTRA_CPPFLAGS={}'.format(cflags),
288 'EXTRA_LDLIBS={}'.format(ldflags)
291 def test_specs(self):
293 binaries = get_c_tests(self.args.travis, self.test_lang)
294 for target in binaries:
295 if self._use_cmake and target.get('boringssl', False):
296 # cmake doesn't build boringssl tests
298 auto_timeout_scaling = target.get('auto_timeout_scaling', True)
299 polling_strategies = (_POLLING_STRATEGIES.get(
300 self.platform, ['all']) if target.get('uses_polling', True) else
302 if self.args.iomgr_platform == 'uv':
303 polling_strategies = ['all']
304 for polling_strategy in polling_strategies:
306 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
307 _ROOT + '/src/core/tsi/test_creds/ca.pem',
308 'GRPC_POLL_STRATEGY':
313 resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
315 env['GRPC_DNS_RESOLVER'] = resolver
316 shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
317 if polling_strategy in target.get('excluded_poll_engines', []):
321 if auto_timeout_scaling:
322 config = self.args.config
323 if ('asan' in config or config == 'msan' or
324 config == 'tsan' or config == 'ubsan' or
325 config == 'helgrind' or config == 'memcheck'):
326 # Scale overall test timeout if running under various sanitizers.
327 # scaling value is based on historical data analysis
330 if self.config.build_config in target['exclude_configs']:
332 if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
334 if self.platform == 'windows':
335 binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[
336 self.config.build_config], target['name'])
339 binary = 'cmake/build/%s' % target['name']
341 binary = 'bins/%s/%s' % (self.config.build_config,
343 cpu_cost = target['cpu_cost']
344 if cpu_cost == 'capacity':
345 cpu_cost = multiprocessing.cpu_count()
346 if os.path.isfile(binary):
347 list_test_command = None
348 filter_test_command = None
350 # these are the flag defined by gtest and benchmark framework to list
351 # and filter test runs. We use them to split each individual test
352 # into its own JobSpec, and thus into its own process.
353 if 'benchmark' in target and target['benchmark']:
354 with open(os.devnull, 'w') as fnull:
355 tests = subprocess.check_output(
356 [binary, '--benchmark_list_tests'],
358 for line in tests.decode().split('\n'):
363 '--benchmark_filter=%s$' % test
366 self.config.job_spec(
369 (' '.join(cmdline), shortname_ext),
371 timeout_seconds=target.get(
373 _DEFAULT_TIMEOUT_SECONDS) *
376 elif 'gtest' in target and target['gtest']:
377 # here we parse the output of --gtest_list_tests to build up a complete
378 # list of the tests contained in a binary for each test, we then
379 # add a job to run, filtering for just that test.
380 with open(os.devnull, 'w') as fnull:
381 tests = subprocess.check_output(
382 [binary, '--gtest_list_tests'], stderr=fnull)
384 for line in tests.decode().split('\n'):
393 assert base is not None
394 assert line[1] == ' '
395 test = base + line.strip()
397 '--gtest_filter=%s' % test
400 self.config.job_spec(
403 (' '.join(cmdline), shortname_ext),
405 timeout_seconds=target.get(
407 _DEFAULT_TIMEOUT_SECONDS) *
411 cmdline = [binary] + target['args']
412 shortname = target.get(
414 ' '.join(pipes.quote(arg) for arg in cmdline))
415 shortname += shortname_ext
417 self.config.job_spec(
421 flaky=target.get('flaky', False),
422 timeout_seconds=target.get(
424 _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
426 elif self.args.regex == '.*' or self.platform == 'windows':
427 print('\nWARNING: binary not found, skipping', binary)
430 def make_targets(self):
431 if self.platform == 'windows':
432 # don't build tools on windows just yet
433 return ['buildtests_%s' % self.make_target]
435 'buildtests_%s' % self.make_target,
436 'tools_%s' % self.make_target, 'check_epollexclusive'
    def make_options(self):
        # Extra make arguments accumulated in configure() (e.g. libuv
        # EXTRA_CPPFLAGS/EXTRA_LDLIBS when iomgr_platform is 'uv').
        return self._make_options
442 def pre_build_steps(self):
443 if self.platform == 'windows':
444 return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat'] +
445 self._cmake_configure_extra_args]
446 elif self._use_cmake:
447 return [['tools/run_tests/helper_scripts/pre_build_cmake.sh'] +
448 self._cmake_configure_extra_args]
452 def build_steps(self):
455 def post_tests_steps(self):
456 if self.platform == 'windows':
459 return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
461 def makefile_name(self):
463 return 'cmake/build/Makefile'
467 def _clang_cmake_configure_extra_args(self, version_suffix=''):
469 '-DCMAKE_C_COMPILER=clang%s' % version_suffix,
470 '-DCMAKE_CXX_COMPILER=clang++%s' % version_suffix,
473 def _compiler_options(self, use_docker, compiler):
474 """Returns docker distro and cmake configure args to use for given compiler."""
475 if not use_docker and not _is_use_docker_child():
476 # if not running under docker, we cannot ensure the right compiler version will be used,
477 # so we only allow the non-specific choices.
478 _check_compiler(compiler, ['default', 'cmake'])
480 if compiler == 'gcc4.9' or compiler == 'default' or compiler == 'cmake':
481 return ('jessie', [])
482 elif compiler == 'gcc5.3':
483 return ('ubuntu1604', [])
484 elif compiler == 'gcc7.4':
485 return ('ubuntu1804', [])
486 elif compiler == 'gcc8.3':
487 return ('buster', [])
488 elif compiler == 'gcc_musl':
489 return ('alpine', [])
490 elif compiler == 'clang3.6':
491 return ('ubuntu1604',
492 self._clang_cmake_configure_extra_args(
493 version_suffix='-3.6'))
494 elif compiler == 'clang3.7':
495 return ('ubuntu1604',
496 self._clang_cmake_configure_extra_args(
497 version_suffix='-3.7'))
499 raise Exception('Compiler %s not supported.' % compiler)
    def dockerfile_dir(self):
        # Image directory is keyed on the docker distro chosen by
        # _compiler_options() during configure(), plus the arch suffix.
        return 'tools/dockerfile/test/cxx_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))
506 return self.make_target
509 # This tests Node on grpc/grpc-node and will become the standard for Node testing
510 class RemoteNodeLanguage(object):
513 self.platform = platform_string()
515 def configure(self, config, args):
518 # Note: electron ABI only depends on major and minor version, so that's all
519 # we should specify in the compiler argument
520 _check_compiler(self.args.compiler, [
521 'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
522 'electron1.3', 'electron1.6'
524 if self.args.compiler == 'default':
525 self.runtime = 'node'
526 self.node_version = '8'
528 if self.args.compiler.startswith('electron'):
529 self.runtime = 'electron'
530 self.node_version = self.args.compiler[8:]
532 self.runtime = 'node'
533 # Take off the word "node"
534 self.node_version = self.args.compiler[4:]
536 # TODO: update with Windows/electron scripts when available for grpc/grpc-node
537 def test_specs(self):
538 if self.platform == 'windows':
540 self.config.job_spec(
541 ['tools\\run_tests\\helper_scripts\\run_node.bat'])
545 self.config.job_spec(
546 ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
548 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
551 def pre_build_steps(self):
554 def make_targets(self):
557 def make_options(self):
560 def build_steps(self):
563 def post_tests_steps(self):
566 def makefile_name(self):
569 def dockerfile_dir(self):
570 return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
577 class Php7Language(object):
579 def configure(self, config, args):
582 _check_compiler(self.args.compiler, ['default'])
583 self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
585 def test_specs(self):
587 self.config.job_spec(['src/php/bin/run_tests.sh'],
588 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
591 def pre_build_steps(self):
    def make_targets(self):
        # Core C library targets built ahead of the PHP build step.
        return ['static_c', 'shared_c']
    def make_options(self):
        # Set in configure(): ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true'].
        return self._make_options
    def build_steps(self):
        # Single shell script drives the PHP extension build.
        return [['tools/run_tests/helper_scripts/build_php.sh']]
    def post_tests_steps(self):
        # Cleanup/reporting script run after the PHP test suite.
        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
606 def makefile_name(self):
609 def dockerfile_dir(self):
610 return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
618 collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
619 """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
622 class PythonLanguage(object):
625 'native': 'src/python/grpcio_tests/tests/tests.json',
626 'gevent': 'src/python/grpcio_tests/tests/tests.json',
627 'asyncio': 'src/python/grpcio_tests/tests_aio/tests.json',
632 'asyncio': 'test_aio',
635 def configure(self, config, args):
638 self.pythons = self._get_pythons(self.args)
640 def test_specs(self):
641 # load list of known test suites
642 with open(self._TEST_SPECS_FILE[
643 self.args.iomgr_platform]) as tests_json_file:
644 tests_json = json.load(tests_json_file)
645 environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
646 # TODO(https://github.com/grpc/grpc/issues/21401) Fork handlers is not
647 # designed for non-native IO manager. It has a side-effect that
648 # overrides threading settings in C-Core.
649 if args.iomgr_platform != 'native':
650 environment['GRPC_ENABLE_FORK_SUPPORT'] = '0'
652 self.config.job_spec(
654 timeout_seconds=8 * 60,
655 environ=dict(GRPC_PYTHON_TESTRUNNER_FILTER=str(suite_name),
657 shortname='%s.%s.%s' %
658 (config.name, self._TEST_FOLDER[self.args.iomgr_platform],
660 ) for suite_name in tests_json for config in self.pythons
663 def pre_build_steps(self):
666 def make_targets(self):
669 def make_options(self):
    def build_steps(self):
        # One build command per python runtime selected by _get_pythons().
        return [config.build for config in self.pythons]
675 def post_tests_steps(self):
676 if self.config.build_config != 'gcov':
679 return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
681 def makefile_name(self):
    def dockerfile_dir(self):
        # Image keyed on python manager name (version-specific, see
        # _python_manager_name()) and the docker arch suffix.
        return 'tools/dockerfile/test/python_%s_%s' % (
            self._python_manager_name(), _docker_arch_suffix(self.args.arch))
688 def _python_manager_name(self):
689 """Choose the docker image to use based on python version."""
690 if self.args.compiler in [
691 'python2.7', 'python3.5', 'python3.6', 'python3.7', 'python3.8'
693 return 'stretch_' + self.args.compiler[len('python'):]
694 elif self.args.compiler == 'python_alpine':
697 return 'stretch_default'
699 def _get_pythons(self, args):
700 """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
701 if args.arch == 'x86':
710 'tools/run_tests/helper_scripts/build_python_msys2.sh')
712 builder_prefix_arguments = ['MINGW{}'.format(bits)]
713 venv_relative_python = ['Scripts/python.exe']
714 toolchain = ['mingw32']
719 'tools/run_tests/helper_scripts/build_python.sh')
721 builder_prefix_arguments = []
722 venv_relative_python = ['bin/python']
725 # Selects the corresponding testing mode.
726 # See src/python/grpcio_tests/commands.py for implementation details.
727 if args.iomgr_platform == 'native':
728 test_command = 'test_lite'
729 elif args.iomgr_platform == 'gevent':
730 test_command = 'test_gevent'
731 elif args.iomgr_platform == 'asyncio':
732 test_command = 'test_aio'
734 raise ValueError('Unsupported IO Manager platform: %s' %
737 os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
740 config_vars = _PythonConfigVars(shell, builder,
741 builder_prefix_arguments,
742 venv_relative_python, toolchain, runner,
743 test_command, args.iomgr_platform)
744 python27_config = _python_config_generator(name='py27',
748 config_vars=config_vars)
749 python35_config = _python_config_generator(name='py35',
753 config_vars=config_vars)
754 python36_config = _python_config_generator(name='py36',
758 config_vars=config_vars)
759 python37_config = _python_config_generator(name='py37',
763 config_vars=config_vars)
764 python38_config = _python_config_generator(name='py38',
768 config_vars=config_vars)
769 pypy27_config = _pypy_config_generator(name='pypy',
771 config_vars=config_vars)
772 pypy32_config = _pypy_config_generator(name='pypy3',
774 config_vars=config_vars)
776 if args.iomgr_platform == 'asyncio':
777 if args.compiler not in ('default', 'python3.6', 'python3.7',
780 'Compiler %s not supported with IO Manager platform: %s' %
781 (args.compiler, args.iomgr_platform))
783 if args.compiler == 'default':
785 if args.iomgr_platform == 'gevent':
786 # TODO(https://github.com/grpc/grpc/issues/23784) allow
787 # gevent to run on later version once issue solved.
788 return (python36_config,)
790 return (python38_config,)
792 if args.iomgr_platform == 'asyncio':
793 return (python36_config, python38_config)
794 elif os.uname()[0] == 'Darwin':
795 # NOTE(rbellevi): Testing takes significantly longer on
796 # MacOS, so we restrict the number of interpreter versions
809 elif args.compiler == 'python2.7':
810 return (python27_config,)
811 elif args.compiler == 'python3.5':
812 return (python35_config,)
813 elif args.compiler == 'python3.6':
814 return (python36_config,)
815 elif args.compiler == 'python3.7':
816 return (python37_config,)
817 elif args.compiler == 'python3.8':
818 return (python38_config,)
819 elif args.compiler == 'pypy':
820 return (pypy27_config,)
821 elif args.compiler == 'pypy3':
822 return (pypy32_config,)
823 elif args.compiler == 'python_alpine':
824 return (python27_config,)
825 elif args.compiler == 'all_the_cpythons':
834 raise Exception('Compiler %s not supported.' % args.compiler)
840 class RubyLanguage(object):
842 def configure(self, config, args):
845 _check_compiler(self.args.compiler, ['default'])
847 def test_specs(self):
849 self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
850 timeout_seconds=10 * 60,
851 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
854 'src/ruby/end2end/sig_handling_test.rb',
855 'src/ruby/end2end/channel_state_test.rb',
856 'src/ruby/end2end/channel_closing_test.rb',
857 'src/ruby/end2end/sig_int_during_channel_watch_test.rb',
858 'src/ruby/end2end/killed_client_thread_test.rb',
859 'src/ruby/end2end/forking_client_test.rb',
860 'src/ruby/end2end/grpc_class_init_test.rb',
861 'src/ruby/end2end/multiple_killed_watching_threads_test.rb',
862 'src/ruby/end2end/load_grpc_with_gc_stress_test.rb',
863 'src/ruby/end2end/client_memory_usage_test.rb',
864 'src/ruby/end2end/package_with_underscore_test.rb',
865 'src/ruby/end2end/graceful_sig_handling_test.rb',
866 'src/ruby/end2end/graceful_sig_stop_test.rb',
867 'src/ruby/end2end/errors_load_before_grpc_lib_test.rb',
868 'src/ruby/end2end/logger_load_before_grpc_lib_test.rb',
869 'src/ruby/end2end/status_codes_load_before_grpc_lib_test.rb',
870 'src/ruby/end2end/call_credentials_timeout_test.rb',
871 'src/ruby/end2end/call_credentials_returning_bad_metadata_doesnt_kill_background_thread_test.rb'
874 self.config.job_spec(['ruby', test],
876 timeout_seconds=20 * 60,
877 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
    def pre_build_steps(self):
        # Shell script run before the Ruby build (e.g. dependency setup).
        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
883 def make_targets(self):
886 def make_options(self):
    def build_steps(self):
        # Single shell script drives the Ruby gem/extension build.
        return [['tools/run_tests/helper_scripts/build_ruby.sh']]
    def post_tests_steps(self):
        # Cleanup/reporting script run after the Ruby test suite.
        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
895 def makefile_name(self):
898 def dockerfile_dir(self):
899 return 'tools/dockerfile/test/ruby_buster_%s' % _docker_arch_suffix(
906 class CSharpLanguage(object):
909 self.platform = platform_string()
911 def configure(self, config, args):
914 if self.platform == 'windows':
915 _check_compiler(self.args.compiler, ['default', 'coreclr'])
916 _check_arch(self.args.arch, ['default'])
917 self._cmake_arch_option = 'x64'
919 _check_compiler(self.args.compiler, ['default', 'coreclr'])
920 self._docker_distro = 'stretch'
922 def test_specs(self):
923 with open('src/csharp/tests.json') as f:
924 tests_by_assembly = json.load(f)
926 msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
927 nunit_args = ['--labels=All', '--noresult', '--workers=1']
928 assembly_subdir = 'bin/%s' % msbuild_config
929 assembly_extension = '.exe'
931 if self.args.compiler == 'coreclr':
932 assembly_subdir += '/netcoreapp2.1'
933 runtime_cmd = ['dotnet', 'exec']
934 assembly_extension = '.dll'
936 assembly_subdir += '/net45'
937 if self.platform == 'windows':
939 elif self.platform == 'mac':
940 # mono before version 5.2 on MacOS defaults to 32bit runtime
941 runtime_cmd = ['mono', '--arch=64']
943 runtime_cmd = ['mono']
946 for assembly in six.iterkeys(tests_by_assembly):
947 assembly_file = 'src/csharp/%s/%s/%s%s' % (
948 assembly, assembly_subdir, assembly, assembly_extension)
949 if self.config.build_config != 'gcov' or self.platform != 'windows':
950 # normally, run each test as a separate process
951 for test in tests_by_assembly[assembly]:
952 cmdline = runtime_cmd + [assembly_file,
953 '--test=%s' % test] + nunit_args
955 self.config.job_spec(
957 shortname='csharp.%s' % test,
958 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
960 # For C# test coverage, run all tests from the same assembly at once
961 # using OpenCover.Console (only works on Windows).
963 'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
964 '-target:%s' % assembly_file, '-targetdir:src\\csharp',
965 '-targetargs:%s' % ' '.join(nunit_args),
966 '-filter:+[Grpc.Core]*', '-register:user',
967 '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
970 # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
971 # to prevent problems with registering the profiler.
972 run_exclusive = 1000000
974 self.config.job_spec(cmdline,
975 shortname='csharp.coverage.%s' %
977 cpu_cost=run_exclusive,
978 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
981 def pre_build_steps(self):
982 if self.platform == 'windows':
984 'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
985 self._cmake_arch_option
988 return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
    def make_targets(self):
        # grpc_csharp_ext is the native library target for C#.
        return ['grpc_csharp_ext']
993 def make_options(self):
996 def build_steps(self):
997 if self.platform == 'windows':
998 return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
1000 return [['tools/run_tests/helper_scripts/build_csharp.sh']]
1002 def post_tests_steps(self):
1003 if self.platform == 'windows':
1004 return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
1006 return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
1008 def makefile_name(self):
1009 if self.platform == 'windows':
1010 return 'cmake/build/%s/Makefile' % self._cmake_arch_option
1012 # no need to set x86 specific flags as run_tests.py
1013 # currently forbids x86 C# builds on both Linux and MacOS.
1014 return 'cmake/build/Makefile'
    def dockerfile_dir(self):
        # Image keyed on the distro chosen in configure() ('stretch' on
        # non-windows) and the docker arch suffix.
        return 'tools/dockerfile/test/csharp_%s_%s' % (
            self._docker_distro, _docker_arch_suffix(self.args.arch))
1024 class ObjCLanguage(object):
1026 def configure(self, config, args):
1027 self.config = config
1029 _check_compiler(self.args.compiler, ['default'])
1031 def test_specs(self):
1034 self.config.job_spec(
1035 ['src/objective-c/tests/build_one_example_bazel.sh'],
1036 timeout_seconds=10 * 60,
1037 shortname='ios-buildtest-example-sample',
1041 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1044 # Currently not supporting compiling as frameworks in Bazel
1046 self.config.job_spec(
1047 ['src/objective-c/tests/build_one_example.sh'],
1048 timeout_seconds=20 * 60,
1049 shortname='ios-buildtest-example-sample-frameworks',
1053 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1057 self.config.job_spec(
1058 ['src/objective-c/tests/build_one_example.sh'],
1059 timeout_seconds=20 * 60,
1060 shortname='ios-buildtest-example-switftsample',
1063 'SCHEME': 'SwiftSample',
1064 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
1067 self.config.job_spec(
1068 ['src/objective-c/tests/build_one_example_bazel.sh'],
1069 timeout_seconds=10 * 60,
1070 shortname='ios-buildtest-example-tvOS-sample',
1073 'SCHEME': 'tvOS-sample',
1074 'EXAMPLE_PATH': 'src/objective-c/examples/tvOS-sample',
1077 # Disabled due to #20258
1078 # TODO (mxyan): Reenable this test when #20258 is resolved.
1080 # self.config.job_spec(
1081 # ['src/objective-c/tests/build_one_example_bazel.sh'],
1082 # timeout_seconds=20 * 60,
1083 # shortname='ios-buildtest-example-watchOS-sample',
1086 # 'SCHEME': 'watchOS-sample-WatchKit-App',
1087 # 'EXAMPLE_PATH': 'src/objective-c/examples/watchOS-sample',
1088 # 'FRAMEWORKS': 'NO'
1091 self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
1092 timeout_seconds=60 * 60,
1093 shortname='ios-test-plugintest',
1095 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1097 self.config.job_spec(
1098 ['src/objective-c/tests/run_plugin_option_tests.sh'],
1099 timeout_seconds=60 * 60,
1100 shortname='ios-test-plugin-option-test',
1102 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1104 self.config.job_spec(
1105 ['test/core/iomgr/ios/CFStreamTests/build_and_run_tests.sh'],
1106 timeout_seconds=60 * 60,
1107 shortname='ios-test-cfstream-tests',
1109 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1110 # TODO: replace with run_one_test_bazel.sh when Bazel-Xcode is stable
1112 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1113 timeout_seconds=60 * 60,
1114 shortname='ios-test-unittests',
1116 environ={'SCHEME': 'UnitTests'}))
1118 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1119 timeout_seconds=60 * 60,
1120 shortname='ios-test-interoptests',
1122 environ={'SCHEME': 'InteropTests'}))
1124 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1125 timeout_seconds=60 * 60,
1126 shortname='ios-test-cronettests',
1128 environ={'SCHEME': 'CronetTests'}))
1130 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1131 timeout_seconds=30 * 60,
1132 shortname='ios-perf-test',
1134 environ={'SCHEME': 'PerfTests'}))
1136 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1137 timeout_seconds=30 * 60,
1138 shortname='ios-perf-test-posix',
1140 environ={'SCHEME': 'PerfTestsPosix'}))
1142 self.config.job_spec(['test/cpp/ios/build_and_run_tests.sh'],
1143 timeout_seconds=60 * 60,
1144 shortname='ios-cpp-test-cronet',
1146 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1148 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1149 timeout_seconds=60 * 60,
1150 shortname='mac-test-basictests',
1153 'SCHEME': 'MacTests',
1157 self.config.job_spec(['src/objective-c/tests/run_one_test.sh'],
1158 timeout_seconds=30 * 60,
1159 shortname='tvos-test-basictests',
1162 'SCHEME': 'TvTests',
1168 def pre_build_steps(self):
1171 def make_targets(self):
1174 def make_options(self):
1177 def build_steps(self):
1180 def post_tests_steps(self):
1183 def makefile_name(self):
1186 def dockerfile_dir(self):
1193 class Sanity(object):
1195 def configure(self, config, args):
1196 self.config = config
1198 _check_compiler(self.args.compiler, ['default'])
1200 def test_specs(self):
1202 with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
1203 environ = {'TEST': 'true'}
1204 if _is_use_docker_child():
1205 environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
1206 environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
1207 # sanity tests run tools/bazel wrapper concurrently
1208 # and that can result in a download/run race in the wrapper.
1209 # under docker we already have the right version of bazel
1210 # so we can just disable the wrapper.
1211 environ['DISABLE_BAZEL_WRAPPER'] = 'true'
1213 self.config.job_spec(cmd['script'].split(),
1214 timeout_seconds=30 * 60,
1216 cpu_cost=cmd.get('cpu_cost', 1))
1217 for cmd in yaml.load(f)
1220 def pre_build_steps(self):
    def make_targets(self):
        # Sanity builds only the dependency-check target.
        return ['run_dep_checks']
1226 def make_options(self):
1229 def build_steps(self):
1232 def post_tests_steps(self):
1235 def makefile_name(self):
    def dockerfile_dir(self):
        # Sanity uses a single fixed image; no arch/distro variants.
        return 'tools/dockerfile/test/sanity'
1245 # different configurations we can run under
1246 with open('tools/run_tests/generated/configs.json') as f:
1248 (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
1251 'c++': CLanguage('cxx', 'c++'),
1252 'c': CLanguage('c', 'c'),
1253 'grpc-node': RemoteNodeLanguage(),
1254 'php7': Php7Language(),
1255 'python': PythonLanguage(),
1256 'ruby': RubyLanguage(),
1257 'csharp': CSharpLanguage(),
1258 'objc': ObjCLanguage(),
1269 def _windows_arch_option(arch):
1270 """Returns msbuild cmdline option for selected architecture."""
1271 if arch == 'default' or arch == 'x86':
1272 return '/p:Platform=Win32'
1274 return '/p:Platform=x64'
1276 print('Architecture %s not supported.' % arch)
def _check_arch_option(arch):
    """Checks that architecture option is valid; exits the process if not."""
    if platform_string() == 'windows':
        # Reuse the msbuild mapping purely to validate the value.
        _windows_arch_option(arch)
    elif platform_string() == 'linux':
        # On linux, we need to be running under docker with the right architecture.
        runtime_arch = platform.architecture()[0]
        if arch == 'default':
            pass
        elif runtime_arch == '64bit' and arch == 'x64':
            pass
        elif runtime_arch == '32bit' and arch == 'x86':
            pass
        else:
            print(
                'Architecture %s does not match current runtime architecture.' %
                arch)
            sys.exit(1)
    else:
        if args.arch != 'default':
            print('Architecture %s not supported on current platform.' %
                  args.arch)
            sys.exit(1)
1305 def _docker_arch_suffix(arch):
1306 """Returns suffix to dockerfile dir to use."""
1307 if arch == 'default' or arch == 'x64':
1312 print('Architecture %s not supported with current settings.' % arch)
def runs_per_test_type(arg_str):
    """Auxiliary function to parse the "runs_per_test" flag.

    Returns:
      A positive integer or 0, the latter indicating an infinite number of
      runs.

    Raises:
      argparse.ArgumentTypeError: Upon invalid input.
    """
    if arg_str == 'inf':
        return 0
    try:
        n = int(arg_str)
        if n <= 0:
            # Treat non-positive counts the same as unparseable input.
            raise ValueError
        return n
    except ValueError:
        msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
        raise argparse.ArgumentTypeError(msg)
def percent_type(arg_str):
    """argparse type function: parse a percentage in the [0, 100] range.

    Args:
      arg_str: the raw command-line string.

    Returns:
      The parsed float value.

    Raises:
      argparse.ArgumentTypeError: if the value is outside [0, 100].
      ValueError: if the value is not a float at all (argparse reports this
        as an invalid value too).
    """
    pct = float(arg_str)
    if pct > 100 or pct < 0:
        raise argparse.ArgumentTypeError(
            "'%f' is not a valid percentage in the [0, 100] range" % pct)
    # The visible original validated but dropped the value (returned None);
    # the parsed percentage must be handed back to argparse.
    return pct
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when a and b are equal within the given tolerances."""
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# NOTE(review): several add_argument calls below are truncated in this view —
# flag long-names, defaults and const values fall on lines outside it.
argp.add_argument('-c',
                  choices=sorted(_CONFIGS.keys()),
                  type=runs_per_test_type,
                  help='A positive integer or "inf". If "inf", all tests will run in an '
                  'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-p',
                  help='Run a random sample with that percentage of tests')
argp.add_argument('-f',
                  action='store_const',
argp.add_argument('-t',
                  action='store_const',
argp.add_argument('--newline_on_success',
                  action='store_const',
argp.add_argument('-l',
                  choices=sorted(_LANGUAGES.keys()),
argp.add_argument('-S',
                  '--stop_on_failure',
                  action='store_const',
argp.add_argument('--use_docker',
                  action='store_const',
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
                  action='store_const',
                  'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
                  choices=['default', 'x86', 'x64'],
                  'Selects architecture to target. For some platforms "default" is the only supported choice.'
                  'Selects compiler to use. Allowed values depend on the platform and language.'
argp.add_argument('--iomgr_platform',
                  choices=['native', 'uv', 'gevent', 'asyncio'],
                  help='Selects iomgr platform to build on')
argp.add_argument('--build_only',
                  action='store_const',
                  help='Perform all the build steps but don\'t run any tests.')
argp.add_argument('--measure_cpu_costs',
                  action='store_const',
                  help='Measure the cpu costs of tests')
    '--update_submodules',
    'Update some submodules before building. If any are updated, also run generate_projects. '
    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x',
                  help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name',
                  help='Test suite name to use in generated JUnit XML report')
    '--report_multi_target',
    action='store_const',
    help='Generate separate XML report for each test job (Looks better in UIs).'
    action='store_const',
    'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
    + 'Useful when running many iterations of each test (argument -n).')
    '--force_default_poller',
    action='store_const',
    help='Don\'t try to iterate over many polling strategies when they exist')
    '--force_use_pollers',
    help='Only use the specified comma-delimited list of polling engines. '
    'Example: --force_use_pollers epoll1,poll '
    ' (This flag has no effect if --force_default_poller flag is also used)')
argp.add_argument('--max_time',
                  help='Maximum test runtime in seconds')
argp.add_argument('--bq_result_table',
                  help='Upload test results to a specified BQ table.')
args = argp.parse_args()
shortname_to_cpu = {}

# Honor the poller-related flags by mutating the module-level strategy table
# in place before any test specs are generated.
if args.force_default_poller:
    _POLLING_STRATEGIES = {}
elif args.force_use_pollers:
    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')

jobset.measure_cpu_costs = args.measure_cpu_costs
# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
    # Each spec is SUBMODULE_NAME[:BRANCH]; the branch-defaulting lines are
    # partially outside this view (hence the orphan `elif` fragment below).
    spec = spec.split(':', 1)
    elif len(spec) == 2:
    cwd = 'third_party/%s' % submodule

    # Small helper: run a git command inside the submodule's directory.
    def git(cmd, cwd=cwd):
        print('in %s: git %s' % (cwd, cmd))
        run_shell_command('git %s' % cmd, cwd=cwd)

    git('checkout %s' % branch)
    git('pull origin %s' % branch)
    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
        need_to_regenerate_projects = True
if need_to_regenerate_projects:
    if jobset.platform_string() == 'linux':
        run_shell_command('tools/buildgen/generate_projects.sh')
        # NOTE(review): the print(...) calls these warning strings belong to
        # fall outside this view.
        'WARNING: may need to regenerate projects, but since we are not on')
        ' Linux this step is being skipped. Compilation MAY fail.')
# Resolve the requested build/run configuration and configure each language.
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

# NOTE(review): the condition guarding this override falls outside this view.
_FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

languages = set(_LANGUAGES[l] for l in args.language)
    l.configure(run_config, args)

language_make_options = []
if any(language.make_options() for language in languages):
    if not 'gcov' in args.config and len(languages) != 1:
        'languages with custom make options cannot be built simultaneously with other languages'
    # Combining make options is not clean and just happens to work. It allows C & C++ to build
    # together, and is only used under gcov. All other configs should build languages individually.
    language_make_options = list(
        make_option for lang in languages
        for make_option in lang.make_options()

    # Re-invoke this script inside a docker container (guarding `if` is
    # outside this view).
    print('Seen --use_docker flag, will run tests under docker.')
    print(
        'IMPORTANT: The changes you are testing need to be locally committed'
        'because only the committed changes in the current branch will be')
    print('copied to the docker environment.')

    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
    if len(dockerfile_dirs) > 1:
        print('Languages to be tested require running under different docker '
    dockerfile_dir = next(iter(dockerfile_dirs))

    # Strip --use_docker so the child invocation inside the container does
    # not recurse.
    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
    env = os.environ.copy()
    env['RUN_TESTS_COMMAND'] = run_tests_cmd
    env['DOCKERFILE_DIR'] = dockerfile_dir
    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
    env['XML_REPORT'] = args.xml_report
    env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
    subprocess.check_call(
        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
    """Build JobSpecs that compile `targets` under config `cfg`.

    NOTE(review): several lines of this function (the jobset.JobSpec
    constructors and the windows/posix branch scaffolding) fall outside this
    view, so the fragments below are parts of larger expressions.
    """
    if platform_string() == 'windows':
            'cmake', '--build', '.', '--target',
            '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
            cwd=os.path.dirname(makefile),
            timeout_seconds=None) for target in targets
        if targets and makefile.startswith('cmake/build/'):
            # With cmake, we've passed all the build configuration in the pre-build step already
                [os.getenv('MAKE', 'make'), '-j',
                 '%d' % args.jobs] + targets,
                timeout_seconds=None)
                os.getenv('MAKE', 'make'), '-f', makefile, '-j',
                'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
                'CONFIG=%s' % cfg, 'Q='
            ] + language_make_options +
            ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
            timeout_seconds=None)
# NOTE(review): the loop header populating make_targets per language falls
# outside this view.
    makefile = l.makefile_name()
    make_targets[makefile] = make_targets.get(makefile, set()).union(
        set(l.make_targets()))


def build_step_environ(cfg):
    """Environment for a build step under config `cfg`; also exposes
    MSBUILD_CONFIG when the config maps to one (guard line outside view)."""
    environ = {'CONFIG': cfg}
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
        environ['MSBUILD_CONFIG'] = msbuild_cfg


# Assemble pre-build, build and post-test JobSpec lists. The surrounding
# list(...)/extend(...) scaffolding is partially outside this view.
    jobset.JobSpec(cmdline,
                   environ=build_step_environ(build_config),
                   timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
    for cmdline in l.pre_build_steps()))

make_commands = itertools.chain.from_iterable(
    make_jobspec(build_config, list(targets), makefile)
    for (makefile, targets) in make_targets.items())
build_steps.extend(set(make_commands))
    jobset.JobSpec(cmdline,
                   environ=build_step_environ(build_config),
                   timeout_seconds=None)
    for cmdline in l.build_steps()))

post_tests_steps = list(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
    for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
    """Ask a legacy port server on `legacy_server_port` to quit.

    NOTE(review): the try/except scaffolding around these urlopen calls falls
    outside this view.
    """
        urllib.request.urlopen('http://localhost:%d/version_number' %
        urllib.request.urlopen('http://localhost:%d/quitquitquit' %
                               legacy_server_port).read()
1727 def _calculate_num_runs_failures(list_of_results):
1728 """Calculate number of runs and failures for a particular test.
1731 list_of_results: (List) of JobResult object.
1733 A tuple of total number of runs and failures.
1735 num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
1737 for jobresult in list_of_results:
1738 if jobresult.retries > 0:
1739 num_runs += jobresult.retries
1740 if jobresult.num_failures > 0:
1741 num_failures += jobresult.num_failures
1742 return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    # Sentinel values identifying which phase failed.
    # NOTE(review): the BUILD and TEST sentinels are defined on lines outside
    # this view.
    POST_TEST = object()
def _has_epollexclusive():
    """Probe for EPOLLEXCLUSIVE support by running the check_epollexclusive
    helper binary for the current config.

    NOTE(review): the try/return scaffolding of this function falls outside
    this view.
    """
    binary = 'bins/%s/check_epollexclusive' % args.config
    if not os.path.exists(binary):
        subprocess.check_call(binary)
    except subprocess.CalledProcessError as e:
    except OSError as e:
        # For languages other than C and Windows the binary won't exist
# returns a list of things that failed (or an empty list on success)
def _build_and_run(check_cancelled,
    """Do one pass of building & running tests."""
    # NOTE(review): large portions of this function (remaining parameters,
    # several guarding if/try statements and closing brackets) fall outside
    # this view; fragments below belong to larger statements.
    # build latest sequentially
    num_failures, resultset = jobset.run(build_steps,
                                         stop_on_failure=True,
                                         newline_on_success=newline_on_success,
        return [BuildAndRunError.BUILD]

        report_utils.render_junit_xml_report(
            resultset, xml_report, suite_name=args.report_suite_name)

    # Drop the epollex strategy when the runtime cannot support it.
    if not args.travis and not _has_epollexclusive() and platform_string(
    ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[
        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
        _POLLING_STRATEGIES[platform_string()].remove('epollex')

        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
        for _ in range(0, args.antagonists)
    start_port_server.start_port_server()

    num_test_failures = 0
        infinite_runs = runs_per_test == 0
        one_run = set(spec for language in languages
                      for spec in language.test_specs()
                      if (re.search(args.regex, spec.shortname) and
                          (args.regex_exclude == '' or
                           not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis and args.max_time <= 0:
            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
            # whereas otherwise, we want to shuffle things up to give all tests a
            massaged_one_run = list(
                one_run)  # random.sample needs an indexable seq.
            num_jobs = len(massaged_one_run)
            # for a random sample, get as many as indicated by the 'sample_percent'
            # argument. By default this arg is 100, resulting in a shuffle of all
                sample_size = int(num_jobs * args.sample_percent / 100.0)
                massaged_one_run = random.sample(massaged_one_run, sample_size)
                if not isclose(args.sample_percent, 100.0):
                    assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
                    print("Running %d tests out of %d (~%d%%)" %
                          (sample_size, num_jobs, args.sample_percent))
        assert len(massaged_one_run
                   ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                         else itertools.repeat(massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        if args.quiet_success:
                'Running tests quietly, only failing tests will be reported',
        num_test_failures, resultset = jobset.run(
            newline_on_success=newline_on_success,
            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
            stop_on_failure=args.stop_on_failure,
            quiet_success=args.quiet_success,
            max_time=args.max_time)
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures > 0:
                    if num_failures == num_runs:  # what about infinite_runs???
                        jobset.message('FAILED', k, do_newline=True)
                        jobset.message('FLAKE',
                                       '%s [%d/%d runs flaked]' %
                                       (k, num_failures, num_runs),

        for antagonist in antagonists:
        if args.bq_result_table and resultset:
            upload_extra_fields = {
                'compiler': args.compiler,
                'config': args.config,
                'iomgr_platform': args.iomgr_platform,
                'language': args.language[
                ],  # args.language is a list but will always have one element when uploading to BQ is enabled.
                'platform': platform_string()
            upload_results_to_bq(resultset, args.bq_result_table,
                                 upload_extra_fields)
            except NameError as e:
                    e)  # It's fine to ignore since this is not critical
        if xml_report and resultset:
            report_utils.render_junit_xml_report(
                suite_name=args.report_suite_name,
                multi_target=args.report_multi_target)

    number_failures, _ = jobset.run(post_tests_steps,
                                    stop_on_failure=False,
                                    newline_on_success=newline_on_success,

        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)
# NOTE(review): the `if forever:` / `else:` scaffolding around this watch
# loop and the final exit-code accumulation fall outside this view.
    dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
    initial_time = dw.most_recent_change()
    have_files_changed = lambda: dw.most_recent_change() != initial_time
    previous_success = success
    errors = _build_and_run(check_cancelled=have_files_changed,
                            newline_on_success=False,
                            build_only=args.build_only) == 0
    if not previous_success and not errors:
        jobset.message('SUCCESS',
                       'All tests are now passing properly',
    jobset.message('IDLE', 'No change detected')
    while not have_files_changed():
    errors = _build_and_run(check_cancelled=lambda: False,
                            newline_on_success=args.newline_on_success,
                            xml_report=args.xml_report,
                            build_only=args.build_only)
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Map error categories onto the process exit code (branch bodies are
    # outside this view).
    if BuildAndRunError.BUILD in errors:
    if BuildAndRunError.TEST in errors:
    if BuildAndRunError.POST_TEST in errors: