2 # Copyright 2015 gRPC authors.
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Run tests in parallel."""
17 from __future__ import print_function
26 import multiprocessing
39 from six.moves import urllib
43 import python_utils.jobset as jobset
44 import python_utils.report_utils as report_utils
45 import python_utils.watch_dirs as watch_dirs
46 import python_utils.start_port_server as start_port_server
48 from python_utils.upload_test_results import upload_results_to_bq
50 pass # It's ok to not import because this is only necessary to upload results to BQ.
52 gcp_utils_dir = os.path.abspath(
53 os.path.join(os.path.dirname(__file__), '../gcp/utils'))
54 sys.path.append(gcp_utils_dir)
56 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
59 _FORCE_ENVIRON_FOR_WRAPPERS = {
60 'GRPC_VERBOSITY': 'DEBUG',
63 _POLLING_STRATEGIES = {
64 'linux': ['epollex', 'epoll1', 'poll'],
# One row of aggregated BigQuery test history: test name, whether any run
# failed (flaky), and the max measured cpu cost (see get_bqtest_data's query).
BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
71 def get_bqtest_data(limit=None):
72 import big_query_utils
74 bq = big_query_utils.create_big_query()
78 SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
79 MAX(cpu_measured) + 0.01 as cpu
82 REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
85 [grpc-testing:jenkins_test_results.aggregate_results]
87 timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
88 AND platform = '""" + platform_string() + """'
89 AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
93 query += " limit {}".format(limit)
94 query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
95 page = bq.jobs().getQueryResults(
96 pageToken=None, **query_job['jobReference']).execute(num_retries=3)
98 BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
99 float(row['f'][2]['v'])) for row in page['rows']
def platform_string():
    """Thin wrapper: canonical platform name as reported by jobset
    (e.g. 'linux', 'windows' — see uses of _POLLING_STRATEGIES)."""
    name = jobset.platform_string()
    return name
# Default per-test timeout; individual targets override it via their
# 'timeout_seconds' entry (see target.get('timeout_seconds', ...) in test_specs).
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
# More generous budget, presumably applied to pre-build steps such as cmake
# project generation — confirm at the call site (not visible in this chunk).
_PRE_BUILD_STEP_TIMEOUT_SECONDS = 10 * 60
112 def run_shell_command(cmd, env=None, cwd=None):
114 subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
115 except subprocess.CalledProcessError as e:
117 "Error while running command '%s'. Exit status %d. Output:\n%s",
118 e.cmd, e.returncode, e.output)
122 def max_parallel_tests_for_current_platform():
123 # Too much test parallelization has only been seen to be a problem
125 if jobset.platform_string() == 'windows':
130 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
131 class Config(object):
136 timeout_multiplier=1,
138 iomgr_platform='native'):
141 self.build_config = config
142 self.environ = environ
143 self.environ['CONFIG'] = config
144 self.tool_prefix = tool_prefix
145 self.timeout_multiplier = timeout_multiplier
146 self.iomgr_platform = iomgr_platform
150 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
155 """Construct a jobset.JobSpec for a test under this config
158 cmdline: a list of strings specifying the command line the test
161 actual_environ = self.environ.copy()
162 for k, v in environ.items():
163 actual_environ[k] = v
164 if not flaky and shortname and shortname in flaky_tests:
166 if shortname in shortname_to_cpu:
167 cpu_cost = shortname_to_cpu[shortname]
168 return jobset.JobSpec(
169 cmdline=self.tool_prefix + cmdline,
171 environ=actual_environ,
173 timeout_seconds=(self.timeout_multiplier * timeout_seconds
174 if timeout_seconds else None),
175 flake_retries=4 if flaky or args.allow_flakes else 0,
176 timeout_retries=1 if flaky or args.allow_flakes else 0)
179 def get_c_tests(travis, test_lang):
181 platforms_str = 'ci_platforms' if travis else 'platforms'
182 with open('tools/run_tests/generated/tests.json') as f:
186 if tgt['language'] == test_lang and platform_string() in
187 tgt[platforms_str] and not (travis and tgt['flaky'])
191 def _check_compiler(compiler, supported_compilers):
192 if compiler not in supported_compilers:
194 'Compiler %s not supported (on this platform).' % compiler)
197 def _check_arch(arch, supported_archs):
198 if arch not in supported_archs:
199 raise Exception('Architecture %s not supported.' % arch)
202 def _is_use_docker_child():
203 """Returns True if running running as a --use_docker child."""
204 return True if os.getenv('RUN_TESTS_COMMAND') else False
207 _PythonConfigVars = collections.namedtuple('_ConfigVars', [
210 'builder_prefix_arguments',
211 'venv_relative_python',
219 def _python_config_generator(name, major, minor, bits, config_vars):
220 name += '_' + config_vars.iomgr_platform
222 name, config_vars.shell + config_vars.builder +
223 config_vars.builder_prefix_arguments + [
224 _python_pattern_function(major=major, minor=minor, bits=bits)
225 ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
226 config_vars.shell + config_vars.runner + [
227 os.path.join(name, config_vars.venv_relative_python[0]),
228 config_vars.test_name
232 def _pypy_config_generator(name, major, config_vars):
235 config_vars.shell + config_vars.builder +
236 config_vars.builder_prefix_arguments + [
237 _pypy_pattern_function(major=major)
238 ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
239 config_vars.shell + config_vars.runner +
240 [os.path.join(name, config_vars.venv_relative_python[0])])
243 def _python_pattern_function(major, minor, bits):
244 # Bit-ness is handled by the test machine's environment
247 return '/c/Python{major}{minor}/python.exe'.format(
248 major=major, minor=minor, bits=bits)
250 return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
251 major=major, minor=minor, bits=bits)
253 return 'python{major}.{minor}'.format(major=major, minor=minor)
256 def _pypy_pattern_function(major):
262 raise ValueError("Unknown PyPy major version")
265 class CLanguage(object):
267 def __init__(self, make_target, test_lang):
268 self.make_target = make_target
269 self.platform = platform_string()
270 self.test_lang = test_lang
272 def configure(self, config, args):
275 if self.platform == 'windows':
278 ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
279 _check_arch(self.args.arch, ['default', 'x64', 'x86'])
280 self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
281 self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
282 self._use_cmake = True
283 self._make_options = []
284 elif self.args.compiler == 'cmake':
285 _check_arch(self.args.arch, ['default'])
286 self._use_cmake = True
287 self._docker_distro = 'jessie'
288 self._make_options = []
290 self._use_cmake = False
291 self._docker_distro, self._make_options = self._compiler_options(
292 self.args.use_docker, self.args.compiler)
293 if args.iomgr_platform == "uv":
294 cflags = '-DGRPC_UV -DGRPC_CUSTOM_IOMGR_THREAD_CHECK -DGRPC_CUSTOM_SOCKET '
296 cflags += subprocess.check_output(
297 ['pkg-config', '--cflags', 'libuv']).strip() + ' '
298 except (subprocess.CalledProcessError, OSError):
301 ldflags = subprocess.check_output(
302 ['pkg-config', '--libs', 'libuv']).strip() + ' '
303 except (subprocess.CalledProcessError, OSError):
305 self._make_options += [
306 'EXTRA_CPPFLAGS={}'.format(cflags),
307 'EXTRA_LDLIBS={}'.format(ldflags)
310 def test_specs(self):
312 binaries = get_c_tests(self.args.travis, self.test_lang)
313 for target in binaries:
314 if self._use_cmake and target.get('boringssl', False):
315 # cmake doesn't build boringssl tests
317 auto_timeout_scaling = target.get('auto_timeout_scaling', True)
318 polling_strategies = (_POLLING_STRATEGIES.get(
319 self.platform, ['all']) if target.get('uses_polling', True) else
321 if self.args.iomgr_platform == 'uv':
322 polling_strategies = ['all']
323 for polling_strategy in polling_strategies:
325 'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
326 _ROOT + '/src/core/tsi/test_creds/ca.pem',
327 'GRPC_POLL_STRATEGY':
332 resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
334 env['GRPC_DNS_RESOLVER'] = resolver
335 shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
336 if polling_strategy in target.get('excluded_poll_engines', []):
340 if auto_timeout_scaling:
341 config = self.args.config
342 if ('asan' in config or config == 'msan' or
343 config == 'tsan' or config == 'ubsan' or
344 config == 'helgrind' or config == 'memcheck'):
345 # Scale overall test timeout if running under various sanitizers.
346 # scaling value is based on historical data analysis
349 if self.config.build_config in target['exclude_configs']:
351 if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
353 if self.platform == 'windows':
354 binary = 'cmake/build/%s/%s.exe' % (
355 _MSBUILD_CONFIG[self.config.build_config],
359 binary = 'cmake/build/%s' % target['name']
361 binary = 'bins/%s/%s' % (self.config.build_config,
363 cpu_cost = target['cpu_cost']
364 if cpu_cost == 'capacity':
365 cpu_cost = multiprocessing.cpu_count()
366 if os.path.isfile(binary):
367 list_test_command = None
368 filter_test_command = None
370 # these are the flag defined by gtest and benchmark framework to list
371 # and filter test runs. We use them to split each individual test
372 # into its own JobSpec, and thus into its own process.
373 if 'benchmark' in target and target['benchmark']:
374 with open(os.devnull, 'w') as fnull:
375 tests = subprocess.check_output(
376 [binary, '--benchmark_list_tests'],
378 for line in tests.split('\n'):
380 if not test: continue
382 '--benchmark_filter=%s$' % test
385 self.config.job_spec(
387 shortname='%s %s' % (' '.join(cmdline),
390 timeout_seconds=target.get(
392 _DEFAULT_TIMEOUT_SECONDS) *
395 elif 'gtest' in target and target['gtest']:
396 # here we parse the output of --gtest_list_tests to build up a complete
397 # list of the tests contained in a binary for each test, we then
398 # add a job to run, filtering for just that test.
399 with open(os.devnull, 'w') as fnull:
400 tests = subprocess.check_output(
401 [binary, '--gtest_list_tests'], stderr=fnull)
403 for line in tests.split('\n'):
405 if i >= 0: line = line[:i]
406 if not line: continue
410 assert base is not None
411 assert line[1] == ' '
412 test = base + line.strip()
414 '--gtest_filter=%s' % test
417 self.config.job_spec(
419 shortname='%s %s' % (' '.join(cmdline),
422 timeout_seconds=target.get(
424 _DEFAULT_TIMEOUT_SECONDS) *
428 cmdline = [binary] + target['args']
429 shortname = target.get('shortname', ' '.join(
430 pipes.quote(arg) for arg in cmdline))
431 shortname += shortname_ext
433 self.config.job_spec(
437 flaky=target.get('flaky', False),
438 timeout_seconds=target.get(
439 'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
442 elif self.args.regex == '.*' or self.platform == 'windows':
443 print('\nWARNING: binary not found, skipping', binary)
446 def make_targets(self):
447 if self.platform == 'windows':
448 # don't build tools on windows just yet
449 return ['buildtests_%s' % self.make_target]
451 'buildtests_%s' % self.make_target,
452 'tools_%s' % self.make_target, 'check_epollexclusive'
455 def make_options(self):
456 return self._make_options
458 def pre_build_steps(self):
459 if self.platform == 'windows':
461 'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
462 self._cmake_generator_option, self._cmake_arch_option
464 elif self._use_cmake:
465 return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
469 def build_steps(self):
472 def post_tests_steps(self):
473 if self.platform == 'windows':
476 return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
478 def makefile_name(self):
480 return 'cmake/build/Makefile'
484 def _clang_make_options(self, version_suffix=''):
485 if self.args.config == 'ubsan':
487 'CC=clang%s' % version_suffix,
488 'CXX=clang++%s' % version_suffix,
489 'LD=clang++%s' % version_suffix,
490 'LDXX=clang++%s' % version_suffix
494 'CC=clang%s' % version_suffix,
495 'CXX=clang++%s' % version_suffix,
496 'LD=clang%s' % version_suffix,
497 'LDXX=clang++%s' % version_suffix
500 def _gcc_make_options(self, version_suffix):
502 'CC=gcc%s' % version_suffix,
503 'CXX=g++%s' % version_suffix,
504 'LD=gcc%s' % version_suffix,
505 'LDXX=g++%s' % version_suffix
508 def _compiler_options(self, use_docker, compiler):
509 """Returns docker distro and make options to use for given compiler."""
510 if not use_docker and not _is_use_docker_child():
511 _check_compiler(compiler, ['default'])
513 if compiler == 'gcc4.9' or compiler == 'default':
514 return ('jessie', [])
515 elif compiler == 'gcc4.8':
516 return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
517 elif compiler == 'gcc5.3':
518 return ('ubuntu1604', [])
519 elif compiler == 'gcc7.2':
520 return ('ubuntu1710', [])
521 elif compiler == 'gcc_musl':
522 return ('alpine', [])
523 elif compiler == 'clang3.4':
524 # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
525 return ('ubuntu1404', self._clang_make_options())
526 elif compiler == 'clang3.5':
527 return ('jessie', self._clang_make_options(version_suffix='-3.5'))
528 elif compiler == 'clang3.6':
529 return ('ubuntu1604',
530 self._clang_make_options(version_suffix='-3.6'))
531 elif compiler == 'clang3.7':
532 return ('ubuntu1604',
533 self._clang_make_options(version_suffix='-3.7'))
534 elif compiler == 'clang7.0':
535 # clang++-7.0 alias doesn't exist and there are no other clang versions
537 return ('sanitizers_jessie', self._clang_make_options())
539 raise Exception('Compiler %s not supported.' % compiler)
541 def dockerfile_dir(self):
542 return 'tools/dockerfile/test/cxx_%s_%s' % (
543 self._docker_distro, _docker_arch_suffix(self.args.arch))
546 return self.make_target
549 # This tests Node on grpc/grpc-node and will become the standard for Node testing
550 class RemoteNodeLanguage(object):
553 self.platform = platform_string()
555 def configure(self, config, args):
558 # Note: electron ABI only depends on major and minor version, so that's all
559 # we should specify in the compiler argument
560 _check_compiler(self.args.compiler, [
561 'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
562 'electron1.3', 'electron1.6'
564 if self.args.compiler == 'default':
565 self.runtime = 'node'
566 self.node_version = '8'
568 if self.args.compiler.startswith('electron'):
569 self.runtime = 'electron'
570 self.node_version = self.args.compiler[8:]
572 self.runtime = 'node'
573 # Take off the word "node"
574 self.node_version = self.args.compiler[4:]
576 # TODO: update with Windows/electron scripts when available for grpc/grpc-node
577 def test_specs(self):
578 if self.platform == 'windows':
580 self.config.job_spec(
581 ['tools\\run_tests\\helper_scripts\\run_node.bat'])
585 self.config.job_spec(
586 ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
588 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
591 def pre_build_steps(self):
594 def make_targets(self):
597 def make_options(self):
600 def build_steps(self):
603 def post_tests_steps(self):
606 def makefile_name(self):
609 def dockerfile_dir(self):
610 return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
617 class PhpLanguage(object):
619 def configure(self, config, args):
622 _check_compiler(self.args.compiler, ['default'])
623 self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
625 def test_specs(self):
627 self.config.job_spec(
628 ['src/php/bin/run_tests.sh'],
629 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
632 def pre_build_steps(self):
635 def make_targets(self):
636 return ['static_c', 'shared_c']
638 def make_options(self):
639 return self._make_options
641 def build_steps(self):
642 return [['tools/run_tests/helper_scripts/build_php.sh']]
644 def post_tests_steps(self):
645 return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
647 def makefile_name(self):
650 def dockerfile_dir(self):
651 return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
658 class Php7Language(object):
660 def configure(self, config, args):
663 _check_compiler(self.args.compiler, ['default'])
664 self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
666 def test_specs(self):
668 self.config.job_spec(
669 ['src/php/bin/run_tests.sh'],
670 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
673 def pre_build_steps(self):
676 def make_targets(self):
677 return ['static_c', 'shared_c']
679 def make_options(self):
680 return self._make_options
682 def build_steps(self):
683 return [['tools/run_tests/helper_scripts/build_php.sh']]
685 def post_tests_steps(self):
686 return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
688 def makefile_name(self):
691 def dockerfile_dir(self):
692 return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
700 collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
701 """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
704 class PythonLanguage(object):
706 def configure(self, config, args):
709 self.pythons = self._get_pythons(self.args)
711 def test_specs(self):
712 # load list of known test suites
714 'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
715 tests_json = json.load(tests_json_file)
716 environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
718 self.config.job_spec(
720 timeout_seconds=5 * 60,
722 list(environment.items()) + [(
723 'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
724 shortname='%s.test.%s' % (config.name, suite_name),
725 ) for suite_name in tests_json for config in self.pythons
728 def pre_build_steps(self):
731 def make_targets(self):
734 def make_options(self):
737 def build_steps(self):
738 return [config.build for config in self.pythons]
740 def post_tests_steps(self):
741 if self.config.build_config != 'gcov':
744 return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
746 def makefile_name(self):
749 def dockerfile_dir(self):
750 return 'tools/dockerfile/test/python_%s_%s' % (
751 self._python_manager_name(), _docker_arch_suffix(self.args.arch))
753 def _python_manager_name(self):
754 """Choose the docker image to use based on python version."""
755 if self.args.compiler in [
756 'python2.7', 'python3.5', 'python3.6', 'python3.7'
758 return 'stretch_' + self.args.compiler[len('python'):]
759 elif self.args.compiler == 'python_alpine':
761 elif self.args.compiler == 'python3.4':
766 def _get_pythons(self, args):
767 """Get python runtimes to test with, based on current platform, architecture, compiler etc."""
768 if args.arch == 'x86':
777 'tools/run_tests/helper_scripts/build_python_msys2.sh')
779 builder_prefix_arguments = ['MINGW{}'.format(bits)]
780 venv_relative_python = ['Scripts/python.exe']
781 toolchain = ['mingw32']
786 'tools/run_tests/helper_scripts/build_python.sh')
788 builder_prefix_arguments = []
789 venv_relative_python = ['bin/python']
792 test_command = 'test_lite'
793 if args.iomgr_platform == 'gevent':
794 test_command = 'test_gevent'
796 os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
799 config_vars = _PythonConfigVars(
800 shell, builder, builder_prefix_arguments, venv_relative_python,
801 toolchain, runner, test_command, args.iomgr_platform)
802 python27_config = _python_config_generator(
807 config_vars=config_vars)
808 python34_config = _python_config_generator(
813 config_vars=config_vars)
814 python35_config = _python_config_generator(
819 config_vars=config_vars)
820 python36_config = _python_config_generator(
825 config_vars=config_vars)
826 python37_config = _python_config_generator(
831 config_vars=config_vars)
832 pypy27_config = _pypy_config_generator(
833 name='pypy', major='2', config_vars=config_vars)
834 pypy32_config = _pypy_config_generator(
835 name='pypy3', major='3', config_vars=config_vars)
837 if args.compiler == 'default':
839 return (python35_config,)
845 elif args.compiler == 'python2.7':
846 return (python27_config,)
847 elif args.compiler == 'python3.4':
848 return (python34_config,)
849 elif args.compiler == 'python3.5':
850 return (python35_config,)
851 elif args.compiler == 'python3.6':
852 return (python36_config,)
853 elif args.compiler == 'python3.7':
854 return (python37_config,)
855 elif args.compiler == 'pypy':
856 return (pypy27_config,)
857 elif args.compiler == 'pypy3':
858 return (pypy32_config,)
859 elif args.compiler == 'python_alpine':
860 return (python27_config,)
861 elif args.compiler == 'all_the_cpythons':
870 raise Exception('Compiler %s not supported.' % args.compiler)
876 class RubyLanguage(object):
878 def configure(self, config, args):
881 _check_compiler(self.args.compiler, ['default'])
883 def test_specs(self):
885 self.config.job_spec(
886 ['tools/run_tests/helper_scripts/run_ruby.sh'],
887 timeout_seconds=10 * 60,
888 environ=_FORCE_ENVIRON_FOR_WRAPPERS)
891 self.config.job_spec(
892 ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
893 timeout_seconds=20 * 60,
894 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
897 def pre_build_steps(self):
898 return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
900 def make_targets(self):
903 def make_options(self):
906 def build_steps(self):
907 return [['tools/run_tests/helper_scripts/build_ruby.sh']]
909 def post_tests_steps(self):
910 return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
912 def makefile_name(self):
915 def dockerfile_dir(self):
916 return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
923 class CSharpLanguage(object):
926 self.platform = platform_string()
928 def configure(self, config, args):
931 if self.platform == 'windows':
932 _check_compiler(self.args.compiler, ['default', 'coreclr'])
933 _check_arch(self.args.arch, ['default'])
934 self._cmake_arch_option = 'x64'
936 _check_compiler(self.args.compiler, ['default', 'coreclr'])
937 self._docker_distro = 'stretch'
939 def test_specs(self):
940 with open('src/csharp/tests.json') as f:
941 tests_by_assembly = json.load(f)
943 msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
944 nunit_args = ['--labels=All', '--noresult', '--workers=1']
945 assembly_subdir = 'bin/%s' % msbuild_config
946 assembly_extension = '.exe'
948 if self.args.compiler == 'coreclr':
949 assembly_subdir += '/netcoreapp2.1'
950 runtime_cmd = ['dotnet', 'exec']
951 assembly_extension = '.dll'
953 assembly_subdir += '/net45'
954 if self.platform == 'windows':
956 elif self.platform == 'mac':
957 # mono before version 5.2 on MacOS defaults to 32bit runtime
958 runtime_cmd = ['mono', '--arch=64']
960 runtime_cmd = ['mono']
963 for assembly in six.iterkeys(tests_by_assembly):
964 assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
968 if self.config.build_config != 'gcov' or self.platform != 'windows':
969 # normally, run each test as a separate process
970 for test in tests_by_assembly[assembly]:
971 cmdline = runtime_cmd + [assembly_file,
972 '--test=%s' % test] + nunit_args
974 self.config.job_spec(
976 shortname='csharp.%s' % test,
977 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
979 # For C# test coverage, run all tests from the same assembly at once
980 # using OpenCover.Console (only works on Windows).
982 'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
983 '-target:%s' % assembly_file, '-targetdir:src\\csharp',
984 '-targetargs:%s' % ' '.join(nunit_args),
985 '-filter:+[Grpc.Core]*', '-register:user',
986 '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
989 # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
990 # to prevent problems with registering the profiler.
991 run_exclusive = 1000000
993 self.config.job_spec(
995 shortname='csharp.coverage.%s' % assembly,
996 cpu_cost=run_exclusive,
997 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
1000 def pre_build_steps(self):
1001 if self.platform == 'windows':
1003 'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
1004 self._cmake_arch_option
1007 return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
1009 def make_targets(self):
1010 return ['grpc_csharp_ext']
1012 def make_options(self):
1015 def build_steps(self):
1016 if self.platform == 'windows':
1017 return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
1019 return [['tools/run_tests/helper_scripts/build_csharp.sh']]
1021 def post_tests_steps(self):
1022 if self.platform == 'windows':
1023 return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
1025 return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
1027 def makefile_name(self):
1028 if self.platform == 'windows':
1029 return 'cmake/build/%s/Makefile' % self._cmake_arch_option
1031 # no need to set x86 specific flags as run_tests.py
1032 # currently forbids x86 C# builds on both Linux and MacOS.
1033 return 'cmake/build/Makefile'
1035 def dockerfile_dir(self):
1036 return 'tools/dockerfile/test/csharp_%s_%s' % (
1037 self._docker_distro, _docker_arch_suffix(self.args.arch))
1043 class ObjCLanguage(object):
1045 def configure(self, config, args):
1046 self.config = config
1048 _check_compiler(self.args.compiler, ['default'])
1050 def test_specs(self):
1052 self.config.job_spec(
1053 ['src/objective-c/tests/run_tests.sh'],
1054 timeout_seconds=60 * 60,
1055 shortname='objc-tests',
1057 environ=_FORCE_ENVIRON_FOR_WRAPPERS),
1058 self.config.job_spec(
1059 ['src/objective-c/tests/run_plugin_tests.sh'],
1060 timeout_seconds=60 * 60,
1061 shortname='objc-plugin-tests',
1063 environ=_FORCE_ENVIRON_FOR_WRAPPERS),
1064 self.config.job_spec(
1065 ['src/objective-c/tests/build_one_example.sh'],
1066 timeout_seconds=10 * 60,
1067 shortname='objc-build-example-helloworld',
1070 'SCHEME': 'HelloWorld',
1071 'EXAMPLE_PATH': 'examples/objective-c/helloworld'
1073 self.config.job_spec(
1074 ['src/objective-c/tests/build_one_example.sh'],
1075 timeout_seconds=10 * 60,
1076 shortname='objc-build-example-routeguide',
1079 'SCHEME': 'RouteGuideClient',
1080 'EXAMPLE_PATH': 'examples/objective-c/route_guide'
1082 self.config.job_spec(
1083 ['src/objective-c/tests/build_one_example.sh'],
1084 timeout_seconds=10 * 60,
1085 shortname='objc-build-example-authsample',
1088 'SCHEME': 'AuthSample',
1089 'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
1091 self.config.job_spec(
1092 ['src/objective-c/tests/build_one_example.sh'],
1093 timeout_seconds=10 * 60,
1094 shortname='objc-build-example-sample',
1098 'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
1100 self.config.job_spec(
1101 ['src/objective-c/tests/build_one_example.sh'],
1102 timeout_seconds=10 * 60,
1103 shortname='objc-build-example-sample-frameworks',
1107 'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
1110 self.config.job_spec(
1111 ['src/objective-c/tests/build_one_example.sh'],
1112 timeout_seconds=10 * 60,
1113 shortname='objc-build-example-switftsample',
1116 'SCHEME': 'SwiftSample',
1117 'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
1119 self.config.job_spec(
1120 ['test/core/iomgr/ios/CFStreamTests/run_tests.sh'],
1121 timeout_seconds=20 * 60,
1122 shortname='cfstream-tests',
1124 environ=_FORCE_ENVIRON_FOR_WRAPPERS),
1127 def pre_build_steps(self):
1130 def make_targets(self):
1131 return ['interop_server']
1133 def make_options(self):
1136 def build_steps(self):
1138 ['src/objective-c/tests/build_tests.sh'],
1139 ['test/core/iomgr/ios/CFStreamTests/build_tests.sh'],
1142 def post_tests_steps(self):
1145 def makefile_name(self):
1148 def dockerfile_dir(self):
1155 class Sanity(object):
1157 def configure(self, config, args):
1158 self.config = config
1160 _check_compiler(self.args.compiler, ['default'])
1162 def test_specs(self):
1164 with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
1165 environ = {'TEST': 'true'}
1166 if _is_use_docker_child():
1167 environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
1168 environ['CLANG_TIDY_SKIP_DOCKER'] = 'true'
1170 self.config.job_spec(
1171 cmd['script'].split(),
1172 timeout_seconds=30 * 60,
1174 cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
1177 def pre_build_steps(self):
1180 def make_targets(self):
1181 return ['run_dep_checks']
1183 def make_options(self):
1186 def build_steps(self):
1189 def post_tests_steps(self):
1192 def makefile_name(self):
1195 def dockerfile_dir(self):
1196 return 'tools/dockerfile/test/sanity'
1202 # different configurations we can run under
1203 with open('tools/run_tests/generated/configs.json') as f:
1205 (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
1208 'c++': CLanguage('cxx', 'c++'),
1209 'c': CLanguage('c', 'c'),
1210 'grpc-node': RemoteNodeLanguage(),
1211 'php': PhpLanguage(),
1212 'php7': Php7Language(),
1213 'python': PythonLanguage(),
1214 'ruby': RubyLanguage(),
1215 'csharp': CSharpLanguage(),
1216 'objc': ObjCLanguage(),
1227 def _windows_arch_option(arch):
1228 """Returns msbuild cmdline option for selected architecture."""
1229 if arch == 'default' or arch == 'x86':
1230 return '/p:Platform=Win32'
1232 return '/p:Platform=x64'
1234 print('Architecture %s not supported.' % arch)
1238 def _check_arch_option(arch):
1239 """Checks that architecture option is valid."""
1240 if platform_string() == 'windows':
1241 _windows_arch_option(arch)
1242 elif platform_string() == 'linux':
1243 # On linux, we need to be running under docker with the right architecture.
1244 runtime_arch = platform.architecture()[0]
1245 if arch == 'default':
1247 elif runtime_arch == '64bit' and arch == 'x64':
1249 elif runtime_arch == '32bit' and arch == 'x86':
1252 print('Architecture %s does not match current runtime architecture.'
1256 if args.arch != 'default':
1257 print('Architecture %s not supported on current platform.' %
1262 def _docker_arch_suffix(arch):
1263 """Returns suffix to dockerfile dir to use."""
1264 if arch == 'default' or arch == 'x64':
1269 print('Architecture %s not supported with current settings.' % arch)
1273 def runs_per_test_type(arg_str):
1274 """Auxilary function to parse the "runs_per_test" flag.
1277 A positive integer or 0, the latter indicating an infinite number of
1281 argparse.ArgumentTypeError: Upon invalid input.
1283 if arg_str == 'inf':
1287 if n <= 0: raise ValueError
1290 msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
1291 raise argparse.ArgumentTypeError(msg)
1294 def percent_type(arg_str):
1295 pct = float(arg_str)
1296 if pct > 100 or pct < 0:
1297 raise argparse.ArgumentTypeError(
1298 "'%f' is not a valid percentage in the [0, 100] range" % pct)
# This is math.isclose in python >= 3.5
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True when a and b are equal within the given tolerances.

    Backport of math.isclose (PEP 485) for interpreters older than 3.5:
    true when |a - b| is within rel_tol of the larger magnitude, or
    within the absolute floor abs_tol.
    """
    diff = abs(a - b)
    largest = max(abs(a), abs(b))
    return diff <= max(rel_tol * largest, abs_tol)
# ---------------------------------------------------------------------------
# Command-line definition. Builds the ArgumentParser for the test runner and
# parses sys.argv into `args`, which the rest of the script reads globally.
# NOTE(review): many add_argument calls are truncated in this excerpt (flag
# names / defaults missing); confirm each option against the full file.
# ---------------------------------------------------------------------------
1307 # parse command line
1308 argp = argparse.ArgumentParser(description='Run grpc tests.')
1310 '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
1315 type=runs_per_test_type,
1316 help='A positive integer or "inf". If "inf", all tests will run in an '
1317 'infinite loop. Especially useful in combination with "-f"')
1318 argp.add_argument('-r', '--regex', default='.*', type=str)
1319 argp.add_argument('--regex_exclude', default='', type=str)
1320 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
1321 argp.add_argument('-s', '--slowdown', default=1.0, type=float)
1327 help='Run a random sample with that percentage of tests')
1329 '-f', '--forever', default=False, action='store_const', const=True)
1331 '-t', '--travis', default=False, action='store_const', const=True)
1333 '--newline_on_success', default=False, action='store_const', const=True)
1337 choices=sorted(_LANGUAGES.keys()),
1341 '-S', '--stop_on_failure', default=False, action='store_const', const=True)
1345 action='store_const',
1347 help='Run all the tests under docker. That provides ' +
1348 'additional isolation and prevents the need to install ' +
1349 'language specific prerequisites. Only available on Linux.')
1353 action='store_const',
1356 'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
1360 choices=['default', 'x86', 'x64'],
1363 'Selects architecture to target. For some platforms "default" is the only supported choice.'
# Allowed compilers span every supported language toolchain; validity of a
# particular value is checked later, per platform/language.
1368 'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc7.2',
1369 'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'clang7.0',
1370 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'pypy',
1371 'pypy3', 'python_alpine', 'all_the_cpythons', 'electron1.3',
1372 'electron1.6', 'coreclr', 'cmake', 'cmake_vs2015', 'cmake_vs2017'
1376 'Selects compiler to use. Allowed values depend on the platform and language.'
1380 choices=['native', 'uv', 'gevent'],
1382 help='Selects iomgr platform to build on')
1386 action='store_const',
1388 help='Perform all the build steps but don\'t run any tests.')
1390 '--measure_cpu_costs',
1392 action='store_const',
1394 help='Measure the cpu costs of tests')
1396 '--update_submodules',
1400 'Update some submodules before building. If any are updated, also run generate_projects. '
1402 'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
1404 argp.add_argument('-a', '--antagonists', default=0, type=int)
1410 help='Generates a JUnit-compatible XML report')
1412 '--report_suite_name',
1415 help='Test suite name to use in generated JUnit XML report')
1419 action='store_const',
1422 'Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. '
1423 + 'Useful when running many iterations of each test (argument -n).')
1425 '--force_default_poller',
1427 action='store_const',
1429 help='Don\'t try to iterate over many polling strategies when they exist')
1431 '--force_use_pollers',
1434 help='Only use the specified comma-delimited list of polling engines. '
1435 'Example: --force_use_pollers epoll1,poll '
1436 ' (This flag has no effect if --force_default_poller flag is also used)')
1438 '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
1440 '--bq_result_table',
1444 help='Upload test results to a specified BQ table.')
1446 '--auto_set_flakes',
1449 action='store_const',
1451 'Allow repeated runs for tests that have been failing recently (based on BQ historical data).'
1453 args = argp.parse_args()
# Optionally pull historical flakiness / cpu-cost data from BigQuery and use
# it to mark flaky tests and override per-test cpu estimates. Errors from the
# BQ query are reported but not fatal (see the format_exc message below).
1456 shortname_to_cpu = {}
1457 if args.auto_set_flakes:
1459 for test in get_bqtest_data():
1460 if test.flaky: flaky_tests.add(test.name)
1461 if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
1464 "Unexpected error getting flaky tests: %s" % traceback.format_exc())
# Poller overrides: --force_default_poller disables strategy iteration
# entirely; --force_use_pollers pins an explicit comma-separated list.
1466 if args.force_default_poller:
1467 _POLLING_STRATEGIES = {}
1468 elif args.force_use_pollers:
1469 _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
1471 jobset.measure_cpu_costs = args.measure_cpu_costs
# For each SUBMODULE_NAME[:BRANCH] spec, check out and pull the requested
# branch in third_party/<submodule>; if the submodule ships a
# gen_build_yaml.py, the generated projects must be refreshed afterwards.
# NOTE(review): excerpt is missing the len(spec)==1 branch and branch
# assignment lines; confirm against the full file.
1473 # update submodules if necessary
1474 need_to_regenerate_projects = False
1475 for spec in args.update_submodules:
1476 spec = spec.split(':', 1)
1480 elif len(spec) == 2:
1483 cwd = 'third_party/%s' % submodule
# Small helper to run a git command inside the submodule directory; cwd is
# bound as a default arg so each loop iteration keeps its own value.
1485 def git(cmd, cwd=cwd):
1486 print('in %s: git %s' % (cwd, cmd))
1487 run_shell_command('git %s' % cmd, cwd=cwd)
1490 git('checkout %s' % branch)
1491 git('pull origin %s' % branch)
1492 if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
1493 need_to_regenerate_projects = True
# Project regeneration only works on Linux; elsewhere we warn and continue.
1494 if need_to_regenerate_projects:
1495 if jobset.platform_string() == 'linux':
1496 run_shell_command('tools/buildgen/generate_projects.sh')
1499 'WARNING: may need to regenerate projects, but since we are not on')
1501 ' Linux this step is being skipped. Compilation MAY fail.')
# Resolve the run configuration and let each selected language configure
# itself against it.
1504 run_config = _CONFIGS[args.config]
1505 build_config = run_config.build_config
1508 _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
1510 languages = set(_LANGUAGES[l] for l in args.language)
1512 l.configure(run_config, args)
# Custom make options cannot be combined across languages except under gcov,
# where C & C++ happen to build together (see comment below).
1514 language_make_options = []
1515 if any(language.make_options() for language in languages):
1516 if not 'gcov' in args.config and len(languages) != 1:
1518 'languages with custom make options cannot be built simultaneously with other languages'
1522 # Combining make options is not clean and just happens to work. It allows C & C++ to build
1523 # together, and is only used under gcov. All other configs should build languages individually.
1524 language_make_options = list(
1527 for lang in languages
1528 for make_option in lang.make_options()
# --use_docker: re-exec this same invocation (minus the flag) inside a docker
# container via build_docker_and_run_tests.sh, passing the command and the
# dockerfile directory through environment variables.
1533 print('Seen --use_docker flag, will run tests under docker.')
1536 'IMPORTANT: The changes you are testing need to be locally committed'
1539 'because only the committed changes in the current branch will be')
1540 print('copied to the docker environment.')
# All selected languages must share a single dockerfile directory.
1543 dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
1544 if len(dockerfile_dirs) > 1:
1545 print('Languages to be tested require running under different docker '
1549 dockerfile_dir = next(iter(dockerfile_dirs))
# Strip --use_docker so the inner invocation runs the tests directly.
1551 child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
1552 run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
1555 env = os.environ.copy()
1556 env['RUN_TESTS_COMMAND'] = run_tests_cmd
1557 env['DOCKERFILE_DIR'] = dockerfile_dir
1558 env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
1560 env['XML_REPORT'] = args.xml_report
1562 env['TTY_FLAG'] = '-t' # enables Ctrl-C when not on Jenkins.
1564 subprocess.check_call(
1565 'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
1570 _check_arch_option(args.arch)
# Build the jobset.JobSpec list that compiles `targets` for config `cfg`.
# Three paths: cmake --build on Windows, plain `make -j` for cmake/build/
# makefiles (configuration already done in the pre-build step), and a full
# `make -f <makefile>` invocation with CONFIG/slowdown/extra options otherwise.
# NOTE(review): excerpt is missing the return/JobSpec wrapper lines for each
# branch; confirm against the full file.
1573 def make_jobspec(cfg, targets, makefile='Makefile'):
1574 if platform_string() == 'windows':
1578 'cmake', '--build', '.', '--target',
1579 '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
1581 cwd=os.path.dirname(makefile),
1582 timeout_seconds=None) for target in targets
1585 if targets and makefile.startswith('cmake/build/'):
1586 # With cmake, we've passed all the build configuration in the pre-build step already
1589 [os.getenv('MAKE', 'make'), '-j',
1590 '%d' % args.jobs] + targets,
1592 timeout_seconds=None)
1598 os.getenv('MAKE', 'make'), '-f', makefile, '-j',
1600 'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
1602 'CONFIG=%s' % cfg, 'Q='
1603 ] + language_make_options +
1604 ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
1605 timeout_seconds=None)
# Accumulate per-makefile target sets across all selected languages
# (loop header over `languages` is not visible in this excerpt).
1613 makefile = l.makefile_name()
1614 make_targets[makefile] = make_targets.get(makefile, set()).union(
1615 set(l.make_targets()))
def build_step_environ(cfg):
    """Build the environment dict passed to pre-build/build/post-test steps.

    Args:
      cfg: Build config name (a key of _CONFIGS, e.g. 'opt' or 'dbg').

    Returns:
      Dict with 'CONFIG' always set, plus 'MSBUILD_CONFIG' when the config
      maps to an MSBuild configuration (used by Windows builds).
    """
    environ = {'CONFIG': cfg}
    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
    # Only set MSBUILD_CONFIG when a mapping exists; .get() returns None
    # for non-Windows-relevant configs.
    if msbuild_cfg:
        environ['MSBUILD_CONFIG'] = msbuild_cfg
    # Bug fix: the excerpt built the dict but never returned it.
    return environ
# Assemble the sequential build pipeline: per-language pre-build steps,
# then the make/cmake commands, then per-language build steps; plus the
# post-test steps run after the test pass.
1630 environ=build_step_environ(build_config),
1631 timeout_seconds=_PRE_BUILD_STEP_TIMEOUT_SECONDS,
1634 for cmdline in l.pre_build_steps()))
1636 make_commands = itertools.chain.from_iterable(
1637 make_jobspec(build_config, list(targets), makefile)
1638 for (makefile, targets) in make_targets.items())
# set() de-duplicates identical make commands across languages.
1639 build_steps.extend(set(make_commands))
1644 environ=build_step_environ(build_config),
1645 timeout_seconds=None)
1647 for cmdline in l.build_steps()))
1649 post_tests_steps = list(
1651 jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
1653 for cmdline in l.post_tests_steps()))
1654 runs_per_test = args.runs_per_test
1655 forever = args.forever
# Best-effort shutdown of a leftover port server from a previous run: probe
# its /version_number endpoint, then ask it to exit via /quitquitquit.
# NOTE(review): the try/except wrapper around the probe is not visible in
# this excerpt; confirm error handling against the full file.
1658 def _shut_down_legacy_server(legacy_server_port):
1661 urllib.request.urlopen(
1662 'http://localhost:%d/version_number' % legacy_server_port,
1667 urllib.request.urlopen(
1668 'http://localhost:%d/quitquitquit' % legacy_server_port).read()
1671 def _calculate_num_runs_failures(list_of_results):
1672 """Caculate number of runs and failures for a particular test.
1675 list_of_results: (List) of JobResult object.
1677 A tuple of total number of runs and failures.
1679 num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
1681 for jobresult in list_of_results:
1682 if jobresult.retries > 0:
1683 num_runs += jobresult.retries
1684 if jobresult.num_failures > 0:
1685 num_failures += jobresult.num_failures
1686 return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
    """Enum-like failure categories returned by _build_and_run.

    Each attribute is a distinct sentinel object; callers compare with
    `in` / `is` (see the exit-code logic at the bottom of the script).
    """
    # Bug fix: BUILD and TEST are referenced elsewhere in this file
    # (e.g. `BuildAndRunError.BUILD in errors`) but only POST_TEST was
    # defined in the excerpt.
    BUILD = object()
    TEST = object()
    POST_TEST = object()
# Probe whether the kernel supports EPOLLEXCLUSIVE by running the
# check_epollexclusive helper binary for the current config, if present.
# NOTE(review): the return statements and the try: opener are not visible
# in this excerpt; confirm the True/False paths against the full file.
1697 def _has_epollexclusive():
1698 binary = 'bins/%s/check_epollexclusive' % args.config
1699 if not os.path.exists(binary):
1702 subprocess.check_call(binary)
1704 except subprocess.CalledProcessError as e:
1706 except OSError as e:
1707 # For languages other than C and Windows the binary won't exist
1711 # returns a list of things that failed (or an empty list on success)
1712 def _build_and_run(check_cancelled,
1716 """Do one pass of building & running tests."""
# Phase 1: run the sequential build steps; any failure aborts the pass.
1717 # build latest sequentially
1718 num_failures, resultset = jobset.run(
1721 stop_on_failure=True,
1722 newline_on_success=newline_on_success,
1725 return [BuildAndRunError.BUILD]
1729 report_utils.render_junit_xml_report(
1730 resultset, xml_report, suite_name=args.report_suite_name)
# Drop the 'epollex' polling strategy when the kernel lacks EPOLLEXCLUSIVE.
1733 if not args.travis and not _has_epollexclusive() and platform_string(
1734 ) in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string(
1736 print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
1737 _POLLING_STRATEGIES[platform_string()].remove('epollex')
# Phase 2: start antagonist load processes and the port server, then
# collect, filter and (optionally) sample/shuffle the test specs.
1741 subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
1742 for _ in range(0, args.antagonists)
1744 start_port_server.start_port_server()
1746 num_test_failures = 0
1748 infinite_runs = runs_per_test == 0
1750 spec for language in languages for spec in language.test_specs()
1751 if (re.search(args.regex, spec.shortname) and
1752 (args.regex_exclude == '' or
1753 not re.search(args.regex_exclude, spec.shortname))))
1754 # When running on travis, we want out test runs to be as similar as possible
1755 # for reproducibility purposes.
1756 if args.travis and args.max_time <= 0:
1757 massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
1759 # whereas otherwise, we want to shuffle things up to give all tests a
1761 massaged_one_run = list(
1762 one_run) # random.sample needs an indexable seq.
1763 num_jobs = len(massaged_one_run)
1764 # for a random sample, get as many as indicated by the 'sample_percent'
1765 # argument. By default this arg is 100, resulting in a shuffle of all
1767 sample_size = int(num_jobs * args.sample_percent / 100.0)
1768 massaged_one_run = random.sample(massaged_one_run, sample_size)
1769 if not isclose(args.sample_percent, 100.0):
1770 assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
1771 print("Running %d tests out of %d (~%d%%)" %
1772 (sample_size, num_jobs, args.sample_percent))
1774 assert len(massaged_one_run
1775 ) > 0, 'Must have at least one test for a -n inf run'
# Repeat the run list runs_per_test times (or forever for -n inf).
1776 runs_sequence = (itertools.repeat(massaged_one_run)
1777 if infinite_runs else itertools.repeat(
1778 massaged_one_run, runs_per_test))
1779 all_runs = itertools.chain.from_iterable(runs_sequence)
1781 if args.quiet_success:
1784 'Running tests quietly, only failing tests will be reported',
# Phase 3: run the tests in parallel and summarize flakes/failures.
1786 num_test_failures, resultset = jobset.run(
1789 newline_on_success=newline_on_success,
1792 maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
1793 stop_on_failure=args.stop_on_failure,
1794 quiet_success=args.quiet_success,
1795 max_time=args.max_time)
1797 for k, v in sorted(resultset.items()):
1798 num_runs, num_failures = _calculate_num_runs_failures(v)
1799 if num_failures > 0:
1800 if num_failures == num_runs: # what about infinite_runs???
1801 jobset.message('FAILED', k, do_newline=True)
1805 '%s [%d/%d runs flaked]' % (k, num_failures,
1809 for antagonist in antagonists:
# Phase 4: upload results to BigQuery and/or write the JUnit XML report.
1811 if args.bq_result_table and resultset:
1812 upload_extra_fields = {
1813 'compiler': args.compiler,
1814 'config': args.config,
1815 'iomgr_platform': args.iomgr_platform,
1816 'language': args.language[
1817 0], # args.language is a list but will always have one element when uploading to BQ is enabled.
1818 'platform': platform_string()
1820 upload_results_to_bq(resultset, args.bq_result_table,
1821 upload_extra_fields)
1822 if xml_report and resultset:
1823 report_utils.render_junit_xml_report(
1824 resultset, xml_report, suite_name=args.report_suite_name)
# Phase 5: run the post-test steps and collect the failure categories.
1826 number_failures, _ = jobset.run(
1829 stop_on_failure=False,
1830 newline_on_success=newline_on_success,
1835 out.append(BuildAndRunError.POST_TEST)
1836 if num_test_failures:
1837 out.append(BuildAndRunError.TEST)
# Main driver. With -f/--forever: watch the source trees and re-run the
# build-and-test pass whenever files change; otherwise run a single pass and
# exit with a code composed from the BuildAndRunError categories.
1845 dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
1846 initial_time = dw.most_recent_change()
1847 have_files_changed = lambda: dw.most_recent_change() != initial_time
1848 previous_success = success
1849 errors = _build_and_run(
1850 check_cancelled=have_files_changed,
1851 newline_on_success=False,
1852 build_only=args.build_only) == 0
1853 if not previous_success and not errors:
1856 'All tests are now passing properly',
# Idle until the directory watcher reports a change, then loop again.
1858 jobset.message('IDLE', 'No change detected')
1859 while not have_files_changed():
1862 errors = _build_and_run(
1863 check_cancelled=lambda: False,
1864 newline_on_success=args.newline_on_success,
1865 xml_report=args.xml_report,
1866 build_only=args.build_only)
1868 jobset.message('SUCCESS', 'All tests passed', do_newline=True)
1870 jobset.message('FAILED', 'Some tests failed', do_newline=True)
# Compose the process exit code from which categories failed (the bit
# values set for each branch are not visible in this excerpt).
1872 if BuildAndRunError.BUILD in errors:
1874 if BuildAndRunError.TEST in errors:
1876 if BuildAndRunError.POST_TEST in errors: