3 # Copyright 2013 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
7 """Runs all types of tests from one unified interface."""
19 from pylib import android_commands
20 from pylib import constants
21 from pylib import forwarder
22 from pylib import ports
23 from pylib.base import base_test_result
24 from pylib.base import environment_factory
25 from pylib.base import test_dispatcher
26 from pylib.base import test_instance_factory
27 from pylib.base import test_run_factory
28 from pylib.gtest import gtest_config
29 from pylib.gtest import setup as gtest_setup
30 from pylib.gtest import test_options as gtest_test_options
31 from pylib.linker import setup as linker_setup
32 from pylib.host_driven import setup as host_driven_setup
33 from pylib.instrumentation import setup as instrumentation_setup
34 from pylib.instrumentation import test_options as instrumentation_test_options
35 from pylib.junit import setup as junit_setup
36 from pylib.junit import test_dispatcher as junit_dispatcher
37 from pylib.monkey import setup as monkey_setup
38 from pylib.monkey import test_options as monkey_test_options
39 from pylib.perf import setup as perf_setup
40 from pylib.perf import test_options as perf_test_options
41 from pylib.perf import test_runner as perf_test_runner
42 from pylib.uiautomator import setup as uiautomator_setup
43 from pylib.uiautomator import test_options as uiautomator_test_options
44 from pylib.utils import apk_helper
45 from pylib.utils import command_option_parser
46 from pylib.utils import report_results
47 from pylib.utils import reraiser_thread
48 from pylib.utils import run_tests_helper
def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  group = optparse.OptionGroup(option_parser, 'Common Options')
  # BUILDTYPE env var picks the default build type; --debug/--release override.
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  group.add_option('--debug', action='store_const', const='Debug',
                   dest='build_type', default=default_build_type,
                   help=('If set, run test suites under out/Debug. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('--release', action='store_const',
                   const='Release', dest='build_type',
                   help=('If set, run test suites under out/Release.'
                         ' Default is env var BUILDTYPE or Debug.'))
  group.add_option('--build-directory', dest='build_directory',
                   help=('Path to the directory in which build files are'
                         ' located (should not include build type)'))
  group.add_option('--num_retries', dest='num_retries', type='int',
                   # NOTE(review): a default= argument and the tail/closing of
                   # this help string appear to be missing from this copy.
                   help=('Number of retries for a test before '
  group.add_option('-v',
                   # NOTE(review): the '--verbose'/dest=/action='count'
                   # arguments of this option appear to be missing here.
                   help='Verbose level (multiple times for more)')
  group.add_option('--flakiness-dashboard-server',
                   dest='flakiness_dashboard_server',
                   help=('Address of the server that is hosting the '
                         'Chrome for Android flakiness dashboard.'))
  group.add_option('--enable-platform-mode', action='store_true',
                   help=('Run the test scripts in platform mode, which '
                         'conceptually separates the test runner from the '
                         '"device" (local or remote, real or emulated) on '
                         'which the tests are running. [experimental]'))
  group.add_option('-e', '--environment', default='local',
                   help=('Test environment to run in. Must be one of: %s' %
                         ', '.join(constants.VALID_ENVIRONMENTS)))
  option_parser.add_option_group(group)
def ProcessCommonOptions(options, error_func):
  """Applies the common options: configures logging and build settings,
  then validates the requested test environment.
  """
  run_tests_helper.SetLogLevel(options.verbose_count)
  constants.SetBuildType(options.build_type)
  if options.build_directory:
    constants.SetBuildDirectory(options.build_directory)
  valid_envs = constants.VALID_ENVIRONMENTS
  if options.environment not in valid_envs:
    error_func('--environment must be one of: %s' % ', '.join(valid_envs))
def AddDeviceOptions(option_parser):
  """Adds device-targeting options to |option_parser|."""
  group = optparse.OptionGroup(option_parser, 'Device Options')
  group.add_option('-c', dest='cleanup_test_files',
                   help='Cleanup test files on the device after run',
                   # NOTE(review): the action= argument and the closing of
                   # this call appear to be missing from this copy.
  group.add_option('--tool',
                   # NOTE(review): a dest= argument appears to be missing here.
                   help=('Run the test under a tool '
                         '(use --tool help to list them)'))
  group.add_option('-d', '--device', dest='test_device',
                   help=('Target device for the test suite '
                   # NOTE(review): the tail of this help string and the closing
                   # of this call appear to be missing from this copy.
  option_parser.add_option_group(group)
def AddGTestOptions(option_parser):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.commands_dict = {}
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Make this option required
  option_parser.add_option('-s', '--suite', dest='suite_name',
                           help=('Executable name of the test suite to run '
                                 '(use -s help to list them).'))
  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
                           # NOTE(review): a dest= argument appears missing.
                           help='googletest-style filter string.')
  option_parser.add_option('--gtest_also_run_disabled_tests',
                           '--gtest-also-run-disabled-tests',
                           dest='run_disabled', action='store_true',
                           help='Also run disabled tests if applicable.')
  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
                           # NOTE(review): a default= line appears missing.
                           help='Additional arguments to pass to the test.')
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           # NOTE(review): the type=/default= arguments and the
                           # closing of this call appear to be missing.
  option_parser.add_option('--isolate_file_path',
                           '--isolate-file-path',
                           dest='isolate_file_path',
                           help='.isolate file path to override the default '
                           # NOTE(review): the tail of this help string and the
                           # closing of this call appear to be missing.
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)
  AddDeviceOptions(option_parser)
def AddLinkerTestOptions(option_parser):
  """Adds linker test options to |option_parser|."""
  option_parser.usage = '%prog linker'
  option_parser.example = '%prog linker'
  option_parser.commands_dict = {}
  option_parser.add_option(
      '-f', '--gtest-filter', dest='test_filter',
      help='googletest-style filter string.')
  AddCommonOptions(option_parser)
  AddDeviceOptions(option_parser)
def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    print 'Available test suites are:'
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      # NOTE(review): the loop body (printing each suite name) and an early
      # return appear to be missing from this copy.

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  # NOTE(review): an 'else:' introducing this fallback appears to be missing.
    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test-filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            # NOTE(review): the final fragment of this help string and the
            # closing of this call appear to be missing from this copy.
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            # NOTE(review): the final fragment of this help string and the
            # closing of this call appear to be missing from this copy.
  option_parser.add_option(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  option_parser.add_option(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option(
      '--official-build', action='store_true', help='Run official build tests.')
  option_parser.add_option(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
def ProcessJavaTestOptions(options):
  """Processes options/arguments and populates |options| with defaults."""

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  # NOTE(review): an 'else:' branch header appears to be missing here.
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                           'EnormousTest', 'IntegrationTest']

  if options.exclude_annotation_str:
    options.exclude_annotations = options.exclude_annotation_str.split(',')
  # NOTE(review): an 'else:' branch header appears to be missing here.
    options.exclude_annotations = []
def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromeShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)
  AddDeviceOptions(option_parser)

  option_parser.add_option('-j', '--java-only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python-only', action='store_true',
                           # NOTE(review): a default= line appears missing.
                           help='Run only the host-driven tests.')
  option_parser.add_option('--host-driven-root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           # NOTE(review): an action='store_true' argument
                           # appears to be missing from this copy.
                           help='Wait for debugger.')
  option_parser.add_option(
      '--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest").'))
  option_parser.add_option('--coverage-dir',
                           help=('Directory in which to place all generated '
                                 'EMMA coverage files.'))
  option_parser.add_option('--device-flags', dest='device_flags', default='',
                           help='The relative filepath to a file containing '
                                'command-line flags to set on the device')
def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populate |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(options)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  # Host-driven (python) tests need a root directory to load from.
  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')

  options.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % options.test_apk)
  # e.g. Foo.apk -> FooSupport.apk, alongside the test apk.
  options.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(options.test_apk_path))

  options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path)

  # NOTE(review): several positional arguments of this constructor call and
  # its closing paren appear to be missing from this copy.
  return instrumentation_test_options.InstrumentationOptions(
      options.cleanup_test_files,
      options.exclude_annotations,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk_path,
      options.test_apk_jar_path,
      options.test_support_apk_path,
def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
      ' --package=chrome_shell')
  option_parser.add_option(
      # NOTE(review): the option name ('--package') appears to be missing here.
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            # NOTE(review): the tail of this help string and the closing of
            # this call appear to be missing from this copy.

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)
  AddDeviceOptions(option_parser)
def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(options)

  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    options.uiautomator_jar = options.test_jar
  # NOTE(review): an 'else:' branch header appears to be missing here.
    options.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      # NOTE(review): the suffix appended here and the closing paren appear
      # to be missing from this copy.

  # NOTE(review): several positional arguments of this constructor call and
  # its closing paren appear to be missing from this copy.
  return uiautomator_test_options.UIAutomatorOptions(
      options.cleanup_test_files,
      options.exclude_annotations,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
def AddJUnitTestOptions(option_parser):
  """Adds junit test options to |option_parser|."""
  option_parser.usage = '%prog junit -s [test suite name]'
  option_parser.commands_dict = {}
  # Bind once; each call below registers one junit flag.
  add = option_parser.add_option
  add('-s', '--test-suite', dest='test_suite',
      help=('JUnit test suite to run.'))
  add('-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  add('--package-filter', dest='package_filter',
      help='Filters tests by package.')
  add('--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  add('--sdk-version', dest='sdk_version', type='int',
      help='The Android SDK version.')
  AddCommonOptions(option_parser)
def ProcessJUnitTestOptions(options, error_func):
  """Processes all JUnit test options."""
  if not options.test_suite:
    error_func('No test suite specified.')
  # NOTE(review): the caller (_RunJUnitTests) uses this function's return
  # value, but a final 'return options' appears to be missing from this copy.
def AddMonkeyTestOptions(option_parser):
  """Adds monkey test options to |option_parser|."""

  option_parser.usage = '%prog monkey [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog monkey --package=chrome_shell')

  option_parser.add_option(
      # NOTE(review): the option name ('--package') appears to be missing here.
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  option_parser.add_option(
      '--category', default='',
      help='A list of allowed categories.')
  option_parser.add_option(
      '--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default]. ')
  option_parser.add_option(
      '--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  option_parser.add_option(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)
  AddDeviceOptions(option_parser)
def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  category = options.category
  # NOTE(review): the guard ('if options.category:') for this split appears
  # to be missing from this copy.
    category = options.category.split(',')

  # NOTE(review): most positional arguments of this constructor call and its
  # closing paren appear to be missing from this copy.
  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
def AddPerfTestOptions(option_parser):
  """Adds perf test options to |option_parser|."""

  option_parser.usage = '%prog perf [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog perf '
                           '[--single-step -- command args] or '
                           '[--steps perf_steps.json] or '
                           '[--print-step step]')

  option_parser.add_option(
      # NOTE(review): the option name ('--single-step') appears missing here.
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  option_parser.add_option(
      # NOTE(review): the option name ('--steps') appears missing here.
      help='JSON file containing the list of commands to run.')
  option_parser.add_option(
      # NOTE(review): the option name ('--flaky-steps') appears missing here.
      help=('A JSON file containing steps that are flaky '
            'and will have its exit code ignored.'))
  option_parser.add_option(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  option_parser.add_option(
      # NOTE(review): the option name ('--print-step') appears missing here.
      help='The name of a previously executed perf step to print.')
  option_parser.add_option(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  option_parser.add_option(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  option_parser.add_option(
      # NOTE(review): the option name ('--dry-run') appears missing here.
      help='Just print the steps without executing.')
  AddCommonOptions(option_parser)
  AddDeviceOptions(option_parser)
def ProcessPerfTestOptions(options, args, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    args: List of extra args from optparse (the single-step command).
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # Only one of steps, print_step or single_step must be provided.
  count = len(filter(None,
      [options.steps, options.print_step, options.single_step]))
  # NOTE(review): the condition guarding this error call (count != 1) appears
  # to be missing from this copy.
    error_func('Please specify one of: --steps, --print-step, --single-step.')
  # NOTE(review): an initialization of 'single_step' (to None) appears to be
  # missing; it is referenced unconditionally in the return below.
  if options.single_step:
    single_step = ' '.join(args[2:])
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.output_json_list,
      options.print_step, options.no_timeout, options.test_filter,
      options.dry_run, single_step)
def AddPythonTestOptions(option_parser):
  """Adds python unit test options to |option_parser|."""
  # Bug fix: the two help-string fragments concatenated without a space,
  # rendering as "...to run(use -s help...". Add the missing space.
  option_parser.add_option('-s', '--suite', dest='suite_name',
                           help=('Name of the test suite to run '
                                 '(use -s help to list them).'))
  AddCommonOptions(option_parser)
def ProcessPythonTestOptions(options, error_func):
  """Validates that |options.suite_name| names a known python test suite."""
  if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES:
    available = ('Available test suites: [%s]' %
                 ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys()))
    if options.suite_name == 'help':
      # NOTE(review): the 'help' branch body (presumably printing |available|)
      # appears to be missing from this copy.
    error_func('"%s" is not a valid suite. %s' %
               (options.suite_name, available))
def _RunGTests(options, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  ProcessGTestOptions(options)

  # NOTE(review): an 'exit_code = 0' initialization appears to be missing from
  # this copy; exit_code is read below before any assignment otherwise.
  for suite_name in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # NOTE(review): several positional arguments of GTestOptions and the
    # closing of this call appear to be missing from this copy.
    gtest_options = gtest_test_options.GTestOptions(
        options.cleanup_test_files,
        options.run_disabled,
        options.test_arguments,
        options.isolate_file_path,
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    # Only escalate the exit code; never downgrade an earlier hard error.
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    # NOTE(review): a 'results=results' argument appears to be missing here.
    report_results.LogFull(
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=options.flakiness_dashboard_server)

  # Clean up any deps pulled in via .isolate files.
  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  # NOTE(review): a final 'return exit_code' appears to be missing.
def _RunLinkerTests(options, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  # NOTE(review): a 'results=results' argument appears to be missing here.
  report_results.LogFull(
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  # NOTE(review): a final 'return exit_code' appears to be missing.
def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  # A debugger session can only target one device.
  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  # NOTE(review): an 'exit_code = 0' initialization appears to be missing from
  # this copy.

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    test_results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

    # Only allow exit code escalation
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

  # Resolve the device-flags file relative to the source root.
  if options.device_flags:
    options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                        options.device_flags)

  # NOTE(review): a 'results=results' argument appears to be missing here.
  report_results.LogFull(
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  # NOTE(review): a final 'return exit_code' appears to be missing.
def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  # NOTE(review): a 'results=results' argument appears to be missing here.
  report_results.LogFull(
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  # NOTE(review): a final 'return exit_code' appears to be missing.
def _RunJUnitTests(options, error_func):
  """Subcommand of RunTestsCommand which runs junit tests."""
  junit_options = ProcessJUnitTestOptions(options, error_func)
  runner_factory, tests = junit_setup.Setup(junit_options)
  _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
  # NOTE(review): a final 'return exit_code' appears to be missing.
def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  # Monkey tests are not sharded: each device runs its own session.
  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  # NOTE(review): the 'results=' and 'test_type=' arguments of this call
  # appear to be missing from this copy.
  report_results.LogFull(
      test_package='Monkey')

  # NOTE(review): a final 'return exit_code' appears to be missing.
def _RunPerfTests(options, args, error_func):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, args, error_func)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but have no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  # NOTE(review): the keyword arguments of this call (results=, test_type=,
  # test_package=) appear to be missing from this copy.
  report_results.LogFull(

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  # NOTE(review): the final 'return 0' appears to be missing from this copy.
def _RunPythonTests(options, error_func):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  ProcessPythonTestOptions(options, error_func)

  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  # Temporarily prepend the suite path so its test modules can be imported.
  sys.path = [suite_path] + sys.path
  # NOTE(review): a 'try:' introducing the block below appears to be missing
  # from this copy.
  suite = unittest.TestSuite()
  suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                 for m in suite_test_modules)
  runner = unittest.TextTestRunner(verbosity=1+options.verbose_count)
  return 0 if runner.run(suite).wasSuccessful() else 1
  # NOTE(review): a 'finally:' presumably guarded this sys.path restoration;
  # as written it is unreachable after the return above.
  sys.path = sys.path[1:]
def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = []

  attached_devices = android_commands.GetAttachedDevices()
  # NOTE(review): an 'if test_device:' guard appears to be missing around the
  # assert and the single-device selection below.
  assert test_device in attached_devices, (
      'Did not find device %s among attached device. Attached devices: %s'
      % (test_device, ', '.join(attached_devices)))
  attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)
def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this dispatch.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """

  # Check for extra arguments
  if len(args) > 2 and command != 'perf':
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE
  if command == 'perf':
    # perf is special: --single-step consumes trailing args as the command.
    if ((options.single_step and len(args) <= 2) or
        (not options.single_step and len(args) > 2)):
      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
      return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options, option_parser.error)

  if options.enable_platform_mode:
    return RunTestsInPlatformMode(command, options, option_parser)

  # NOTE(review): the local-machine branch body and its 'else:' counterpart
  # appear to be partially missing from this copy; as shown, device discovery
  # runs only for local-machine commands, which looks inverted.
  if command in constants.LOCAL_MACHINE_TESTS:
    devices = _GetAttachedDevices(options.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(options, devices)
  elif command == 'linker':
    return _RunLinkerTests(options, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(options, option_parser.error, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(options, option_parser.error, devices)
  elif command == 'junit':
    return _RunJUnitTests(options, option_parser.error)
  elif command == 'monkey':
    return _RunMonkeyTests(options, option_parser.error, devices)
  elif command == 'perf':
    return _RunPerfTests(options, args, option_parser.error)
  elif command == 'python':
    return _RunPythonTests(options, option_parser.error)
  # NOTE(review): an 'else:' introducing this raise appears to be missing.
  raise Exception('Unknown test type.')
# Commands that may be dispatched through the platform-mode path.
# NOTE(review): the list contents and the closing bracket of this literal
# appear to be missing from this copy.
_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
def RunTestsInPlatformMode(command, options, option_parser):
  """Runs |command| via the modular platform-mode environment/instance/run
  factories instead of the legacy per-type dispatch."""
  if command not in _SUPPORTED_IN_PLATFORM_MODE:
    option_parser.error('%s is not yet supported in platform mode' % command)

  with environment_factory.CreateEnvironment(
      command, options, option_parser.error) as env:
    with test_instance_factory.CreateTestInstance(
        command, options, option_parser.error) as test:
      with test_run_factory.CreateTestRun(
          options, env, test, option_parser.error) as test_run:
        results = test_run.RunTests()

        # NOTE(review): a 'results=results' argument appears to be missing.
        report_results.LogFull(
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=options.annotations,
            flakiness_server=options.flakiness_dashboard_server)

  # NOTE(review): the final return (derived from |results|) appears to be
  # missing from this copy.
def HelpCommand(command, _options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    _options: optparse options dictionary. unused.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicated exit code.
  """
  # If we don't have any args, display overall help
  # NOTE(review): the length check guarding this branch appears missing.
    option_parser.print_help()

  # If we have too many args, print an error
  # NOTE(review): the length check guarding this branch appears missing.
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
    return constants.ERROR_EXIT_CODE

  # NOTE(review): the assignment of |command| from args appears to be missing.
  if command not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for itself.
  if command == 'help':
    option_parser.print_help()
    # NOTE(review): an early 'return 0' appears to be missing here.

  VALID_COMMANDS[command].add_options_func(option_parser)
  option_parser.usage = '%prog ' + command + ' [options]'
  option_parser.commands_dict = {}
  option_parser.print_help()

  # NOTE(review): the final 'return 0' appears to be missing.
# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
# NOTE(review): the 'VALID_COMMANDS = {' opener and the closing brace of this
# dict literal appear to be missing from this copy.
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'junit': CommandFunctionTuple(
        AddJUnitTestOptions, RunTestsCommand),
    'monkey': CommandFunctionTuple(
        AddMonkeyTestOptions, RunTestsCommand),
    'perf': CommandFunctionTuple(
        AddPerfTestOptions, RunTestsCommand),
    'python': CommandFunctionTuple(
        AddPythonTestOptions, RunTestsCommand),
    'linker': CommandFunctionTuple(
        AddLinkerTestOptions, RunTestsCommand),
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
def DumpThreadStacks(_signal, _frame):
  """Signal handler: logs the stack of every live thread."""
  for live_thread in threading.enumerate():
    reraiser_thread.LogThreadStack(live_thread)
  # NOTE(review): the enclosing 'def main():' header appears to be missing
  # from this copy; these statements form main's body.
  # SIGUSR1 dumps all thread stacks, for debugging hangs.
  signal.signal(signal.SIGUSR1, DumpThreadStacks)
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)
if __name__ == '__main__':
  # NOTE(review): the guarded body (presumably 'sys.exit(main())') appears to
  # be missing from this copy.