# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time

from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result

sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper  # pylint: disable=F0401


_PERF_TEST_ANNOTATION = 'PerfTest'


def _GetDataFilesForTestSuite(suite_basename):
  """Returns a list of data files/dirs needed by the test suite.

  Args:
    suite_basename: The test suite basename for which to return file paths.

  Returns:
    A list of test file and directory paths.
  """
  test_files = []
  if suite_basename in ['ChromeTest', 'ContentShellTest']:
    test_files += [
        'net/data/ssl/certificates/',
    ]
  return test_files
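
# For example, _GetDataFilesForTestSuite('ContentShellTest') returns
# ['net/data/ssl/certificates/'], while a suite with no registered data
# files yields an empty list.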


class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index

    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if len(cmdline_file) and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      self.flags = None
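
  # Each constants.PACKAGE_INFO entry names the device file holding that
  # package's command-line flags; for Chrome this is a path like
  # /data/local/chrome-command-line (illustrative). FlagChanger rewrites
  # that file so the added flags take effect when the package launches.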

  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)

  #override
  def PushDataDeps(self):
    # TODO(frankf): Implement a general approach for copying/installing
    # once across test runners.
    if TestRunner._DEVICE_HAS_TEST_FILES.get(str(self.device), False):
      logging.warning('Already copied test files to device %s, skipping.',
                      str(self.device))
      return

    test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
    if test_data:
      # Make sure SD card is ready.
      self.device.WaitUntilFullyBooted(timeout=20)
      for p in test_data:
        self.device.PushChangedFiles(
            os.path.join(constants.DIR_SOURCE_ROOT, p),
            os.path.join(self.device.GetExternalStoragePath(), p))

    # TODO(frankf): Specify test data in this file as opposed to passing
    # it as a command-line option.
    for dest_host_pair in self.options.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                          host_src)
      if os.path.exists(host_test_files_path):
        self.device.PushChangedFiles(
            host_test_files_path,
            '%s/%s/%s' % (
                self.device.GetExternalStoragePath(),
                TestRunner._DEVICE_DATA_DIR,
                dst_layer))
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[str(self.device)] = True
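
  # Each test_data entry has the form '<device subdir>:<host path>'. An
  # (illustrative) entry 'base:base/test/data' pushes
  # <checkout>/base/test/data on the host to
  # <external storage>/chrome/test/data/base on the device.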

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file
    return ret
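
  # With coverage enabled this returns, for example,
  # {'coverage': 'true', 'coverageFile': <device coverage path>}; _RunTest
  # then turns each pair into '-e <key> <value>' arguments to
  # 'am instrument'.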

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)
146 """Sets up the test harness and device before all tests are run."""
147 super(TestRunner, self).SetUp()
148 if not self.device.HasRoot():
149 logging.warning('Unable to enable java asserts for %s, non rooted device',
152 if self.device.SetJavaAsserts(True):
153 # TODO(jbudorick) How to best do shell restart after the
154 # android_commands refactor?
155 self.device.RunShellCommand('stop')
156 self.device.RunShellCommand('start')
158 # We give different default value to launch HTTP server based on shard index
159 # because it may have race condition when multiple processes are trying to
160 # launch lighttpd with same port at same time.
161 self.LaunchTestHttpServer(
162 os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
164 self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
165 if self.options.device_flags:
166 with open(self.options.device_flags) as device_flags_file:
167 stripped_flags = (l.strip() for l in device_flags_file)
168 self.flags.AddFlags([flag for flag in stripped_flags if flag])
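
  # The device_flags option names a host file with one flag per line; blank
  # lines are ignored. An (illustrative) file containing
  #   --enable-logging
  #   --v=1
  # adds both flags for every test in the run.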
171 """Cleans up the test harness and saves outstanding data from test run."""
174 super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)
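
  # For a test named, say, 'org.chromium.FooTest#testBar' (illustrative),
  # the coverage file is written on the device as
  # <external storage>/chrome/test/coverage/org.chromium.FooTest#testBar.ec
  # and is pulled back into coverage_dir by TestTeardown.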

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.device.old_interface.Adb().SendCommand(
        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.device.old_interface.StartMonitoringLogcat()

  def TestTeardown(self, test, result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle performance
    tracking. This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      result: result for this test.
    """
    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not result or not result.DidRunPass():
      return

    self.TearDownPerfMonitoring(test)

    if self.coverage_dir:
      self.device.PullFile(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which
    # traces to parse.
    regex = self.device.old_interface.WaitForLogMatch(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\)\:(.*)'),
        None)

    # If the test is set to run on a specific device type only (e.g. only
    # tablet or phone) and it is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- to it
    # so we know to ignore the results. The --NORUN-- tag is managed by
    # MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':
      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise Exception('Perf file does not exist or is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test ' +
                     test + ' to ' + json_local_file)

      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(
              json_string, perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])
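
  # The logcat annotation consumed above has the (illustrative) shape
  #   **PERFANNOTATION(testFoo):trace1,measurement1,trace_label1;trace2,...
  # Each semicolon-separated entry carries exactly three comma-separated
  # tokens: the trace name looked up in the JSON perf data, then the two
  # names under which the averaged result is reported via PrintPerfResult.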

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale
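
  # For example, a test annotated with TimeoutScale:2 runs with twice its
  # size-based timeout, and --wait-for-debugger scales the timeout by a
  # further factor of 100 to leave time to attach a debugger.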

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60

    logging.warn("Test size not found in annotations for test '%s', using "
                 "1 minute for timeout.", test)
    return 1 * 60

  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.

    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.

    Returns:
      The raw output of am instrument as a list of lines.
    """
    # Build the 'am instrument' command.
    instrumentation_path = (
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner))

    cmd = ['am', 'instrument', '-r']
    for k, v in self._GetInstrumentationArgs().iteritems():
      cmd.extend(['-e', k, "'%s'" % v])
    cmd.extend(['-e', 'class', "'%s'" % test])
    cmd.extend(['-w', instrumentation_path])
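    # The assembled command resembles, with illustrative names:
    #   am instrument -r -e class 'org.chromium.FooTest#testBar' \
    #       -w com.example.tests/android.test.InstrumentationTestRunner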
    return self.device.RunShellCommand(cmd, timeout=timeout, retries=0)

  @staticmethod
  def _ParseAmInstrumentRawOutput(raw_output):
    """Parses the output of an |am instrument -r| call.

    Args:
      raw_output: the output of an |am instrument -r| call as a list of lines.

    Returns:
      A 3-tuple containing:
        - the instrumentation code as an integer
        - the instrumentation result as a list of lines
        - the instrumentation statuses received as a list of 2-tuples
          containing:
          - the status code as an integer
          - the bundle dump as a dict mapping string keys to a list of
            strings, one for each line.
    """
    INSTR_STATUS = 'INSTRUMENTATION_STATUS: '
    INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
    INSTR_RESULT = 'INSTRUMENTATION_RESULT: '
    INSTR_CODE = 'INSTRUMENTATION_CODE: '
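
    # With -r, am instrument emits raw line-oriented output. A passing test
    # produces a sequence like (illustrative):
    #   INSTRUMENTATION_STATUS: class=org.chromium.FooTest
    #   INSTRUMENTATION_STATUS: test=testBar
    #   INSTRUMENTATION_STATUS_CODE: 1
    #   ...
    #   INSTRUMENTATION_STATUS_CODE: 0
    #   INSTRUMENTATION_CODE: -1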

    last = None
    last_key = None
    instr_code = None
    instr_result = []
    instr_statuses = []
    bundle = {}
    for line in raw_output:
      if line.startswith(INSTR_STATUS):
        instr_var = line[len(INSTR_STATUS):]
        if '=' in instr_var:
          k, v = instr_var.split('=', 1)
          bundle[k] = [v]
          last_key = k
        else:
          logging.debug('Unknown "%s" line: %s' % (INSTR_STATUS, line))
        last = INSTR_STATUS

      elif line.startswith(INSTR_STATUS_CODE):
        instr_status = line[len(INSTR_STATUS_CODE):]
        instr_statuses.append((int(instr_status), bundle))
        bundle = {}
        last = INSTR_STATUS_CODE

      elif line.startswith(INSTR_RESULT):
        instr_result.append(line[len(INSTR_RESULT):])
        last = INSTR_RESULT

      elif line.startswith(INSTR_CODE):
        instr_code = int(line[len(INSTR_CODE):])
        last = INSTR_CODE

      elif last == INSTR_STATUS and last_key is not None:
        # Continuation of a multi-line status value.
        bundle[last_key].append(line)

      elif last == INSTR_RESULT:
        instr_result.append(line)

    return (instr_code, instr_result, instr_statuses)

  def _GenerateTestResult(self, test, instr_statuses, start_ms, duration_ms):
    """Generate the result of |test| from |instr_statuses|.

    Args:
      instr_statuses: A list of 2-tuples containing:
        - the status code as an integer
        - the bundle dump as a dict mapping string keys to string values
        Note that this is the same as the third item in the 3-tuple returned
        by |_ParseAmInstrumentRawOutput|.
      start_ms: The start time of the test in milliseconds.
      duration_ms: The duration of the test in milliseconds.

    Returns:
      An InstrumentationTestResult object.
    """
    INSTR_STATUS_CODE_START = 1
    INSTR_STATUS_CODE_OK = 0
    INSTR_STATUS_CODE_ERROR = -1
    INSTR_STATUS_CODE_FAIL = -2
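
    # These mirror the codes reported by Android's InstrumentationTestRunner:
    # 1 when a test starts, 0 on success, -1 on error, -2 on failure.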

    log = ''
    result_type = base_test_result.ResultType.UNKNOWN

    for status_code, bundle in instr_statuses:
      if status_code == INSTR_STATUS_CODE_START:
        pass
      elif status_code == INSTR_STATUS_CODE_OK:
        bundle_test = '%s#%s' % (
            ''.join(bundle.get('class', [''])),
            ''.join(bundle.get('test', [''])))
        skipped = ''.join(bundle.get('test_skipped', ['']))

        if (test == bundle_test and
            result_type == base_test_result.ResultType.UNKNOWN):
          result_type = base_test_result.ResultType.PASS
        elif skipped.lower() in ('true', '1', 'yes'):
          result_type = base_test_result.ResultType.SKIP
          logging.info('Skipped %s', test)
      else:
        if status_code not in (INSTR_STATUS_CODE_ERROR,
                               INSTR_STATUS_CODE_FAIL):
          logging.info('Unrecognized status code %d. Handling as an error.',
                       status_code)
        result_type = base_test_result.ResultType.FAIL
        if 'stack' in bundle:
          log = '\n'.join(bundle['stack'])
        # Dismiss any error dialogs. Limit the number in case we have an
        # error loop or we are failing to dismiss.
        for _ in xrange(10):
          package = self.device.old_interface.DismissCrashDialogIfNeeded()
          if not package:
            break
          # Assume test package convention of a '.test' suffix.
          if package in self.test_pkg.GetPackageName():
            result_type = base_test_result.ResultType.CRASH
            break

    return test_result.InstrumentationTestResult(
        test, result_type, start_ms, duration_ms, log=log)

  #override
  def RunTest(self, test):
    results = base_test_result.TestRunResults()
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
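    # For example, a MediumTest (180 seconds) annotated with TimeoutScale:2
    # and run without any tool scaling gets a 360-second timeout.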

    start_ms = 0
    duration_ms = 0
    try:
      self.TestSetup(test)

      time_ms = lambda: int(time.time() * 1000)
      start_ms = time_ms()
      raw_output = self._RunTest(test, timeout)
      duration_ms = time_ms() - start_ms

      # Parse the test output.
      _, _, statuses = self._ParseAmInstrumentRawOutput(raw_output)
      result = self._GenerateTestResult(test, statuses, start_ms, duration_ms)
      results.AddResult(result)
    except device_errors.CommandTimeoutError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
          log=str(e) or 'No information'))
    except device_errors.DeviceUnreachableError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
          log=str(e) or 'No information'))
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)