src/build/android/pylib/gtest/test_runner.py
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import re

from pylib import pexpect
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.perf import perf_control


def _TestSuiteRequiresMockTestServer(suite_name):
  """Returns True if the test suite requires a mock test server."""
  tests_require_net_test_server = ['unit_tests', 'net_unittests',
                                   'content_unittests',
                                   'content_browsertests']
  return suite_name in tests_require_net_test_server


def _TestSuiteRequiresHighPerfMode(suite_name):
  """Returns True if the test suite requires high performance mode."""
  return 'perftests' in suite_name


class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, test_package):
    """Single test suite attached to a single device.

    Args:
      test_options: A GTestOptions object.
      device: Device to run the tests on.
      test_package: An instance of the TestPackage class.
    """

    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.cleanup_test_files)

    self.test_package = test_package
    self.test_package.tool = self.tool
    self._test_arguments = test_options.test_arguments

    timeout = test_options.timeout
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. Chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2
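    # GetTimeoutScale() lets slow instrumentation tools (e.g. Valgrind-style
    # tools) stretch the timeout; for a plain run it is expected to be 1.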
    self._timeout = timeout * self.tool.GetTimeoutScale()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller = perf_control.PerfControl(self.device)

  #override
  def InstallTestPackage(self):
    self.test_package.Install(self.device)

  def _ParseTestOutput(self, p):
    """Processes the test output.

    Args:
      p: An instance of the pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()

    # Test case statuses.
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')

    # Test run statuses.
    re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')
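    # Read the output as a stream of test cases: each case opens with the
    # [ RUN ] marker and closes with [ OK ], [ FAILED ] or [ CRASHED ]; the
    # whole run ends when [ PASSED ] or [ RUNNER_FAILED ] is seen.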
    log = ''
    try:
      while True:
        full_test_name = None
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self._timeout)
        if found == 1:  # re_passed
          break
        elif found == 2:  # re_runner_fail
          break
        else:  # re_run
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([re_ok, re_fail, re_crash], timeout=self._timeout)
          log = p.before.replace('\r', '')
          if found == 0:  # re_ok
            if full_test_name == p.match.group(1).replace('\r', ''):
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  log=log))
          elif found == 2:  # re_crash
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            break
          else:  # re_fail
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.device.IsOnline():
        raise device_errors.DeviceUnreachableError(
            'Device %s went offline.' % str(self.device))
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
    finally:
      p.close()
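    # A non-zero exit code from the test binary means the run itself went
    # wrong (e.g. the process aborted), so dump the surrounding output for
    # diagnosis.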
    ret_code = self.test_package.GetGTestReturnCode(self.device)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)

    return results

  #override
  def RunTest(self, test):
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None

    try:
      self.test_package.ClearApplicationState(self.device)
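      # The test filter and any extra test arguments are written to a
      # command-line file on the device so the native test binary can pick
      # them up when it starts.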
      self.test_package.CreateCommandLineFileOnDevice(
          self.device, test, self._test_arguments)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.device))
    finally:
      self.CleanupSpawningServerState()
    # Mark any requested tests that produced no result as UNKNOWN.
    all_tests = set(test.split(':'))
    all_tests_ran = set(t.GetName() for t in test_results.GetAll())
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
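    # Anything that did not pass (failed, crashed, timed out or unknown) is
    # joined into a ':'-separated gtest filter so the caller can retry it.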
    retry = ':'.join(t.GetName() for t in test_results.GetNotPass())
    return test_results, retry

  #override
  def SetUp(self):
    """Sets up the necessary test environment for the test suite."""
    super(TestRunner, self).SetUp()
    if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
      self.LaunchChromeTestServerSpawner()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetHighPerfMode()
    self.tool.SetupEnvironment()

  #override
  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetDefaultPerfMode()
    self.test_package.ClearApplicationState(self.device)
    self.tool.CleanUpEnvironment()
    super(TestRunner, self).TearDown()
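

# A minimal usage sketch (not part of the original harness wiring): the names
# `options`, `device` and `package` below are hypothetical stand-ins for a
# GTestOptions object, a pylib device object, and a TestPackage instance.
#
#   runner = TestRunner(options, device, package)
#   runner.InstallTestPackage()
#   runner.SetUp()
#   results, retry = runner.RunTest('FooTest.Bar:FooTest.Baz')
#   runner.TearDown()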