+2012-04-12 Ryosuke Niwa <rniwa@webkit.org>
+
+ Extract PerfTest class from PerfTestRunner
+ https://bugs.webkit.org/show_bug.cgi?id=83847
+
+ Reviewed by Hajime Morita.
+
+ Extracted PerfTest and ChromiumStylePerfTest from PerfTestRunner. These classes abstract a
+ test that was previously represented by a (test name, path) tuple.
+
+ Also moved the logic that determines whether a given test is Chromium style from run() to
+ _collect_tests(), and moved the output parsing algorithms for parser-style and Chromium-style
+ tests from PerfTestRunner to PerfTest and ChromiumStylePerfTest respectively, so that new
+ types of tests can be added more easily.
+
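+ As an illustrative sketch (not part of this patch), code that previously
+ unpacked a (test name, path) tuple now holds a PerfTest instance, e.g.:
+
+ test = PerfTest('Bindings/event-target-wrapper.html', dirname, path)
+ test.test_name() # 'Bindings/event-target-wrapper.html'
+ test.path_or_url() # the path that is handed to the test driver
+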
+ * Scripts/webkitpy/performance_tests/perftest.py: Added.
+ (PerfTest):
+ (PerfTest.__init__):
+ (PerfTest.test_name):
+ (PerfTest.dirname):
+ (PerfTest.path_or_url):
+ (PerfTest._should_ignore_line_in_parser_test_result):
+ (PerfTest.parse_output):
+ (ChromiumStylePerfTest):
+ (ChromiumStylePerfTest.__init__):
+ (ChromiumStylePerfTest.parse_output):
+ * Scripts/webkitpy/performance_tests/perftestsrunner.py:
+ (PerfTestsRunner._collect_tests):
+ (PerfTestsRunner.run):
+ (PerfTestsRunner._run_tests_set):
+ (PerfTestsRunner._run_single_test):
+ * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
+ (run_test):
+ (_tests_for_runner):
+ (test_run_test_set_with_json_output):
+ (test_run_test_set_with_json_source):
+ (test_run_test_set_with_multiple_repositories):
+ (_collect_tests_and_sort_test_name):
+ (test_collect_tests):
+ (test_collect_tests_with_skipped_list):
+
2012-04-12 Balazs Kelemen <kbalazs@webkit.org>
[Qt] Fix WebKit1 build with V8
--- /dev/null
+++ Tools/Scripts/webkitpy/performance_tests/perftest.py
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import re
+
+
+class PerfTest(object):
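+ """Abstracts a single performance test, previously represented as a (test name, path) tuple."""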
+ def __init__(self, test_name, dirname, path_or_url):
+ self._test_name = test_name
+ self._dirname = dirname
+ self._path_or_url = path_or_url
+
+ def test_name(self):
+ return self._test_name
+
+ def dirname(self):
+ return self._dirname
+
+ def path_or_url(self):
+ return self._path_or_url
+
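+ # Output lines that parser-style tests may legitimately emit; such lines
+ # are ignored when scanning test output for unexpected content.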
+ _lines_to_ignore_in_parser_result = [
+ re.compile(r'^Running \d+ times$'),
+ re.compile(r'^Ignoring warm-up '),
+ re.compile(r'^Info:'),
+ re.compile(r'^\d+(\.\d+)?$'),
+ # The following are to handle existing tests like Dromaeo.
+ re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
+ re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
+ re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]
+
+ def _should_ignore_line_in_parser_test_result(self, line):
+ if not line:
+ return True
+ for regex in self._lines_to_ignore_in_parser_result:
+ if regex.search(line):
+ return True
+ return False
+
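+ # Parses parser-style test output: one "<key> <value> <unit>" line for each
+ # statistic in |keys|. Returns {test name: results} on success, or None if
+ # the test emitted unexpected output or a statistic was missing.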
+ def parse_output(self, output, printer, buildbot_output):
+ test_failed = False
+ results = {}
+ keys = ['avg', 'median', 'stdev', 'min', 'max']
+ score_regex = re.compile(r'^(?P<key>' + r'|'.join(keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
+ unit = "ms"
+
+ for line in re.split('\n', output.text):
+ score = score_regex.match(line)
+ if score:
+ results[score.group('key')] = float(score.group('value'))
+ if score.group('unit'):
+ unit = score.group('unit')
+ continue
+
+ if not self._should_ignore_line_in_parser_test_result(line):
+ test_failed = True
+ printer.write("%s" % line)
+
+ if test_failed or set(keys) != set(results.keys()):
+ return None
+
+ results['unit'] = unit
+
+ test_name = re.sub(r'\.\w+$', '', self._test_name)
+ buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
+ buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in keys[1:]]) + '\n')
+
+ return {test_name: results}
+
+
+class ChromiumStylePerfTest(PerfTest):
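+ """A performance test that reports results as Chromium-style "RESULT <name> = <value> <unit>" lines."""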
+ _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
+
+ def __init__(self, test_name, dirname, path_or_url):
+ super(ChromiumStylePerfTest, self).__init__(test_name, dirname, path_or_url)
+
+ def parse_output(self, output, printer, buildbot_output):
+ test_failed = False
+ results = {}
+ for line in re.split('\n', output.text):
+ result_line = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
+ if result_line:
+ # FIXME: Store the unit
+ results[self.test_name() + ':' + result_line.group('name').replace(' ', '')] = float(result_line.group('value'))
+ buildbot_output.write("%s\n" % line)
+ elif line:
+ test_failed = True
+ printer.write("%s" % line)
+ return results if results and not test_failed else None
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
_log = logging.getLogger(__name__)
def _is_test_file(filesystem, dirname, filename):
return filename.endswith('.html')
+ filesystem = self._host.filesystem
+
paths = []
for arg in self._args:
paths.append(arg)
- relpath = self._host.filesystem.relpath(arg, self._base_path)
+ relpath = filesystem.relpath(arg, self._base_path)
if relpath:
paths.append(relpath)
skipped_directories = set(['.svn', 'resources'])
- test_files = find_files.find(self._host.filesystem, self._base_path, paths, skipped_directories, _is_test_file)
+ test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
tests = []
for path in test_files:
- test_name = self._port.relative_perf_test_filename(path)
- if self._port.skips_perf_test(test_name):
+ relative_path = self._port.relative_perf_test_filename(path)
+ if self._port.skips_perf_test(relative_path):
continue
- tests.append((test_name.replace('\\', '/'), path))
+ test_name = relative_path.replace('\\', '/')
+ dirname = filesystem.dirname(path)
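+ # Tests under the Chromium-style directories (e.g. inspector/) report
+ # "RESULT ..." lines and are wrapped in ChromiumStylePerfTest.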
+ if filesystem.dirname(relative_path) in self._test_directories_for_chromium_style_tests:
+ tests.append(ChromiumStylePerfTest(test_name, dirname, path))
+ else:
+ tests.append(PerfTest(test_name, dirname, path))
+
return tests
def run(self):
unexpected = -1
try:
tests = self._collect_tests()
- unexpected = self._run_tests_set(sorted(list(tests)), self._port)
+ unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
finally:
self._printer.cleanup()
unexpected = 0
driver = None
- for (test_name, test_path) in tests:
+ for test in tests:
driver = port.create_driver(worker_number=1, no_timeout=True)
if self._options.pause_before_testing:
driver.stop()
return unexpected
- self._printer.write('Running %s (%d of %d)' % (test_name, expected + unexpected + 1, len(tests)))
-
- is_chromium_style = self._host.filesystem.dirname(test_name) in self._test_directories_for_chromium_style_tests
- if self._run_single_test(test_name, test_path, driver, is_chromium_style):
+ self._printer.write('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
+ if self._run_single_test(test, driver):
expected = expected + 1
else:
unexpected = unexpected + 1
self._printer.write("%s" % line)
return test_failed or not got_a_result
- _lines_to_ignore_in_parser_result = [
- re.compile(r'^Running \d+ times$'),
- re.compile(r'^Ignoring warm-up '),
- re.compile(r'^Info:'),
- re.compile(r'^\d+(.\d+)?$'),
- # Following are for handle existing test like Dromaeo
- re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
- re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
- re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]
-
- def _should_ignore_line_in_parser_test_result(self, line):
- if not line:
- return True
- for regex in self._lines_to_ignore_in_parser_result:
- if regex.search(line):
- return True
- return False
-
- def _process_parser_test_result(self, test_name, output):
- got_a_result = False
- test_failed = False
- filesystem = self._host.filesystem
- results = {}
- keys = ['avg', 'median', 'stdev', 'min', 'max']
- score_regex = re.compile(r'^(?P<key>' + r'|'.join(keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
- unit = "ms"
-
- for line in re.split('\n', output.text):
- score = score_regex.match(line)
- if score:
- results[score.group('key')] = float(score.group('value'))
- if score.group('unit'):
- unit = score.group('unit')
- continue
-
- if not self._should_ignore_line_in_parser_test_result(line):
- test_failed = True
- self._printer.write("%s" % line)
-
- if test_failed or set(keys) != set(results.keys()):
- return True
-
- results['unit'] = unit
-
- test_name = re.sub(r'\.\w+$', '', test_name)
- self._results[test_name] = results
- self._buildbot_output.write('RESULT %s= %s %s\n' % (test_name.replace('/', ': '), results['avg'], unit))
- self._buildbot_output.write(', '.join(['%s= %s %s' % (key, results[key], unit) for key in keys[1:]]) + '\n')
- return False
-
- def _run_single_test(self, test_name, test_path, driver, is_chromium_style):
- test_failed = False
+ def _run_single_test(self, test, driver):
start_time = time.time()
- output = driver.run_test(DriverInput(test_path, self._options.time_out_ms, None, False))
+ output = driver.run_test(DriverInput(test.path_or_url(), self._options.time_out_ms, None, False))
+ new_results = None
+ if output.text is None:
- test_failed = True
+ pass # No output at all; new_results stays None and the test counts as failed.
elif output.timeout:
- self._printer.write('timeout: %s' % test_name)
- test_failed = True
+ self._printer.write('timeout: %s' % test.test_name())
elif output.crash:
- self._printer.write('crash: %s' % test_name)
- test_failed = True
+ self._printer.write('crash: %s' % test.test_name())
else:
- if is_chromium_style:
- test_failed = self._process_chromium_style_test_result(test_name, output)
- else:
- test_failed = self._process_parser_test_result(test_name, output)
+ new_results = test.parse_output(output, self._printer, self._buildbot_output)
if len(output.error):
self._printer.write('error:\n%s' % output.error)
- test_failed = True
+ new_results = None
- if test_failed:
+ if new_results:
+ self._results.update(new_results)
+ else:
self._printer.write('FAILED')
self._printer.write("Finished: %f s" % (time.time() - start_time))
- return not test_failed
+ return new_results is not None
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
def run_test(self, test_name):
runner = self.create_runner()
driver = MainTest.TestDriver()
- return runner._run_single_test(test_name, test_name, driver, is_chromium_style=True)
+ return runner._run_single_test(ChromiumStylePerfTest(test_name, 'some-dir',
+ runner._host.filesystem.join('some-dir', test_name)), driver)
def test_run_passing_test(self):
self.assertTrue(self.run_test('pass.html'))
def test_run_crash_test(self):
self.assertFalse(self.run_test('crash.html'))
- def _tests_for_runner(self, runner, tests):
- return [(test, runner._base_path + '/' + test) for test in tests]
+ def _tests_for_runner(self, runner, test_names):
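+ # Mirrors PerfTestsRunner._collect_tests: tests under inspector/ become
+ # ChromiumStylePerfTest instances; everything else becomes PerfTest.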
+ filesystem = runner._host.filesystem
+ tests = []
+ for test in test_names:
+ path = filesystem.join(runner._base_path, test)
+ dirname = filesystem.dirname(path)
+ if test.startswith('inspector/'):
+ tests.append(ChromiumStylePerfTest(test, dirname, path))
+ else:
+ tests.append(PerfTest(test, dirname, path))
+ return tests
def test_run_test_set(self):
buildbot_output = StringIO.StringIO()
self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
"timestamp": 123456789, "results":
{"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "group_name:test_name": 42},
+ "inspector/pass.html:group_name:test_name": 42},
"webkit-revision": 5678})
def test_run_test_set_with_json_source(self):
self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
"timestamp": 123456789, "results":
{"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "group_name:test_name": 42},
+ "inspector/pass.html:group_name:test_name": 42},
"webkit-revision": 5678,
"key": "value"})
self.assertEqual(runner.run(), 0)
self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
- "timestamp": 123456789, "results": {"group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
+ "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
def test_run_with_upload_json(self):
runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
tests = runner._collect_tests()
self.assertEqual(len(tests), 1)
+ def _collect_tests_and_sort_test_name(self, runner):
+ return sorted([test.test_name() for test in runner._collect_tests()])
+
def test_collect_tests(self):
runner = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
add_file('test2.html')
add_file('test3.html')
runner._host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
- self.assertEqual(sorted([test[0] for test in runner._collect_tests()]), ['test1.html', 'test2.html'])
+ self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
def test_collect_tests_with_skipped_list(self):
runner = self.create_runner()
add_file('inspector/resources', 'resource_file.html')
add_file('unsupported', 'unsupported_test2.html')
runner._port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
- self.assertEqual(sorted([test[0] for test in runner._collect_tests()]), ['inspector/test1.html', 'inspector/test2.html'])
+ self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
def test_parse_args(self):
runner = self.create_runner()