# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
6 """Basic pyauto performance tests.
8 For tests that need to be run for multiple iterations (e.g., so that average
9 and standard deviation values can be reported), the default number of iterations
10 run for each of these tests is specified by |_DEFAULT_NUM_ITERATIONS|.
11 That value can optionally be tweaked by setting an environment variable
12 'NUM_ITERATIONS' to a positive integer, representing the number of iterations
13 to run. An additional, initial iteration will also be run to "warm up" the
14 environment, and the result from that initial iteration will be ignored.
16 Some tests rely on repeatedly appending tabs to Chrome. Occasionally, these
17 automation calls time out, thereby affecting the timing measurements (see issue
18 crosbug.com/20503). To work around this, the tests discard timing measurements
19 that involve automation timeouts. The value |_DEFAULT_MAX_TIMEOUT_COUNT|
20 specifies the threshold number of timeouts that can be tolerated before the test
21 fails. To tweak this value, set environment variable 'MAX_TIMEOUT_COUNT' to the
22 desired threshold value.
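
# Example (illustrative invocation, not prescribed by this file): to run a
# test with 20 measured iterations and a higher timeout tolerance, set the
# environment variables before starting the suite, e.g.:
#   NUM_ITERATIONS=20 MAX_TIMEOUT_COUNT=15 python perf.py TabPerfTest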

import errno
import logging
import math
import os
import re
import signal
import SimpleHTTPServer
import subprocess
import tempfile
import time
import timeit
import urllib2

import pyauto_functional  # Must be imported before pyauto.
import pyauto
import pyauto_utils
import simplejson  # Must be imported after pyauto; located in third_party.
import test_utils

from netflix import NetflixTestHelper
from youtube import YoutubeTestHelper


_CHROME_BASE_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))


def FormatChromePath(posix_path, **kwargs):
  """Converts a path relative to the Chromium root into an OS-specific path.

  Args:
    posix_path: A path string that may be a format().
        Example: 'src/third_party/{module_name}/__init__.py'
    kwargs: Args for the format replacement.
        Example: {'module_name': 'pylib'}

  Returns:
    An absolute path in the current Chromium tree with formatting applied.
  """
  formatted_path = posix_path.format(**kwargs)
  path_parts = formatted_path.split('/')
  return os.path.join(_CHROME_BASE_DIR, *path_parts)
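
# Example (illustrative; the checkout path is hypothetical): with a checkout
# rooted at /home/user/chromium,
#   FormatChromePath('src/third_party/{module_name}/__init__.py',
#                    module_name='pylib')
# returns '/home/user/chromium/src/third_party/pylib/__init__.py'.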


def StandardDeviation(values):
  """Returns the standard deviation of |values|."""
  avg = Mean(values)
  if len(values) < 2 or not avg:
    return 0.0
  temp_vals = [math.pow(x - avg, 2) for x in values]
  return math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))


def Mean(values):
  """Returns the arithmetic mean of |values|."""
  if not values or None in values:
    return None
  return sum(values) / float(len(values))


def GeometricMean(values):
  """Returns the geometric mean of |values|."""
  if not values or None in values or [x for x in values if x < 0.0]:
    return None
  if 0.0 in values:
    return 0.0
  return math.exp(Mean([math.log(x) for x in values]))
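
# Sanity-check examples for the helpers above (verifiable by hand):
#   Mean([2.0, 4.0]) == 3.0
#   StandardDeviation([2.0, 4.0]) == sqrt(2) ~= 1.414 (sample std deviation)
#   GeometricMean([1.0, 4.0]) == 2.0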


class BasePerfTest(pyauto.PyUITest):
  """Base class for performance tests."""

  _DEFAULT_NUM_ITERATIONS = 10  # Keep synced with desktopui_PyAutoPerfTests.py.
  _DEFAULT_MAX_TIMEOUT_COUNT = 10
  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'
113 """Performs necessary setup work before running each test."""
114 self._num_iterations = self._DEFAULT_NUM_ITERATIONS
115 if 'NUM_ITERATIONS' in os.environ:
116 self._num_iterations = int(os.environ['NUM_ITERATIONS'])
117 self._max_timeout_count = self._DEFAULT_MAX_TIMEOUT_COUNT
118 if 'MAX_TIMEOUT_COUNT' in os.environ:
119 self._max_timeout_count = int(os.environ['MAX_TIMEOUT_COUNT'])
120 self._timeout_count = 0
122 # For users who want to see local perf graphs for Chrome when running the
123 # tests on their own machines.
124 self._local_perf_dir = None
125 if 'LOCAL_PERF_DIR' in os.environ:
126 self._local_perf_dir = os.environ['LOCAL_PERF_DIR']
127 if not os.path.exists(self._local_perf_dir):
128 self.fail('LOCAL_PERF_DIR environment variable specified as %s, '
129 'but this directory does not exist.' % self._local_perf_dir)
130 # When outputting perf graph information on-the-fly for Chrome, this
131 # variable lets us know whether a perf measurement is for a new test
132 # execution, or the current test execution.
133 self._seen_graph_lines = {}
135 pyauto.PyUITest.setUp(self)
137 # Flush all buffers to disk and wait until system calms down. Must be done
138 # *after* calling pyauto.PyUITest.setUp, since that is where Chrome is
139 # killed and re-initialized for a new test.
140 # TODO(dennisjeffrey): Implement wait for idle CPU on Windows/Mac.
141 if self.IsLinux(): # IsLinux() also implies IsChromeOS().
143 self._WaitForIdleCPU(60.0, 0.05)

  def _IsPIDRunning(self, pid):
    """Checks if a given process id is running.

    Args:
      pid: The process id of the process to check.

    Returns:
      True if the process is running. False if not.
    """
    try:
      # Note that this sends the signal 0, which should not interfere with the
      # process.
      os.kill(pid, 0)
    except OSError, err:
      if err.errno == errno.ESRCH:
        return False
    try:
      # This file should exist on Linux.
      with open('/proc/%s/status' % pid) as proc_file:
        if 'zombie' in proc_file.read():
          return False
    except IOError:
      return True
    return True

  def _GetAllDescendentProcesses(self, pid):
    """Returns the PIDs of all processes in the tree rooted at |pid|."""
    pstree_out = subprocess.check_output(['pstree', '-p', '%s' % pid])
    children = re.findall(r'\((\d+)\)', pstree_out)
    return [int(child_pid) for child_pid in children]

  def _WaitForChromeExit(self, browser_info, timeout):
    """Waits for Chrome's process tree to exit, failing after |timeout| secs."""
    pid = browser_info['browser_pid']
    chrome_pids = self._GetAllDescendentProcesses(pid)
    initial_time = time.time()
    while time.time() - initial_time < timeout:
      if any([self._IsPIDRunning(p) for p in chrome_pids]):
        time.sleep(1)
      else:
        logging.info('_WaitForChromeExit() took: %s seconds',
                     time.time() - initial_time)
        return
    self.fail('_WaitForChromeExit() did not finish within %s seconds' %
              timeout)

  def tearDown(self):
    if self._IsPGOMode():
      browser_info = self.GetBrowserInfo()
      pid = browser_info['browser_pid']
      # session_manager kills chrome without waiting for it to cleanly exit.
      # Until that behavior is changed, we stop it and wait for Chrome to exit
      # cleanly before restarting it.
      subprocess.call(['sudo', 'pkill', '-STOP', 'session_manager'])
      os.kill(pid, signal.SIGINT)
      self._WaitForChromeExit(browser_info, 120)
      subprocess.call(['sudo', 'pkill', '-CONT', 'session_manager'])

    pyauto.PyUITest.tearDown(self)

  def _IsPGOMode(self):
    return 'USE_PGO' in os.environ

  def _WaitForIdleCPU(self, timeout, utilization):
    """Waits for the CPU to become idle (< utilization).

    Args:
      timeout: The longest time in seconds to wait before throwing an error.
      utilization: The CPU usage below which the system should be considered
          idle (between 0 and 1.0, independent of cores/hyperthreads).
    """
    time_passed = 0.0
    fraction_non_idle_time = 1.0
    logging.info('Starting to wait up to %fs for idle CPU...', timeout)
    while fraction_non_idle_time >= utilization:
      cpu_usage_start = self._GetCPUUsage()
      time.sleep(2)
      time_passed += 2.0
      cpu_usage_end = self._GetCPUUsage()
      fraction_non_idle_time = \
          self._GetFractionNonIdleCPUTime(cpu_usage_start, cpu_usage_end)
      logging.info('Current CPU utilization = %f.', fraction_non_idle_time)
      if time_passed > timeout:
        self._LogProcessActivity()
        message = ('CPU did not idle after %fs wait (utilization = %f).' % (
                   time_passed, fraction_non_idle_time))
        if self._IsPGOMode():
          logging.info(message)
          logging.info('Still continuing because we are in PGO mode.')
          break
        self.fail(message)
    logging.info('Wait for idle CPU took %fs (utilization = %f).',
                 time_passed, fraction_non_idle_time)

  def _LogProcessActivity(self):
    """Logs the output of top on Linux/Mac/CrOS.

    TODO: use taskmgr or similar on Windows.
    """
    if self.IsLinux() or self.IsMac():  # IsLinux() also implies IsChromeOS().
      logging.info('Logging current process activity using top.')
      cmd = 'top -b -d1 -n1'
      if self.IsMac():
        cmd = 'top -l1'
      p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                           close_fds=True)
      output = p.stdout.read()
      logging.info(output)
    else:
      logging.info('Process activity logging not implemented on this OS.')

  def _AppendTab(self, url):
    """Appends a tab and increments a counter if the automation call times out.

    Args:
      url: The string url to which the appended tab should be navigated.
    """
    if not self.AppendTab(pyauto.GURL(url)):
      self._timeout_count += 1

  def _MeasureElapsedTime(self, python_command, num_invocations=1):
    """Measures time (in msec) to execute a python command one or more times.

    Args:
      python_command: A callable to invoke.
      num_invocations: An integer number of times to invoke the given command.

    Returns:
      The time required to execute the python command the specified number of
      times, in milliseconds as a float.
    """
    assert callable(python_command)
    def RunCommand():
      for _ in range(num_invocations):
        python_command()
    timer = timeit.Timer(stmt=RunCommand)
    return timer.timeit(number=1) * 1000  # Convert seconds to milliseconds.
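
  # Example (illustrative): measure the total time, in milliseconds, of five
  # consecutive new-tab openings as a single timing sample:
  #   msec = self._MeasureElapsedTime(
  #       lambda: self._AppendTab('chrome://newtab'), num_invocations=5)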

  def _OutputPerfForStandaloneGraphing(self, graph_name, description, value,
                                       units, units_x, is_stacked):
    """Outputs perf measurement data to a local folder to be graphed.

    This function only applies to Chrome desktop, and assumes that environment
    variable 'LOCAL_PERF_DIR' has been specified and refers to a valid
    directory on the local machine.

    Args:
      graph_name: A string name for the graph associated with this performance
          value.
      description: A string description of the performance value. Should not
          include spaces.
      value: Either a single numeric value representing a performance
          measurement, or else a list of (x, y) tuples representing one or
          more long-running performance measurements, where 'x' is an x-axis
          value (such as an iteration number) and 'y' is the corresponding
          performance measurement. If a list of tuples is given, then the
          |units_x| argument must also be specified.
      units: A string representing the units of the performance
          measurement(s). Should not include spaces.
      units_x: A string representing the units of the x-axis values associated
          with the performance measurements, such as 'iteration' if the x
          values are iteration numbers. If this argument is specified, then
          the |value| argument must be a list of (x, y) tuples.
      is_stacked: True to draw a "stacked" graph. First-come values are
          stacked at the bottom by default.
    """
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0
    if not self._seen_graph_lines:
      # We're about to output data for a new test run, so advance the
      # revision number.
      revision += 1

    # Update the file holding the set of known graphs.
    existing_graphs = []
    graphs_file = os.path.join(self._local_perf_dir, 'graphs.dat')
    if os.path.exists(graphs_file):
      with open(graphs_file) as f:
        existing_graphs = simplejson.loads(f.read())
    is_new_graph = True
    for graph in existing_graphs:
      if graph['name'] == graph_name:
        is_new_graph = False
        break
    if is_new_graph:
      new_graph = {
        'name': graph_name,
        'units': units,
      }
      if units_x:
        new_graph['units_x'] = units_x
      existing_graphs.append(new_graph)
      with open(graphs_file, 'w') as f:
        f.write(simplejson.dumps(existing_graphs))
      os.chmod(graphs_file, 0755)

    # Update the data file for this particular graph.
    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, graph_name + '-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
      existing_lines = map(
          simplejson.loads, map(lambda x: x.strip(), existing_lines))

    seen_key = graph_name
    # We assume that the first line |existing_lines[0]| is the latest.
    if units_x:
      new_line = {
        'rev': revision,
        'traces': { description: [] }
      }
      if seen_key in self._seen_graph_lines:
        # We've added points previously for this graph line in the current
        # test execution, so retrieve the original set of points specified in
        # the most recent revision in the data file.
        new_line = existing_lines[0]
        if not description in new_line['traces']:
          new_line['traces'][description] = []
      for x_value, y_value in value:
        new_line['traces'][description].append([str(x_value), str(y_value)])
    else:
      new_line = {
        'rev': revision,
        'traces': { description: [str(value), str(0.0)] }
      }

    if is_stacked:
      new_line['stack'] = True
      if 'stack_order' not in new_line:
        new_line['stack_order'] = []
      if description not in new_line['stack_order']:
        new_line['stack_order'].append(description)

    if seen_key in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
      self._seen_graph_lines[seen_key] = True

    existing_lines = map(simplejson.dumps, existing_lines)
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))

  def _OutputPerfGraphValue(self, description, value, units,
                            graph_name, units_x=None, is_stacked=False):
    """Outputs a performance value to have it graphed on the performance bots.

    The output format differs, depending on whether the current platform is
    Chrome desktop or ChromeOS.

    For ChromeOS, the performance bots have a 30-character limit on the length
    of the key associated with a performance value. A key on ChromeOS is
    considered to be of the form "units_description" (for example,
    "milliseconds_NewTabPage"), and is created from the |units| and
    |description| passed as input to this function. Any characters beyond the
    length-30 limit are truncated before results are stored in the autotest
    database.

    Args:
      description: A string description of the performance value. Should not
          include spaces.
      value: Either a numeric value representing a performance measurement, or
          a list of values to be averaged. Lists may also contain (x, y)
          tuples representing one or more performance measurements, where 'x'
          is an x-axis value (such as an iteration number) and 'y' is the
          corresponding performance measurement. If a list of tuples is
          given, the |units_x| argument must also be specified.
      units: A string representing the units of the performance
          measurement(s). Should not include spaces.
      graph_name: A string name for the graph associated with this performance
          value. Only used on Chrome desktop.
      units_x: A string representing the units of the x-axis values associated
          with the performance measurements, such as 'iteration' if the x
          values are iteration numbers. If this argument is specified, then
          the |value| argument must be a list of (x, y) tuples.
      is_stacked: True to draw a "stacked" graph. First-come values are
          stacked at the bottom by default.
    """
    if (isinstance(value, list) and value[0] is not None and
        isinstance(value[0], tuple)):
      assert units_x
    if units_x:
      assert isinstance(value, list)

    if self.IsChromeOS():
      # Autotest doesn't support result lists.
      autotest_value = value
      if (isinstance(value, list) and value[0] is not None and
          not isinstance(value[0], tuple)):
        autotest_value = Mean(value)

      if units_x:
        # TODO(dennisjeffrey): Support long-running performance measurements
        # on ChromeOS in a way that can be graphed: crosbug.com/21881.
        pyauto_utils.PrintPerfResult(graph_name, description, autotest_value,
                                     units + ' ' + units_x)
      else:
        # Output short-running performance results in a format understood by
        # autotest.
        perf_key = '%s_%s' % (units, description)
        if len(perf_key) > 30:
          logging.warning('The description "%s" will be truncated to "%s" '
                          '(length 30) when added to the autotest database.',
                          perf_key, perf_key[:30])
        print '\n%s(\'%s\', %f)%s' % (self._PERF_OUTPUT_MARKER_PRE,
                                      perf_key, autotest_value,
                                      self._PERF_OUTPUT_MARKER_POST)

        # Also output results in the format recognized by buildbot, for cases
        # in which these tests are run on ChromeOS through buildbot. Since
        # buildbot supports result lists, it's ok for |value| to be a list
        # here.
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)
    else:
      if units_x:
        # TODO(dmikurube): Support stacked graphs in PrintPerfResult.
        # See http://crbug.com/122119.
        pyauto_utils.PrintPerfResult(graph_name, description, value,
                                     units + ' ' + units_x)
      else:
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

    if self._local_perf_dir:
      self._OutputPerfForStandaloneGraphing(
          graph_name, description, value, units, units_x, is_stacked)
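
  # Example calls (illustrative; the graph names below are hypothetical):
  # a short-running scalar measurement, and a long-running series of
  # (x, y) measurements:
  #   self._OutputPerfGraphValue('NewTabPage', 42.5, 'milliseconds',
  #                              'open_tab')
  #   self._OutputPerfGraphValue('MemoryGrowth', [(1, 10.0), (2, 11.5)], 'MB',
  #                              'endure_graph', units_x='iteration')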

  def _OutputEventForStandaloneGraphing(self, description, event_list):
    """Outputs event information to a local folder to be graphed.

    See function _OutputEventGraphValue below for a description of an event.

    This function only applies to Chrome Endure tests running on Chrome
    desktop, and assumes that environment variable 'LOCAL_PERF_DIR' has been
    specified and refers to a valid directory on the local machine.

    Args:
      description: A string description of the event. Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the
          event (in seconds since the start of the test), and 'y' is a
          dictionary representing relevant data associated with that event
          (as key/value pairs).
    """
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0
    if not self._seen_graph_lines:
      # We're about to output data for a new test run, so advance the
      # revision number.
      revision += 1

    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, '_EVENT_-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
      existing_lines = map(eval, map(lambda x: x.strip(), existing_lines))

    seen_event_type = description
    value_list = []
    if seen_event_type in self._seen_graph_lines:
      # We've added events previously for this event type in the current
      # test execution, so retrieve the original set of values specified in
      # the most recent revision in the data file.
      value_list = existing_lines[0]['events'][description]
    for event_time, event_data in event_list:
      value_list.append([str(event_time), event_data])
    new_line = {
      'rev': revision,
      'events': {
        description: value_list
      }
    }

    if seen_event_type in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
      self._seen_graph_lines[seen_event_type] = True

    existing_lines = map(str, existing_lines)
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))

  def _OutputEventGraphValue(self, description, event_list):
    """Outputs a set of events to have them graphed on the Chrome Endure bots.

    An "event" can be anything recorded by a performance test that occurs at
    particular times during a test execution. For example, a garbage
    collection in the v8 heap can be considered an event. An event is
    distinguished from a regular perf measurement in two ways: (1) an event is
    depicted differently in the performance graphs than performance
    measurements; (2) an event can be associated with zero or more data fields
    describing relevant information associated with the event. For example, a
    garbage collection event will occur at a particular time, and it may be
    associated with data such as the number of collected bytes and/or the
    length of time it took to perform the garbage collection.

    This function only applies to Chrome Endure tests running on Chrome
    desktop.

    Args:
      description: A string description of the event. Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the
          event (in seconds since the start of the test), and 'y' is a
          dictionary representing relevant data associated with that event
          (as key/value pairs).
    """
    pyauto_utils.PrintPerfResult('_EVENT_', description, event_list, '')
    if self._local_perf_dir:
      self._OutputEventForStandaloneGraphing(description, event_list)
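
  # Example call (illustrative; the event name and data field are
  # hypothetical): report a garbage collection 12.5 seconds into a test:
  #   self._OutputEventGraphValue('GarbageCollection',
  #                               [(12.5, {'collected_bytes': 1048576})])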

  def _PrintSummaryResults(self, description, values, units, graph_name):
    """Logs summary measurement information.

    This function computes and outputs the average and standard deviation of
    the specified list of value measurements. It also invokes
    _OutputPerfGraphValue() with the computed *average* value, to ensure the
    average value can be plotted in a performance graph.

    Args:
      description: A string description for the specified results.
      values: A list of numeric value measurements.
      units: A string specifying the units for the specified measurements.
      graph_name: A string name for the graph associated with this performance
          value. Only used on Chrome desktop.
    """
    logging.info('Overall results for: %s', description)
    if values:
      logging.info('  Average: %f %s', Mean(values), units)
      logging.info('  Std dev: %f %s', StandardDeviation(values), units)
      self._OutputPerfGraphValue(description, values, units, graph_name)
    else:
      logging.info('No results to report.')

  def _RunNewTabTest(self, description, open_tab_command, graph_name,
                     num_tabs=1):
    """Runs a perf test that involves opening new tab(s).

    This helper function can be called from different tests to do perf testing
    with different types of tabs. It is assumed that the |open_tab_command|
    will open up a single tab.

    Args:
      description: A string description of the associated tab test.
      open_tab_command: A callable that will open a single tab.
      graph_name: A string name for the performance graph associated with this
          test. Only used on Chrome desktop.
      num_tabs: The number of tabs to open, i.e., the number of times to
          invoke the |open_tab_command|.
    """
    assert callable(open_tab_command)

    timings = []
    for iteration in range(self._num_iterations + 1):
      orig_timeout_count = self._timeout_count
      elapsed_time = self._MeasureElapsedTime(open_tab_command,
                                              num_invocations=num_tabs)
      # Only count the timing measurement if no automation call timed out.
      if self._timeout_count == orig_timeout_count:
        # Ignore the first iteration.
        if iteration:
          timings.append(elapsed_time)
          logging.info('Iteration %d of %d: %f milliseconds', iteration,
                       self._num_iterations, elapsed_time)
      self.assertTrue(self._timeout_count <= self._max_timeout_count,
                      msg='Test exceeded automation timeout threshold.')
      self.assertEqual(1 + num_tabs, self.GetTabCount(),
                       msg='Did not open %d new tab(s).' % num_tabs)
      for _ in range(num_tabs):
        self.CloseTab(tab_index=1)

    self._PrintSummaryResults(description, timings, 'milliseconds', graph_name)

  def _GetConfig(self):
    """Loads the perf test configuration file.

    Returns:
      A dictionary that represents the config information.
    """
    config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
    config = {'username': None,
              'password': None,
              'google_account_url': 'https://accounts.google.com/',
              'gmail_url': 'https://www.gmail.com',
              'plus_url': 'https://plus.google.com',
              'docs_url': 'https://docs.google.com'}
    if os.path.exists(config_file):
      try:
        new_config = pyauto.PyUITest.EvalDataFrom(config_file)
        for key in new_config:
          if new_config.get(key) is not None:
            config[key] = new_config.get(key)
      except SyntaxError, e:
        logging.info('Could not read %s: %s', config_file, str(e))
    return config
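
  # Example perf.cfg contents (hypothetical values), evaluated as a Python
  # dictionary by EvalDataFrom:
  #   {'username': 'user@example.com',
  #    'password': 'secret',
  #    'google_account_url': 'https://accounts.google.com/'}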

  def _LoginToGoogleAccount(self, account_key='test_google_account'):
    """Logs in to a test Google account.

    Logs in with user-defined credentials if they exist; otherwise, logs in
    with private test credentials if they exist.

    Args:
      account_key: The string key in private_tests_info.txt which is
          associated with the test account login credentials to use. It is
          only used when user-defined credentials cannot be loaded.

    Raises:
      RuntimeError: If no credential information could be found.
    """
    private_file = os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private',
                                'private_tests_info.txt')
    config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
    config = self._GetConfig()
    google_account_url = config.get('google_account_url')
    username = config.get('username')
    password = config.get('password')
    if username and password:
      logging.info(
          'Using google account credential from %s',
          os.path.join(os.path.dirname(__file__), 'perf.cfg'))
    elif os.path.exists(private_file):
      creds = self.GetPrivateInfo()[account_key]
      username = creds['username']
      password = creds['password']
      logging.info(
          'User-defined credentials not found,' +
          ' using private test credentials instead.')
    else:
      message = 'No user-defined or private test ' \
                'credentials could be found. ' \
                'Please specify credential information in %s.' \
                % config_file
      raise RuntimeError(message)
    test_utils.GoogleAccountsLogin(
        self, username, password, url=google_account_url)
    self.NavigateToURL('about:blank')  # Clear the existing tab.

  def _GetCPUUsage(self):
    """Returns the machine's CPU usage.

    This function uses /proc/stat to identify CPU usage, and therefore works
    only on Linux/ChromeOS.

    Returns:
      A dictionary with 'user', 'nice', 'system' and 'idle' values.
    """
    try:
      f = open('/proc/stat')
      cpu_usage_str = f.readline().split()
      f.close()
    except IOError, e:
      self.fail('Could not retrieve CPU usage: ' + str(e))
    return {
      'user': int(cpu_usage_str[1]),
      'nice': int(cpu_usage_str[2]),
      'system': int(cpu_usage_str[3]),
      'idle': int(cpu_usage_str[4])
    }
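
  # Illustrative /proc/stat first line and the resulting dictionary:
  #   cpu  84752 344 39236 2796391 ...
  #   -> {'user': 84752, 'nice': 344, 'system': 39236, 'idle': 2796391}
  # Values are cumulative jiffies since boot, so meaningful usage numbers
  # come from differencing two samples (see _GetFractionNonIdleCPUTime).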

  def _GetFractionNonIdleCPUTime(self, cpu_usage_start, cpu_usage_end):
    """Computes the fraction of CPU time spent non-idling.

    This function should be invoked using before/after values from calls to
    _GetCPUUsage().
    """
    time_non_idling_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
                           cpu_usage_end['system'])
    time_non_idling_start = (cpu_usage_start['user'] +
                             cpu_usage_start['nice'] +
                             cpu_usage_start['system'])
    total_time_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
                      cpu_usage_end['system'] + cpu_usage_end['idle'])
    total_time_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
                        cpu_usage_start['system'] + cpu_usage_start['idle'])
    return ((float(time_non_idling_end) - time_non_idling_start) /
            (total_time_end - total_time_start))
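
  # Worked example (illustrative): if between two samples user+nice+system
  # advances by 50 jiffies and user+nice+system+idle advances by 500 jiffies,
  # the fraction of non-idle CPU time is 50 / 500 = 0.1 (10% utilization).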

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.

    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    flags = super(BasePerfTest, self).ExtraChromeFlags()
    # Window size impacts a variety of perf tests, so ensure consistency.
    flags.append('--window-size=1024,768')
    if self._IsPGOMode():
      flags = flags + ['--child-clean-exit', '--no-sandbox']
    return flags


class TabPerfTest(BasePerfTest):
  """Tests that involve opening tabs."""

  def testNewTab(self):
    """Measures time to open a new tab."""
    self._RunNewTabTest('NewTabPage',
                        lambda: self._AppendTab('chrome://newtab'), 'open_tab')

  def testNewTabFlash(self):
    """Measures time to open a new tab navigated to a flash page."""
    self.assertTrue(
        os.path.exists(os.path.join(self.ContentDataDir(), 'plugin',
                                    'flash.swf')),
        msg='Missing required flash data file.')
    url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
    self._RunNewTabTest('NewTabFlashPage', lambda: self._AppendTab(url),
                        'open_tab')

  def test20Tabs(self):
    """Measures time to open 20 tabs."""
    self._RunNewTabTest('20TabsNewTabPage',
                        lambda: self._AppendTab('chrome://newtab'),
                        'open_20_tabs', num_tabs=20)


class BenchmarkPerfTest(BasePerfTest):
  """Benchmark performance tests."""

  def testV8BenchmarkSuite(self):
    """Measures score from v8 benchmark suite."""
    url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')

    def _RunBenchmarkOnce(url):
      """Runs the v8 benchmark suite once and returns the results in a dict."""
      self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                      msg='Failed to append tab for v8 benchmark suite.')
      js_done = """
          var val = document.getElementById("status").innerHTML;
          window.domAutomationController.send(val);
      """
      self.assertTrue(
          self.WaitUntil(
              lambda: 'Score:' in self.ExecuteJavascript(js_done, tab_index=1),
              timeout=300, expect_retval=True, retry_sleep=1),
          msg='Timed out when waiting for v8 benchmark score.')
      js_get_results = """
          var result = {};
          result['final_score'] = document.getElementById("status").innerHTML;
          result['all_results'] = document.getElementById("results").innerHTML;
          window.domAutomationController.send(JSON.stringify(result));
      """
      results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
      score_pattern = r'(\w+): (\d+)'
      final_score = re.search(score_pattern, results['final_score']).group(2)
      result_dict = {'final_score': int(final_score)}
      for match in re.finditer(score_pattern, results['all_results']):
        benchmark_name = match.group(1)
        benchmark_score = match.group(2)
        result_dict[benchmark_name] = int(benchmark_score)
      self.CloseTab(tab_index=1)
      return result_dict

    timings = {}
    for iteration in xrange(self._num_iterations + 1):
      result_dict = _RunBenchmarkOnce(url)
      # Ignore the first iteration.
      if iteration:
        for key, val in result_dict.items():
          timings.setdefault(key, []).append(val)
      logging.info('Iteration %d of %d:\n%s', iteration,
                   self._num_iterations, self.pformat(result_dict))

    for key, val in timings.items():
      if key == 'final_score':
        self._PrintSummaryResults('V8Benchmark', val, 'score',
                                  'v8_benchmark_final')
      else:
        self._PrintSummaryResults('V8Benchmark-%s' % key, val, 'score',
                                  'v8_benchmark_individual')

  def testSunSpider(self):
    """Runs the SunSpider javascript benchmark suite."""
    url = self.GetFileURLForDataPath('sunspider', 'sunspider-driver.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for SunSpider benchmark suite.')

    js_is_done = """
        var done = false;
        if (document.getElementById("console"))
          done = true;
        window.domAutomationController.send(JSON.stringify(done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=300, expect_retval='true', retry_sleep=1),
        msg='Timed out when waiting for SunSpider benchmark score.')

    js_get_results = """
        window.domAutomationController.send(
            document.getElementById("console").innerHTML);
    """
    # Append '<br>' to the result to simplify regular expression matching.
    results = self.ExecuteJavascript(js_get_results, tab_index=1) + '<br>'
    total = re.search(r'Total:\s*([\d.]+)ms', results).group(1)
    logging.info('Total: %f ms', float(total))
    self._OutputPerfGraphValue('SunSpider-total', float(total), 'ms',
                               'sunspider_total')

    for match_category in re.finditer(r'\s\s(\w+):\s*([\d.]+)ms.+?<br><br>',
                                      results):
      category_name = match_category.group(1)
      category_result = match_category.group(2)
      logging.info('Benchmark "%s": %f ms', category_name,
                   float(category_result))
      self._OutputPerfGraphValue('SunSpider-' + category_name,
                                 float(category_result), 'ms',
                                 'sunspider_individual')

      for match_result in re.finditer(r'<br>\s\s\s\s([\w-]+):\s*([\d.]+)ms',
                                      match_category.group(0)):
        result_name = match_result.group(1)
        result_value = match_result.group(2)
        logging.info('  Result "%s-%s": %f ms', category_name, result_name,
                     float(result_value))
        self._OutputPerfGraphValue(
            'SunSpider-%s-%s' % (category_name, result_name),
            float(result_value), 'ms', 'sunspider_individual')

  def testDromaeoSuite(self):
    """Measures results from Dromaeo benchmark suite."""
    url = self.GetFileURLForDataPath('dromaeo', 'index.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url + '?dromaeo')),
                    msg='Failed to append tab for Dromaeo benchmark suite.')

    js_is_ready = """
        var val = document.getElementById('pause').value;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_ready, tab_index=1),
            timeout=30, expect_retval='Run', retry_sleep=1),
        msg='Timed out when waiting for Dromaeo benchmark to load.')

    js_run = """
        $('#pause').val('Run').click();
        window.domAutomationController.send('done');
    """
    self.ExecuteJavascript(js_run, tab_index=1)

    js_is_done = """
        var val = document.getElementById('timebar').innerHTML;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: 'Total' in self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=900, expect_retval=True, retry_sleep=2),
        msg='Timed out when waiting for Dromaeo benchmark to complete.')

    js_get_results = """
        var result = {};
        result['total_result'] = $('#timebar strong').html();
        result['all_results'] = {};
        $('.result-item.done').each(function (i) {
          var group_name = $(this).find('.test b').html().replace(':', '');
          var group_results = {};
          group_results['result'] =
              $(this).find('span').html().replace('runs/s', '');
          group_results['sub_groups'] = {};
          $(this).find('li').each(function (i) {
            var sub_name = $(this).find('b').html().replace(':', '');
            group_results['sub_groups'][sub_name] =
                $(this).text().match(/: ([\d.]+)/)[1];
          });
          result['all_results'][group_name] = group_results;
        });
        window.domAutomationController.send(JSON.stringify(result));
    """
    results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    total_result = results['total_result']
    logging.info('Total result: ' + total_result)
    self._OutputPerfGraphValue('Dromaeo-total', float(total_result),
                               'runsPerSec', 'dromaeo_total')

    for group_name, group in results['all_results'].iteritems():
      logging.info('Benchmark "%s": %s', group_name, group['result'])
      self._OutputPerfGraphValue('Dromaeo-' + group_name.replace(' ', ''),
                                 float(group['result']), 'runsPerSec',
                                 'dromaeo_individual')
      for benchmark_name, benchmark_score in group['sub_groups'].iteritems():
        logging.info('  Result "%s": %s', benchmark_name, benchmark_score)

  def testSpaceport(self):
    """Measures results from Spaceport benchmark suite."""
    # TODO(tonyg): Test is failing on bots. Diagnose and re-enable.

    # url = self.GetFileURLForDataPath('third_party', 'spaceport', 'index.html')
    # self.assertTrue(self.AppendTab(pyauto.GURL(url + '?auto')),
    #                 msg='Failed to append tab for Spaceport benchmark suite.')

    # # The test reports results to console.log in the format "name: value".
    # # Inject a bit of JS to intercept those.
    # js_collect_console_log = """
    #     window.__pyautoresult = {};
    #     window.console.log = function(str) {
    #       var key_val = str.split(': ');
    #       if (key_val.length != 2) return;
    #       __pyautoresult[key_val[0]] = key_val[1];
    #     };
    #     window.domAutomationController.send('done');
    # """
    # self.ExecuteJavascript(js_collect_console_log, tab_index=1)

    # def _IsDone():
    #   expected_num_results = 30  # The number of tests in benchmark.
    #   results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
    #   return expected_num_results == len(results)

    # js_get_results = """
    #     window.domAutomationController.send(
    #         JSON.stringify(window.__pyautoresult));
    # """
    # self.assertTrue(
    #     self.WaitUntil(_IsDone, timeout=1200, expect_retval=True),
    #     msg='Timed out when waiting for Spaceport benchmark to complete.')
    # results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))

    # for key in results:
    #   suite, test = key.split('.')
    #   value = float(results[key])
    #   self._OutputPerfGraphValue(test, value, 'ObjectsAt30FPS', suite)
    # self._PrintSummaryResults('Overall', [float(x) for x in results.values()],
    #                           'ObjectsAt30FPS', 'Overall')


class LiveWebappLoadTest(BasePerfTest):
  """Tests that involve performance measurements of live webapps.

  These tests connect to live webpages (e.g., Gmail, Calendar, Docs) and are
  therefore subject to network conditions. These tests are meant to generate
  "ball-park" numbers only (to see roughly how long things take to occur from
  a user's perspective), and are not expected to be precise.
  """

  def testNewTabGmail(self):
    """Measures time to open a tab to a logged-in Gmail account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the substring 'Last account activity:'.
    """
    EXPECTED_SUBSTRING = 'Last account activity:'

    def _SubstringExistsOnPage():
      js = """
          var frame = document.getElementById("canvas_frame");
          var divs = frame.contentDocument.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleGmailTabOpen():
      self._AppendTab('http://www.gmail.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Gmail string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabGmail', _RunSingleGmailTabOpen,
                        'open_tab_live_webapp')

  def testNewTabCalendar(self):
    """Measures time to open a tab to a logged-in Calendar account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays a div whose contents equal 'Month'.
    """
    EXPECTED_SUBSTRING = 'Month'

    def _DivTitleStartsWith():
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML == "%s")
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleCalendarTabOpen():
      self._AppendTab('http://calendar.google.com')
      self.assertTrue(self.WaitUntil(_DivTitleStartsWith, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Calendar string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabCalendar', _RunSingleCalendarTabOpen,
                        'open_tab_live_webapp')

  def testNewTabDocs(self):
    """Measures time to open a tab to a logged-in Docs account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the expected substring 'sort' (case insensitive).
    """
    EXPECTED_SUBSTRING = 'sort'

    def _SubstringExistsOnPage():
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.toLowerCase().indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleDocsTabOpen():
      self._AppendTab('http://docs.google.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Docs string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabDocs', _RunSingleDocsTabOpen,
                        'open_tab_live_webapp')


class NetflixPerfTest(BasePerfTest, NetflixTestHelper):
  """Test Netflix video performance."""

  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    NetflixTestHelper.__init__(self, self)

  def tearDown(self):
    pyauto.PyUITest.tearDown(self)

  def testNetflixDroppedFrames(self):
    """Measures the Netflix video dropped frames/second. Runs for 60 secs."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore the first 10 seconds of video playing so we get smooth playback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    dropped_frames = []
    prev_dropped_frames = 0
    for iteration in xrange(60):
      # Ignoring initial dropped frames of the first 10 seconds.
      total_dropped_frames = self._GetVideoDroppedFrames() - \
          init_dropped_frames
      dropped_frames_last_sec = total_dropped_frames - prev_dropped_frames
      dropped_frames.append(dropped_frames_last_sec)
      logging.info('Iteration %d of %d: %f dropped frames in the last second',
                   iteration + 1, 60, dropped_frames_last_sec)
      prev_dropped_frames = total_dropped_frames
      # Play the video for some time.
      time.sleep(1)
    self._PrintSummaryResults('NetflixDroppedFrames', dropped_frames, 'frames',
                              'netflix_dropped_frames')

  def testNetflixCPU(self):
    """Measures the Netflix video CPU usage. Runs for 60 seconds."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore the first 10 seconds of video playing so we get smooth playback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    init_video_frames = self._GetVideoFrames()
    cpu_usage_start = self._GetCPUUsage()
    # Play the video for some time.
    time.sleep(60)
    total_video_frames = self._GetVideoFrames() - init_video_frames
    total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = \
        self._GetFractionNonIdleCPUTime(cpu_usage_start, cpu_usage_end)
    # Extrapolate the measured CPU utilization to what it would take to show
    # all frames, including the dropped ones.
    extrapolation_value = fraction_non_idle_time * \
        (float(total_video_frames) + total_dropped_frames) / total_video_frames
    logging.info('Netflix CPU extrapolation: %f', extrapolation_value)
    self._OutputPerfGraphValue('NetflixCPUExtrapolation', extrapolation_value,
                               'extrapolation', 'netflix_cpu_extrapolation')
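
  # Worked example of the extrapolation (illustrative numbers): if the CPU
  # was non-idle 30% of the time while 1800 frames were shown and 200 were
  # dropped, the extrapolated utilization to show all frames would be
  # 0.3 * (1800 + 200) / 1800 ~= 0.33.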


class YoutubePerfTest(BasePerfTest, YoutubeTestHelper):
  """Test Youtube video performance."""

  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    YoutubeTestHelper.__init__(self, self)

  def _VerifyVideoTotalBytes(self):
    """Returns true if video total bytes information is available."""
    return self.GetVideoTotalBytes() > 0

  def _VerifyVideoLoadedBytes(self):
    """Returns true if video loaded bytes information is available."""
    return self.GetVideoLoadedBytes() > 0

  def StartVideoForPerformance(self, video_id='zuzaxlddWbk'):
    """Starts the test video with all required buffering."""
    self.PlayVideoAndAssert(video_id)
    self.ExecuteJavascript("""
        ytplayer.setPlaybackQuality('hd720');
        window.domAutomationController.send('');
    """)
    self.AssertPlayerState(state=self.is_playing,
                           msg='Player did not enter the playing state.')
    self.assertTrue(
        self.WaitUntil(self._VerifyVideoTotalBytes, expect_retval=True),
        msg='Failed to get video total bytes information.')
    self.assertTrue(
        self.WaitUntil(self._VerifyVideoLoadedBytes, expect_retval=True),
        msg='Failed to get video loaded bytes information.')
    loaded_video_bytes = self.GetVideoLoadedBytes()
    total_video_bytes = self.GetVideoTotalBytes()
    logging.info('total_video_bytes: %f', total_video_bytes)
    # Wait for the video to finish loading.
    while total_video_bytes > loaded_video_bytes:
      loaded_video_bytes = self.GetVideoLoadedBytes()
      logging.info('loaded_video_bytes: %f', loaded_video_bytes)
      time.sleep(1)
    # Ignore the first 10 seconds of video playing so we get smooth playback.
    time.sleep(10)

  def testYoutubeDroppedFrames(self):
    """Measures the Youtube video dropped frames/second. Runs for 60 secs.

    This test measures Youtube video dropped frames for three different types
    of videos: slow, normal, and fast motion.
    """
    youtube_video = {'Slow': 'VT1-sitWRtY',
                     'Normal': '2tqK_3mKQUw',
                     'Fast': '8ETDE0VGJY4',
                    }
    for video_type in youtube_video:
      logging.info('Running %s video.', video_type)
      self.StartVideoForPerformance(youtube_video[video_type])
      init_dropped_frames = self.GetVideoDroppedFrames()
      total_dropped_frames = 0
      dropped_fps = []
      for iteration in xrange(60):
        frames = self.GetVideoDroppedFrames() - init_dropped_frames
        current_dropped_frames = frames - total_dropped_frames
        dropped_fps.append(current_dropped_frames)
        logging.info('Iteration %d of %d: %f dropped frames in the last '
                     'second', iteration + 1, 60, current_dropped_frames)
        total_dropped_frames = frames
        # Play the video for some time.
        time.sleep(1)
      graph_description = 'YoutubeDroppedFrames' + video_type
      self._PrintSummaryResults(graph_description, dropped_fps, 'frames',
                                'youtube_dropped_frames')

  def testYoutubeCPU(self):
    """Measures the Youtube video CPU usage. Runs for 60 seconds.

    Measures the Youtube video CPU usage (between 0 and 1), extrapolated to
    the total frames in the video by taking dropped frames into account. For
    smooth video playback this number should be less than roughly 0.5 to 1.0
    on a hyperthreaded CPU.
    """
    self.StartVideoForPerformance()
    init_dropped_frames = self.GetVideoDroppedFrames()
    logging.info('init_dropped_frames: %f', init_dropped_frames)
    cpu_usage_start = self._GetCPUUsage()
    total_shown_frames = 0
    for sec_num in xrange(60):
      # Play the video for some time.
      time.sleep(1)
      total_shown_frames = total_shown_frames + self.GetVideoFrames()
      logging.info('total_shown_frames: %f', total_shown_frames)
    total_dropped_frames = self.GetVideoDroppedFrames() - init_dropped_frames
    logging.info('total_dropped_frames: %f', total_dropped_frames)
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
        cpu_usage_start, cpu_usage_end)
    logging.info('fraction_non_idle_time: %f', fraction_non_idle_time)
    total_frames = total_shown_frames + total_dropped_frames
    # Extrapolate the measured CPU utilization to what it would take to show
    # all frames, including the dropped ones.
    extrapolation_value = (fraction_non_idle_time *
                           (float(total_frames) / total_shown_frames))
    logging.info('Youtube CPU extrapolation: %f', extrapolation_value)
    # The video is still running, so log some more detailed data.
    self._LogProcessActivity()
    self._OutputPerfGraphValue('YoutubeCPUExtrapolation', extrapolation_value,
                               'extrapolation', 'youtube_cpu_extrapolation')


class FlashVideoPerfTest(BasePerfTest):
  """General flash video performance tests."""

  def FlashVideo1080P(self):
    """Measures total dropped frames and average FPS for a 1080p flash video.

    This is a temporary test to be run manually for now, needed to collect
    some performance statistics across different ChromeOS devices.
    """
    # Open up the test webpage; it's assumed the test will start automatically.
    webpage_url = 'http://www/~arscott/fl/FlashVideoTests.html'
    self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
                    msg='Failed to append tab for webpage.')

    # Wait until the test is complete.
    js_is_done = """
        window.domAutomationController.send(JSON.stringify(tests_done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1) == 'true',
            timeout=300, expect_retval=True, retry_sleep=1),
        msg='Timed out when waiting for test result.')

    # Retrieve and output the test results.
    js_results = """
        window.domAutomationController.send(JSON.stringify(tests_results));
    """
    test_result = eval(self.ExecuteJavascript(js_results, tab_index=1))
    test_result[0] = test_result[0].replace('true', 'True')
    test_result = eval(test_result[0])  # Webpage only does 1 test right now.

    description = 'FlashVideo1080P'
    result = test_result['averageFPS']
    logging.info('Result for %s: %f FPS (average)', description, result)
    self._OutputPerfGraphValue(description, result, 'FPS',
                               'flash_video_1080p_fps')
    result = test_result['droppedFrames']
    logging.info('Result for %s: %f dropped frames', description, result)
    self._OutputPerfGraphValue(description, result, 'DroppedFrames',
                               'flash_video_1080p_dropped_frames')


class WebGLTest(BasePerfTest):
  """Tests for WebGL performance."""

  def _RunWebGLTest(self, url, description, graph_name):
    """Measures FPS using a specified WebGL demo.

    Args:
      url: The string URL that, once loaded, will run the WebGL demo (default
          WebGL demo settings are used, since this test does not modify any
          settings in the demo).
      description: A string description for this demo, used as a performance
          value description. Should not contain any spaces.
      graph_name: A string name for the performance graph associated with this
          test. Only used on Chrome desktop.
    """
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for %s.' % description)

    get_fps_js = """
        var fps_field = document.getElementById("fps");
        var result = -1;
        if (fps_field)
          result = fps_field.innerHTML;
        window.domAutomationController.send(JSON.stringify(result));
    """

    # Wait until we start getting FPS values.
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(get_fps_js, tab_index=1) != '-1',
            timeout=300, retry_sleep=1),
        msg='Timed out when waiting for FPS values to be available.')

    # Let the experiment run for 5 seconds before we start collecting perf
    # measurements.
    time.sleep(5)

    # Collect the current FPS value each second for the next 30 seconds. The
    # final result of this test will be the average of these FPS values.
    fps_vals = []
    for iteration in xrange(30):
      fps = self.ExecuteJavascript(get_fps_js, tab_index=1)
      fps = float(fps.replace('"', ''))
      fps_vals.append(fps)
      logging.info('Iteration %d of %d: %f FPS', iteration + 1, 30, fps)
      time.sleep(1)
    self._PrintSummaryResults(description, fps_vals, 'fps', graph_name)

  def testWebGLAquarium(self):
    """Measures performance using the WebGL Aquarium demo."""
    self._RunWebGLTest(
        self.GetFileURLForDataPath('pyauto_private', 'webgl', 'aquarium',
                                   'aquarium.html'),
        'WebGLAquarium', 'webgl_demo')

  def testWebGLField(self):
    """Measures performance using the WebGL Field demo."""
    self._RunWebGLTest(
        self.GetFileURLForDataPath('pyauto_private', 'webgl', 'field',
                                   'field.html'),
        'WebGLField', 'webgl_demo')

  def testWebGLSpaceRocks(self):
    """Measures performance using the WebGL SpaceRocks demo."""
    self._RunWebGLTest(
        self.GetFileURLForDataPath('pyauto_private', 'webgl', 'spacerocks',
                                   'spacerocks.html'),
        'WebGLSpaceRocks', 'webgl_demo')


class GPUPerfTest(BasePerfTest):
  """Tests for GPU performance."""

  def setUp(self):
    """Performs necessary setup work before running each test in this class."""
    self._gpu_info_dict = self.EvalDataFrom(os.path.join(self.DataDir(),
                                                         'gpu', 'gpuperf.txt'))
    self._demo_name_url_dict = self._gpu_info_dict['demo_info']
    pyauto.PyUITest.setUp(self)

  def _MeasureFpsOverTime(self, tab_index=0):
    """Measures FPS using a specified demo.

    This function assumes that the demo is already loaded in the specified
    tab.

    Args:
      tab_index: The tab index, default is 0.

    Returns:
      The average FPS value from among those collected.
    """
    # Let the experiment run for 5 seconds before we start collecting FPS
    # values.
    time.sleep(5)

    # Collect the current FPS value each second for the next 10 seconds.
    # Then return the average FPS value from among those collected.
    fps_vals = []
    for iteration in xrange(10):
      fps = self.GetFPS(tab_index=tab_index)
      fps_vals.append(fps['fps'])
      time.sleep(1)
    return Mean(fps_vals)

  def _GetStdAvgAndCompare(self, avg_fps, description, ref_dict):
    """Compares an average FPS value against platform reference data.

    Args:
      avg_fps: The average FPS value to compare.
      description: A string description for this demo, used as a performance
          value description.
      ref_dict: Dictionary which contains reference data for this test case.

    Returns:
      True, if the actual FPS value is within 10% of the reference FPS value,
      or False, otherwise.
    """
    # Load reference data according to platform.
    platform_ref_dict = None
    if self.IsWin():
      platform_ref_dict = ref_dict['win']
    elif self.IsMac():
      platform_ref_dict = ref_dict['mac']
    elif self.IsLinux():
      platform_ref_dict = ref_dict['linux']
    else:
      self.fail('This platform is unsupported.')
    std_fps = platform_ref_dict[description]
    # Compare the reference data to the average fps. We allow the average FPS
    # value to be within 10% of the reference value.
    if avg_fps < (0.9 * std_fps):
      logging.info('FPS difference exceeds threshold for: %s', description)
      logging.info('  Average: %f fps', avg_fps)
      logging.info('  Reference Average: %f fps', std_fps)
      return False
    elif avg_fps > (1.1 * std_fps):
      logging.info('Average FPS is actually greater than 10 percent '
                   'more than the reference FPS for: %s', description)
      logging.info('  Average: %f fps', avg_fps)
      logging.info('  Reference Average: %f fps', std_fps)
    return True

  def testLaunchDemosParallelInSeparateTabs(self):
    """Measures performance of demos in different tabs in the same browser."""
    # Launch all the demos in parallel in separate tabs.
    counter = 0
    all_demos_passed = True
    ref_dict = self._gpu_info_dict['separate_tab_ref_data']
    # Iterate through the dictionary and append each url to the browser.
    for url in self._demo_name_url_dict.iterkeys():
      self.assertTrue(
          self.AppendTab(pyauto.GURL(self._demo_name_url_dict[url])),
          msg='Failed to append tab for %s.' % url)
      counter += 1
      # Assert that the number of open tabs equals the number of appended
      # tabs, plus the original tab.
      self.assertEqual(self.GetTabCount(), counter + 1)
    # Measure performance using each demo and compare against the golden
    # reference values.
    counter = 1
    for url in self._demo_name_url_dict.iterkeys():
      avg_fps = self._MeasureFpsOverTime(tab_index=counter)
      # Get the reference value of fps and compare the results.
      if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
        all_demos_passed = False
      counter += 1
    self.assertTrue(
        all_demos_passed,
        msg='One or more demos failed to yield an acceptable FPS value.')

  def testLaunchDemosInSeparateBrowser(self):
    """Measures performance by launching each demo in a separate browser."""
    # Launch each demo in its own browser instance.
    ref_dict = self._gpu_info_dict['separate_browser_ref_data']
    all_demos_passed = True
    for url in self._demo_name_url_dict.iterkeys():
      self.NavigateToURL(self._demo_name_url_dict[url])
      # Measure performance using each demo.
      avg_fps = self._MeasureFpsOverTime()
      self.RestartBrowser()
      # Get the standard value of fps and compare the results.
      if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
        all_demos_passed = False
    self.assertTrue(
        all_demos_passed,
        msg='One or more demos failed to yield an acceptable FPS value.')

  def testLaunchDemosBrowseForwardBackward(self):
    """Measures performance of demos while navigating back and forward."""
    ref_dict = self._gpu_info_dict['browse_back_forward_ref_data']
    url_array = []
    desc_array = []
    all_demos_passed = True
    # Get the URL/description from the dictionary and put them in arrays.
    for url in self._demo_name_url_dict.iterkeys():
      url_array.append(self._demo_name_url_dict[url])
      desc_array.append(url)
    for index in range(len(url_array) - 1):
      # Launch the demo in the browser.
      self.NavigateToURL(url_array[index])
      # Measure performance using the first demo.
      avg_fps = self._MeasureFpsOverTime()
      status1 = self._GetStdAvgAndCompare(avg_fps, desc_array[index],
                                          ref_dict)
      # Measure performance using the second demo.
      self.NavigateToURL(url_array[index + 1])
      avg_fps = self._MeasureFpsOverTime()
      status2 = self._GetStdAvgAndCompare(avg_fps, desc_array[index + 1],
                                          ref_dict)
      # Go back to the previous demo.
      self.TabGoBack()
      # Measure performance for the first demo when moved back.
      avg_fps = self._MeasureFpsOverTime()
      status3 = self._GetStdAvgAndCompare(
          avg_fps, desc_array[index] + '_backward',
          ref_dict)
      # Go forward to the next demo.
      self.TabGoForward()
      # Measure performance for the second demo when moved forward.
      avg_fps = self._MeasureFpsOverTime()
      status4 = self._GetStdAvgAndCompare(
          avg_fps, desc_array[index + 1] + '_forward',
          ref_dict)
      if not all([status1, status2, status3, status4]):
        all_demos_passed = False
    self.assertTrue(
        all_demos_passed,
        msg='One or more demos failed to yield an acceptable FPS value.')


class HTML5BenchmarkTest(BasePerfTest):
  """Tests for HTML5 performance."""

  def testHTML5Benchmark(self):
    """Measures performance using the benchmark at html5-benchmark.com."""
    self.NavigateToURL('http://html5-benchmark.com')

    start_benchmark_js = """
        benchmark();
        window.domAutomationController.send("done");
    """
    self.ExecuteJavascript(start_benchmark_js)

    js_final_score = """
        var score = "-1";
        var elem = document.getElementById("score");
        if (elem)
          score = elem.innerHTML;
        window.domAutomationController.send(score);
    """
    # Wait for the benchmark to complete, which is assumed to be when the
    # value of the 'score' DOM element changes to something other than
    # '87485'.
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_final_score) != '87485',
            timeout=900, retry_sleep=1),
        msg='Timed out when waiting for final score to be available.')

    score = self.ExecuteJavascript(js_final_score)
    logging.info('HTML5 Benchmark final score: %f', float(score))
    self._OutputPerfGraphValue('HTML5Benchmark', float(score), 'score',
                               'html5_benchmark')
1586 class FileUploadDownloadTest(BasePerfTest):
1587 """Tests that involve measuring performance of upload and download."""
1590 """Performs necessary setup work before running each test in this class."""
1591 self._temp_dir = tempfile.mkdtemp()
1592 self._test_server = PerfTestServer(self._temp_dir)
1593 self._test_server_port = self._test_server.GetPort()
1594 self._test_server.Run()
1595 self.assertTrue(self.WaitUntil(self._IsTestServerRunning),
1596 msg='Failed to start local performance test server.')
1597 BasePerfTest.setUp(self)
1600 """Performs necessary cleanup work after running each test in this class."""
1601 BasePerfTest.tearDown(self)
1602 self._test_server.ShutDown()
1603 pyauto_utils.RemovePath(self._temp_dir)
1605 def _IsTestServerRunning(self):
1606 """Determines whether the local test server is ready to accept connections.
1609 True, if a connection can be made to the local performance test server, or
1614 conn = urllib2.urlopen('http://localhost:%d' % self._test_server_port)
1622 def testDownload100MBFile(self):
1623 """Measures the time to download a 100 MB file from a local server."""
1624 CREATE_100MB_URL = (
1625 'http://localhost:%d/create_file_of_size?filename=data&mb=100' %
1626 self._test_server_port)
1627 DOWNLOAD_100MB_URL = 'http://localhost:%d/data' % self._test_server_port
1628 DELETE_100MB_URL = ('http://localhost:%d/delete_file?filename=data' %
1629 self._test_server_port)
1631 # Tell the local server to create a 100 MB file.
1632 self.NavigateToURL(CREATE_100MB_URL)
1634 # Cleaning up downloaded files is done in the same way as in downloads.py.
1635 # We first identify all existing downloaded files, then remove only those
1636 # new downloaded files that appear during the course of this test.
1637 download_dir = self.GetDownloadDirectory().value()
1638 orig_downloads = []
1639 if os.path.isdir(download_dir):
1640 orig_downloads = os.listdir(download_dir)
1642 def _CleanupAdditionalFilesInDir(directory, orig_files):
1643 """Removes the additional files in the specified directory.
1645 This function will remove all files from |directory| that are not
1646 specified in |orig_files|.
1648 Args:
1649 directory: A string directory path.
1650 orig_files: A list of strings representing the original set of files in
1651 the specified directory.
1652 """
1653 downloads_to_remove = []
1654 if os.path.isdir(directory):
1655 downloads_to_remove = [os.path.join(directory, name)
1656 for name in os.listdir(directory)
1657 if name not in orig_files]
1658 for file_name in downloads_to_remove:
1659 pyauto_utils.RemovePath(file_name)
1661 def _DownloadFile(url):
1662 self.DownloadAndWaitForStart(url)
1663 self.WaitForAllDownloadsToComplete(timeout=2 * 60 * 1000) # 2 minutes.
1665 timings = []
1666 for iteration in range(self._num_iterations + 1):
1667 elapsed_time = self._MeasureElapsedTime(
1668 lambda: _DownloadFile(DOWNLOAD_100MB_URL), num_invocations=1)
1669 # Ignore the first iteration.
1670 if iteration > 0:
1671 timings.append(elapsed_time)
1672 logging.info('Iteration %d of %d: %f milliseconds', iteration,
1673 self._num_iterations, elapsed_time)
1674 self.SetDownloadShelfVisible(False)
1675 _CleanupAdditionalFilesInDir(download_dir, orig_downloads)
1677 self._PrintSummaryResults('Download100MBFile', timings, 'milliseconds',
1678 'download_file')
1680 # Tell the local server to delete the 100 MB file.
1681 self.NavigateToURL(DELETE_100MB_URL)
1683 def testUpload50MBFile(self):
1684 """Measures the time to upload a 50 MB file to a local server."""
1685 # TODO(dennisjeffrey): Replace the use of XMLHttpRequest in this test with
1686 # FileManager automation to select the upload file when crosbug.com/17903
1687 # is complete.
1688 START_UPLOAD_URL = (
1689 'http://localhost:%d/start_upload?mb=50' % self._test_server_port)
1691 EXPECTED_SUBSTRING = 'Upload complete'
1694 js = """
1695 var result = "";
1693 def _IsUploadComplete():
1696 var div = document.getElementById("upload_result");
1697 if (div)
1698 result = div.innerHTML;
1699 window.domAutomationController.send(result);
1700 """
1701 return self.ExecuteJavascript(js).find(EXPECTED_SUBSTRING) >= 0
1703 def _RunSingleUpload():
1704 self.NavigateToURL(START_UPLOAD_URL)
1705 self.assertTrue(
1706 self.WaitUntil(_IsUploadComplete, timeout=120, expect_retval=True,
1707 retry_sleep=1),
1708 msg='Upload failed to complete before the timeout was hit.')
1710 timings = []
1711 for iteration in range(self._num_iterations + 1):
1712 elapsed_time = self._MeasureElapsedTime(_RunSingleUpload)
1713 # Ignore the first iteration.
1714 if iteration > 0:
1715 timings.append(elapsed_time)
1716 logging.info('Iteration %d of %d: %f milliseconds', iteration,
1717 self._num_iterations, elapsed_time)
1719 self._PrintSummaryResults('Upload50MBFile', timings, 'milliseconds',
1720 'upload_file')
1723 class ScrollResults(object):
1724 """Container for ScrollTest results."""
1726 def __init__(self, first_paint_seconds, results_list):
1727 assert len(results_list) == 2, 'Expecting initial and repeat results.'
1728 self._first_paint_time = 1000.0 * first_paint_seconds
1729 self._results_list = results_list
1731 def GetFirstPaintTime(self):
1732 return self._first_paint_time
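# Note on GetFrameCount below: 'numFramesSentToScreen' is assumed to be
# present only when the GPU benchmarking path reports real screen frames;
# otherwise the count falls back to 'numAnimationFrames'.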
1734 def GetFrameCount(self, index):
1735 results = self._results_list[index]
1736 return results.get('numFramesSentToScreen', results['numAnimationFrames'])
1738 def GetFps(self, index):
1739 return (self.GetFrameCount(index) /
1740 self._results_list[index]['totalTimeInSeconds'])
1742 def GetMeanFrameTime(self, index):
1743 return (self._results_list[index]['totalTimeInSeconds'] /
1744 self.GetFrameCount(index))
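# Despite its name, the method below returns a fraction in [0, 1] (dropped
# frames over total frames); callers report it under a 'percent' unit label.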
1746 def GetPercentBelow60Fps(self, index):
1747 return (float(self._results_list[index]['droppedFrameCount']) /
1748 self.GetFrameCount(index))
1751 class BaseScrollTest(BasePerfTest):
1752 """Base class for tests measuring scrolling performance."""
1755 """Performs necessary setup work before running each test."""
1756 super(BaseScrollTest, self).setUp()
1757 scroll_file = os.path.join(self.DataDir(), 'scroll', 'scroll.js')
1758 with open(scroll_file) as f:
1759 self._scroll_text = f.read()
1761 def ExtraChromeFlags(self):
1762 """Ensures Chrome is launched with custom flags.
1765 A list of extra flags to pass to Chrome when it is launched.
1766 """
1767 # Extra flag used by scroll performance tests.
1768 return (super(BaseScrollTest, self).ExtraChromeFlags() +
1769 ['--enable-gpu-benchmarking'])
1771 def RunSingleInvocation(self, url, is_gmail_test=False):
1772 """Runs a single invocation of the scroll test.
1775 url: The string url for the webpage on which to run the scroll test.
1776 is_gmail_test: True iff the test is a GMail test.
1778 Returns:
1779 Instance of ScrollResults.
1780 """
1782 self.assertTrue(self.AppendTab(pyauto.GURL(url)),
1783 msg='Failed to append tab for webpage.')
1785 timeout = pyauto.PyUITest.ActionTimeoutChanger(self, 300 * 1000) # ms
1786 test_js = """%s
1787 new __ScrollTest(function(results) {
1788 var stringify = JSON.stringify || JSON.encode;
1789 window.domAutomationController.send(stringify(results));
1790 }, %s);
1791 """ % (self._scroll_text, 'true' if is_gmail_test else 'false')
1792 results = simplejson.loads(self.ExecuteJavascript(test_js, tab_index=1))
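# chrome.loadTimes() is a Chrome-specific API whose timestamps are in
# seconds, so the difference computed below is in seconds; ScrollResults
# converts it to milliseconds.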
1794 first_paint_js = ('window.domAutomationController.send('
1795 '(chrome.loadTimes().firstPaintTime - '
1796 'chrome.loadTimes().requestTime).toString());')
1797 first_paint_time = float(self.ExecuteJavascript(first_paint_js,
1798 tab_index=1))
1800 self.CloseTab(tab_index=1)
1802 return ScrollResults(first_paint_time, results)
1804 def RunScrollTest(self, url, description, graph_name, is_gmail_test=False):
1805 """Runs a scroll performance test on the specified webpage.
1808 url: The string url for the webpage on which to run the scroll test.
1809 description: A string description for the particular test being run.
1810 graph_name: A string name for the performance graph associated with this
1811 test. Only used on Chrome desktop.
1812 is_gmail_test: True iff the test is a GMail test.
1813 """
1814 results = []
1815 for iteration in range(self._num_iterations + 1):
1816 result = self.RunSingleInvocation(url, is_gmail_test)
1817 # Ignore the first iteration.
1818 if iteration > 0:
1819 fps = result.GetFps(1)
1820 assert fps, '%s did not scroll' % url
1821 logging.info('Iteration %d of %d: %f fps', iteration,
1822 self._num_iterations, fps)
1823 results.append(result)
1824 self._PrintSummaryResults(
1825 description, [r.GetFps(1) for r in results],
1826 'FPS', graph_name)
1829 class PopularSitesScrollTest(BaseScrollTest):
1830 """Measures scrolling performance on recorded versions of popular sites."""
1832 def ExtraChromeFlags(self):
1833 """Ensures Chrome is launched with custom flags.
1836 A list of extra flags to pass to Chrome when it is launched.
1837 """
1838 return super(PopularSitesScrollTest,
1839 self).ExtraChromeFlags() + PageCyclerReplay.CHROME_FLAGS
1841 def _GetUrlList(self, test_name):
1842 """Returns list of recorded sites."""
1843 sites_path = PageCyclerReplay.Path('page_sets', test_name=test_name)
1844 with open(sites_path) as f:
1845 sites_text = f.read()
1847 js = """%s
1848 window.domAutomationController.send(JSON.stringify(pageSets));
1849 """ % sites_text
1850 page_sets = eval(self.ExecuteJavascript(js))
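# Note: the reply above is eval()'d because the page-set file is JavaScript,
# not strict JSON; this assumes the checked-in page sets are trusted input.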
1851 return list(itertools.chain(*page_sets))[1:] # Skip first.
1853 def _PrintScrollResults(self, results):
1854 self._PrintSummaryResults(
1855 'initial', [r.GetMeanFrameTime(0) for r in results],
1856 'seconds', 'MeanFrameTime')
1857 self._PrintSummaryResults(
1858 'repeat', [r.GetMeanFrameTime(1) for r in results],
1859 'seconds', 'MeanFrameTime')
1860 self._PrintSummaryResults(
1861 'initial',
1862 [r.GetPercentBelow60Fps(0) for r in results],
1863 'percent', 'PercentBelow60FPS')
1864 self._PrintSummaryResults(
1865 'repeat',
1866 [r.GetPercentBelow60Fps(1) for r in results],
1867 'percent', 'PercentBelow60FPS')
1868 self._PrintSummaryResults(
1869 'first_paint_time', [r.GetFirstPaintTime() for r in results],
1870 'ms', 'FirstPaintTime')
1872 def test2012Q3(self):
1873 test_name = '2012Q3'
1874 urls = self._GetUrlList(test_name)
1875 results = []
1876 with PageCyclerReplay.ReplayServer(test_name) as replay_server:
1877 if replay_server.is_record_mode:
1878 self._num_iterations = 1
1879 for iteration in range(self._num_iterations):
1880 for url in urls:
1881 result = self.RunSingleInvocation(url)
1882 fps = result.GetFps(0)
1883 assert fps, '%s did not scroll' % url
1884 logging.info('Iteration %d of %d: %f fps', iteration + 1,
1885 self._num_iterations, fps)
1886 results.append(result)
1887 self._PrintScrollResults(results)
1890 class ScrollTest(BaseScrollTest):
1891 """Tests to measure scrolling performance."""
1893 def ExtraChromeFlags(self):
1894 """Ensures Chrome is launched with custom flags.
1897 A list of extra flags to pass to Chrome when it is launched.
1898 """
1899 # Extra flag needed by scroll performance tests.
1900 return super(ScrollTest, self).ExtraChromeFlags() + ['--disable-gpu-vsync']
1902 def testBlankPageScroll(self):
1903 """Runs the scroll test on a blank page."""
1905 self.GetFileURLForDataPath('scroll', 'blank.html'), 'ScrollBlankPage',
1906 'scroll_fps')
1908 def testTextScroll(self):
1909 """Runs the scroll test on a text-filled page."""
1911 self.GetFileURLForDataPath('scroll', 'text.html'), 'ScrollTextPage',
1912 'scroll_fps')
1914 def testGooglePlusScroll(self):
1915 """Runs the scroll test on a Google Plus anonymized page."""
1917 self.GetFileURLForDataPath('scroll', 'plus.html'),
1918 'ScrollGooglePlusPage', 'scroll_fps')
1920 def testGmailScroll(self):
1921 """Runs the scroll test using the live Gmail site."""
1922 self._LoginToGoogleAccount(account_key='test_google_account_gmail')
1923 self.RunScrollTest('http://www.gmail.com', 'ScrollGmail',
1924 'scroll_fps', is_gmail_test=True)
1927 class FlashTest(BasePerfTest):
1928 """Tests to measure flash performance."""
1930 def _RunFlashTestForAverageFPS(self, webpage_url, description, graph_name):
1931 """Runs a single flash test that measures an average FPS value.
1934 webpage_url: The string URL to a webpage that will run the test.
1935 description: A string description for this test.
1936 graph_name: A string name for the performance graph associated with this
1937 test. Only used on Chrome desktop.
1938 """
1939 # Open up the test webpage; it's assumed the test will start automatically.
1940 self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
1941 msg='Failed to append tab for webpage.')
1943 # Wait until the final result is computed, then retrieve and output it.
1944 js = """
1945 window.domAutomationController.send(
1946 JSON.stringify(final_average_fps));
1947 """
1949 self.assertTrue(self.WaitUntil(
1950 lambda: self.ExecuteJavascript(js, tab_index=1) != '-1',
1951 timeout=300, expect_retval=True, retry_sleep=1),
1952 msg='Timed out when waiting for test result.')
1953 result = float(self.ExecuteJavascript(js, tab_index=1))
1954 logging.info('Result for %s: %f FPS (average)', description, result)
1955 self._OutputPerfGraphValue(description, result, 'FPS', graph_name)
1957 def testFlashGaming(self):
1958 """Runs a simple flash gaming benchmark test."""
1959 webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
1960 'FlashGamingTest2.html')
1961 self._RunFlashTestForAverageFPS(webpage_url, 'FlashGaming', 'flash_fps')
1963 def testFlashText(self):
1964 """Runs a simple flash text benchmark test."""
1965 webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
1966 'FlashTextTest2.html')
1967 self._RunFlashTestForAverageFPS(webpage_url, 'FlashText', 'flash_fps')
1969 def testScimarkGui(self):
1970 """Runs the ScimarkGui benchmark tests."""
1971 webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
1973 self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
1974 msg='Failed to append tab for webpage.')
1976 js = 'window.domAutomationController.send(JSON.stringify(tests_done));'
1978 self.assertTrue(self.WaitUntil(
1979 lambda: self.ExecuteJavascript(js, tab_index=1), timeout=300,
1980 expect_retval='true', retry_sleep=1),
1981 msg='Timed out when waiting for tests to complete.')
1983 js_result = """
1984 var result = {};
1985 for (var i = 0; i < tests_results.length; ++i) {
1986 var test_name = tests_results[i][0];
1987 var mflops = tests_results[i][1];
1988 var mem = tests_results[i][2];
1989 result[test_name] = [mflops, mem];
1990 }
1991 window.domAutomationController.send(JSON.stringify(result));
1992 """
1993 result = eval(self.ExecuteJavascript(js_result, tab_index=1))
1994 for benchmark in result:
1995 mflops = float(result[benchmark][0])
1996 mem = float(result[benchmark][1])
1997 if benchmark.endswith('_mflops'):
1998 benchmark = benchmark[:benchmark.find('_mflops')]
1999 logging.info('Results for ScimarkGui_%s:', benchmark)
2000 logging.info(' %f MFLOPS', mflops)
2001 logging.info(' %f MB', mem)
2002 self._OutputPerfGraphValue('ScimarkGui-%s-MFLOPS' % benchmark, mflops,
2003 'MFLOPS', 'scimark_gui_mflops')
2004 self._OutputPerfGraphValue('ScimarkGui-%s-Mem' % benchmark, mem, 'MB',
2005 'scimark_gui_mem')
2008 class LiveGamePerfTest(BasePerfTest):
2009 """Tests to measure performance of live gaming webapps."""
2011 def _RunLiveGamePerfTest(self, url, url_title_substring,
2012 description, graph_name):
2013 """Measures performance metrics for the specified live gaming webapp.
2015 This function connects to the specified URL to launch the gaming webapp,
2016 waits for a period of time for the webapp to run, then collects some
2017 performance metrics about the running webapp.
2019 Args:
2020 url: The string URL of the gaming webapp to analyze.
2021 url_title_substring: A string that is expected to be a substring of the
2022 webpage title for the specified gaming webapp. Used to verify that
2023 the webapp loads correctly.
2024 description: A string description for this game, used in the performance
2025 value description. Should not contain any spaces.
2026 graph_name: A string name for the performance graph associated with this
2027 test. Only used on Chrome desktop.
2028 """
2029 self.NavigateToURL(url)
2030 loaded_tab_title = self.GetActiveTabTitle()
2031 self.assertTrue(url_title_substring in loaded_tab_title,
2032 msg='Loaded tab title missing "%s": "%s"' %
2033 (url_title_substring, loaded_tab_title))
2034 cpu_usage_start = self._GetCPUUsage()
2036 # Let the app run for 1 minute.
2037 time.sleep(60)
2039 cpu_usage_end = self._GetCPUUsage()
2040 fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
2041 cpu_usage_start, cpu_usage_end)
2043 logging.info('Fraction of CPU time spent non-idle: %f',
2044 fraction_non_idle_time)
2045 self._OutputPerfGraphValue(description + 'CpuBusy', fraction_non_idle_time,
2046 'Fraction', graph_name + '_cpu_busy')
2047 v8_heap_stats = self.GetV8HeapStats()
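# 'v8_memory_used' is assumed to be reported in bytes; it is converted to
# MB below for graphing.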
2048 v8_heap_size = v8_heap_stats['v8_memory_used'] / (1024.0 * 1024.0)
2049 logging.info('Total v8 heap size: %f MB', v8_heap_size)
2050 self._OutputPerfGraphValue(description + 'V8HeapSize', v8_heap_size, 'MB',
2051 graph_name + '_v8_heap_size')
2053 def testAngryBirds(self):
2054 """Measures performance for Angry Birds."""
2055 self._RunLiveGamePerfTest('http://chrome.angrybirds.com', 'Angry Birds',
2056 'AngryBirds', 'angry_birds')
2059 class BasePageCyclerTest(BasePerfTest):
2060 """Page class for page cycler tests.
2062 Derived classes must implement StartUrl().
2064 Environment Variables:
2065 PC_NO_AUTO: if set, avoids automatically loading pages.
2066 """
2067 MAX_ITERATION_SECONDS = 60
2068 TRIM_PERCENT = 20
2069 DEFAULT_USE_AUTO = True
2071 # Page Cycler lives in src/data/page_cycler rather than src/chrome/test/data
2072 DATA_PATH = os.path.abspath(
2073 os.path.join(BasePerfTest.DataDir(), os.pardir, os.pardir,
2074 os.pardir, 'data', 'page_cycler'))
2076 def setUp(self):
2077 """Performs necessary setup work before running each test."""
2078 super(BasePageCyclerTest, self).setUp()
2079 self.use_auto = 'PC_NO_AUTO' not in os.environ
2081 @classmethod
2082 def DataPath(cls, subdir):
2083 return os.path.join(cls.DATA_PATH, subdir)
2085 def ExtraChromeFlags(self):
2086 """Ensures Chrome is launched with custom flags.
2089 A list of extra flags to pass to Chrome when it is launched.
2090 """
2091 # Extra flags required to run these tests.
2092 # The first two are needed for the test.
2093 # The plugins argument is to prevent bad scores due to pop-ups from
2094 # running an old version of something (like Flash).
2095 return (super(BasePageCyclerTest, self).ExtraChromeFlags() +
2096 ['--js-flags="--expose_gc"',
2097 '--enable-file-cookies',
2098 '--allow-outdated-plugins'])
2100 def WaitUntilStarted(self, start_url):
2101 """Check that the test navigates away from the start_url."""
2103 var is_started = document.location.href !== "%s";
2104 window.domAutomationController.send(JSON.stringify(is_started));
2105 """ % start_url
2106 self.assertTrue(
2107 self.WaitUntil(lambda: self.ExecuteJavascript(js_is_started) == 'true',
2108 retry_sleep=1),
2109 msg='Timed out when waiting to leave start page.')
2111 def WaitUntilDone(self, url, iterations):
2112 """Check cookies for "__pc_done=1" to know the test is over."""
2114 cookies = self.GetCookie(pyauto.GURL(url)) # window 0, tab 0
2115 return '__pc_done=1' in cookies
2117 self.assertTrue(
2118 self.WaitUntil(IsDone,
2119 timeout=(self.MAX_ITERATION_SECONDS * iterations),
2120 retry_sleep=1),
2121 msg='Timed out waiting for page cycler test to complete.')
2123 def CollectPagesAndTimes(self, url):
2124 """Collect the results from the cookies."""
2125 pages, times = None, None
2126 cookies = self.GetCookie(pyauto.GURL(url)) # window 0, tab 0
2127 for cookie in cookies.split(';'):
2128 if '__pc_pages' in cookie:
2129 pages_str = cookie.split('=', 1)[1]
2130 pages = pages_str.split(',')
2131 elif '__pc_timings' in cookie:
2132 times_str = cookie.split('=', 1)[1]
2133 times = [float(t) for t in times_str.split(',')]
2134 self.assertTrue(pages and times,
2135 msg='Unable to find test results in cookies: %s' % cookies)
2136 return pages, times
2138 def IteratePageTimes(self, pages, times, iterations):
2139 """Regroup the times by the page.
2142 pages: the list of pages
2143 times: e.g. [page1_iter1, page2_iter1, ..., page1_iter2, page2_iter2, ...]
2144 iterations: the number of times for each page
2145 Yields:
2146 (pageN, [pageN_iter1, pageN_iter2, ...])
2147 """
2148 num_pages = len(pages)
2149 num_times = len(times)
2150 expected_num_times = num_pages * iterations
2151 self.assertEqual(
2152 expected_num_times, num_times,
2153 msg=('num_times != num_pages * iterations: %s != %s * %s, times=%s' %
2154 (num_times, num_pages, iterations, times)))
2155 for i, page in enumerate(pages):
2156 yield page, list(itertools.islice(times, i, None, num_pages))
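# Worked example for IteratePageTimes() above: pages=['a', 'b'] with
# times=[a1, b1, a2, b2] and iterations=2 yields ('a', [a1, a2]) and then
# ('b', [b1, b2]).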
2158 def CheckPageTimes(self, pages, times, iterations):
2159 """Assert that all the times are greater than zero."""
2161 for page, times in self.IteratePageTimes(pages, times, iterations):
2162 failed_times = [t for t in times if t <= 0.0]
2163 if failed_times:
2164 failed_pages.append((page, failed_times))
2165 if failed_pages:
2166 self.fail('Pages with unexpected times: %s' % failed_pages)
2168 def TrimTimes(self, times, percent):
2169 """Return a new list with |percent| number of times trimmed for each page.
2171 Removes the largest and smallest values.
2172 """
2173 iterations = len(times)
2174 times = sorted(times)
2175 num_to_trim = int(iterations * float(percent) / 100.0)
2176 logging.debug('Before trimming %d: %s' % (num_to_trim, times))
2177 a = num_to_trim / 2
2178 b = iterations - (num_to_trim / 2 + num_to_trim % 2)
2179 trimmed_times = times[a:b]
2180 logging.debug('After trimming: %s', trimmed_times)
2181 return trimmed_times
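# Worked example for TrimTimes() above: 10 iterations with percent=20 give
# num_to_trim=2, so times[1:9] drops the single smallest and the single
# largest value.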
2183 def ComputeFinalResult(self, pages, times, iterations):
2184 """The final score that is calculated is a geometric mean of the
2185 arithmetic means of each page's load time, and we drop the
2186 upper/lower 20% of the times for each page so they don't skew the
2187 mean. The geometric mean is used for the final score because the
2188 time range for any given site may be very different, and we don't
2189 want slower sites to weight more heavily than others.
2190 """
2191 self.CheckPageTimes(pages, times, iterations)
2192 page_means = [
2193 Mean(self.TrimTimes(times, percent=self.TRIM_PERCENT))
2194 for _, times in self.IteratePageTimes(pages, times, iterations)]
2195 return GeometricMean(page_means)
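# Example of why the geometric mean is used above: page means of 100 ms and
# 1000 ms give sqrt(100 * 1000) ~= 316, whereas the arithmetic mean (550)
# would be dominated by the slower page.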
2197 def StartUrl(self, test_name, iterations):
2198 """Return the URL to used to start the test.
2200 Derived classes must implement this.
2201 """
2202 raise NotImplementedError
2204 def RunPageCyclerTest(self, name, description):
2205 """Runs the specified PageCycler test.
2208 name: the page cycler test name (corresponds to a directory or test file)
2209 description: a string description for the test
2210 """
2211 iterations = self._num_iterations
2212 start_url = self.StartUrl(name, iterations)
2213 self.NavigateToURL(start_url)
2214 if self.use_auto:
2215 self.WaitUntilStarted(start_url)
2216 self.WaitUntilDone(start_url, iterations)
2217 pages, times = self.CollectPagesAndTimes(start_url)
2218 final_result = self.ComputeFinalResult(pages, times, iterations)
2219 logging.info('%s page cycler final result: %f' %
2220 (description, final_result))
2221 self._OutputPerfGraphValue(description + '_PageCycler', final_result,
2222 'milliseconds', graph_name='PageCycler')
2225 class PageCyclerTest(BasePageCyclerTest):
2226 """Tests to run various page cyclers.
2228 Environment Variables:
2229 PC_NO_AUTO: if set, avoids automatically loading pages.
2230 """
2232 def _PreReadDataDir(self, subdir):
2233 """This recursively reads all of the files in a given url directory.
2235 The intent is to get them into memory before they are used by the benchmark.
2237 Args:
2238 subdir: a subdirectory of the page cycler data directory.
2239 """
2240 def _PreReadDir(dirname, names):
2241 for rfile in names:
2242 with open(os.path.join(dirname, rfile)) as fp:
2243 fp.read()
2244 for root, dirs, files in os.walk(self.DataPath(subdir)):
2245 _PreReadDir(root, files)
2247 def StartUrl(self, test_name, iterations):
2248 # Must invoke GetFileURLForPath before appending parameters to the URL,
2249 # otherwise those parameters will get quoted.
2250 start_url = self.GetFileURLForPath(self.DataPath(test_name), 'start.html')
2251 start_url += '?iterations=%d' % iterations
2252 if self.use_auto:
2253 start_url += '&auto=1'
2254 return start_url
2256 def RunPageCyclerTest(self, dirname, description):
2257 """Runs the specified PageCycler test.
2260 dirname: directory containing the page cycler test
2261 description: a string description for the test
2262 """
2263 self._PreReadDataDir('common')
2264 self._PreReadDataDir(dirname)
2265 super(PageCyclerTest, self).RunPageCyclerTest(dirname, description)
2267 def testMoreJSFile(self):
2268 self.RunPageCyclerTest('morejs', 'MoreJSFile')
2270 def testAlexaFile(self):
2271 self.RunPageCyclerTest('alexa_us', 'Alexa_usFile')
2273 def testBloatFile(self):
2274 self.RunPageCyclerTest('bloat', 'BloatFile')
2276 def testDHTMLFile(self):
2277 self.RunPageCyclerTest('dhtml', 'DhtmlFile')
2279 def testIntl1File(self):
2280 self.RunPageCyclerTest('intl1', 'Intl1File')
2282 def testIntl2File(self):
2283 self.RunPageCyclerTest('intl2', 'Intl2File')
2285 def testMozFile(self):
2286 self.RunPageCyclerTest('moz', 'MozFile')
2288 def testMoz2File(self):
2289 self.RunPageCyclerTest('moz2', 'Moz2File')
2292 class PageCyclerReplay(object):
2293 """Run page cycler tests with network simulation via Web Page Replay.
2295 Web Page Replay is a proxy that can record and "replay" web pages with
2296 simulated network characteristics -- without having to edit the pages
2297 by hand. With WPR, tests can use "real" web content, and catch
2298 performance issues that may result from introducing network delays and
2299 bandwidth throttling.
2300 """
2301 _PATHS = {
2302 'archive': 'src/data/page_cycler/webpagereplay/{test_name}.wpr',
2303 'page_sets': 'src/tools/page_cycler/webpagereplay/tests/{test_name}.js',
2304 'start_page': 'src/tools/page_cycler/webpagereplay/start.html',
2305 'extension': 'src/tools/page_cycler/webpagereplay/extension',
2306 }
2308 WEBPAGEREPLAY_HOST = '127.0.0.1'
2309 WEBPAGEREPLAY_HTTP_PORT = 8080
2310 WEBPAGEREPLAY_HTTPS_PORT = 8413
2312 CHROME_FLAGS = webpagereplay.GetChromeFlags(
2313 WEBPAGEREPLAY_HOST,
2314 WEBPAGEREPLAY_HTTP_PORT,
2315 WEBPAGEREPLAY_HTTPS_PORT) + [
2317 '--disable-background-networking',
2318 '--enable-experimental-extension-apis',
2320 '--enable-benchmarking',
2321 '--enable-net-benchmarking',
2322 '--metrics-recording-only',
2323 '--activate-on-launch',
2325 '--no-proxy-server',
2326 ]
2328 @classmethod
2329 def Path(cls, key, **kwargs):
2330 return FormatChromePath(cls._PATHS[key], **kwargs)
2332 @classmethod
2333 def ReplayServer(cls, test_name, replay_options=None):
2334 archive_path = cls.Path('archive', test_name=test_name)
2335 return webpagereplay.ReplayServer(archive_path,
2336 cls.WEBPAGEREPLAY_HOST,
2337 cls.WEBPAGEREPLAY_HTTP_PORT,
2338 cls.WEBPAGEREPLAY_HTTPS_PORT,
2342 class PageCyclerNetSimTest(BasePageCyclerTest):
2343 """Tests to run Web Page Replay backed page cycler tests."""
2344 MAX_ITERATION_SECONDS = 180
2346 def ExtraChromeFlags(self):
2347 """Ensures Chrome is launched with custom flags.
2350 A list of extra flags to pass to Chrome when it is launched.
2351 """
2352 flags = super(PageCyclerNetSimTest, self).ExtraChromeFlags()
2353 flags.append('--load-extension=%s' % PageCyclerReplay.Path('extension'))
2354 flags.extend(PageCyclerReplay.CHROME_FLAGS)
2355 return flags
2357 def StartUrl(self, test_name, iterations):
2358 start_path = PageCyclerReplay.Path('start_page')
2359 start_url = 'file://%s?test=%s&iterations=%d' % (
2360 start_path, test_name, iterations)
2361 if self.use_auto:
2362 start_url += '&auto=1'
2363 return start_url
2365 def RunPageCyclerTest(self, test_name, description):
2366 """Runs the specified PageCycler test.
2369 test_name: name for archive (.wpr) and config (.js) files.
2370 description: a string description for the test
2371 """
2372 replay_options = None
2373 with PageCyclerReplay.ReplayServer(test_name, replay_options) as server:
2374 if server.is_record_mode:
2375 self._num_iterations = 1
2376 super_self = super(PageCyclerNetSimTest, self)
2377 super_self.RunPageCyclerTest(test_name, description)
2379 def test2012Q2(self):
2380 self.RunPageCyclerTest('2012Q2', '2012Q2')
2383 class MemoryTest(BasePerfTest):
2384 """Tests to measure memory consumption under different usage scenarios."""
2386 def ExtraChromeFlags(self):
2387 """Launches Chrome with custom flags.
2390 A list of extra flags to pass to Chrome when it is launched.
2392 # Ensure Chrome assigns one renderer process to each tab.
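# With one renderer per tab, the per-renderer numbers recorded by
# _RecordMemoryStats can be attributed to individual sites rather than a
# shared renderer process (assumed to be the reason for this flag here).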
2393 return super(MemoryTest, self).ExtraChromeFlags() + ['--process-per-tab']
2395 def _RecordMemoryStats(self, description, when, duration):
2396 """Outputs memory statistics to be graphed.
2399 description: A string description for the test. Should not contain
2400 spaces. For example, 'MemCtrl'.
2401 when: A string description of when the memory stats are being recorded
2402 during test execution (since memory stats may be recorded multiple
2403 times during a test execution at certain "interesting" times). Should
2404 not contain spaces.
2405 duration: The number of seconds to sample data before outputting the
2406 memory statistics.
2407 """
2408 mem = self.GetMemoryStatsChromeOS(duration)
2409 measurement_types = [
2410 ('gem_obj', 'GemObj'),
2412 ('mem_free', 'MemFree'),
2413 ('mem_available', 'MemAvail'),
2414 ('mem_shared', 'MemShare'),
2415 ('mem_cached', 'MemCache'),
2416 ('mem_anon', 'MemAnon'),
2417 ('mem_file', 'MemFile'),
2418 ('mem_slab', 'MemSlab'),
2419 ('browser_priv', 'BrowPriv'),
2420 ('browser_shared', 'BrowShar'),
2421 ('gpu_priv', 'GpuPriv'),
2422 ('gpu_shared', 'GpuShar'),
2423 ('renderer_priv', 'RendPriv'),
2424 ('renderer_shared', 'RendShar'),
2425 ]
2426 for type_key, type_string in measurement_types:
2428 if type_key not in mem:
  continue  # This stat was not reported in this run; skip it.
2429 self._OutputPerfGraphValue(
2430 '%s-Min%s-%s' % (description, type_string, when),
2431 mem[type_key]['min'], 'KB', '%s-%s' % (description, type_string))
2432 self._OutputPerfGraphValue(
2433 '%s-Max%s-%s' % (description, type_string, when),
2434 mem[type_key]['max'], 'KB', '%s-%s' % (description, type_string))
2435 self._OutputPerfGraphValue(
2436 '%s-End%s-%s' % (description, type_string, when),
2437 mem[type_key]['end'], 'KB', '%s-%s' % (description, type_string))
2439 def _RunTest(self, tabs, description, duration):
2440 """Runs a general memory test.
2443 tabs: A list of strings representing the URLs of the websites to open
2444 in the browser.
2445 description: A string description for the test. Should not contain
2446 spaces. For example, 'MemCtrl'.
2447 duration: The number of seconds to sample data before outputting memory
2448 statistics.
2449 """
2450 self._RecordMemoryStats(description, '0Tabs0', duration)
2452 for iteration_num in xrange(2):
2453 for site in tabs:
2454 self.AppendTab(pyauto.GURL(site))
2456 self._RecordMemoryStats(description,
2457 '%dTabs%d' % (len(tabs), iteration_num + 1),
2458 duration)
2460 for _ in xrange(len(tabs)):
2461 self.CloseTab(tab_index=1)
2463 self._RecordMemoryStats(description, '0Tabs%d' % (iteration_num + 1),
2464 duration)
2466 def testOpenCloseTabsControl(self):
2467 """Measures memory usage when opening/closing tabs to about:blank."""
2468 tabs = ['about:blank'] * 10
2469 self._RunTest(tabs, 'MemCtrl', 15)
2471 def testOpenCloseTabsLiveSites(self):
2472 """Measures memory usage when opening/closing tabs to live sites."""
2474 'http://www.google.com/gmail',
2475 'http://www.google.com/calendar',
2476 'http://www.google.com/plus',
2477 'http://www.google.com/youtube',
2478 'http://www.nytimes.com',
2479 'http://www.cnn.com',
2480 'http://www.facebook.com/zuck',
2481 'http://www.techcrunch.com',
2482 'http://www.theverge.com',
2483 'http://www.yahoo.com',
2484 ]
2485 # Log in to a test Google account to make connections to the above Google
2486 # websites more interesting.
2487 self._LoginToGoogleAccount()
2488 self._RunTest(tabs, 'MemLive', 20)
2491 class PerfTestServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
2492 """Request handler for the local performance test server."""
2494 def _IgnoreHandler(self, unused_args):
2495 """A GET request handler that simply replies with status code 200.
2498 unused_args: A dictionary of arguments for the current GET request.
2499 The arguments are ignored.
2500 """
2501 self.send_response(200)
2502 self.end_headers()
2504 def _CreateFileOfSizeHandler(self, args):
2505 """A GET handler that creates a local file with the specified size.
2508 args: A dictionary of arguments for the current GET request. Must
2509 contain 'filename' and 'mb' keys that refer to the name of the file
2510 to create and its desired size, respectively.
2511 """
2512 megabytes = None
2513 filename = None
2514 try:
2515 megabytes = int(args['mb'][0])
2516 filename = args['filename'][0]
2517 except (ValueError, KeyError, IndexError), e:
2518 logging.exception('Server error creating file: %s', e)
2519 assert megabytes and filename
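# The file content is built as one in-memory string of 'X' bytes
# (mb * 2**20 bytes), which is assumed acceptable for a local test server.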
2520 with open(os.path.join(self.server.docroot, filename), 'wb') as f:
2521 f.write('X' * 1024 * 1024 * megabytes)
2522 self.send_response(200)
2523 self.end_headers()
2525 def _DeleteFileHandler(self, args):
2526 """A GET handler that deletes the specified local file.
2529 args: A dictionary of arguments for the current GET request. Must
2530 contain a 'filename' key that refers to the name of the file to
2531 delete, relative to the server's document root.
2532 """
2534 try:
2535 filename = args['filename'][0]
2536 except (KeyError, IndexError), e:
2537 logging.exception('Server error deleting file: %s', e)
2539 try:
2540 os.remove(os.path.join(self.server.docroot, filename))
2541 except OSError, e:
2542 logging.warning('OS error removing file: %s', e)
2543 self.send_response(200)
2544 self.end_headers()
2546 def _StartUploadHandler(self, args):
2547 """A GET handler to serve a page that uploads the given amount of data.
2549 When the page loads, the specified amount of data is automatically
2550 uploaded to the same local server that is handling the current request.
2552 Args:
2553 args: A dictionary of arguments for the current GET request. Must
2554 contain an 'mb' key that refers to the size of the data to upload.
2555 """
2556 megabytes = None
2557 try:
2558 megabytes = int(args['mb'][0])
2559 except (ValueError, KeyError, IndexError), e:
2560 logging.exception('Server error starting upload: %s', e)
2561 assert megabytes
2563 script = """
2564 <html><head>
2565 <script type='text/javascript'>
2566 function startUpload() {
2567 var megabytes = %d;
2568 var data = Array((1024 * 1024 * megabytes) + 1).join('X');
2569 var boundary = '***BOUNDARY***';
2570 var xhr = new XMLHttpRequest();
2572 xhr.open('POST', 'process_upload', true);
2573 xhr.setRequestHeader(
2574 'Content-Type',
2575 'multipart/form-data; boundary="' + boundary + '"');
2576 xhr.setRequestHeader('Content-Length', data.length);
2577 xhr.onreadystatechange = function() {
2578 if (xhr.readyState == 4 && xhr.status == 200) {
2579 document.getElementById('upload_result').innerHTML =
2580 xhr.responseText;
2581 }
2582 };
2583 var body = '--' + boundary + '\\r\\n';
2584 body += 'Content-Disposition: form-data;' +
2585 'file_contents=' + data;
2586 body += '--' + boundary + '--\\r\\n';
2587 xhr.send(body);
2588 }
2589 </script>
2590 </head>
2591 <body onload="startUpload();">
2592 <div id='upload_result'>Uploading...</div>
2593 </body>
2594 </html>
2595 """ % megabytes
2596 self.send_response(200)
2597 self.end_headers()
2598 self.wfile.write(script)
2600 def _ProcessUploadHandler(self, form):
2601 """A POST handler that discards uploaded data and sends a response.
2604 form: A dictionary containing posted form data, as returned by
2605 urlparse.parse_qs().
2606 """
2607 upload_processed = False
2608 file_size = 0
2609 if 'file_contents' in form:
2610 file_size = len(form['file_contents'][0])
2611 upload_processed = True
2612 self.send_response(200)
2613 self.end_headers()
2614 if upload_processed:
2615 self.wfile.write('Upload complete (%d bytes)' % file_size)
2616 else:
2617 self.wfile.write('No file contents uploaded')
2619 GET_REQUEST_HANDLERS = {
2620 'create_file_of_size': _CreateFileOfSizeHandler,
2621 'delete_file': _DeleteFileHandler,
2622 'start_upload': _StartUploadHandler,
2623 'favicon.ico': _IgnoreHandler,
2624 }
2626 POST_REQUEST_HANDLERS = {
2627 'process_upload': _ProcessUploadHandler,
2628 }
2630 def translate_path(self, path):
2631 """Ensures files are served from the given document root.
2633 Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
2634 """
2635 path = urlparse.urlparse(path)[2]
2636 path = posixpath.normpath(urllib.unquote(path))
2637 words = path.split('/')
2638 words = filter(None, words) # Remove empty strings from |words|.
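# The loop below discards drive specifiers and any '.'/'..' path
# components, so a request path cannot escape the configured document root.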
2639 path = self.server.docroot
2640 for word in words:
2641 _, word = os.path.splitdrive(word)
2642 _, word = os.path.split(word)
2643 if word in (os.curdir, os.pardir):
2644 continue
2645 path = os.path.join(path, word)
2646 return path
2648 def do_GET(self):
2649 """Processes a GET request to the local server.
2651 Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
2652 """
2653 split_url = urlparse.urlsplit(self.path)
2654 base_path = split_url[2]
2655 if base_path.startswith('/'):
2656 base_path = base_path[1:]
2657 args = urlparse.parse_qs(split_url[3])
2658 if base_path in self.GET_REQUEST_HANDLERS:
2659 self.GET_REQUEST_HANDLERS[base_path](self, args)
2660 else:
2661 SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
2663 def do_POST(self):
2664 """Processes a POST request to the local server.
2666 Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
2667 """
2668 form = urlparse.parse_qs(
2669 self.rfile.read(int(self.headers.getheader('Content-Length'))))
2670 path = urlparse.urlparse(self.path)[2]
2671 if path.startswith('/'):
2672 path = path[1:]
2673 if path in self.POST_REQUEST_HANDLERS:
2674 self.POST_REQUEST_HANDLERS[path](self, form)
2675 else:
2676 self.send_response(200)
2677 self.send_header('Content-Type', 'text/plain')
2678 self.end_headers()
2679 self.wfile.write('No handler for POST request "%s".' % path)
2682 class ThreadedHTTPServer(SocketServer.ThreadingMixIn,
2683 BaseHTTPServer.HTTPServer):
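# ThreadingMixIn serves each request on its own thread, so a long-running
# download or upload does not block the control requests issued by the
# tests (e.g., create_file_of_size, delete_file).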
2684 def __init__(self, server_address, handler_class):
2685 BaseHTTPServer.HTTPServer.__init__(self, server_address, handler_class)
2688 class PerfTestServer(object):
2689 """Local server for use by performance tests."""
2691 def __init__(self, docroot):
2692 """Initializes the performance test server.
2695 docroot: The directory from which to serve files.
2696 """
2697 # The use of 0 means to start the server on an arbitrary available port.
2698 self._server = ThreadedHTTPServer(('', 0),
2699 PerfTestServerRequestHandler)
2700 self._server.docroot = docroot
2701 self._server_thread = threading.Thread(target=self._server.serve_forever)
2703 def Run(self):
2704 """Starts the server thread."""
2705 self._server_thread.start()
2707 def ShutDown(self):
2708 """Shuts down the server."""
2709 self._server.shutdown()
2710 self._server_thread.join()
2712 def GetPort(self):
2713 """Identifies the port number to which the server is currently bound.
2716 The numeric port number to which the server is currently bound.
2717 """
2718 return self._server.server_address[1]
2721 if __name__ == '__main__':
2722 pyauto_functional.Main()