# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Uploads performance data to the performance dashboard.

Image tests may output data that needs to be displayed on the performance
dashboard. The image test stage/runner invokes this module with each test
associated with a job. If a test has performance data associated with it, it
is uploaded to the performance dashboard. The performance dashboard is owned
by the Chrome team and is available here: https://chromeperf.appspot.com/.
Users must be logged in with an @google.com account to view Chrome OS perf
data there.

This module is similar to src/third_party/autotest/files/tko/perf_uploader.py.
"""

import collections
import httplib
import json
import logging
import math
import os
# pylint: disable=W0402
import string
# pylint: enable=W0402
import urllib
import urllib2

from chromite.lib import osutils


_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
_PRESENTATION_CONFIG_FILE = os.path.join(_ROOT_DIR,
                                         'perf_dashboard_config.json')
_DASHBOARD_UPLOAD_URL = 'https://chromeperf.appspot.com/add_point'
# _DASHBOARD_UPLOAD_URL = 'http://localhost:8080/add_point'

_MAX_DESCRIPTION_LENGTH = 256
_MAX_UNIT_LENGTH = 32


class PerfUploadingError(Exception):
  """Raised when recording or uploading perf data fails."""


PerformanceValue = collections.namedtuple('PerformanceValue',
    'description value units higher_is_better graph')
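
# Illustrative only: a PerformanceValue as it might be parsed back from an
# output file (the metric name and numbers are hypothetical):
#
#   PerformanceValue(description='page_load_time', value=102.7, units='msec',
#                    higher_is_better=False, graph='load_times')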


def OutputPerfValue(filename, description, value, units,
                    higher_is_better=True, graph=None):
  """Record a measured performance value in an output file.

  This is originally from autotest/files/client/common_lib/test.py.

  The output file will subsequently be parsed by ImageTestStage to have the
  information sent to chromeperf.appspot.com.

  Args:
    filename: A path to the output file. Data will be appended to this file.
    description: A string describing the measured perf value. Must be at most
        256 characters long, and may only contain letters, numbers, periods,
        dashes, and underscores. For example: "page_load_time",
        "scrolling-frame-rate".
    value: A number representing the measured perf value, or a list of
        measured values if a test takes multiple measurements. Measured perf
        values can be either ints or floats.
    units: A string describing the units associated with the measured perf
        value(s). Must be at most 32 characters long, and may only contain
        letters, numbers, periods, dashes, and underscores. For example:
        "msec", "fps".
    higher_is_better: A boolean indicating whether or not a higher measured
        perf value is considered better. If False, it is assumed that a
        "lower" measured value is better.
    graph: A string indicating the name of the graph on which the perf value
        will be subsequently displayed on the chrome perf dashboard. This
        allows multiple metrics to be grouped together on the same graph.
        Defaults to None, in which case the perf value is displayed
        individually on a separate graph.
  """
  def ValidateString(param_name, value, max_len):
    if len(value) > max_len:
      raise ValueError('%s must be at most %d characters.' %
                       (param_name, max_len))

    allowed_chars = string.ascii_letters + string.digits + '-._'
    if not set(value).issubset(set(allowed_chars)):
      raise ValueError(
          '%s may only contain letters, digits, hyphens, periods, and '
          'underscores. Its current value is %s.' %
          (param_name, value))

  ValidateString('description', description, _MAX_DESCRIPTION_LENGTH)
  ValidateString('units', units, _MAX_UNIT_LENGTH)

  entry = {
      'description': description,
      'value': value,
      'units': units,
      'higher_is_better': higher_is_better,
      'graph': graph,
  }

  # Append one JSON record per line.
  data = (json.dumps(entry), '\n')
  osutils.WriteFile(filename, data, 'a')
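
# A minimal usage sketch (hypothetical filename and values): each call appends
# one JSON-encoded line to the output file, which LoadPerfValues() below can
# parse back into PerformanceValue objects.
#
#   OutputPerfValue('/tmp/perf_results.json', 'page_load_time', 102.7, 'msec',
#                   higher_is_better=False, graph='load_times')
#
# would append a line like:
#
#   {"description": "page_load_time", "value": 102.7, "units": "msec",
#    "higher_is_better": false, "graph": "load_times"}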


def LoadPerfValues(filename):
  """Return a list of PerformanceValue objects from |filename|."""
  lines = osutils.ReadFile(filename).splitlines()
  entries = []
  for line in lines:
    entry = json.loads(line)
    entries.append(PerformanceValue(**entry))
  return entries


def _AggregateIterations(perf_values):
  """Aggregate same measurements from multiple iterations.

  Each perf measurement may exist multiple times across multiple iterations
  of a test. Here, the results for each unique measured perf metric are
  aggregated across multiple iterations.

  Args:
    perf_values: A list of PerformanceValue objects.

  Returns:
    A dictionary mapping each unique measured perf value (keyed by tuple of
    its description and graph name) to information about that perf value
    (in particular, the value is a list of values for each iteration).
  """
  aggregated_data = {}
  for perf_value in perf_values:
    key = (perf_value.description, perf_value.graph)
    try:
      aggregated_entry = aggregated_data[key]
    except KeyError:
      aggregated_entry = {
          'units': perf_value.units,
          'higher_is_better': perf_value.higher_is_better,
          'graph': perf_value.graph,
          'value': [],
      }
      aggregated_data[key] = aggregated_entry
    # Note: the stddev will be recomputed later when the results
    # from each of the multiple iterations are averaged together.
    aggregated_entry['value'].append(perf_value.value)
  return aggregated_data
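
# For example (hypothetical numbers), three iterations of the same metric:
#
#   [PerformanceValue('page_load_time', 100, 'msec', False, 'load'),
#    PerformanceValue('page_load_time', 102, 'msec', False, 'load'),
#    PerformanceValue('page_load_time', 104, 'msec', False, 'load')]
#
# aggregate to:
#
#   {('page_load_time', 'load'): {'units': 'msec', 'higher_is_better': False,
#                                 'graph': 'load', 'value': [100, 102, 104]}}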


def _MeanAndStddev(data, precision=4):
  """Computes mean and standard deviation from a list of numbers.

  Args:
    data: A list of numeric values.
    precision: The integer number of decimal places to which to
        round the results.

  Returns:
    A 2-tuple (mean, standard_deviation), in which each value is
    rounded to |precision| decimal places.
  """
  n = len(data)
  if n == 0:
    raise ValueError('Cannot compute mean and stddev of an empty list.')
  if n == 1:
    return round(data[0], precision), 0
  mean = math.fsum(data) / n
  # Divide by n-1 to compute "sample standard deviation".
  variance = math.fsum((elem - mean) ** 2 for elem in data) / (n - 1)
  return round(mean, precision), round(math.sqrt(variance), precision)
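
# Worked example: _MeanAndStddev([1, 2, 3]) computes mean = 6 / 3 = 2.0 and
# sample variance = ((1-2)**2 + (2-2)**2 + (3-2)**2) / (3-1) = 1.0, so it
# returns (2.0, 1.0).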


def _ComputeAvgStddev(perf_data):
  """Compute average and standard deviations as needed for perf measurements.

  For any perf measurement that exists in multiple iterations (has more than
  one measured value), compute the average and standard deviation for it and
  then store the updated information in the dictionary (in place).

  Args:
    perf_data: A dictionary of measured perf data as computed by
        _AggregateIterations(). After this call, each "value" entry is a
        single value rather than a list of values, and a "stddev" entry
        is added.
  """
  for perf in perf_data.itervalues():
    perf['value'], perf['stddev'] = _MeanAndStddev(perf['value'])
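
# Continuing the aggregation example above (hypothetical numbers), the entry
#
#   {'value': [100, 102, 104], ...}
#
# becomes, in place:
#
#   {'value': 102.0, 'stddev': 2.0, ...}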


PresentationInfo = collections.namedtuple('PresentationInfo',
                                          'master_name test_name')


def _GetPresentationInfo(test_name):
  """Get presentation info for |test_name| from config file.

  Args:
    test_name: The test name.

  Returns:
    A PresentationInfo object for this test.
  """
  infos = osutils.ReadFile(_PRESENTATION_CONFIG_FILE)
  infos = json.loads(infos)
  for info in infos:
    if info['test_name'] == test_name:
      try:
        return PresentationInfo(**info)
      except TypeError:
        raise PerfUploadingError('No master found for %s' % test_name)

  raise PerfUploadingError('No presentation config found for %s' % test_name)
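
# The config file is expected to be a JSON list of objects whose keys match
# the PresentationInfo fields. A hypothetical perf_dashboard_config.json
# (the master and test names here are made up for illustration):
#
#   [
#     {"master_name": "ChromeOSPerf", "test_name": "login_PerfTest"}
#   ]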


def _FormatForUpload(perf_data, platform_name, cros_version, chrome_version,
                     presentation_info):
  """Formats perf data suitably to upload to the perf dashboard.

  The perf dashboard expects perf data to be uploaded as a
  specially-formatted JSON string. In particular, the JSON object must be a
  dictionary with key "data", and value being a list of dictionaries where
  each dictionary contains all the information associated with a single
  measured perf value: master name, bot name, test name, perf value, units,
  and build version numbers.

  See also google3/googleclient/chrome/speed/dashboard/add_point.py for the
  server-side handler.

  Args:
    perf_data: A dictionary of measured perf data. This is keyed by
        (description, graph name) tuple.
    platform_name: The string name of the platform.
    cros_version: The string Chrome OS version number.
    chrome_version: The string Chrome version number.
    presentation_info: A PresentationInfo object of the given test.

  Returns:
    A dictionary containing the formatted information ready to upload
    to the performance dashboard.
  """
  dash_entries = []
  for (desc, graph), data in perf_data.iteritems():
    # Each perf metric is named by a path that encodes the test name,
    # a graph name (if specified), and a description. This must be defined
    # according to rules set by the Chrome team, as implemented in:
    # chromium/tools/build/scripts/slave/results_dashboard.py.
    desc = desc.replace('/', '_')
    if graph:
      test_path = 'cbuildbot.%s/%s/%s' % (presentation_info.test_name,
                                          graph, desc)
    else:
      test_path = 'cbuildbot.%s/%s' % (presentation_info.test_name, desc)

    new_dash_entry = {
        'master': presentation_info.master_name,
        'bot': 'cros-' + platform_name,  # Prefix to clarify it's Chrome OS.
        'test': test_path,
        'value': data['value'],
        'error': data['stddev'],
        'units': data['units'],
        'higher_is_better': data['higher_is_better'],
        'supplemental_columns': {
            'r_cros_version': cros_version,
            'r_chrome_version': chrome_version,
        },
    }

    dash_entries.append(new_dash_entry)

  json_string = json.dumps(dash_entries)
  return {'data': json_string}
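
# For a single metric, the returned object looks roughly like the following
# (all names and numbers are hypothetical):
#
#   {'data': '[{"master": "ChromeOSPerf", "bot": "cros-x86-release",
#               "test": "cbuildbot.login_PerfTest/load/page_load_time",
#               "value": 102.0, "error": 2.0, "units": "msec",
#               "higher_is_better": false,
#               "supplemental_columns": {"r_cros_version": "38.6052.0.0",
#                                        "r_chrome_version": "38.0.2091.2"}}]'}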


def _SendToDashboard(data_obj):
  """Sends formatted perf data to the perf dashboard.

  Args:
    data_obj: A formatted data object as returned by _FormatForUpload().

  Raises:
    PerfUploadingError if an exception was raised when uploading.
  """
  encoded = urllib.urlencode(data_obj)
  req = urllib2.Request(_DASHBOARD_UPLOAD_URL, encoded)
  try:
    urllib2.urlopen(req)
  except urllib2.HTTPError as e:
    raise PerfUploadingError('HTTPError: %d %s for JSON %s\n' %
                             (e.code, e.msg, data_obj['data']))
  except urllib2.URLError as e:
    raise PerfUploadingError('URLError: %s for JSON %s\n' %
                             (str(e.reason), data_obj['data']))
  except httplib.HTTPException:
    raise PerfUploadingError('HTTPException for JSON %s\n' % data_obj['data'])


def UploadPerfValues(perf_values, platform_name, cros_version, chrome_version,
                     test_name):
  """Uploads any perf data associated with a test to the perf dashboard.

  Args:
    perf_values: List of PerformanceValue objects.
    platform_name: A string identifying the platform, e.g. 'x86-release'.
        'cros-' will be prepended to |platform_name| internally, by
        _FormatForUpload.
    cros_version: A string identifying the Chrome OS version, e.g. '6052.0.0'.
    chrome_version: A string identifying the Chrome version, e.g.
        '38.0.2091.2'.
    test_name: A string identifying the test.
  """
  # Aggregate values from multiple iterations together.
  perf_data = _AggregateIterations(perf_values)

  # Compute averages and standard deviations as needed for measured perf
  # values that exist in multiple iterations. Ultimately, we only upload a
  # single measurement (with standard deviation) for every unique measured
  # perf metric.
  _ComputeAvgStddev(perf_data)

  # Format the perf data for the upload, then upload it.
  # Prefix the Chrome OS version number with the Chrome milestone.
  # TODO(dennisjeffrey): Modify the dashboard to accept the Chrome OS version
  # number *without* the milestone attached.
  cros_version = chrome_version[:chrome_version.find('.') + 1] + cros_version
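  # For example, with the versions from the docstring above, chrome_version
  # '38.0.2091.2' and cros_version '6052.0.0', the slice keeps '38.' and the
  # result is '38.6052.0.0'.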
  try:
    presentation_info = _GetPresentationInfo(test_name)
    formatted_data = _FormatForUpload(perf_data, platform_name,
                                      cros_version, chrome_version,
                                      presentation_info)
    _SendToDashboard(formatted_data)
  except PerfUploadingError:
    logging.exception('Error when uploading perf data to the perf '
                      'dashboard for test %s.', test_name)
  else:
    logging.info('Successfully uploaded perf data to the perf '
                 'dashboard for test %s.', test_name)
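
# End-to-end usage sketch (hypothetical names and versions), e.g. from a stage
# that collected results with OutputPerfValue():
#
#   perf_values = LoadPerfValues('/tmp/perf_results.json')
#   UploadPerfValues(perf_values, 'x86-release', '6052.0.0', '38.0.2091.2',
#                    'login_PerfTest')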