# Copyright 2015 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script validating field trial configs.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""

import copy
import io
import json
import re
import sys

from collections import OrderedDict

VALID_EXPERIMENT_KEYS = [
    'name', 'forcing_flag', 'params', 'enable_features', 'disable_features',
    'min_os_version', '//0', '//1', '//2', '//3', '//4', '//5', '//6', '//7',
    '//8', '//9'
]

FIELDTRIAL_CONFIG_FILE_NAME = 'fieldtrial_testing_config.json'

BASE_FEATURE_PATTERN = r"BASE_FEATURE\((.*?),(.*?),(.*?)\);"
BASE_FEATURE_RE = re.compile(BASE_FEATURE_PATTERN, flags=re.MULTILINE+re.DOTALL)
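
# For reference, the pattern above is written to match C++ feature
# declarations of this shape (an illustrative example, not a declaration
# taken from the code base):
#
#   BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);
#
# The second capture group carries the quoted feature name, which is the
# string that fieldtrial_testing_config.json must refer to.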


def PrettyPrint(contents):
  """Pretty prints a fieldtrial configuration.

  Args:
    contents: File contents as a string.

  Returns:
    Pretty printed file contents.
  """

  # We have a preferred ordering of the fields (e.g. platforms on top). This
  # code loads everything into OrderedDicts and then tells json to dump it out.
  # The JSON dumper will respect the dict ordering.
  #
  # The ordering is as follows:
  # {
  #     'StudyName Alphabetical': [
  #         {
  #             'platforms': [platforms, as listed in the input]
  #             'experiments': [
  #                 {
  #                     '//0' .. '//9': comment strings, if present
  #                     name: "experiment name string"
  #                     forcing_flag: "forcing flag string"
  #                     params: {sorted dict}
  #                     enable_features: [sorted features]
  #                     disable_features: [sorted features]
  #                     min_os_version: "version string"
  #                     (Unexpected extra keys will be caught by the validator)
  #                 }
  #             ]
  #         },
  #         ...
  #     ],
  #     ...
  # }
  config = json.loads(contents)
  ordered_config = OrderedDict()
  for key in sorted(config.keys()):
    study = copy.deepcopy(config[key])
    ordered_study = []
    for experiment_config in study:
      ordered_experiment_config = OrderedDict([('platforms',
                                                experiment_config['platforms']),
                                               ('experiments', [])])
      for experiment in experiment_config['experiments']:
        ordered_experiment = OrderedDict()
        # Emit comment keys first so comments stay above the fields they
        # describe.
        for index in range(0, 10):
          comment_key = '//' + str(index)
          if comment_key in experiment:
            ordered_experiment[comment_key] = experiment[comment_key]
        ordered_experiment['name'] = experiment['name']
        if 'forcing_flag' in experiment:
          ordered_experiment['forcing_flag'] = experiment['forcing_flag']
        if 'params' in experiment:
          ordered_experiment['params'] = OrderedDict(
              sorted(experiment['params'].items(), key=lambda t: t[0]))
        if 'enable_features' in experiment:
          ordered_experiment['enable_features'] = sorted(
              experiment['enable_features'])
        if 'disable_features' in experiment:
          ordered_experiment['disable_features'] = sorted(
              experiment['disable_features'])
        if 'min_os_version' in experiment:
          ordered_experiment['min_os_version'] = experiment['min_os_version']
        ordered_experiment_config['experiments'].append(ordered_experiment)
      ordered_study.append(ordered_experiment_config)
    ordered_config[key] = ordered_study
  return json.dumps(
      ordered_config, sort_keys=False, indent=4, separators=(',', ': ')) + '\n'
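
# A minimal sketch of the expected behaviour, using a hypothetical study (the
# exact output text is what CheckPretty below compares against):
#
#   PrettyPrint('{"MyStudy": [{"experiments": [{"name": "Enabled"}], '
#               '"platforms": ["mac"]}]}')
#
# returns the same data re-serialized with 'platforms' before 'experiments',
# four-space indentation, and a trailing newline.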


def ValidateData(json_data, file_path, message_type):
  """Validates the format of a fieldtrial configuration.

  Args:
    json_data: Parsed JSON object representing the fieldtrial config.
    file_path: String representing the path to the JSON file.
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.

  Returns:
    A list of |message_type| messages. In the case of all tests passing with no
    warnings/errors, this will return [].
  """

  def _CreateMessage(message_format, *args):
    return _CreateMalformedConfigMessage(message_type, file_path,
                                         message_format, *args)

  if not isinstance(json_data, dict):
    return _CreateMessage('Expecting dict')
  for study, experiment_configs in json_data.items():
    warnings = _ValidateEntry(study, experiment_configs, _CreateMessage)
    if warnings:
      return warnings

  return []


def _ValidateEntry(study, experiment_configs, create_message_fn):
  """Validates one entry of the field trial configuration."""
  if not isinstance(study, str):
    return create_message_fn('Expecting keys to be string, got %s', type(study))
  if not isinstance(experiment_configs, list):
    return create_message_fn('Expecting list for study %s', study)

  # Add context to other messages.
  def _CreateStudyMessage(message_format, *args):
    suffix = ' in Study[%s]' % study
    return create_message_fn(message_format + suffix, *args)

  for experiment_config in experiment_configs:
    warnings = _ValidateExperimentConfig(experiment_config, _CreateStudyMessage)
    if warnings:
      return warnings

  return []


def _ValidateExperimentConfig(experiment_config, create_message_fn):
  """Validates one config in a configuration entry."""
  if not isinstance(experiment_config, dict):
    return create_message_fn('Expecting dict for experiment config')
  if 'experiments' not in experiment_config:
    return create_message_fn('Missing valid experiments for experiment config')
  if not isinstance(experiment_config['experiments'], list):
    return create_message_fn('Expecting list for experiments')
  for experiment_group in experiment_config['experiments']:
    warnings = _ValidateExperimentGroup(experiment_group, create_message_fn)
    if warnings:
      return warnings

  if 'platforms' not in experiment_config:
    return create_message_fn('Missing valid platforms for experiment config')
  if not isinstance(experiment_config['platforms'], list):
    return create_message_fn('Expecting list for platforms')
  supported_platforms = [
      'android', 'android_weblayer', 'android_webview', 'chromeos',
      'chromeos_lacros', 'fuchsia', 'ios', 'linux', 'mac', 'windows'
  ]
  experiment_platforms = experiment_config['platforms']
  unsupported_platforms = list(
      set(experiment_platforms).difference(supported_platforms))
  if unsupported_platforms:
    return create_message_fn('Unsupported platforms %s', unsupported_platforms)

  return []
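
# For reference, a minimal experiment config that passes the validation above
# (a hypothetical example, not a real study entry):
#
#   {"platforms": ["android"], "experiments": [{"name": "Enabled"}]}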


def _ValidateExperimentGroup(experiment_group, create_message_fn):
  """Validates one group of one config in a configuration entry."""
  name = experiment_group.get('name', '')
  if not name or not isinstance(name, str):
    return create_message_fn('Missing valid name for experiment')

  # Add context to other messages.
  def _CreateGroupMessage(message_format, *args):
    suffix = ' in Group[%s]' % name
    return create_message_fn(message_format + suffix, *args)

  if 'params' in experiment_group:
    params = experiment_group['params']
    if not isinstance(params, dict):
      return _CreateGroupMessage('Expected dict for params')
    for key, value in params.items():
      if not isinstance(key, str) or not isinstance(value, str):
        return _CreateGroupMessage('Invalid param (%s: %s)', key, value)
  for key in experiment_group.keys():
    if key not in VALID_EXPERIMENT_KEYS:
      return _CreateGroupMessage('Key[%s] is not a valid key', key)

  return []


def _CreateMalformedConfigMessage(message_type, file_path, message_format,
                                  *args):
  """Returns a list containing one |message_type| with the error message.

  Args:
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.
    file_path: The path to the config file.
    message_format: The error message format string.
    *args: The args for message_format.

  Returns:
    A list containing a message_type with a formatted error message and
    'Malformed config file [file]: ' prepended to it.
  """
  error_message_format = 'Malformed config file %s: ' + message_format
  format_args = (file_path,) + args
  return [message_type(error_message_format % format_args)]
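
# For example (a hypothetical invocation), _CreateMalformedConfigMessage(
# output_api.PresubmitError, 'config.json', 'Expecting dict') returns a
# single-element list whose message reads:
#
#   Malformed config file config.json: Expecting dict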


def CheckPretty(contents, file_path, message_type):
  """Validates the pretty printing of fieldtrial configuration.

  Args:
    contents: File contents as a string.
    file_path: String representing the path to the JSON file.
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.

  Returns:
    A list of |message_type| messages. In the case of all tests passing with no
    warnings/errors, this will return [].
  """
  pretty = PrettyPrint(contents)
  if contents != pretty:
    return [
        message_type('Pretty printing error: Run '
                     'python3 testing/variations/PRESUBMIT.py %s' % file_path)
    ]
  return []


def _GetStudyConfigFeatures(study_config):
  """Gets the set of features overridden in a study config."""
  features = set()
  for experiment in study_config.get("experiments", []):
    features.update(experiment.get("enable_features", []))
    features.update(experiment.get("disable_features", []))
  return features


def _GetDuplicatedFeatures(study1, study2):
  """Gets the set of features overridden in two overlapping studies."""
  duplicated_features = set()
  for study_config1 in study1:
    features = _GetStudyConfigFeatures(study_config1)
    platforms = set(study_config1.get("platforms", []))
    for study_config2 in study2:
      # If the study configs do not specify any common platform, they do not
      # overlap, so we can skip them.
      if platforms.isdisjoint(set(study_config2.get("platforms", []))):
        continue

      common_features = features & _GetStudyConfigFeatures(study_config2)
      duplicated_features.update(common_features)

  return duplicated_features
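
# For example, with hypothetical studies that both override "FeatureX" on a
# shared platform:
#
#   study1 = [{"platforms": ["android"],
#              "experiments": [{"name": "A", "enable_features": ["FeatureX"]}]}]
#   study2 = [{"platforms": ["android", "ios"],
#              "experiments": [{"name": "B", "disable_features": ["FeatureX"]}]}]
#
# _GetDuplicatedFeatures(study1, study2) returns {'FeatureX'}; if the platform
# lists were disjoint, it would return an empty set.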


def CheckDuplicatedFeatures(new_json_data, old_json_data, message_type):
  """Validates that features are not specified in multiple studies.

  Note that a feature may be specified in different studies that do not
  overlap, for example if they target different platforms. In such a case,
  this will not give a warning/error. However, it is possible that this check
  incorrectly gives an error, as studies may have complex filters (e.g., if
  they make use of additional filters such as form_factors, is_low_end_device,
  etc.). In those cases, the PRESUBMIT check can be bypassed. Since this only
  checks studies that were changed in this particular commit, bypassing the
  PRESUBMIT check will not block future commits.

  Args:
    new_json_data: Parsed JSON object representing the new fieldtrial config.
    old_json_data: Parsed JSON object representing the old fieldtrial config.
    message_type: Type of message from |output_api| to return in the case of
      errors/warnings.

  Returns:
    A list of |message_type| messages. In the case of all tests passing with no
    warnings/errors, this will return [].
  """
  # Get the list of studies that changed.
  changed_studies = []
  for study_name in new_json_data:
    if (study_name not in old_json_data or
        new_json_data[study_name] != old_json_data[study_name]):
      changed_studies.append(study_name)

  # A map from a feature name to the names of the studies that use it, e.g.
  # duplicated_features_to_studies_map["FeatureA"] = {"StudyA", "StudyB"}.
  # Only features that are defined in multiple studies are added to this map.
  duplicated_features_to_studies_map = dict()

  # Compare the changed studies against all studies defined.
  for changed_study_name in changed_studies:
    for study_name in new_json_data:
      if changed_study_name == study_name:
        continue

      duplicated_features = _GetDuplicatedFeatures(
          new_json_data[changed_study_name], new_json_data[study_name])
      for feature in duplicated_features:
        if feature not in duplicated_features_to_studies_map:
          duplicated_features_to_studies_map[feature] = set()
        duplicated_features_to_studies_map[feature].update(
            [changed_study_name, study_name])

  if not duplicated_features_to_studies_map:
    return []

  # Sort the study names so the error message is deterministic.
  duplicated_features_strings = [
      '%s (in studies %s)' % (feature, ', '.join(sorted(studies)))
      for feature, studies in duplicated_features_to_studies_map.items()
  ]

  return [
      message_type('The following feature(s) were specified in multiple '
                   'studies: %s' % ', '.join(duplicated_features_strings))
  ]
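
# A sketch of the resulting message for the hypothetical example above: if a
# changed "StudyA" and an existing "StudyB" both override "FeatureX" on a
# shared platform, the single returned message reads:
#
#   The following feature(s) were specified in multiple studies:
#   FeatureX (in studies StudyA, StudyB)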


def CheckUndeclaredFeatures(input_api, output_api, json_data, changed_lines):
  """Checks that feature names are all valid declared features.

  There has been more than one instance of developers accidentally mistyping
  a feature name in the fieldtrial_testing_config.json file, which leads to
  the config silently doing nothing.

  This check aims to catch these errors by validating that the feature name
  is defined somewhere in the Chrome source code.

  Args:
    input_api: Presubmit InputApi
    output_api: Presubmit OutputApi
    json_data: The parsed fieldtrial_testing_config.json
    changed_lines: The AffectedFile.ChangedContents() of the json file

  Returns:
    List of validation messages - empty if there are no errors.
  """
  declared_features = set()
  # We were unable to figure out how to do a proper top-level import that does
  # not depend on getting the path from input_api; this pattern is used
  # elsewhere in the code base. Please change to a top-level import if you
  # know how.
  old_sys_path = sys.path[:]
  try:
    sys.path.append(input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'presubmit'))
    # pylint: disable=import-outside-toplevel
    import find_features
    # pylint: enable=import-outside-toplevel
    declared_features = find_features.FindDeclaredFeatures(input_api)
  finally:
    sys.path = old_sys_path

  if not declared_features:
    return [
        output_api.PresubmitError('Presubmit unable to find any declared '
                                  'flags in source. Please check PRESUBMIT.py '
                                  'for errors.')
    ]

  messages = []
  # Join all changed lines into a single string. This will be used to check
  # if feature names are present in the changed lines by substring search.
  changed_contents = ' '.join(x[1].strip() for x in changed_lines)
  for study_name in json_data:
    study = json_data[study_name]
    for config in study:
      features = set(_GetStudyConfigFeatures(config))
      # Determine whether a study has been touched by the current change by
      # checking if any of its features appear in the changed lines of the
      # file. This limits the noise from old configs that are no longer valid.
      probably_affected = False
      for feature in features:
        if feature in changed_contents:
          probably_affected = True
          break

      if probably_affected and not declared_features.issuperset(features):
        missing_features = features - declared_features
        # CrOS has external feature declarations starting with this prefix
        # (checked by build tools in base/BUILD.gn).
        # Warn, but don't break, if they are present in the CL.
        cros_late_boot_features = {
            s for s in missing_features if s.startswith('CrOSLateBoot')
        }
        missing_features = missing_features - cros_late_boot_features
        if cros_late_boot_features:
          msg = ('CrOSLateBoot features added to study %s are not checked by '
                 'presubmit.\nPlease manually check that they exist in the '
                 'code base.') % study_name
          messages.append(
              output_api.PresubmitResult(msg, cros_late_boot_features))

        if missing_features:
          msg = ('Presubmit was unable to verify existence of features in '
                 'study %s.\nThis happens most commonly if the feature is '
                 'defined by code generation.\n'
                 'Please verify that the feature names have been spelled '
                 'correctly before submitting. The affected features are:'
                 ) % study_name
          messages.append(output_api.PresubmitResult(msg, missing_features))

  return messages


def CommonChecks(input_api, output_api):
  affected_files = input_api.AffectedFiles(
      include_deletes=False,
      file_filter=lambda x: x.LocalPath().endswith('.json'))
  for f in affected_files:
    if not f.LocalPath().endswith(FIELDTRIAL_CONFIG_FILE_NAME):
      return [
          output_api.PresubmitError(
              '%s is the only json file expected in this folder. If new jsons '
              'are added, please update the presubmit process with proper '
              'validation.' % FIELDTRIAL_CONFIG_FILE_NAME)
      ]
    contents = input_api.ReadFile(f)
    try:
      json_data = input_api.json.loads(contents)
      result = ValidateData(json_data, f.AbsoluteLocalPath(),
                            output_api.PresubmitError)
      if result:
        return result
      result = CheckPretty(contents, f.LocalPath(), output_api.PresubmitError)
      if result:
        return result
      result = CheckDuplicatedFeatures(
          json_data,
          input_api.json.loads('\n'.join(f.OldContents())),
          output_api.PresubmitError)
      if result:
        return result
      result = CheckUndeclaredFeatures(input_api, output_api, json_data,
                                       f.ChangedContents())
      if result:
        return result
    except ValueError:
      return [
          output_api.PresubmitError('Malformed JSON file: %s' % f.LocalPath())
      ]
  return []


def CheckChangeOnUpload(input_api, output_api):
  return CommonChecks(input_api, output_api)


def CheckChangeOnCommit(input_api, output_api):
  return CommonChecks(input_api, output_api)


def main(argv):
  with io.open(argv[1], encoding='utf-8') as f:
    content = f.read()
  pretty = PrettyPrint(content)
  # Write back as UTF-8 bytes, closing the file via the context manager.
  with io.open(argv[1], 'wb') as f:
    f.write(pretty.encode('utf-8'))


if __name__ == '__main__':
  sys.exit(main(sys.argv))