2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 ''' Runs various chrome tests through valgrind_test.py.'''
10 import multiprocessing
class TestNotFound(Exception):
  """The name given via --test is not in ChromeTests._test_list."""


class MultipleGTestFiltersSpecified(Exception):
  """Both --gtest_filter and a "test:filter" selector were supplied."""


class BuildDirNotFound(Exception):
  """No build directory could be determined; use --build-dir."""


class BuildDirAmbiguous(Exception):
  """More than one candidate build directory was found."""


class ExecutableNotFound(Exception):
  """The requested test executable does not exist in the build dir."""


class BadBinary(Exception):
  """The executable is ASan-instrumented and unsupported by this tool."""
# Tools for which the extra ".gtest.txt" exclude files are applied in
# _AppendGtestFilter (they contain Valgrind-/Dr.Memory-specific filters).
SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
# Default number of layout tests per chunk; used by TestLayoutChunk to pick
# a minimum-runtime sanity check and as the default for --num_tests.
LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
def __init__(self, options, args, test):
  # Set up one test run: parse the "test[:gtest_filter]" selector, validate
  # it against the known test list, and locate the source and build dirs.
  #
  # NOTE(review): several lines of this method appear elided in this view
  # (e.g. the branch choosing between the two _gtest_filter assignments and
  # the list literal collecting candidate build dirs). Comments below only
  # describe what is visible.

  # A "test:filter" selector carries its own per-test gtest filter.
  (self._test, self._gtest_filter) = test.split(':', 1)
  # Fallback: take the filter from the --gtest_filter option.
  # NOTE(review): the if/else wrapping these two assignments appears elided.
  self._gtest_filter = options.gtest_filter

  if self._test not in self._test_list:
    raise TestNotFound("Unknown test: %s" % test)

  # A per-test filter and a global --gtest_filter are mutually exclusive.
  if options.gtest_filter and options.gtest_filter != self._gtest_filter:
    raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                        "and --test %s" % test)

  self._options = options

  script_dir = path_utils.ScriptDir()
  # Compute the top of the tree (the "source dir") from the script dir (where
  # this script lives). We assume that the script dir is in tools/valgrind/
  # relative to the top of the tree.
  self._source_dir = os.path.dirname(os.path.dirname(script_dir))
  # since this path is used for string matching, make sure it's always
  # an absolute Unix-style path
  self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
  valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
  self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

  if not self._options.build_dir:
    # Probe the standard output locations for a Debug build.
    # NOTE(review): the "dirs = [" opening of this list literal appears
    # elided in this view.
    os.path.join(self._source_dir, "xcodebuild", "Debug"),
    os.path.join(self._source_dir, "out", "Debug"),
    os.path.join(self._source_dir, "build", "Debug"),
    build_dir = [d for d in dirs if os.path.isdir(d)]
    if len(build_dir) > 1:
      raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                              "%s\nPlease specify just one "
                              "using --build-dir" % ", ".join(build_dir))
    # NOTE(review): the elif/else branches separating the next two
    # assignments (exactly one dir found vs. none) appear elided.
    self._options.build_dir = build_dir[0]
    self._options.build_dir = None

  if self._options.build_dir:
    build_dir = os.path.abspath(self._options.build_dir)
    self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
  """Raise BuildDirNotFound unless a build directory has been determined."""
  if self._options.build_dir:
    return
  raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                         "specify it manually using --build-dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
  '''Generates the default command array that most tests will use.'''
  # NOTE(review): several lines of this method appear elided in this view
  # (branch bodies, the try/except around the nm call, the final return);
  # comments only describe what is visible.
  if exe and common.IsWindows():
    # NOTE(review): the body of this Windows-specific branch is elided
    # (presumably appending ".exe" to exe -- confirm against the full file).
  cmd = list(self._command_preamble)

  # Find all suppressions matching the following pattern:
  # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
  # and list them with --suppressions= prefix.
  script_dir = path_utils.ScriptDir()
  tool_name = tool.ToolName();
  suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
  if os.path.exists(suppression_file):
    cmd.append("--suppressions=%s" % suppression_file)
  # Platform-specific suppression
  for platform in common.PlatformNames():
    platform_suppression_file = \
        os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
    if os.path.exists(platform_suppression_file):
      cmd.append("--suppressions=%s" % platform_suppression_file)

  if self._options.valgrind_tool_flags:
    cmd += self._options.valgrind_tool_flags.split(" ")
  if self._options.keep_logs:
    cmd += ["--keep_logs"]
  if valgrind_test_args != None:
    for arg in valgrind_test_args:
      # NOTE(review): the loop body appears elided (presumably
      # cmd.append(arg)).
  # NOTE(review): an "if exe:" guard around the following block appears
  # elided in this view.
  self._EnsureBuildDirFound()
  exe_path = os.path.join(self._options.build_dir, exe)
  if not os.path.exists(exe_path):
    raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

  # Make sure we don't try to test ASan-built binaries
  # with other dynamic instrumentation-based tools.
  # TODO(timurrrr): also check TSan and MSan?
  # `nm` might not be available, so use try-except.
  # NOTE(review): the "try:" line introducing the nm call appears elided.
  nm_output = subprocess.check_output(["nm", exe_path])
  if nm_output.find("__asan_init") != -1:
    raise BadBinary("You're trying to run an executable instrumented "
                    "with AddressSanitizer under %s. Please provide "
                    "an uninstrumented executable." % tool_name)
  # NOTE(review): the except handler for the nm call and the
  # cmd.append(exe_path) line appear elided in this view.

  # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
  # so we can find the slowpokes.
  cmd.append("--gtest_print_time")
  # Built-in test launcher for gtest-based executables runs tests using
  # multiple processes by default. Force the single-process mode back.
  cmd.append("--single-process-tests")
  if self._options.gtest_repeat:
    cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
  if self._options.gtest_shuffle:
    cmd.append("--gtest_shuffle")
  if self._options.gtest_break_on_failure:
    cmd.append("--gtest_break_on_failure")
  if self._options.brave_new_test_launcher:
    cmd.append("--brave-new-test-launcher")
  if self._options.test_launcher_bot_mode:
    cmd.append("--test-launcher-bot-mode")
  # NOTE(review): "return cmd" appears elided in this view.
# NOTE(review): the enclosing "def Run(self):" header appears elided here;
# this is the dispatcher invoked once per --test argument.
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
# Look up the test method by name in the class-level table and invoke it
# with this instance.
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
  '''Append an appropriate --gtest_filter flag to the googletest binary
  If the user passed his own filter mentioning only one test, just use it.
  Otherwise, filter out tests listed in the appropriate gtest_exclude files.
  '''
  # NOTE(review): several lines of this method appear elided in this view
  # (e.g. the initialization of "filters" and some branch bodies); comments
  # below only describe what is visible.

  # A user filter that names exactly one test (no ':' list separator and no
  # '?'/'*' wildcards) is passed through untouched.
  if (self._gtest_filter and
      ":" not in self._gtest_filter and
      "?" not in self._gtest_filter and
      "*" not in self._gtest_filter):
    cmd.append("--gtest_filter=%s" % self._gtest_filter)
    # NOTE(review): an early return likely follows here (elided).

  gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

  # Candidate exclude files: NAME.gtest-TOOL.txt always; NAME.gtest.txt for
  # slow tools only; plus per-platform variants of both.
  gtest_filter_files = [
      os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
  # Use ".gtest.txt" files only for slow tools, as they now contain
  # Valgrind- and Dr.Memory-specific filters.
  # TODO(glider): rename the files to ".gtest_slow.txt"
  if tool.ToolName() in ChromeTests.SLOW_TOOLS:
    gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
  for platform_suffix in common.PlatformNames():
    gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
  logging.info("Reading gtest exclude filter files:")
  for filename in gtest_filter_files:
    # strip the leading absolute path (may be very long on the bot)
    # and the following / or \.
    readable_filename = filename.replace("\\", "/") # '\' on Windows
    readable_filename = readable_filename.replace(self._source_dir, "")[1:]
    if not os.path.exists(filename):
      logging.info(" \"%s\" - not found" % readable_filename)
      # NOTE(review): a "continue" statement appears elided here.
    logging.info(" \"%s\" - OK" % readable_filename)
    f = open(filename, 'r')
    for line in f.readlines():
      # Skip comment ("#", "//") and blank lines.
      if line.startswith("#") or line.startswith("//") or line.isspace():
        # NOTE(review): the "continue" (and any line-stripping statements)
        # appear elided here.
      test_prefixes = ["FLAKY", "FAILS"]
      for p in test_prefixes:
        # Strip prefixes from the test names.
        line = line.replace(".%s_" % p, ".")
      # Exclude the original test name.
      # NOTE(review): "filters.append(line)" appears elided here.
      if line[-2:] != ".*":
        # List all possible prefixes if line doesn't end with ".*".
        for p in test_prefixes:
          filters.append(line.replace(".", ".%s_" % p))
  # Get rid of duplicates.
  filters = set(filters)
  gtest_filter = self._gtest_filter
  # NOTE(review): the logic merging the exclusions into gtest_filter (adding
  # the "-" negative-filter separator when absent) appears partially elided.
  if gtest_filter.find("-") < 0:
    # NOTE(review): branch bodies elided in this view.
  gtest_filter += ":".join(filters)

  cmd.append("--gtest_filter=%s" % gtest_filter)
# NOTE(review): the header of this block (a ShowTests-style helper that
# prints the available test names, including any decorator and the
# "test_to_names = {}" initialization) appears elided in this view.
# Group every registered alias by the test function it maps to.
for name, test_function in ChromeTests._test_list.iteritems():
  test_to_names.setdefault(test_function, []).append(name)

# NOTE(review): "name_to_aliases = {}" initialization appears elided.
for names in test_to_names.itervalues():
  # The shortest name becomes the canonical one; the rest are aliases.
  names.sort(key=lambda name: len(name))
  name_to_aliases[names[0]] = names[1:]

print "Available tests:"
print "----------------"
for name, aliases in sorted(name_to_aliases.iteritems()):
  # NOTE(review): the "if aliases:" / "else:" lines selecting between the
  # two print statements appear elided here.
  print " {} (aka {})".format(name, ', '.join(aliases))
  print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
  # Ensure the build dir is known (or raise) and expose it on
  # LD_LIBRARY_PATH so dynamically linked test binaries find their
  # libraries.
  if requires_build_dir:
    self._EnsureBuildDirFound()
  elif not self._options.build_dir:
    # NOTE(review): the body of this branch (presumably an early return)
    # appears elided in this view.

  # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
  if (os.getenv("LD_LIBRARY_PATH")):
    os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                            self._options.build_dir))
  # NOTE(review): the "else:" introducing the next line appears elided.
  os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
  # Build the valgrind_test.py command line for a single gtest binary and
  # run it under the selected memory tool.
  # NOTE(review): a few lines appear elided in this view (presumably
  # extending cmd with cmd_args before running).
  tool = valgrind_test.CreateTool(self._options.valgrind_tool)
  cmd = self._DefaultCommand(tool, name, valgrind_test_args)
  self._AppendGtestFilter(tool, name, cmd)
  # Keep per-test timeouts tiny; the wrapping tool owns the real timeout.
  cmd.extend(['--test-tiny-timeout=1000'])

  self.SetupLdPath(True)
  return tool.Run(cmd, module)
def RunCmdLine(self):
  """Run the raw command line given after the options under the tool."""
  memory_tool = valgrind_test.CreateTool(self._options.valgrind_tool)
  command = self._DefaultCommand(memory_tool, None, self._args)
  self.SetupLdPath(False)
  return memory_tool.Run(command, None)
# -- Simple gtest wrappers -------------------------------------------------
# Each TestXxx method below delegates to SimpleTest(module, binary_name).
# NOTE(review): a number of "def TestXxx(self):" header lines appear elided
# in this view; each orphaned "return self.SimpleTest(...)" below is tagged
# with the method name implied by the _test_list mapping later in the file.
def TestAccessibility(self):
  return self.SimpleTest("accessibility", "accessibility_unittests")

def TestAddressInput(self):
  return self.SimpleTest("addressinput", "libaddressinput_unittests")

# NOTE(review): "def TestAngle(self):" header appears elided here.
  return self.SimpleTest("angle", "angle_unittests")

def TestAppList(self):
  return self.SimpleTest("app_list", "app_list_unittests")

# NOTE(review): "def TestAsh(self):" header appears elided here.
  return self.SimpleTest("ash", "ash_unittests")

def TestAshShell(self):
  # NOTE(review): module name "ash_shelf" looks like a typo for "ash_shell"
  # -- verify against the full file before changing.
  return self.SimpleTest("ash_shelf", "ash_shell_unittests")

# NOTE(review): "def TestAura(self):" header appears elided here.
  return self.SimpleTest("aura", "aura_unittests")

# NOTE(review): "def TestBase(self):" header appears elided here.
  return self.SimpleTest("base", "base_unittests")

def TestBlinkHeap(self):
  return self.SimpleTest("blink_heap", "blink_heap_unittests")

def TestBlinkPlatform(self):
  return self.SimpleTest("blink_platform", "blink_platform_unittests")

def TestCacheInvalidation(self):
  return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

# NOTE(review): "def TestCast(self):" header appears elided here.
  return self.SimpleTest("chrome", "cast_unittests")

# NOTE(review): "def TestCC(self):" header appears elided here.
  return self.SimpleTest("cc", "cc_unittests")

def TestChromeApp(self):
  return self.SimpleTest("chrome_app", "chrome_app_unittests")

def TestChromeElf(self):
  return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

def TestChromeDriver(self):
  return self.SimpleTest("chromedriver", "chromedriver_unittests")

def TestChromeOS(self):
  return self.SimpleTest("chromeos", "chromeos_unittests")

def TestCloudPrint(self):
  return self.SimpleTest("cloud_print", "cloud_print_unittests")

def TestComponents(self):
  return self.SimpleTest("components", "components_unittests")

def TestCompositor(self):
  return self.SimpleTest("compositor", "compositor_unittests")

def TestContent(self):
  return self.SimpleTest("content", "content_unittests")

def TestCourgette(self):
  return self.SimpleTest("courgette", "courgette_unittests")

def TestCrypto(self):
  return self.SimpleTest("crypto", "crypto_unittests")

def TestDevice(self):
  return self.SimpleTest("device", "device_unittests")

def TestDisplay(self):
  return self.SimpleTest("display", "display_unittests")

def TestEvents(self):
  return self.SimpleTest("events", "events_unittests")

def TestExtensions(self):
  return self.SimpleTest("extensions", "extensions_unittests")

def TestFFmpeg(self):
  return self.SimpleTest("chrome", "ffmpeg_unittests")

def TestFFmpegRegressions(self):
  return self.SimpleTest("chrome", "ffmpeg_regression_tests")

# NOTE(review): "def TestGCM(self):" header appears elided here.
  return self.SimpleTest("gcm", "gcm_unit_tests")

# NOTE(review): "def TestGfx(self):" header appears elided here.
  return self.SimpleTest("gfx", "gfx_unittests")

# NOTE(review): "def TestGin(self):" header appears elided here.
  return self.SimpleTest("gin", "gin_unittests")

def TestGoogleApis(self):
  return self.SimpleTest("google_apis", "google_apis_unittests")

# NOTE(review): "def TestGPU(self):" header appears elided here.
  return self.SimpleTest("gpu", "gpu_unittests")

# NOTE(review): "def TestIpc(self):" header appears elided here.
  return self.SimpleTest("ipc", "ipc_tests",
                         valgrind_test_args=["--trace_children"])

def TestInstallerUtil(self):
  return self.SimpleTest("installer_util", "installer_util_unittests")

def TestJingle(self):
  return self.SimpleTest("chrome", "jingle_unittests")

def TestKeyboard(self):
  return self.SimpleTest("keyboard", "keyboard_unittests")

# NOTE(review): "def TestMedia(self):" header appears elided here.
  return self.SimpleTest("chrome", "media_unittests")

def TestMessageCenter(self):
  return self.SimpleTest("message_center", "message_center_unittests")

def TestMojoCommon(self):
  return self.SimpleTest("mojo_common", "mojo_common_unittests")

def TestMojoPublicBindings(self):
  return self.SimpleTest("mojo_public_bindings",
                         "mojo_public_bindings_unittests")

def TestMojoPublicEnv(self):
  return self.SimpleTest("mojo_public_env",
                         "mojo_public_environment_unittests")

def TestMojoPublicSystem(self):
  return self.SimpleTest("mojo_public_system",
                         "mojo_public_system_unittests")

def TestMojoPublicSysPerf(self):
  return self.SimpleTest("mojo_public_sysperf",
                         "mojo_public_system_perftests")

def TestMojoPublicUtility(self):
  return self.SimpleTest("mojo_public_utility",
                         "mojo_public_utility_unittests")

def TestMojoSystem(self):
  return self.SimpleTest("mojo_system", "mojo_system_unittests")

# NOTE(review): "def TestNet(self):" header appears elided here.
  return self.SimpleTest("net", "net_unittests")

def TestNetPerf(self):
  return self.SimpleTest("net", "net_perftests")

def TestPhoneNumber(self):
  return self.SimpleTest("phonenumber", "libphonenumber_unittests")

# NOTE(review): "def TestPPAPI(self):" header appears elided here.
  return self.SimpleTest("chrome", "ppapi_unittests")

def TestPrinting(self):
  return self.SimpleTest("chrome", "printing_unittests")

def TestRemoting(self):
  return self.SimpleTest("chrome", "remoting_unittests",
                         # NOTE(review): the "valgrind_test_args=[" line
                         # opening this list appears elided in this view.
                         "--ui-test-action-timeout=60000",
                         "--ui-test-action-max-timeout=150000"])

# NOTE(review): "def TestSql(self):" header appears elided here.
  return self.SimpleTest("chrome", "sql_unittests")

# NOTE(review): "def TestSync(self):" header appears elided here.
  return self.SimpleTest("chrome", "sync_unit_tests")

def TestLinuxSandbox(self):
  return self.SimpleTest("sandbox", "sandbox_linux_unittests")

# NOTE(review): "def TestUnit(self):" header appears elided here.
  # http://crbug.com/51716
  # Disabling all unit tests
  # Problems reappeared after r119922
  if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
    logging.warning("unit_tests are disabled for memcheck on MacOS.")
    # NOTE(review): an early return appears elided here.
  return self.SimpleTest("chrome", "unit_tests")

def TestUIBaseUnit(self):
  return self.SimpleTest("chrome", "ui_base_unittests")

def TestUIUnit(self):
  return self.SimpleTest("chrome", "ui_unittests")

# NOTE(review): "def TestURL(self):" header appears elided here.
  return self.SimpleTest("chrome", "url_unittests")

# NOTE(review): "def TestViews(self):" header appears elided here.
  return self.SimpleTest("views", "views_unittests")
# Shared timeout/indirection argument lists for the UI and browser test
# wrappers below (TestBrowser, TestContentBrowser, TestInteractiveUI, ...).
# Valgrind timeouts are in seconds.
UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
# UI test timeouts are in milliseconds.
UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                "--ui-test-action-max-timeout=150000",
                # NOTE(review): the remaining elements and the closing
                # bracket of this list appear elided in this view.

# TODO(thestig) fine-tune these values.
# Valgrind timeouts are in seconds.
BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
# Browser test timeouts are in milliseconds.
BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                     "--ui-test-action-max-timeout=800000",
                     # NOTE(review): the remaining elements and the closing
                     # bracket of this list appear elided in this view.
def TestBrowser(self):
  """Run browser_tests with the shared browser-test timeout arguments."""
  return self.SimpleTest(
      "chrome", "browser_tests",
      valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
      cmd_args=self.BROWSER_TEST_ARGS)

def TestContentBrowser(self):
  """Run content_browsertests with the shared browser-test arguments."""
  return self.SimpleTest(
      "content", "content_browsertests",
      valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
      cmd_args=self.BROWSER_TEST_ARGS)

def TestInteractiveUI(self):
  """Run interactive_ui_tests with the shared UI-test arguments."""
  return self.SimpleTest(
      "chrome", "interactive_ui_tests",
      valgrind_test_args=self.UI_VALGRIND_ARGS,
      cmd_args=self.UI_TEST_ARGS)

def TestSafeBrowsing(self):
  """Run safe_browsing_tests with a raised UI action timeout."""
  return self.SimpleTest(
      "chrome", "safe_browsing_tests",
      valgrind_test_args=self.UI_VALGRIND_ARGS,
      cmd_args=["--ui-test-action-max-timeout=450000"])

def TestSyncIntegration(self):
  """Run sync_integration_tests with a raised UI action timeout."""
  return self.SimpleTest(
      "chrome", "sync_integration_tests",
      valgrind_test_args=self.UI_VALGRIND_ARGS,
      cmd_args=["--ui-test-action-max-timeout=450000"])
def TestLayoutChunk(self, chunk_num, chunk_size):
  # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
  # list of tests. Wrap around to beginning of list at end.
  # If chunk_size is zero, run all tests in the list once.
  # If a text file is given as argument, it is used as the list of tests.
  #
  # NOTE(review): several lines of this method appear elided in this view
  # (old-output cleanup, some script_cmd elements, branch keywords, and the
  # final return); comments only describe what is visible.
  assert((chunk_size == 0) != (len(self._args) == 0))
  # Build the ginormous commandline in 'cmd'.
  # It's going to be roughly
  #  python valgrind_test.py ...
  # but we'll use the --indirect flag to valgrind_test.py
  # to avoid valgrinding python.
  # Start by building the valgrind_test.py commandline.
  tool = valgrind_test.CreateTool(self._options.valgrind_tool)
  cmd = self._DefaultCommand(tool)
  cmd.append("--trace_children")
  cmd.append("--indirect_webkit_layout")
  cmd.append("--ignore_exit_code")
  # Now build script_cmd, the run-webkits-tests commandline.
  # Store each chunk in its own directory so that we can find the data later
  chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
  out_dir = os.path.join(path_utils.ScriptDir(), "latest")
  out_dir = os.path.join(out_dir, chunk_dir)
  if os.path.exists(out_dir):
    old_files = glob.glob(os.path.join(out_dir, "*.txt"))
    # NOTE(review): the removal of old_files (and the else branch creating
    # out_dir) appears elided in this view.
  script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                        "Scripts", "run-webkit-tests")
  # http://crbug.com/260627: After the switch to content_shell from DRT, each
  # test now brings up 3 processes.  Under Valgrind, they become memory bound
  # and can eventually OOM if we don't reduce the total count.
  # It'd be nice if content_shell automatically throttled the startup of new
  # tests if we're low on memory.
  jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
  script_cmd = ["python", script, "-v",
                # run a separate DumpRenderTree for each test
                # NOTE(review): some list elements appear elided here.
                "--child-processes=%d" % jobs,
                "--time-out-ms=800000",
                "--no-retry-failures",  # retrying takes too much time
                # http://crbug.com/176908: Don't launch a browser when done.
                # NOTE(review): an element appears elided here.
                "--nocheck-sys-deps"]
  # Pass build mode to run-webkit-tests.  We aren't passed it directly,
  # so parse it out of build_dir.  run-webkit-tests can only handle
  # the two values "Release" and "Debug".
  # TODO(Hercules): unify how all our scripts pass around build mode
  # (--mode / --target / --build-dir / --debug)
  if self._options.build_dir:
    build_root, mode = os.path.split(self._options.build_dir)
    script_cmd.extend(["--build-directory", build_root, "--target", mode])
  # NOTE(review): the "if chunk_size > 0:" guard around the next line
  # appears elided in this view.
  script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
  # NOTE(review): an "if len(self._args):" guard appears elided here.
  # if the arg is a txt file, then treat it as a list of tests
  if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
    script_cmd.append("--test-list=%s" % self._args[0])
  # NOTE(review): the "else:" introducing the next line appears elided.
  script_cmd.extend(self._args)
  self._AppendGtestFilter(tool, "layout", script_cmd)
  # Now run script_cmd with the wrapper in cmd
  cmd.extend(script_cmd)

  # Layout tests often times fail quickly, but the buildbot remains green.
  # Detect this situation when running with the default chunk size.
  if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
    min_runtime_in_seconds=120
  # NOTE(review): the "else:" introducing the next line appears elided.
  min_runtime_in_seconds=0
  ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
  # NOTE(review): the return of ret appears elided in this view.
def TestLayout(self):
  # A "chunk file" is maintained in the local directory so that each test
  # runs a slice of the layout tests of size chunk_size that increments with
  # each run.  Since tests can be added and removed from the layout tests at
  # any time, this is not going to give exact coverage, but it will allow us
  # to continuously run small slices of the layout tests under valgrind rather
  # than having to run all of them in one shot.
  #
  # NOTE(review): several lines of this method appear elided in this view
  # (the try: blocks around the chunk-file reads/writes, the chunk_num
  # increment and f.close(), and some logging continuations).
  chunk_size = self._options.num_tests
  if chunk_size == 0 or len(self._args):
    return self.TestLayoutChunk(0, 0)
  chunk_file = os.path.join("valgrind_layout_chunk.txt")
  logging.info("Reading state from " + chunk_file)
  # NOTE(review): the try:/open/read of the chunk file appears elided.
  chunk_num = int(chunk_str)
  # This should be enough so that we have a couple of complete runs
  # of test data stored in the archive (although note that when we loop
  # that we almost guaranteed won't be at the end of the test list)
  if chunk_num > 10000:
    # NOTE(review): the wrap-around reset of chunk_num appears elided.
  except IOError, (errno, strerror):
    logging.error("error reading from file %s (%d, %s)" % (chunk_file,
    # NOTE(review): the continuation of this logging call appears elided.
  # Save the new chunk size before running the tests. Otherwise if a
  # particular chunk hangs the bot, the chunk number will never get
  # incremented and the bot will be wedged.
  logging.info("Saving state to " + chunk_file)
  # NOTE(review): a "try:" and the chunk_num increment appear elided here.
  f = open(chunk_file, "w")
  f.write("%d" % chunk_num)
  # NOTE(review): "f.close()" appears elided here.
  except IOError, (errno, strerror):
    logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
    # NOTE(review): the continuation of this logging call appears elided.
  # Since we're running small chunks of the layout tests, it's important to
  # mark the ones that have errors in them.  These won't be visible in the
  # summary list for long, but will be useful for someone reviewing this bot.
  return self.TestLayoutChunk(chunk_num, chunk_size)
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
# Maps every accepted --test name (short alias or binary name) to the bound
# test method invoked by Run().
# NOTE(review): the "_test_list = {" opening and the closing "}" of this
# dict literal appear elided in this view.
  "cmdline" : RunCmdLine,
  "addressinput": TestAddressInput,
  "libaddressinput_unittests": TestAddressInput,
  "accessibility": TestAccessibility,
  "angle": TestAngle,          "angle_unittests": TestAngle,
  "app_list": TestAppList,     "app_list_unittests": TestAppList,
  "ash": TestAsh,              "ash_unittests": TestAsh,
  "ash_shell": TestAshShell,   "ash_shell_unittests": TestAshShell,
  "aura": TestAura,            "aura_unittests": TestAura,
  "base": TestBase,            "base_unittests": TestBase,
  "blink_heap": TestBlinkHeap,
  "blink_platform": TestBlinkPlatform,
  "browser": TestBrowser,      "browser_tests": TestBrowser,
  "cacheinvalidation": TestCacheInvalidation,
  "cacheinvalidation_unittests": TestCacheInvalidation,
  "cast": TestCast,            "cast_unittests": TestCast,
  "cc": TestCC,                "cc_unittests": TestCC,
  "chrome_app": TestChromeApp,
  "chrome_elf": TestChromeElf,
  "chromedriver": TestChromeDriver,
  "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
  "cloud_print": TestCloudPrint,
  "cloud_print_unittests": TestCloudPrint,
  "components": TestComponents,"components_unittests": TestComponents,
  "compositor": TestCompositor,"compositor_unittests": TestCompositor,
  "content": TestContent,      "content_unittests": TestContent,
  "content_browsertests": TestContentBrowser,
  "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
  "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
  "device": TestDevice,        "device_unittests": TestDevice,
  "display": TestDisplay,      "display_unittests": TestDisplay,
  "events": TestEvents,        "events_unittests": TestEvents,
  "extensions": TestExtensions, "extensions_unittests": TestExtensions,
  "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
  "ffmpeg_regression_tests": TestFFmpegRegressions,
  "gcm": TestGCM,              "gcm_unit_tests": TestGCM,
  "gin": TestGin,              "gin_unittests": TestGin,
  "gfx": TestGfx,              "gfx_unittests": TestGfx,
  "google_apis": TestGoogleApis,
  "gpu": TestGPU,              "gpu_unittests": TestGPU,
  "ipc": TestIpc,              "ipc_tests": TestIpc,
  "installer_util": TestInstallerUtil,
  "interactive_ui": TestInteractiveUI,
  "jingle": TestJingle,        "jingle_unittests": TestJingle,
  "keyboard": TestKeyboard,    "keyboard_unittests": TestKeyboard,
  "layout": TestLayout,        "layout_tests": TestLayout,
  "media": TestMedia,          "media_unittests": TestMedia,
  "message_center": TestMessageCenter,
  "message_center_unittests" : TestMessageCenter,
  "mojo_common": TestMojoCommon,
  "mojo_system": TestMojoSystem,
  "mojo_public_system": TestMojoPublicSystem,
  "mojo_public_utility": TestMojoPublicUtility,
  "mojo_public_bindings": TestMojoPublicBindings,
  "mojo_public_env": TestMojoPublicEnv,
  "mojo_public_sysperf": TestMojoPublicSysPerf,
  "net": TestNet,              "net_unittests": TestNet,
  "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
  "phonenumber": TestPhoneNumber,
  "libphonenumber_unittests": TestPhoneNumber,
  "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
  "printing": TestPrinting,    "printing_unittests": TestPrinting,
  "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
  "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
  "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
  "sql": TestSql,              "sql_unittests": TestSql,
  "sync": TestSync,            "sync_unit_tests": TestSync,
  "sync_integration_tests": TestSyncIntegration,
  "sync_integration": TestSyncIntegration,
  "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
  "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
  "unit": TestUnit,            "unit_tests": TestUnit,
  "url": TestURL,              "url_unittests": TestURL,
  "views": TestViews,          "views_unittests": TestViews,
  "webkit": TestLayout,
# NOTE(review): the "def _main():" header for this command-line driver
# appears elided in this view, as are several option-declaration
# continuations and the test-running loop body/return at the end.
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                               # NOTE(review): usage-string continuation and
                               # closing paren appear elided here.
parser.add_option("--help-tests", dest="help_tests", action="store_true",
                  default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
                  help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
                  help="which test to run, supports test:gtest_filter format "
                  # NOTE(review): help-string continuation appears elided.
parser.add_option("--baseline", action="store_true", default=False,
                  help="generate baseline data instead of validating")
parser.add_option("--gtest_filter",
                  help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
                  help="Randomize tests' orders on every iteration.")
parser.add_option("--gtest_break_on_failure", action="store_true",
                  # NOTE(review): the default= line appears elided here.
                  help="Drop in to debugger on assertion failure. Also "
                       "useful for forcing tests to exit with a stack dump "
                       "on the first assertion failure when running with "
                  # NOTE(review): help-string continuation appears elided.
parser.add_option("-v", "--verbose", action="store_true", default=False,
                  help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                  help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                  help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
                  help="store memory tool logs in the <tool>.logs directory "
                       "instead of /tmp.\nThis can be useful for tool "
                       "developers/maintainers.\nPlease note that the <tool>"
                       ".logs directory will be clobbered on tool startup.")
parser.add_option("-n", "--num_tests", type="int",
                  default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                  help="for layout tests: # of subtests per run.  0 for all.")
# TODO(thestig) Remove this if we can.
parser.add_option("--gtest_color", dest="gtest_color", default="no",
                  help="dummy compatibility flag for sharding_supervisor.")
parser.add_option("--brave-new-test-launcher", action="store_true",
                  help="run the tests with --brave-new-test-launcher")
parser.add_option("--test-launcher-bot-mode", action="store_true",
                  help="run the tests with --test-launcher-bot-mode")

options, args = parser.parse_args()

# Bake target into build_dir.
if options.target and options.build_dir:
  # Guard against paths that already end with the target (e.g. out/Debug).
  assert (options.target !=
          os.path.basename(os.path.dirname(options.build_dir)))
  options.build_dir = os.path.join(os.path.abspath(options.build_dir),
  # NOTE(review): the continuation (options.target) and the "if
  # options.verbose:" guard appear elided here.
  logging_utils.config_root(logging.DEBUG)
# NOTE(review): the "else:" introducing the next line appears elided.
  logging_utils.config_root()

if options.help_tests:
  ChromeTests.ShowTests()
  # NOTE(review): a return and the "if not options.test:" guard appear
  # elided here.
  parser.error("--test not specified")

if len(options.test) != 1 and options.gtest_filter:
  parser.error("--gtest_filter and multiple tests don't make sense together")

# Run each requested test in turn.
for t in options.test:
  tests = ChromeTests(options, args, t)
  # NOTE(review): running the test and propagating its return code appear
  # elided here.

if __name__ == "__main__":
  # NOTE(review): the body of this guard (calling the main driver and
  # sys.exit-ing with its result) appears elided in this view.