Upstream version 10.39.225.0
diff --git a/src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index d0a52bc..aafc43f 100644
@@ -40,7 +40,7 @@ from webkitpy.layout_tests.models import test_run_results
 from webkitpy.layout_tests.port import configuration_options, platform_options
 from webkitpy.layout_tests.views import buildbot_results
 from webkitpy.layout_tests.views import printing
-from webkitpy.layout_tests.generate_results_dashboard import GenerateDashBoard
+from webkitpy.layout_tests.generate_results_dashboard import DashBoardGenerator
 
 _log = logging.getLogger(__name__)
 
@@ -76,9 +76,7 @@ def main(argv, stdout, stderr):
             not run_details.initial_results.keyboard_interrupted):
             bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
             bot_printer.print_results(run_details)
-
-        if options.enable_versioned_results:
-            gen_dash_board = GenerateDashBoard(port)
+            gen_dash_board = DashBoardGenerator(port)
             gen_dash_board.generate()
 
         return run_details.exit_code
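
(For readability, here is how this part of main() reads once the hunk above is applied, reconstructed from the + and context lines; the earlier clauses of the if fall outside the hunk window and are elided. The net effect: the dashboard is now generated whenever buildbot results are printed, rather than only under the removed --enable-versioned-results flag, and via the renamed DashBoardGenerator class.)

    # Sketch of the post-patch flow (clauses above the hunk window elided):
    if (...
            and not run_details.initial_results.keyboard_interrupted):
        bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
        bot_printer.print_results(run_details)
        gen_dash_board = DashBoardGenerator(port)
        gen_dash_board.generate()

    return run_details.exit_code
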
@@ -115,25 +113,44 @@ def parse_args(args):
     ]))
 
     option_group_definitions.append(("Results Options", [
-        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
-            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
-        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
-            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
-        optparse.make_option("--results-directory", help="Location of test results"),
-        optparse.make_option("--build-directory",
-            help="Path to the directory under which build files are kept (should not include configuration)"),
         optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
             help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
+        optparse.make_option("--additional-drt-flag", action="append",
+            default=[], help="Additional command line flag to pass to the driver "
+                 "Specify multiple times to add multiple flags."),
+        optparse.make_option("--additional-expectations", action="append", default=[],
+            help="Path to a test_expectations file that will override previous expectations. "
+                 "Specify multiple times for multiple sets of overrides."),
+        optparse.make_option("--additional-platform-directory", action="append",
+            default=[], help="Additional directory where to look for test "
+                 "baselines (will take precendence over platform baselines). "
+                 "Specify multiple times to add multiple search path entries."),
+        optparse.make_option("--build-directory",
+            help="Path to the directory under which build files are kept (should not include configuration)"),
+        optparse.make_option("--clobber-old-results", action="store_true",
+            default=False, help="Clobbers test results from previous runs."),
+        optparse.make_option("--compare-port", action="store", default=None,
+            help="Use the specified port's baselines first"),
+        optparse.make_option("--driver-name", type="string",
+            help="Alternative driver binary to use"),
+        optparse.make_option("--full-results-html", action="store_true",
+            default=False,
+            help="Show all failures in results.html, rather than only regressions"),
         optparse.make_option("--new-baseline", action="store_true",
             default=False, help="Save generated results as new baselines "
                  "into the *most-specific-platform* directory, overwriting whatever's "
                  "already there. Equivalent to --reset-results --add-platform-exceptions"),
-        optparse.make_option("--reset-results", action="store_true",
-            default=False, help="Reset expectations to the "
-                 "generated results in their existing location."),
         optparse.make_option("--no-new-test-results", action="store_false",
             dest="new_test_results", default=True,
             help="Don't create new baselines when no expected results exist"),
+        optparse.make_option("--no-show-results", action="store_false",
+            default=True, dest="show_results",
+            help="Don't launch a browser with results after the tests "
+                 "are done"),
+        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
+            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
+        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
+            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
 
         #FIXME: we should support a comma separated list with --pixel-test-directory as well.
         optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
@@ -144,35 +161,14 @@ def parse_args(args):
                  "directories enumerated with the option. Some ports may ignore this "
                  "option while others can have a default value that can be overridden here."),
 
+        optparse.make_option("--reset-results", action="store_true",
+            default=False, help="Reset expectations to the "
+                 "generated results in their existing location."),
+        optparse.make_option("--results-directory", help="Location of test results"),
         optparse.make_option("--skip-failing-tests", action="store_true",
             default=False, help="Skip tests that are expected to fail. "
                  "Note: When using this option, you might miss new crashes "
                  "in these tests."),
-        optparse.make_option("--additional-drt-flag", action="append",
-            default=[], help="Additional command line flag to pass to the driver "
-                 "Specify multiple times to add multiple flags."),
-        optparse.make_option("--driver-name", type="string",
-            help="Alternative driver binary to use"),
-        optparse.make_option("--additional-platform-directory", action="append",
-            default=[], help="Additional directory where to look for test "
-                 "baselines (will take precendence over platform baselines). "
-                 "Specify multiple times to add multiple search path entries."),
-        optparse.make_option("--additional-expectations", action="append", default=[],
-            help="Path to a test_expectations file that will override previous expectations. "
-                 "Specify multiple times for multiple sets of overrides."),
-        optparse.make_option("--compare-port", action="store", default=None,
-            help="Use the specified port's baselines first"),
-        optparse.make_option("--no-show-results", action="store_false",
-            default=True, dest="show_results",
-            help="Don't launch a browser with results after the tests "
-                 "are done"),
-        optparse.make_option("--full-results-html", action="store_true",
-            default=False,
-            help="Show all failures in results.html, rather than only regressions"),
-        optparse.make_option("--clobber-old-results", action="store_true",
-            default=False, help="Clobbers test results from previous runs."),
-        optparse.make_option("--enable-versioned-results", action="store_true",
-            default=False, help="Archive the test results for later access."),
         optparse.make_option("--smoke", action="store_true",
             help="Run just the SmokeTests"),
         optparse.make_option("--no-smoke", dest="smoke", action="store_false",
@@ -180,92 +176,91 @@ def parse_args(args):
     ]))
 
     option_group_definitions.append(("Testing Options", [
+        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
+            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
+        optparse.make_option("--batch-size",
+            help=("Run a the tests in batches (n), after every n tests, "
+                  "the driver is relaunched."), type="int", default=None),
         optparse.make_option("--build", dest="build",
             action="store_true", default=True,
             help="Check to ensure the build is up-to-date (default)."),
         optparse.make_option("--no-build", dest="build",
             action="store_false", help="Don't check to see if the build is up-to-date."),
-        optparse.make_option("-n", "--dry-run", action="store_true",
-            default=False,
-            help="Do everything but actually run the tests or upload results."),
-        optparse.make_option("--nocheck-sys-deps", action="store_true",
-            default=False,
-            help="Don't check the system dependencies (themes)"),
-        optparse.make_option("--wrapper",
-            help="wrapper command to insert before invocations of "
-                 "the driver; option is split on whitespace before "
-                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
-        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
-            help="directories or test to ignore (may specify multiple times)"),
+        optparse.make_option("--child-processes",
+            help="Number of drivers to run in parallel."),
+        optparse.make_option("--disable-breakpad", action="store_true",
+            help="Don't use breakpad to symbolize unexpected crashes."),
+        optparse.make_option("--driver-logging", action="store_true",
+            help="Print detailed logging of the driver/content_shell"),
+        optparse.make_option("--enable-leak-detection", action="store_true",
+            help="Enable the leak detection of DOM objects."),
+        optparse.make_option("--enable-sanitizer", action="store_true",
+            help="Only alert on sanitizer-related errors and crashes"),
+        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
+            default=None, help="Exit after the first N crashes instead of "
+            "running all tests"),
+        optparse.make_option("--exit-after-n-failures", type="int", default=None,
+            help="Exit after the first N failures instead of running all "
+            "tests"),
+        optparse.make_option("--ignore-builder-category", action="store",
+            help=("The category of builders to use with the --ignore-flaky-tests "
+                "option ('layout' or 'deps').")),
         optparse.make_option("--ignore-flaky-tests", action="store",
             help=("Control whether tests that are flaky on the bots get ignored."
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot."
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot."
                 "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
-        optparse.make_option("--ignore-builder-category", action="store",
-            help=("The category of builders to use with the --ignore-flaky-tests "
-                "option ('layout' or 'deps').")),
-        optparse.make_option("--test-list", action="append",
-            help="read list of tests to run from file", metavar="FILE"),
-        optparse.make_option("--skipped", action="store", default=None,
-            help=("control how tests marked SKIP are run. "
-                 "'default' == Skip tests unless explicitly listed on the command line, "
-                 "'ignore' == Run them anyway, "
-                 "'only' == only run the SKIP tests, "
-                 "'always' == always skip, even if listed on the command line.")),
-        optparse.make_option("--time-out-ms",
-            help="Set the timeout for each test"),
+        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
+        optparse.make_option("--max-locked-shards", type="int", default=0,
+            help="Set the maximum number of locked shards"),
+        optparse.make_option("--no-retry-failures", action="store_false",
+            dest="retry_failures",
+            help="Don't re-try any tests that produce unexpected results."),
+        optparse.make_option("--nocheck-sys-deps", action="store_true",
+            default=False,
+            help="Don't check the system dependencies (themes)"),
         optparse.make_option("--order", action="store", default="natural",
             help=("determine the order in which the test cases will be run. "
                   "'none' == use the order in which the tests were listed either in arguments or test list, "
                   "'natural' == use the natural order (default), "
                   "'random-seeded' == randomize the test order using a fixed seed, "
                   "'random' == randomize the test order.")),
+        optparse.make_option("--profile", action="store_true",
+            help="Output per-test profile information."),
+        optparse.make_option("--profiler", action="store",
+            help="Output per-test profile information, using the specified profiler."),
+        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
+        optparse.make_option("--retry-failures", action="store_true",
+            help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
         optparse.make_option("--run-chunk",
             help=("Run a specified chunk (n:l), the nth of len l, "
                  "of the layout tests")),
         optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                   "the nth of m parts, of the layout tests")),
-        optparse.make_option("--batch-size",
-            help=("Run a the tests in batches (n), after every n tests, "
-                  "the driver is relaunched."), type="int", default=None),
         optparse.make_option("--run-singly", action="store_true",
             default=False, help="DEPRECATED, same as --batch-size=1 --verbose"),
-        optparse.make_option("--child-processes",
-            help="Number of drivers to run in parallel."),
+        optparse.make_option("--skipped", action="store", default=None,
+            help=("control how tests marked SKIP are run. "
+                 "'default' == Skip tests unless explicitly listed on the command line, "
+                 "'ignore' == Run them anyway, "
+                 "'only' == only run the SKIP tests, "
+                 "'always' == always skip, even if listed on the command line.")),
+        optparse.make_option("--test-list", action="append",
+            help="read list of tests to run from file", metavar="FILE"),
+        optparse.make_option("--time-out-ms",
+            help="Set the timeout for each test"),
+        optparse.make_option("--wrapper",
+            help="wrapper command to insert before invocations of "
+                 "the driver; option is split on whitespace before "
+                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
         # FIXME: Display default number of child processes that will run.
         optparse.make_option("-f", "--fully-parallel", action="store_true",
             help="run all tests in parallel"),
-        optparse.make_option("--exit-after-n-failures", type="int", default=None,
-            help="Exit after the first N failures instead of running all "
-            "tests"),
-        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
-            default=None, help="Exit after the first N crashes instead of "
-            "running all tests"),
-        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
-        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
-        optparse.make_option("--retry-failures", action="store_true",
-            help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
-        optparse.make_option("--no-retry-failures", action="store_false",
-            dest="retry_failures",
-            help="Don't re-try any tests that produce unexpected results."),
-
-        optparse.make_option("--max-locked-shards", type="int", default=0,
-            help="Set the maximum number of locked shards"),
-        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
-            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
-        optparse.make_option("--profile", action="store_true",
-            help="Output per-test profile information."),
-        optparse.make_option("--profiler", action="store",
-            help="Output per-test profile information, using the specified profiler."),
-        optparse.make_option("--driver-logging", action="store_true",
-            help="Print detailed logging of the driver/content_shell"),
-        optparse.make_option("--disable-breakpad", action="store_true",
-            help="Don't use breakpad to symbolize unexpected crashes."),
-        optparse.make_option("--enable-leak-detection", action="store_true",
-            help="Enable the leak detection of DOM objects."),
-        optparse.make_option("--enable-sanitizer", action="store_true",
-            help="Only alert on sanitizer-related errors and crashes"),
+        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
+            help="directories or test to ignore (may specify multiple times)"),
+        optparse.make_option("-n", "--dry-run", action="store_true",
+            default=False,
+            help="Do everything but actually run the tests or upload results."),
     ]))
 
     option_group_definitions.append(("Miscellaneous Options", [
@@ -276,15 +271,15 @@ def parse_args(args):
 
     # FIXME: Move these into json_results_generator.py
     option_group_definitions.append(("Result JSON Options", [
-        optparse.make_option("--master-name", help="The name of the buildbot master."),
-        optparse.make_option("--builder-name", default="",
-            help=("The name of the builder shown on the waterfall running "
-                  "this script e.g. WebKit.")),
         optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
             help=("The name of the builder used in its path, e.g. "
                   "webkit-rel.")),
         optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
             help=("The build number of the builder running this script.")),
+        optparse.make_option("--builder-name", default="",
+            help=("The name of the builder shown on the waterfall running "
+                  "this script e.g. WebKit.")),
+        optparse.make_option("--master-name", help="The name of the buildbot master."),
         optparse.make_option("--test-results-server", default="",
             help=("If specified, upload results json files to this appengine "
                   "server.")),