[lit] Small cleanups. NFCI
authorJulian Lettner <julian.lettner@gmail.com>
Tue, 26 Feb 2019 06:51:50 +0000 (22:51 -0800)
committerJulian Lettner <julian.lettner@apple.com>
Sat, 14 Dec 2019 01:00:04 +0000 (17:00 -0800)
Remove unnecessary arguments (argument same as the default), clean up
imports, use "pythonic" names for variables, and general formatting.

llvm/utils/lit/lit.py
llvm/utils/lit/lit/LitTestCase.py
llvm/utils/lit/lit/__init__.py
llvm/utils/lit/lit/cl_arguments.py
llvm/utils/lit/lit/main.py
llvm/utils/lit/lit/run.py
llvm/utils/lit/tests/unit/TestRunner.py

index 2f1fb43..cbb91ce 100755 (executable)
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-import lit.main
+from lit.main import main
 
 if __name__ == '__main__':
-    lit.main()
+    main()
index d8a3fc3..951f7be 100644 (file)
@@ -1,7 +1,8 @@
 import unittest
 
-import lit.worker
+import lit.discovery
 import lit.LitConfig
+import lit.worker
 
 """
 TestCase adaptor for providing a Python 'unittest' compatible interface to 'lit'
index 4849e7e..9b205e5 100644 (file)
@@ -6,5 +6,3 @@ __versioninfo__ = (0, 10, 0)
 __version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
 
 __all__ = []
-
-from .main import main
index cbdd0ec..1417e89 100644 (file)
@@ -17,7 +17,7 @@ def parse_args():
             help="Show version and exit",
             action="store_true")
     parser.add_argument("-j", "--threads", "--workers",
-            dest="numWorkers",
+            dest="workers",
             metavar="N",
             help="Number of workers used for testing",
             type=_positive_int,
@@ -115,7 +115,6 @@ def parse_args():
 
     selection_group = parser.add_argument_group("Test Selection")
     selection_group.add_argument("--max-tests",
-            dest="max_tests",
             metavar="N",
             help="Maximum number of tests to run",
             type=_positive_int)
index 221c722..38a6f57 100755 (executable)
@@ -17,46 +17,47 @@ import lit.run
 import lit.Test
 import lit.util
 
-def main(builtin_params = {}):
+
+def main(builtin_params={}):
     opts = lit.cl_arguments.parse_args()
 
     if opts.show_version:
-        print("lit %s" % (lit.__version__,))
+        print("lit %s" % lit.__version__)
         return
 
     params = create_params(builtin_params, opts.user_params)
-    isWindows = platform.system() == 'Windows'
-
-    litConfig = lit.LitConfig.LitConfig(
-        progname = os.path.basename(sys.argv[0]),
-        path = opts.path,
-        quiet = opts.quiet,
-        useValgrind = opts.useValgrind,
-        valgrindLeakCheck = opts.valgrindLeakCheck,
-        valgrindArgs = opts.valgrindArgs,
-        noExecute = opts.noExecute,
-        debug = opts.debug,
-        isWindows = isWindows,
-        params = params,
-        config_prefix = opts.configPrefix,
-        maxFailures = opts.maxFailures,
-        echo_all_commands = opts.echoAllCommands)
-
-    discovered_tests = lit.discovery.find_tests_for_inputs(litConfig, opts.test_paths)
+    is_windows = platform.system() == 'Windows'
+
+    lit_config = lit.LitConfig.LitConfig(
+        progname=os.path.basename(sys.argv[0]),
+        path=opts.path,
+        quiet=opts.quiet,
+        useValgrind=opts.useValgrind,
+        valgrindLeakCheck=opts.valgrindLeakCheck,
+        valgrindArgs=opts.valgrindArgs,
+        noExecute=opts.noExecute,
+        debug=opts.debug,
+        isWindows=is_windows,
+        params=params,
+        config_prefix=opts.configPrefix,
+        maxFailures=opts.maxFailures, # TODO(yln): doesn't need to be in lit config
+        echo_all_commands=opts.echoAllCommands)
+
+    discovered_tests = lit.discovery.find_tests_for_inputs(lit_config, opts.test_paths)
     if not discovered_tests:
     sys.stderr.write('error: did not discover any tests for provided path(s)\n')
         sys.exit(2)
 
     # Command line overrides configuration for maxIndividualTestTime.
     if opts.maxIndividualTestTime is not None:  # `not None` is important (default: 0)
-        if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
-            litConfig.note(('The test suite configuration requested an individual'
+        if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
+            lit_config.note(('The test suite configuration requested an individual'
                 ' test timeout of {0} seconds but a timeout of {1} seconds was'
                 ' requested on the command line. Forcing timeout to be {1}'
                 ' seconds')
-                .format(litConfig.maxIndividualTestTime,
+                .format(lit_config.maxIndividualTestTime,
                         opts.maxIndividualTestTime))
-            litConfig.maxIndividualTestTime = opts.maxIndividualTestTime
+            lit_config.maxIndividualTestTime = opts.maxIndividualTestTime
 
     if opts.showSuites or opts.showTests:
         print_suites_or_tests(discovered_tests, opts)
@@ -83,7 +84,7 @@ def main(builtin_params = {}):
 
     if opts.shard:
         (run, shards) = opts.shard
-        filtered_tests = filter_by_shard(filtered_tests, run, shards, litConfig)
+        filtered_tests = filter_by_shard(filtered_tests, run, shards, lit_config)
         if not filtered_tests:
             sys.stderr.write('warning: shard does not contain any tests.  '
                              'Consider decreasing the number of shards.\n')
@@ -92,10 +93,10 @@ def main(builtin_params = {}):
     if opts.max_tests:
         filtered_tests = filtered_tests[:opts.max_tests]
 
-    opts.numWorkers = min(len(filtered_tests), opts.numWorkers)
+    opts.workers = min(len(filtered_tests), opts.workers)
 
     start = time.time()
-    run_tests(filtered_tests, litConfig, opts, len(discovered_tests))
+    run_tests(filtered_tests, lit_config, opts, len(discovered_tests))
     elapsed = time.time() - start
 
     executed_tests = [t for t in filtered_tests if t.result]
@@ -104,16 +105,16 @@ def main(builtin_params = {}):
 
     if opts.output_path:
         # TODO(yln): pass in discovered_tests
-        write_test_results(executed_tests, litConfig, elapsed, opts.output_path)
+        write_test_results(executed_tests, lit_config, elapsed, opts.output_path)
     if opts.xunit_output_file:
         write_test_results_xunit(executed_tests, opts)
 
-    if litConfig.numErrors:
-        sys.stderr.write('\n%d error(s) in tests\n' % litConfig.numErrors)
+    if lit_config.numErrors:
+        sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)
         sys.exit(2)
 
-    if litConfig.numWarnings:
-        sys.stderr.write('\n%d warning(s) in tests\n' % litConfig.numWarnings)
+    if lit_config.numWarnings:
+        sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)
 
     has_failure = any(t.isFailure() for t in executed_tests)
     if has_failure:
@@ -175,7 +176,8 @@ def touch_file(test):
     if test.isFailure():
         os.utime(test.getFilePath(), None)
 
-def filter_by_shard(tests, run, shards, litConfig):
+
+def filter_by_shard(tests, run, shards, lit_config):
     test_ixs = range(run - 1, len(tests), shards)
     selected_tests = [tests[i] for i in test_ixs]
 
@@ -190,29 +192,29 @@ def filter_by_shard(tests, run, shards, litConfig):
           'tests #({shards}*k)+{run} = [{preview}]'.format(
               run=run, shards=shards, sel_tests=len(selected_tests),
               total_tests=len(tests), preview=preview)
-    litConfig.note(msg)
+    lit_config.note(msg)
     return selected_tests
 
-def run_tests(tests, litConfig, opts, numTotalTests):
+def run_tests(tests, lit_config, opts, numTotalTests):
     display = lit.display.create_display(opts, len(tests), numTotalTests,
-                                         opts.numWorkers)
+                                         opts.workers)
     def progress_callback(test):
         display.update(test)
         if opts.order == 'failing-first':
             touch_file(test)
 
-    run = lit.run.create_run(tests, litConfig, opts.numWorkers,
-                             progress_callback, opts.timeout)
+    run = lit.run.create_run(tests, lit_config, opts.workers, progress_callback,
+                             opts.timeout)
 
     display.print_header()
     try:
-        execute_in_tmp_dir(run, litConfig)
+        execute_in_tmp_dir(run, lit_config)
         display.clear(interrupted=False)
     except KeyboardInterrupt:
         display.clear(interrupted=True)
         print(' [interrupted by user]')
 
-def execute_in_tmp_dir(run, litConfig):
+def execute_in_tmp_dir(run, lit_config):
     # Create a temp directory inside the normal temp directory so that we can
     # try to avoid temporary test file leaks. The user can avoid this behavior
     # by setting LIT_PRESERVES_TMP in the environment, so they can easily use
@@ -241,7 +243,7 @@ def execute_in_tmp_dir(run, litConfig):
                 shutil.rmtree(tmp_dir)
             except:
                 # FIXME: Re-try after timeout on Windows.
-                litConfig.warning("Failed to delete temp directory '%s'" % tmp_dir)
+                lit_config.warning("Failed to delete temp directory '%s'" % tmp_dir)
 
 def print_summary(tests, elapsed, opts):
     if not opts.quiet:
index 166b64e..f1004a4 100644 (file)
@@ -77,7 +77,7 @@ class Run(object):
 
         # If we've finished all the tests or too many tests have failed, notify
         # the main thread that we've stopped testing.
-        self.failure_count += (result.code == lit.Test.FAIL)
+        self.failure_count += (result.code == lit.Test.FAIL)  # TODO(yln): this is buggy
         if self.lit_config.maxFailures and \
                 self.failure_count == self.lit_config.maxFailures:
             self.hit_max_failures = True
index dfd4656..ceb7bef 100644 (file)
@@ -3,12 +3,12 @@
 # END.
 
 
-import unittest
-import platform
 import os.path
-import tempfile
+import platform
+import unittest
 
-import lit
+import lit.discovery
+import lit.LitConfig
 import lit.Test as Test
 from lit.TestRunner import ParserKind, IntegratedTestKeywordParser, \
                            parseIntegratedTestScript