1 # SPDX-License-Identifier: GPL-2.0+
3 # Copyright (c) 2016 Google, Inc
6 from contextlib import contextmanager
13 from patman import command
15 from io import StringIO
19 from concurrencytest.concurrencytest import ConcurrentTestSuite
20 from concurrencytest.concurrencytest import fork_for_tests
22 use_concurrent = False
def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
                    extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that contain filter_fname are
            included
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set (or list) of module names which must appear in the
            coverage report; a copy is taken so the caller's object is not
            modified
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    # binman and patman take a 'test' sub-command; other tools use a -t flag
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = {os.path.splitext(os.path.basename(line.split()[0]))[0]
                    for line in lines if '/etype/' in line}
        # Copy into a new set: the original code aliased the caller's object,
        # so discard()/difference_update() mutated the caller's 'required'
        # (and raised AttributeError if a plain list was passed)
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            ok = False

    # The last line of 'python3-coverage report' holds the total percentage
    coverage = lines[-1].split(' ')[-1]
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily redirect sys.stdout and sys.stderr into StringIO buffers

    Yields:
        tuple: (StringIO capturing stdout, StringIO capturing stderr)
    """
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        # Restore the real streams even if the with-body raised, so an
        # exception inside the block cannot leave output captured forever
        sys.stdout, sys.stderr = old_out, old_err
def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results

    Returns:
        int: 0 if all tests passed, 1 if there were errors or failures
    """
    # Remove errors which just indicate a missing test. Since Python v3.5 if an
    # ImportError or AttributeError occurs while traversing name then a
    # synthetic test that raises that error when run will be returned. These
    # errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
        result.errors = errors

    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        # Print this failure's test id and traceback; the old code printed
        # the whole result.failures list on every iteration
        print(test.id(), err)
    if result.skipped:
        # Only show the SKIPPED section when something actually was skipped
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
                                         's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    # NOTE(review): this loop iterates an empty list, so doctests are
    # effectively disabled at present — confirm whether modules should be
    # listed here
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    # Rebuild sys.argv with the options the tool under test expects
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    for path in toolpath:
        sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Select just the named test; skip modules which don't have it
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        # concurrencytest unavailable or a single process requested:
        # run the whole suite serially
        suite.run(result)