1 # SPDX-License-Identifier: GPL-2.0+
3 # Copyright (c) 2016 Google, Inc
6 from contextlib import contextmanager
14 from patman import command
16 from io import StringIO
# concurrencytest is an optional third-party package: fall back to serial
# test execution when it is not installed, instead of failing at import time.
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
    from concurrencytest.concurrencytest import fork_for_tests
    use_concurrent = True
except ImportError:
    use_concurrent = False
def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
                    extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: List of modules which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the -t/test
            argument

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    glob_list = glob.glob(os.path.join(path, '*.py'))
    if filter_fname:
        # Keep only filenames matching the requested filter
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    # binman and patman take a 'test' subcommand; other tools use a '-t' flag
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        # Copy into a new set so the caller's 'required' is not mutated
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            raise ValueError('Test coverage failure')

    # The final line of the report holds the overall coverage percentage
    coverage = lines[-1].split(' ')[-1]
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        raise ValueError('Test coverage failure')
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily redirect sys.stdout / sys.stderr into StringIO buffers

    Yields:
        tuple of StringIO: (stdout_buffer, stderr_buffer) collecting all
            output written while the context manager is active
    """
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        # Always restore the real streams, even if the body raised
        sys.stdout, sys.stderr = old_out, old_err
def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results
    """
    # Remove errors which just indicate a missing test. Since Python v3.5 If an
    # ImportError or AttributeError occurs while traversing name then a
    # synthetic test that raises that error when run will be returned. These
    # errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
        result.errors = errors

    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        # Print the same (id, traceback) form as the errors loop above
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
                                         's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run
    """
    # Run doctests for any modules given by name
    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
            suite = doctest.DocTestSuite(module)
            suite.run(result)

    # Rebuild the command line so the tools only see flags they understand
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    for path in toolpath:
        sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in class_and_module_list:
        # Doctest modules (strings) were already handled above
        if isinstance(module, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Running a single named test: skip modules which lack it
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        # Fork one worker per CPU (or 'processes') and run in parallel
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        # Serial fallback when concurrencytest is unavailable or a single
        # process was requested
        suite.run(result)