1 # SPDX-License-Identifier: GPL-2.0+
3 # Copyright (c) 2016 Google, Inc
6 from contextlib import contextmanager
13 from patman import command
15 from io import StringIO
19 from concurrencytest import ConcurrentTestSuite, fork_for_tests
21 use_concurrent = False
def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must appear in the coverage
            report, or None to skip this check. This function does not
            modify the set passed in.

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        # Keep only the filenames which contain filter_fname
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    # binman uses a 'test' subcommand; other tools use a '-t' flag
    test_cmd = 'test' if 'binman' in prog else '-t'
    prefix = ''
    if build_dir:
        # Make the sandbox_spl tools (e.g. _libfdt.so) importable
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s -P1' % (prefix, ','.join(glob_list),
                                      prog, test_cmd))
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set(os.path.splitext(os.path.basename(line.split()[0]))[0]
                       for line in lines if '/etype/' in line)
        # Work on a copy so the caller's set is not modified
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    # The last line of the report ends with the total coverage percentage
    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily capture sys.stdout and sys.stderr into StringIO buffers

    Yields:
        tuple of io.StringIO: (stdout, stderr) buffers which collect any
            output written while the context manager is active
    """
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        # Restore the real streams even if the body raised an exception
        sys.stdout, sys.stderr = old_out, old_err
def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results

    Returns:
        int: 0 if all tests passed, 1 if there were any errors or failures
    """
    # Remove errors which just indicate a missing test. Since Python v3.5 if
    # an ImportError or AttributeError occurs while traversing the name then
    # a synthetic test that raises that error when run will be returned.
    # These errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        # Identify the failing test, then show its traceback (matching the
        # format used for errors above)
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' %
              (len(result.skipped), toolname,
               's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also
            preserves the output directory for this test. Both directories
            are displayed on the command line.
        processes: Number of processes to use to run tests (None=same as
            #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools, or None for none
        test_class_list: List of test classes to run
    """
    # Doctest run is currently disabled (no modules listed) — kept so that
    # modules can easily be added back
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    # Pass the selected options on to the tool under test via sys.argv
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(
                preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # A missing test shows up as AttributeError; just skip the module
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
            fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)