# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

from _pytest.runner import runtestprotocol

# Globals: The HTML log file, and the connection to the U-Boot console.

    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by directories that already exist are ignored.

        path: The directory path to create.

    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

        parser: The pytest command-line parser.

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--buildman', default=False, action='store_true',
        help='Use buildman to build U-Boot (assuming --build is given)')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel ' +
        'over which gdbserver should communicate, e.g. localhost:1234')
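
    # A hedged sketch of how these options are typically combined on the
    # command line (board names and paths here are illustrative only):
    #
    #   ./test/py/test.py --bd sandbox --build
    #   ./test/py/test.py --bd seaboard --id na --build-dir /path/to/build
    #
    # '--bd' selects the board type, '--build' requests that U-Boot be
    # compiled first, and '--build-dir' points at an existing out-of-tree
    # build.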

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

        config: The pytest configuration.

    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

            conf_file: Filename to load (within build_dir)

            Exception if the file does not exist
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                            'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
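
    # Illustrative note (not from the original file): a build config line
    # such as
    #
    #   CONFIG_CMD_MEMORY=y
    #
    # is parsed via the synthetic '[root]' section above and lands in
    # ubconfig.buildconfig as the key 'config_cmd_memory' with value 'y'
    # (RawConfigParser lowercases option names), which is the exact form the
    # buildconfigspec marker handling further below looks up.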

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')
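
    # Hedged example of the gdbserver workflow (the exact commands are
    # illustrative): running the tests with
    #
    #   --gdbserver localhost:1234
    #
    # starts the sandbox U-Boot binary under gdbserver, and a debugger can be
    # attached from another terminal with something like
    #
    #   gdb ./u-boot -ex 'target remote localhost:1234'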

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if config.getoption('buildman'):
            if build_dir != source_dir:
                dest_args = ['-o', build_dir, '-w']
            cmds = (['buildman', '--board', board_type] + dest_args,)
        else:
            if build_dir != source_dir:
                o_opt = 'O=%s' % build_dir
            cmds = (
                ['make', o_opt, '-s', board_type + '_defconfig'],
                ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
            )
        with log.section(name):
            runner = log.get_runner(name, sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            log.status_pass('OK')
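
    # For illustration: with --build and without --buildman, the commands run
    # are effectively
    #   make O=<build_dir> -s <board_type>_defconfig
    #   make O=<build_dir> -s -j<cpu_count>
    # executed from the source tree, with their output captured in the HTML log.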

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build, which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

        'persistent_data_dir',
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')

def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    fn = console.config.build_dir + '/u-boot.sym'
    with open(fn, 'rt') as f:
        lines = f.readlines()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
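
# Illustrative sketch (this particular symbol is hypothetical): a u-boot.sym
# line such as
#
#   0000000000123456 D _u_boot_list_2_dm_test_2_dm_test_gpio
#
# matches re_ut_test_list with group(1) == 'dm' and group(2) == 'gpio', so the
# fixture value becomes 'dm gpio' and the generated test id is 'ut_dm_gpio'.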

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]

    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])

    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)

    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
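
# A hypothetical boardenv sketch showing the two lookup forms handled above
# (names and values are illustrative, not from any real board configuration):
#
#   # u_boot_boardenv_<board_type>.py
#   env__foo = {'fixture_id': 'default', 'addr': 0x1000}      # single value
#   env__bars = [{'fixture_id': 'a'}, {'fixture_id': 'b'}]    # list of values
#
# A test with a parameter named env__foo runs once with the dict above, while
# a test with a parameter named env__bar runs once per entry of env__bars,
# using each entry's 'fixture_id' as the test id.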

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

        metafunc: The pytest test function.

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

        request: The pytest request.

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

        request: The pytest request.

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

        request: The pytest request.

    console.ensure_spawned()

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

        item: The item that was collected.

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    with log.section('Status Report', 'status_report'):
        log.status_pass('%d passed' % len(tests_passed))
        if tests_warning:
            log.status_warning('%d passed with warning' % len(tests_warning))
            for test in tests_warning:
                anchor = anchors.get(test, None)
                log.status_warning('... ' + test, anchor)
        if tests_skipped:
            log.status_skipped('%d skipped' % len(tests_skipped))
            for test in tests_skipped:
                anchor = anchors.get(test, None)
                log.status_skipped('... ' + test, anchor)
        if tests_xpassed:
            log.status_xpass('%d xpass' % len(tests_xpassed))
            for test in tests_xpassed:
                anchor = anchors.get(test, None)
                log.status_xpass('... ' + test, anchor)
        if tests_xfailed:
            log.status_xfail('%d xfail' % len(tests_xfailed))
            for test in tests_xfailed:
                anchor = anchors.get(test, None)
                log.status_xfail('... ' + test, anchor)
        if tests_failed:
            log.status_fail('%d failed' % len(tests_failed))
            for test in tests_failed:
                anchor = anchors.get(test, None)
                log.status_fail('... ' + test, anchor)
        if tests_not_run:
            log.status_fail('%d not run' % len(tests_not_run))
            for test in tests_not_run:
                anchor = anchors.get(test, None)
                log.status_fail('... ' + test, anchor)

atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

        item: The pytest test item.

    required_boards = []
    for boards in item.iter_markers('boardspec'):
        board = boards.args[0]
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
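
# Hedged usage sketch (test and board names are illustrative): a test can
# declare which boards it supports, or explicitly does not support:
#
#   @pytest.mark.boardspec('sandbox')     # only run on the sandbox board
#   @pytest.mark.boardspec('!seaboard')   # skip when running on seaboard
#   def test_something(u_boot_console):
#       ...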

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

        item: The pytest test item.

    for options in item.iter_markers('buildconfigspec'):
        option = options.args[0]
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
    for options in item.iter_markers('notbuildconfigspec'):
        option = options.args[0]
        if ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" enabled' % option.lower())
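
# Hedged usage sketch (option names are illustrative): the marker argument is
# the Kconfig symbol name without the CONFIG_ prefix; it is lower-cased and
# checked against the parsed build configuration:
#
#   @pytest.mark.buildconfigspec('cmd_memory')      # requires CONFIG_CMD_MEMORY
#   @pytest.mark.notbuildconfigspec('sandbox_spl')  # skip if CONFIG_SANDBOX_SPL is set
#   def test_md(u_boot_console):
#       ...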

def tool_is_in_path(tool):
    """Return True if the named tool exists as an executable in $PATH."""
    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

        item: The pytest test item.

    for tools in item.iter_markers('requiredtool'):
        tool = tools.args[0]
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
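
# Hedged usage sketch (the tool name is illustrative): a test that shells out
# to an external program can declare it, so the test is skipped on hosts where
# that program is not installed:
#
#   @pytest.mark.requiredtool('dtc')
#   def test_needs_dtc(u_boot_console):
#       ...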

def start_test_section(item):
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

        item: The pytest test item.

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

        A list of pytest reports (test result data).

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning

    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports