# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, at shutdown, etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.
import atexit
import configparser
import errno
import io
import os
import os.path
import pytest
import re
import sys
import traceback

from _pytest.runner import runtestprotocol
# Globals: The HTML log file, and the connection to the U-Boot console.
31 """Create a directory path.
33 This includes creating any intermediate/parent directories. Any errors
34 caused due to already extant directories are ignored.
37 path: The directory path to create.
45 except OSError as exc:
46 if exc.errno == errno.EEXIST and os.path.isdir(path):
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.
    """

    parser.addoption('--build-dir', default=None,
                     help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
                     help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
                     help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
                     help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
                     help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
                     help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
                     help='Run sandbox under gdbserver. The argument is the '
                     'channel over which gdbserver should communicate, e.g. '
                     'localhost:1234')
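
# Typical invocations, for illustration only (paths and board names are
# examples, not requirements):
#
#   ./test/py/test.py --bd sandbox --build
#   ./test/py/test.py --bd seaboard --id na --build-dir /path/to/build
#
# Standard pytest options (e.g. -k, -ra) may be appended as usual.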
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.
    """

    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container.

        Args:
            conf_file: Filename to load (within build_dir).

        Raises:
            Exception if the file does not exist.
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                            'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
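
    # Example (illustrative): once parse_config('.config') has run, a line
    # such as "CONFIG_CMD_MEMORY=y" in .config becomes the buildconfig entry
    # 'config_cmd_memory' -> 'y', which is exactly what the buildconfigspec
    # marker handling below looks up via
    # ubconfig.buildconfig.get('config_' + option.lower()).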
    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')
    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')
    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')
    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    with open(fn, 'rt') as f:
        lines = f.readlines()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
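
# For illustration only: a matching line in u-boot.sym looks roughly like
# "<addr> D _u_boot_list_2_dm_test_2_dm_test_gpio"; the regexp above turns
# that into the parameter value "dm gpio" and the test id "ut_dm_gpio". The
# exact set of symbols depends on which unit tests were built in.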
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
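
# For illustration only: a board environment file such as
# u_boot_boardenv_sandbox.py could provide either form (the names here are
# hypothetical examples):
#
#   env__net_dhcp_server = {'fixture_id': 'primary', ...}  # single value
#   env__usb_dev_ports = [{...}, {...}]                    # list for env__usb_dev_port
#
# A test declaring a parameter named env__net_dhcp_server then runs once per
# value found, with ids taken from each dict's 'fixture_id' where present.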
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)
@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
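
# For illustration only: a test module under test/py/tests/ typically consumes
# the fixtures above like this (the test body shown is a simplified sketch):
#
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response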
anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.
    """

    tests_not_run.append(item.name)
380 """Clean up all global state.
382 Executed (via atexit) once the entire test process is complete. This
383 includes logging the status of all tests, and the identity of any failed
396 with log.section('Status Report', 'status_report'):
397 log.status_pass('%d passed' % len(tests_passed))
399 log.status_warning('%d passed with warning' % len(tests_warning))
400 for test in tests_warning:
401 anchor = anchors.get(test, None)
402 log.status_warning('... ' + test, anchor)
404 log.status_skipped('%d skipped' % len(tests_skipped))
405 for test in tests_skipped:
406 anchor = anchors.get(test, None)
407 log.status_skipped('... ' + test, anchor)
409 log.status_xpass('%d xpass' % len(tests_xpassed))
410 for test in tests_xpassed:
411 anchor = anchors.get(test, None)
412 log.status_xpass('... ' + test, anchor)
414 log.status_xfail('%d xfail' % len(tests_xfailed))
415 for test in tests_xfailed:
416 anchor = anchors.get(test, None)
417 log.status_xfail('... ' + test, anchor)
419 log.status_fail('%d failed' % len(tests_failed))
420 for test in tests_failed:
421 anchor = anchors.get(test, None)
422 log.status_fail('... ' + test, anchor)
424 log.status_fail('%d not run' % len(tests_not_run))
425 for test in tests_not_run:
426 anchor = anchors.get(test, None)
427 log.status_fail('... ' + test, anchor)
atexit.register(cleanup)
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.
    """

    required_boards = []
    for boards in item.iter_markers('boardspec'):
        board = boards.args[0]
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
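
# For illustration only: tests opt in or out of board types with markers like
#
#   @pytest.mark.boardspec('sandbox')    # run only on sandbox boards
#   @pytest.mark.boardspec('!sandbox')   # run on everything except sandbox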
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.
    """

    for options in item.iter_markers('buildconfigspec'):
        option = options.args[0]
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
    for options in item.iter_markers('notbuildconfigspec'):
        option = options.args[0]
        if ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" enabled' % option.lower())
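
# For illustration only: a test that depends on a Kconfig feature is marked
#
#   @pytest.mark.buildconfigspec('cmd_memory')      # requires CONFIG_CMD_MEMORY=y
#   @pytest.mark.notbuildconfigspec('sandbox_spl')  # skipped when CONFIG_SANDBOX_SPL=y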
def tool_is_in_path(tool):
    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.
    """

    for tools in item.iter_markers('requiredtool'):
        tool = tools.args[0]
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
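
# For illustration only: a test that shells out to an external binary is marked
#
#   @pytest.mark.requiredtool('dtc')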
def start_test_section(item):
    anchors[item.name] = log.start_section(item.name)
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except Exception:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports