# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import ConfigParser
import errno
import os
import os.path
import pytest
import re
import StringIO
import sys
import traceback

from _pytest.runner import runtestprotocol

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '+
            'over which gdbserver should communicate, e.g. localhost:1234')
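
# As a usage illustration (not part of the hook machinery), these options are
# typically combined on the test runner command line. The invocations below
# are examples assuming a sandbox-capable host, not required forms:
#
#   test/py/test.py --bd sandbox --build -k version
#   test/py/test.py --bd seaboard --id ny21 --build-dir /tmp/b/seaboard
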
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')
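
    # For illustration: with --gdbserver localhost:1234, the sandbox process
    # is run under gdbserver, and a debugger can then attach with something
    # like the following (an assumed invocation; adjust paths to suit):
    #
    #   gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'
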
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
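
    # For example (illustrative): a .config line 'CONFIG_CMD_MEMORY=y' is
    # parsed by RawConfigParser into the lowercased key/value pair
    # ubconfig.buildconfig['config_cmd_memory'] == 'y', which is the form
    # that setup_buildconfigspec() below looks up.
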
    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except:
        lines = []

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
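
# For illustration, a u-boot.sym line such as the following (made-up address;
# the symbol format is what re_ut_test_list expects):
#
#   000000000094c438 D _u_boot_list_2_dm_test_2_dm_test_autobind
#
# yields the parameter value 'dm autobind' and hence the test ID
# 'ut_dm_autobind'.
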
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
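
# As an illustration, a board environment module (say, a hypothetical
# u_boot_boardenv_sandbox.py) could supply a single value for a fixture:
#
#   env__foo = {'bar': 123}
#
# or, alternatively, a list of values for that same fixture:
#
#   env__foos = [
#       {'fixture_id': 'a', 'bar': 1},
#       {'fixture_id': 'b', 'bar': 2},
#   ]
#
# A test taking an env__foo parameter would then run once per value, with
# IDs taken from each dict's optional 'fixture_id' key.
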
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
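
# A minimal example of a test consuming this fixture (illustrative only;
# run_command() is provided by the console implementations):
#
#   def test_version(u_boot_console):
#       response = u_boot_console.run_command('version')
#       assert 'U-Boot' in response
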
anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_warning:
                log.status_warning('%d passed with warning' %
                                   len(tests_warning))
                for test in tests_warning:
                    anchor = anchors.get(test, None)
                    log.status_warning('... ' + test, anchor)
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
            continue
        required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
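
# For example (hypothetical markers): a test can opt out of a specific board
# type, or restrict itself to one:
#
#   @pytest.mark.boardspec('!sandbox')   # skip when running on sandbox
#   @pytest.mark.boardspec('seaboard')   # run only on this board type
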
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
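
# For example (hypothetical marker): require a Kconfig feature, matched
# case-insensitively against the 'config_'-prefixed keys parsed earlier:
#
#   @pytest.mark.buildconfigspec('cmd_memory')   # needs CONFIG_CMD_MEMORY
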
def tool_is_in_path(tool):
    """Determine whether an executable tool is present in $PATH."""

    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
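
# For example (hypothetical marker): skip unless the device tree compiler is
# installed on the host:
#
#   @pytest.mark.requiredtool('dtc')
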
def start_test_section(item):
    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)
    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning

    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)
    try:
        msg_log(msg, anchors.get(item.name, None))
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports