# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. startup, for each executed
# test, at shutdown etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import ConfigParser
import errno
import os
import os.path
import pytest
import re
import StringIO
import sys
import traceback
from _pytest.runner import runtestprotocol

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None
32 """Create a directory path.
34 This includes creating any intermediate/parent directories. Any errors
35 caused due to already extant directories are ignored.
38 path: The directory path to create.
46 except OSError as exc:
47 if exc.errno == errno.EEXIST and os.path.isdir(path):

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '
        'over which gdbserver should communicate, e.g. localhost:1234')
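
# For illustration, invocations passing these options might look like the
# following (the test.py wrapper script and the paths shown are assumptions
# for this example, not defined by this file):
#
#   ./test/py/test.py --bd sandbox --build
#   ./test/py/test.py --bd seaboard --id 42 --build-dir /path/to/build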

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
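
    # Note (illustrative values): a .config line such as 'CONFIG_CMD_MEMORY=y'
    # ends up in ubconfig.buildconfig as the pair 'config_cmd_memory' -> 'y',
    # since RawConfigParser lower-cases option names; this is the form that
    # setup_buildconfigspec() looks up later.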

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
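
# For illustration (symbol name assumed): a symbol line containing
# '_u_boot_list_2_dm_test_2_dm_test_autobind' matches with group(1) == 'dm'
# and group(2) == 'autobind', yielding the parameter value 'dm autobind' and
# the test ID 'ut_dm_autobind' in generate_ut_subtest() below.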

def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except:
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except:
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
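
# Illustrative sketch (names and values assumed): a board environment file
# such as u_boot_boardenv_sandbox.py could define
#
#     env__example_setting = {'fixture_id': 'primary', 'value': 42}
#
# and a test declaring that fixture name as a parameter:
#
#     def test_example(u_boot_console, env__example_setting):
#         assert env__example_setting['value'] == 42
#
# would then run once, with the dict above as the parameter value and
# 'primary' as the test ID.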

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console
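
# A minimal usage sketch (test name and body assumed, not part of this file):
#
#     def test_version(u_boot_console):
#         response = u_boot_console.run_command('version')
#         assert 'U-Boot' in response
#
# run_command() is a helper on the console object created in
# pytest_configure() above, not something defined in this file.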

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)
366 """Clean up all global state.
368 Executed (via atexit) once the entire test process is complete. This
369 includes logging the status of all tests, and the identity of any failed
382 with log.section('Status Report', 'status_report'):
383 log.status_pass('%d passed' % len(tests_passed))
385 log.status_warning('%d passed with warning' % len(tests_warning))
386 for test in tests_warning:
387 anchor = anchors.get(test, None)
388 log.status_warning('... ' + test, anchor)
390 log.status_skipped('%d skipped' % len(tests_skipped))
391 for test in tests_skipped:
392 anchor = anchors.get(test, None)
393 log.status_skipped('... ' + test, anchor)
395 log.status_xpass('%d xpass' % len(tests_xpassed))
396 for test in tests_xpassed:
397 anchor = anchors.get(test, None)
398 log.status_xpass('... ' + test, anchor)
400 log.status_xfail('%d xfail' % len(tests_xfailed))
401 for test in tests_xfailed:
402 anchor = anchors.get(test, None)
403 log.status_xfail('... ' + test, anchor)
405 log.status_fail('%d failed' % len(tests_failed))
406 for test in tests_failed:
407 anchor = anchors.get(test, None)
408 log.status_fail('... ' + test, anchor)
410 log.status_fail('%d not run' % len(tests_not_run))
411 for test in tests_not_run:
412 anchor = anchors.get(test, None)
413 log.status_fail('... ' + test, anchor)
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
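
# For example (board names assumed for illustration):
#
#     @pytest.mark.boardspec('sandbox')    # run only on the sandbox board
#     @pytest.mark.boardspec('!seaboard')  # skip when running on seaboard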

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
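
# For example (option name assumed): @pytest.mark.buildconfigspec('cmd_memory')
# skips the test unless CONFIG_CMD_MEMORY is enabled in the build under test.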

def tool_is_in_path(tool):
    """Return True if an executable with the given name exists in $PATH."""

    for path in os.environ['PATH'].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
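
# For example (tool name assumed): @pytest.mark.requiredtool('dtc') skips the
# test when no executable named 'dtc' can be found in $PATH.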

def start_test_section(item):
    """Open a section for the given test item in the HTML log."""

    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports