# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: GPL-2.0
# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, and at shutdown. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.
import atexit
import ConfigParser
import errno
import os
import os.path
import pytest
import re
import StringIO
import sys
import traceback
from _pytest.runner import runtestprotocol
# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None
32 """Create a directory path.
34 This includes creating any intermediate/parent directories. Any errors
35 caused due to already extant directories are ignored.
38 path: The directory path to create.
46 except OSError as exc:
47 if exc.errno == errno.EEXIST and os.path.isdir(path):
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.
        parser: The pytest command-line parser.
    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel '+
        'over which gdbserver should communicate, e.g. localhost:1234')
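
# A typical invocation consuming these options might look like this
# (illustrative only; the build directory path and board name are examples):
#   py.test test/py --build-dir /tmp/u-boot-build --board-type sandbox --build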
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.
        config: The pytest configuration.
    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))
    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')
    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')
    build_dir = config.getoption('build_dir')
        build_dir = source_dir + '/build-' + board_type
    result_dir = config.getoption('result_dir')
        result_dir = build_dir
    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)
    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')
    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
                runner.run(cmd, cwd=source_dir)
            log.status_pass('OK')
    class ArbitraryAttributeContainer(object):
    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    for (dict_to_fill, module_name) in modules:
            module = __import__(module_name)
        dict_to_fill.update(module.__dict__)
    ubconfig.buildconfig = dict()
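    # Collect the U-Boot build configuration. As an illustrative example, a
    # .config line such as CONFIG_CMD_MEMORY=y ends up in ubconfig.buildconfig
    # as {'config_cmd_memory': 'y'}, since RawConfigParser lower-cases option
    # names; setup_buildconfigspec() below relies on that lower-casing.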
    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))
    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
        'persistent_data_dir',
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)
    if board_type == 'sandbox':
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
re_ut_test_list = re.compile(r'_u_boot_list_2_(dm|env)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.
    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.
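
    As an illustrative example, a u-boot.sym entry for the symbol
    _u_boot_list_2_dm_test_2_dm_test_gpio would produce the parameter value
    'dm gpio' and the generated test ID 'ut_dm_gpio'.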
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
    fn = console.config.build_dir + '/u-boot.sym'
        with open(fn, 'rt') as f:
            lines = f.readlines()
        m = re_ut_test_list.search(l)
        vals.append(m.group(1) + ' ' + m.group(2))
    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.
    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.
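
    As an illustrative example, a test parameter named env__example_device (a
    hypothetical name) receives the value stored under that exact key in the
    board environment module if present, or else the test is parametrized once
    per entry in any list stored under the key env__example_devices.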
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
        'brd': console.config.brd,
        'env': console.config.env,
    parts = fixture_name.split('__')
    if parts[0] not in subconfigs:
    subconfig = subconfigs[parts[0]]
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
        # ... use the dict value as a single parameter value.
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
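    # Each value may be a dict carrying a 'fixture_id' key, which is used as
    # the parametrized test's ID; otherwise an index-based name is generated.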
    def fixture_id(index, val):
            return val['fixture_id']
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.
    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.
        metafunc: The pytest test function.
    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
        generate_config(metafunc, fn)
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.
        request: The pytest request.
    console.ensure_spawned()
    return console
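
# Tests obtain the console by naming this fixture as a parameter, e.g.
# (illustrative; assumes the console object's run_command() helper):
#   def test_version(u_boot_console):
#       u_boot_console.run_command('version')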
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.
    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.
        item: The item that was collected.
    tests_not_run.append(item.name)
338 """Clean up all global state.
340 Executed (via atexit) once the entire test process is complete. This
341 includes logging the status of all tests, and the identity of any failed
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
atexit.register(cleanup)
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.
    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.
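
    As an illustrative example, @pytest.mark.boardspec('sandbox') restricts a
    test to the sandbox board, while @pytest.mark.boardspec('!sandbox') skips
    the test when running on sandbox.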
        item: The pytest test item.
    mark = item.get_marker('boardspec')
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board not supported')
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board not supported')
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.
    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.
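
    As an illustrative example, @pytest.mark.buildconfigspec('cmd_memory')
    skips the test unless CONFIG_CMD_MEMORY is enabled in the U-Boot build
    configuration.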
        item: The pytest test item.
    mark = item.get_marker('buildconfigspec')
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature not enabled')
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.
    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.
        item: The pytest test item.
    anchors[item.name] = log.start_section(item.name)
    setup_boardspec(item)
    setup_buildconfigspec(item)
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.
    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.
        A list of pytest reports (test result data).
    reports = runtestprotocol(item, nextitem=nextitem)
    failure_cleanup = False
    test_list = tests_passed
    msg_log = log.status_pass
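    # Classify the outcome: a 'failed' report carrying wasxfail is treated as
    # an unexpected pass (xpass), a 'skipped' report carrying wasxfail as an
    # expected failure (xfail); plain failures and skips fall through to the
    # fail/skip buckets, and anything else counts as a pass.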
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg_log = log.status_xpass
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped
        console.drain_console()
    test_list.append(item.name)
    tests_not_run.remove(item.name)
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?
    log.end_section(item.name)
        console.cleanup_spawn()

    return reports