1 # Status: ported, except for --out-xml
4 # Copyright 2005 Dave Abrahams
5 # Copyright 2002, 2003, 2004, 2005, 2010 Vladimir Prus
6 # Distributed under the Boost Software License, Version 1.0.
7 # (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
9 # This module implements regression testing framework. It declares a number of
10 # main target rules which perform some action and, if the results are OK,
11 # creates an output file.
13 # The exact list of rules is:
# 'compile'      -- creates .test file if compilation of sources was successful.
# 'compile-fail' -- creates .test file if compilation of sources failed.
# 'run' -- creates .test file if running of executable produced from
18 # sources was successful. Also leaves behind .output file
19 # with the output from program run.
20 # 'run-fail' -- same as above, but .test file is created if running fails.
22 # In all cases, presence of .test file is an indication that the test passed.
23 # For more convenient reporting, you might want to use C++ Boost regression
24 # testing utilities (see http://www.boost.org/more/regression.html).
# For historical reasons, a 'unit-test' rule is available which has the same
27 # syntax as 'exe' and behaves just like 'run'.
# - Teach compiler_status to handle Jamfile.v2.
32 # - <no-warn> is not implemented, since it is Como-specific, and it is not
33 # clear how to implement it
34 # - std::locale-support is not implemented (it is used in one test).
36 import b2.build.feature as feature
37 import b2.build.type as type
38 import b2.build.targets as targets
39 import b2.build.generators as generators
40 import b2.build.toolset as toolset
41 import b2.tools.common as common
42 import b2.util.option as option
43 import b2.build_system as build_system
47 from b2.manager import get_manager
48 from b2.util import stem, bjam_signature
49 from b2.util.sequence import unique
# Feature controlling the command used to launch test programs.
feature.feature("testing.launcher", [], ["free", "optional"])

# Free feature carrying human-readable information about a test (reported by
# --dump-tests).
feature.feature("test-info", [], ["free", "incidental"])
# Free feature holding extra command-line arguments passed to a test run.
feature.feature("testing.arg", [], ["free", "incidental"])
# Dependency feature naming input files to be made available to a test run.
feature.feature("testing.input-file", [], ["free", "dependency"])

# When "off", intermediate targets produced while testing may be removed once
# the test result has been recorded.
feature.feature("preserve-test-targets", ["on", "off"], ["incidental", "propagated"])

# Register target types.
# Base type for all tests; a ".test" file marks a passed test.
type.register("TEST", ["test"])
type.register("COMPILE", [], "TEST")
type.register("COMPILE_FAIL", [], "TEST")

# Output captured from running a test executable.
type.register("RUN_OUTPUT", ["run"])
type.register("RUN", [], "TEST")
type.register("RUN_FAIL", [], "TEST")

type.register("LINK", [], "TEST")
type.register("LINK_FAIL", [], "TEST")
# 'unit-test' behaves like 'run' but does not capture output; a ".passed"
# file marks success.
type.register("UNIT_TEST", ["passed"], "TEST")
84 # Declare the rules which create main targets. While the 'type' module already
85 # creates rules with the same names for us, we need extra convenience: default
86 # name of main target, so write our own versions.
88 # Helper rule. Create a test target, using basename of first source if no target
89 # name is explicitly passed. Remembers the created target in a global variable.
def make_test(target_type, sources, requirements, target_name=None):
    """Create a test main target of type named by 'target_type'.

    Uses the basename of the first source if no target name is explicitly
    passed, and remembers the created target in the module-global
    __all_tests list (consumed by --dump-tests).

    NOTE(review): several lines of the original appear to be missing from
    this chunk -- e.g. the guard that only derives target_name when it was
    not supplied, and the arguments of the alias call below. Confirm against
    the full file before editing.
    """
    # Derive a default name from the first source file's basename.
    # NOTE(review): presumably guarded by `if not target_name:` in the full
    # file -- as written this always overwrites an explicit name.
    target_name = stem(os.path.basename(sources[0]))

    # Having periods (".") in the target name is problematic because the typed
    # generator will strip the suffix and use the bare name for the file
    # targets. Even though the location-prefix averts problems most times it
    # does not prevent ambiguity issues when referring to the test targets. For
    # example when using the XML log output. So we rename the target to remove
    # the periods, and provide an alias for users.
    real_name = target_name.replace(".", "~")

    project = get_manager().projects().current()
    # The <location-prefix> forces the build system to generate paths in the
    # form '$build_dir/array1.test/gcc/debug'. This is necessary to allow
    # post-processing tools to work.
    t = get_manager().targets().create_typed_target(
        type.type_from_rule_name(target_type), project, real_name, sources,
        requirements + ["<location-prefix>" + real_name + ".test"], [], [])

    # The alias to the real target, per period replacement above.
    if real_name != target_name:
        # NOTE(review): this call is truncated in this chunk -- the alias
        # arguments are missing.
        get_manager().projects().project_rules().all_names_["alias"](

    # Remember the test (for --dump-tests). A good way would be to collect all
    # given a project. This has some technical problems: e.g. we can not call
    # this dump from a Jamfile since projects referred by 'build-project' are
    # not available until the whole Jamfile has been loaded.
    __all_tests.append(t)
# Note: passing more than one cpp file here is known to fail. Passing a cpp file
125 # and a library target works.
@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def compile(sources, requirements, target_name=None):
    """Declare a test that passes if its sources compile successfully."""
    return make_test("compile", sources, requirements, target_name)
@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def compile_fail(sources, requirements, target_name=None):
    """Declare a test that passes if compiling its sources fails."""
    return make_test("compile-fail", sources, requirements, target_name)
@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def link(sources, requirements, target_name=None):
    """Declare a test that passes if its sources compile and link."""
    return make_test("link", sources, requirements, target_name)
@bjam_signature((["sources", "*"], ["requirements", "*"], ["target_name", "?"]))
def link_fail(sources, requirements, target_name=None):
    """Declare a test that passes if linking its sources fails."""
    return make_test("link-fail", sources, requirements, target_name)
def handle_input_files(input_files):
    """Return a <testing.input-file> property for each name in 'input_files'.

    When several input files are given they must already be listed in
    alphabetical order: property-set creation sorts properties, and an
    unsorted list would silently change the file ordering, so it is
    reported as an error instead.
    """
    if len(input_files) > 1 and sorted(input_files) != input_files:
        get_manager().errors()("Names of input files must be sorted alphabetically\n" +
                               "due to internal limitations")
    return ["<testing.input-file>" + f for f in input_files]
@bjam_signature((["sources", "*"], ["args", "*"], ["input_files", "*"],
                 ["requirements", "*"], ["target_name", "?"],
                 ["default_build", "*"]))
def run(sources, args, input_files, requirements, target_name=None, default_build=[]):
    """Declare a test that passes if the executable built from 'sources' runs
    successfully; the program's output is kept in a .output file.

    'args' are passed on the test command line via <testing.arg>, and
    'input_files' are made available to the run via <testing.input-file>.
    """
    # Only add a <testing.arg> property when arguments were actually given;
    # appending unconditionally would create a property with an empty value.
    if args:
        requirements.append("<testing.arg>" + " ".join(args))
    requirements.extend(handle_input_files(input_files))
    return make_test("run", sources, requirements, target_name)
@bjam_signature((["sources", "*"], ["args", "*"], ["input_files", "*"],
                 ["requirements", "*"], ["target_name", "?"],
                 ["default_build", "*"]))
def run_fail(sources, args, input_files, requirements, target_name=None, default_build=[]):
    """Declare a test that passes if running the executable built from
    'sources' fails.

    'args' are passed on the test command line via <testing.arg>, and
    'input_files' are made available to the run via <testing.input-file>.
    """
    # Only add a <testing.arg> property when arguments were actually given;
    # appending unconditionally would create a property with an empty value.
    if args:
        requirements.append("<testing.arg>" + " ".join(args))
    requirements.extend(handle_input_files(input_files))
    return make_test("run-fail", sources, requirements, target_name)
# Register all the rules
# Project rules use dashes in their names; the Python implementations above
# use underscores, hence the replace() when looking them up.
# NOTE(review): relies on 'sys' being imported earlier in the file (the
# import is not visible in this chunk).
for name in ["compile", "compile-fail", "link", "link-fail", "run", "run-fail"]:
    get_manager().projects().add_rule(name, getattr(sys.modules[__name__], name.replace("-", "_")))

# Use 'test-suite' as a synonym for 'alias', for backward compatibility.
from b2.build.alias import alias
get_manager().projects().add_rule("test-suite", alias)
# For all main targets in 'project-module', which are typed targets with type
# derived from 'TEST', produce some interesting information.
# NOTE(review): the enclosing function definition (presumably dump_tests,
# referenced at the bottom of the file) and the loop body are missing from
# this chunk -- confirm against the full file.
for t in __all_tests:
# Given a project location in normalized form (slashes are forward), compute the
# name of the Boost library.
# Patterns tried, in order of preference: a test/example directory under
# tools/ or libs/, any directory under tools/ or libs/, and a "/status"
# suffix. NOTE(review): relies on 're' being imported earlier in the file.
__ln1 = re.compile("/(tools|libs)/(.*)/(test|example)")
__ln2 = re.compile("/(tools|libs)/(.*)$")
__ln3 = re.compile("(/status$)")
def get_library_name(path):
    """Compute the Boost library name from a normalized project location.

    NOTE(review): several lines are missing from this chunk (the 'if match1:'
    / 'elif match2:' / 'elif match3:' guards and the fallback return), so the
    control flow below is incomplete as shown -- confirm against the full
    file before editing.
    """
    # Normalize to forward slashes so the patterns also match Windows paths.
    path = path.replace("\\", "/")
    match1 = __ln1.match(path)
    match2 = __ln2.match(path)
    match3 = __ln3.match(path)

    return match1.group(2)

    return match2.group(2)

    elif option.get("dump-tests", False, True):
        # The 'run' rule and others might be used outside boost. In that case,
        # just return the path, since the 'library name' makes no sense.
# Was an XML dump requested? (--out-xml is not ported yet; see the status
# note at the top of the file.)
__out_xml = option.get("out-xml", False, True)
# Takes a target (instance of 'basic-target') and prints
#  - comments specified via the <test-info> property
#  - relative location of all source from the project root.
def dump_test(target):
    """Print a 'boost-test(...)' description line for 'target' (--dump-tests).

    NOTE(review): multiple lines are missing from this chunk (e.g. the
    initialization of 'name', 'type' and 'source_files', the loop header over
    'sources', and the if/else around the two print statements). The print
    statements use Python 2 syntax. Confirm against the full file before
    editing.
    """
    project = target.project()

    project_root = project.get('project-root')
    library = get_library_name(os.path.abspath(project.get('location')))

    # Qualify the test name with the library it belongs to.
    name = library + "/" + name

    sources = target.sources()

    # Collect source file paths relative to the project root.
    if isinstance(s, targets.FileReference):
        location = os.path.abspath(os.path.join(s.location(), s.name()))
        source_files.append(os.path.relpath(location, os.path.abspath(project_root)))

    target_name = project.get('location') + "//" + target.name() + ".test"

    test_info = target.requirements().get('test-info')
    test_info = " ".join('"' + ti + '"' for ti in test_info)

    # If the user requested XML output on the command-line, add the test info to
    # that XML file rather than dumping them to stdout.
    # (Unported Jam original kept for reference:)
    # .contents on $(.out-xml) +=
    #     "$(nl)    <test type=\"$(type)\" name=\"$(name)\">"
    #     "$(nl)      <target><![CDATA[$(target-name)]]></target>"
    #     "$(nl)      <info><![CDATA[$(test-info)]]></info>"
    #     "$(nl)      <source><![CDATA[$(source-files)]]></source>"

    source_files = " ".join('"' + s + '"' for s in source_files)

    print 'boost-test(%s) "%s" [%s] : %s' % (type, name, test_info, source_files)

    print 'boost-test(%s) "%s" : %s' % (type, name, source_files)
# Register generators. Depending on target type, either 'expect-success' or
# 'expect-failure' rule will be used.
generators.register_standard("testing.expect-success", ["OBJ"], ["COMPILE"])
generators.register_standard("testing.expect-failure", ["OBJ"], ["COMPILE_FAIL"])
generators.register_standard("testing.expect-success", ["RUN_OUTPUT"], ["RUN"])
generators.register_standard("testing.expect-failure", ["RUN_OUTPUT"], ["RUN_FAIL"])
generators.register_standard("testing.expect-success", ["EXE"], ["LINK"])
generators.register_standard("testing.expect-failure", ["EXE"], ["LINK_FAIL"])

# Generator which runs an EXE and captures output.
generators.register_standard("testing.capture-output", ["EXE"], ["RUN_OUTPUT"])

# Generator which creates a target if sources run successfully. Differs from RUN
# in that run output is not captured. The reason why it exists is that the 'run'
# rule is much better for automated testing, but is not user-friendly (see
# http://article.gmane.org/gmane.comp.lib.boost.build/6353).
generators.register_standard("testing.unit-test", ["EXE"], ["UNIT_TEST"])

# Map the free testing features onto the variables the Jam actions read.
# FIXME: if those calls are after bjam.call, then bjam will crash
# when toolset.flags calls bjam.caller.
toolset.flags("testing.capture-output", "ARGS", [], ["<testing.arg>"])
toolset.flags("testing.capture-output", "INPUT_FILES", [], ["<testing.input-file>"])
toolset.flags("testing.capture-output", "LAUNCHER", [], ["<testing.launcher>"])

toolset.flags("testing.unit-test", "LAUNCHER", [], ["<testing.launcher>"])
toolset.flags("testing.unit-test", "ARGS", [], ["<testing.arg>"])

# The TIME type/generator records timing information for building a target.
type.register("TIME", ["time"])
generators.register_standard("testing.time", [], ["TIME"])
# The following code sets up actions for this module. It is pretty convoluted,
# but the basic point is that most of the actions are defined by Jam code
# contained in testing-aux.jam, which we load into a Jam module named 'testing'.
def run_path_setup(target, sources, ps):
    """Set the PATH_SETUP variable on 'target' so that all dynamic libraries
    needed by the test can be found when it is run.

    Collects all paths from dependency libraries (via the xdll-path
    property) plus whatever explicit dll-path the user has specified, and
    stores a command that prepends them to the shared-library path
    environment variable on each test invocation.
    """
    # Copy the list returned by the property set before extending it, so we
    # do not mutate state owned (and possibly cached) by the property set.
    dll_paths = list(ps.get('dll-path'))
    dll_paths.extend(ps.get('xdll-path'))
    dll_paths.extend(bjam.call("get-target-variable", sources, "RUN_PATH"))
    # Deduplicate while preserving first-occurrence order.
    dll_paths = unique(dll_paths)

    bjam.call("set-target-variable", target, "PATH_SETUP",
              common.prepend_path_variable_command(
                  common.shared_library_path_variable(), dll_paths))
def capture_output_setup(target, sources, ps):
    """Per-target setup for the capture-output action: configure the runtime
    library search path and, when test targets are not being preserved, ask
    the action to clean up intermediate test targets afterwards."""
    run_path_setup(target, sources, ps)

    preserve = ps.get('preserve-test-targets')
    if preserve == ['off']:
        bjam.call("set-target-variable", target, "REMOVE_TEST_TARGETS", "1")
# Run capture_output_setup on each target just before the capture-output
# action is invoked for it.
get_manager().engine().register_bjam_action("testing.capture-output",
                                            capture_output_setup)

# Directory containing this module, so testing-aux.jam can be loaded from it.
path = os.path.dirname(get_manager().projects().loaded_tool_module_path_[__name__])

# Make selected Python-side rules callable from the Jam 'testing' module.
# NOTE(review): the call below is truncated in this chunk -- its final
# argument is missing. Confirm against the full file.
get_manager().projects().project_rules()._import_rule("testing", "os.name",

import b2.tools.common
get_manager().projects().project_rules()._import_rule("testing", "common.rm-command",
                                                      b2.tools.common.rm_command)
get_manager().projects().project_rules()._import_rule("testing", "common.file-creation-command",
                                                      b2.tools.common.file_creation_command)

# Load the Jam code that defines the actual testing actions.
bjam.call("load", "testing", os.path.join(path, "testing-aux.jam"))

# These actions need no Python-side setup function.
for name in ["expect-success", "expect-failure", "time"]:
    get_manager().engine().register_bjam_action("testing." + name)

# NOTE(review): truncated call -- the setup-function argument is missing from
# this chunk.
get_manager().engine().register_bjam_action("testing.unit-test",

# If --dump-tests was given, print test descriptions just before the build.
if option.get("dump-tests", False, True):
    build_system.add_pre_build_hook(dump_tests)