X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=glib%2Fgtestutils.c;h=e67078d53243f693dca3d24baa7e5d7fb64fea3c;hb=49b59e5ac4428a6a99a85d699c3662f96efc4e9d;hp=9ddad8846af79bf5533824f19cdedd45721a4010;hpb=62e68ceec8cfad06fffb00fd23ccaad3fc0f476e;p=platform%2Fupstream%2Fglib.git diff --git a/glib/gtestutils.c b/glib/gtestutils.c index 9ddad88..e67078d 100644 --- a/glib/gtestutils.c +++ b/glib/gtestutils.c @@ -13,29 +13,31 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * License along with this library; if not, see . */ #include "config.h" #include "gtestutils.h" +#include "gfileutils.h" #include #ifdef G_OS_UNIX #include #include #include +#include +#include #endif #include #include #include -#ifdef HAVE_UNISTD_H -#include +#ifdef HAVE_SYS_RESOURCE_H +#include #endif #ifdef G_OS_WIN32 #include +#include #endif #include #include @@ -48,14 +50,495 @@ #include "grand.h" #include "gstrfuncs.h" #include "gtimer.h" +#include "gslice.h" +#include "gspawn.h" +#include "glib-private.h" + + +/** + * SECTION:testing + * @title: Testing + * @short_description: a test framework + * @see_also: [gtester][gtester], [gtester-report][gtester-report] + * + * GLib provides a framework for writing and maintaining unit tests + * in parallel to the code they are testing. The API is designed according + * to established concepts found in the other test frameworks (JUnit, NUnit, + * RUnit), which in turn is based on smalltalk unit testing concepts. + * + * - Test case: Tests (test methods) are grouped together with their + * fixture into test cases. + * + * - Fixture: A test fixture consists of fixture data and setup and + * teardown methods to establish the environment for the test + * functions. We use fresh fixtures, i.e. fixtures are newly set + * up and torn down around each test invocation to avoid dependencies + * between tests. + * + * - Test suite: Test cases can be grouped into test suites, to allow + * subsets of the available tests to be run. Test suites can be + * grouped into other test suites as well. + * + * The API is designed to handle creation and registration of test suites + * and test cases implicitly. A simple call like + * |[ + * g_test_add_func ("/misc/assertions", test_assertions); + * ]| + * creates a test suite called "misc" with a single test case named + * "assertions", which consists of running the test_assertions function. + * + * In addition to the traditional g_assert(), the test framework provides + * an extended set of assertions for string and numerical comparisons: + * g_assert_cmpfloat(), g_assert_cmpint(), g_assert_cmpuint(), + * g_assert_cmphex(), g_assert_cmpstr(). The advantage of these variants + * over plain g_assert() is that the assertion messages can be more + * elaborate, and include the values of the compared entities. + * + * GLib ships with two utilities called gtester and gtester-report to + * facilitate running tests and producing nicely formatted test reports. + */ + +/** + * g_test_initialized: + * + * Returns %TRUE if g_test_init() has been called. + * + * Returns: %TRUE if g_test_init() has been called. + * + * Since: 2.36 + */ + +/** + * g_test_quick: + * + * Returns %TRUE if tests are run in quick mode. + * Exactly one of g_test_quick() and g_test_slow() is active in any run; + * there is no "medium speed". 
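As an illustrative aside (not taken from this patch; the test path and test body are invented), the workflow described in the section overview above, together with the quick/slow modes documented here, typically looks like:
|[
#include <glib.h>

static void
test_rand_stability (void)
{
  /* Run many more iterations in slow/thorough mode than in quick mode. */
  guint i, n = g_test_quick () ? 100 : 100000;

  for (i = 0; i < n; i++)
    g_assert_cmpint (g_test_rand_int_range (0, 10), <, 10);
}

int
main (int argc, char *argv[])
{
  g_test_init (&argc, &argv, NULL);
  g_test_add_func ("/misc/rand-stability", test_rand_stability);
  return g_test_run ();
}
]|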
+ * + * Returns: %TRUE if in quick mode + */ + +/** + * g_test_slow: + * + * Returns %TRUE if tests are run in slow mode. + * Exactly one of g_test_quick() and g_test_slow() is active in any run; + * there is no "medium speed". + * + * Returns: the opposite of g_test_quick() + */ + +/** + * g_test_thorough: + * + * Returns %TRUE if tests are run in thorough mode, equivalent to + * g_test_slow(). + * + * Returns: the same thing as g_test_slow() + */ + +/** + * g_test_perf: + * + * Returns %TRUE if tests are run in performance mode. + * + * Returns: %TRUE if in performance mode + */ + +/** + * g_test_undefined: + * + * Returns %TRUE if tests may provoke assertions and other formally-undefined + * behaviour, to verify that appropriate warnings are given. It might, in some + * cases, be useful to turn this off if running tests under valgrind. + * + * Returns: %TRUE if tests may provoke programming errors + */ + +/** + * g_test_verbose: + * + * Returns %TRUE if tests are run in verbose mode. + * The default is neither g_test_verbose() nor g_test_quiet(). + * + * Returns: %TRUE if in verbose mode + */ + +/** + * g_test_quiet: + * + * Returns %TRUE if tests are run in quiet mode. + * The default is neither g_test_verbose() nor g_test_quiet(). + * + * Returns: %TRUE if in quiet mode + */ + +/** + * g_test_queue_unref: + * @gobject: the object to unref + * + * Enqueue an object to be released with g_object_unref() during + * the next teardown phase. This is equivalent to calling + * g_test_queue_destroy() with a destroy callback of g_object_unref(). + * + * Since: 2.16 + */ + +/** + * GTestTrapFlags: + * @G_TEST_TRAP_SILENCE_STDOUT: Redirect stdout of the test child to + * `/dev/null` so it cannot be observed on the console during test + * runs. The actual output is still captured though to allow later + * tests with g_test_trap_assert_stdout(). + * @G_TEST_TRAP_SILENCE_STDERR: Redirect stderr of the test child to + * `/dev/null` so it cannot be observed on the console during test + * runs. The actual output is still captured though to allow later + * tests with g_test_trap_assert_stderr(). + * @G_TEST_TRAP_INHERIT_STDIN: If this flag is given, stdin of the + * child process is shared with stdin of its parent process. + * It is redirected to `/dev/null` otherwise. + * + * Test traps are guards around forked tests. + * These flags determine what traps to set. + * + * Deprecated: #GTestTrapFlags is used only with g_test_trap_fork(), + * which is deprecated. g_test_trap_subprocess() uses + * #GTestTrapSubprocessFlags. + */ + +/** + * GTestSubprocessFlags: + * @G_TEST_SUBPROCESS_INHERIT_STDIN: If this flag is given, the child + * process will inherit the parent's stdin. Otherwise, the child's + * stdin is redirected to `/dev/null`. + * @G_TEST_SUBPROCESS_INHERIT_STDOUT: If this flag is given, the child + * process will inherit the parent's stdout. Otherwise, the child's + * stdout will not be visible, but it will be captured to allow + * later tests with g_test_trap_assert_stdout(). + * @G_TEST_SUBPROCESS_INHERIT_STDERR: If this flag is given, the child + * process will inherit the parent's stderr. Otherwise, the child's + * stderr will not be visible, but it will be captured to allow + * later tests with g_test_trap_assert_stderr(). + * + * Flags to pass to g_test_trap_subprocess() to control input and output. + * + * Note that in contrast with g_test_trap_fork(), the default is to + * not show stdout and stderr. 
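An illustrative sketch (not part of this patch; the error message and stderr pattern are invented, and g_test_subprocess() is assumed to be available as in GLib 2.38) of the subprocess pattern these flags and the trap assertions below are used with:
|[
static void
test_abort_on_bad_input (void)
{
  if (g_test_subprocess ())
    {
      /* This branch runs in the child: provoke the fatal error. */
      g_error ("bad input");
      return;
    }

  /* Parent: re-run this test in a subprocess and inspect the result. */
  g_test_trap_subprocess (NULL, 0, 0);
  g_test_trap_assert_failed ();
  g_test_trap_assert_stderr ("*bad input*");
}
]|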
+ */ + +/** + * g_test_trap_assert_passed: + * + * Assert that the last test subprocess passed. + * See g_test_trap_subprocess(). + * + * Since: 2.16 + */ + +/** + * g_test_trap_assert_failed: + * + * Assert that the last test subprocess failed. + * See g_test_trap_subprocess(). + * + * This is sometimes used to test situations that are formally considered to + * be undefined behaviour, like inputs that fail a g_return_if_fail() + * check. In these situations you should skip the entire test, including the + * call to g_test_trap_subprocess(), unless g_test_undefined() returns %TRUE + * to indicate that undefined behaviour may be tested. + * + * Since: 2.16 + */ + +/** + * g_test_trap_assert_stdout: + * @soutpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] + * + * Assert that the stdout output of the last test subprocess matches + * @soutpattern. See g_test_trap_subprocess(). + * + * Since: 2.16 + */ + +/** + * g_test_trap_assert_stdout_unmatched: + * @soutpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] + * + * Assert that the stdout output of the last test subprocess + * does not match @soutpattern. See g_test_trap_subprocess(). + * + * Since: 2.16 + */ + +/** + * g_test_trap_assert_stderr: + * @serrpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] + * + * Assert that the stderr output of the last test subprocess + * matches @serrpattern. See g_test_trap_subprocess(). + * + * This is sometimes used to test situations that are formally + * considered to be undefined behaviour, like code that hits a + * g_assert() or g_error(). In these situations you should skip the + * entire test, including the call to g_test_trap_subprocess(), unless + * g_test_undefined() returns %TRUE to indicate that undefined + * behaviour may be tested. + * + * Since: 2.16 + */ + +/** + * g_test_trap_assert_stderr_unmatched: + * @serrpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] + * + * Assert that the stderr output of the last test subprocess + * does not match @serrpattern. See g_test_trap_subprocess(). + * + * Since: 2.16 + */ + +/** + * g_test_rand_bit: + * + * Get a reproducible random bit (0 or 1), see g_test_rand_int() + * for details on test case random numbers. + * + * Since: 2.16 + */ + +/** + * g_assert: + * @expr: the expression to check + * + * Debugging macro to terminate the application if the assertion + * fails. If the assertion fails (i.e. the expression is not true), + * an error message is logged and the application is terminated. + * + * The macro can be turned off in final releases of code by defining + * `G_DISABLE_ASSERT` when compiling the application. + */ + +/** + * g_assert_not_reached: + * + * Debugging macro to terminate the application if it is ever + * reached. If it is reached, an error message is logged and the + * application is terminated. + * + * The macro can be turned off in final releases of code by defining + * `G_DISABLE_ASSERT` when compiling the application. + */ + +/** + * g_assert_true: + * @expr: the expression to check + * + * Debugging macro to check that an expression is true. + * + * If the assertion fails (i.e. the expression is not true), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.38 + */ + +/** + * g_assert_false: + * @expr: the expression to check + * + * Debugging macro to check an expression is false. + * + * If the assertion fails (i.e. 
the expression is not false), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.38 + */ + +/** + * g_assert_null: + * @expr: the expression to check + * + * Debugging macro to check an expression is %NULL. + * + * If the assertion fails (i.e. the expression is not %NULL), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.38 + */ + +/** + * g_assert_nonnull: + * @expr: the expression to check + * + * Debugging macro to check an expression is not %NULL. + * + * If the assertion fails (i.e. the expression is %NULL), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.40 + */ + +/** + * g_assert_cmpstr: + * @s1: a string (may be %NULL) + * @cmp: The comparison operator to use. + * One of ==, !=, <, >, <=, >=. + * @s2: another string (may be %NULL) + * + * Debugging macro to compare two strings. If the comparison fails, + * an error message is logged and the application is either terminated + * or the testcase marked as failed. + * The strings are compared using g_strcmp0(). + * + * The effect of `g_assert_cmpstr (s1, op, s2)` is + * the same as `g_assert_true (g_strcmp0 (s1, s2) op 0)`. + * The advantage of this macro is that it can produce a message that + * includes the actual values of @s1 and @s2. + * + * |[ + * g_assert_cmpstr (mystring, ==, "fubar"); + * ]| + * + * Since: 2.16 + */ + +/** + * g_assert_cmpint: + * @n1: an integer + * @cmp: The comparison operator to use. + * One of ==, !=, <, >, <=, >=. + * @n2: another integer + * + * Debugging macro to compare two integers. + * + * The effect of `g_assert_cmpint (n1, op, n2)` is + * the same as `g_assert_true (n1 op n2)`. The advantage + * of this macro is that it can produce a message that includes the + * actual values of @n1 and @n2. + * + * Since: 2.16 + */ + +/** + * g_assert_cmpuint: + * @n1: an unsigned integer + * @cmp: The comparison operator to use. + * One of ==, !=, <, >, <=, >=. + * @n2: another unsigned integer + * + * Debugging macro to compare two unsigned integers. + * + * The effect of `g_assert_cmpuint (n1, op, n2)` is + * the same as `g_assert_true (n1 op n2)`. The advantage + * of this macro is that it can produce a message that includes the + * actual values of @n1 and @n2. + * + * Since: 2.16 + */ + +/** + * g_assert_cmphex: + * @n1: an unsigned integer + * @cmp: The comparison operator to use. + * One of ==, !=, <, >, <=, >=. + * @n2: another unsigned integer + * + * Debugging macro to compare to unsigned integers. + * + * This is a variant of g_assert_cmpuint() that displays the numbers + * in hexadecimal notation in the message. + * + * Since: 2.16 + */ + +/** + * g_assert_cmpfloat: + * @n1: an floating point number + * @cmp: The comparison operator to use. + * One of ==, !=, <, >, <=, >=. + * @n2: another floating point number + * + * Debugging macro to compare two floating point numbers. + * + * The effect of `g_assert_cmpfloat (n1, op, n2)` is + * the same as `g_assert_true (n1 op n2)`. The advantage + * of this macro is that it can produce a message that includes the + * actual values of @n1 and @n2. 
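For illustration (variable names invented), the comparison macros documented above read naturally in test code and, unlike plain g_assert(), report both operands when they fail:
|[
static void
test_comparisons (void)
{
  const gchar *name = "fubar";
  guint flags = 0xff;
  double ratio = 0.5;

  g_assert_cmpstr (name, ==, "fubar");
  g_assert_cmpint (-3, <, 0);
  g_assert_cmpuint (16u, >=, 8u);
  g_assert_cmphex (flags, ==, 0xff);
  g_assert_cmpfloat (ratio, <, 1.0);
}
]|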
+ * + * Since: 2.16 + */ + +/** + * g_assert_no_error: + * @err: a #GError, possibly %NULL + * + * Debugging macro to check that a #GError is not set. + * + * The effect of `g_assert_no_error (err)` is + * the same as `g_assert_true (err == NULL)`. The advantage + * of this macro is that it can produce a message that includes + * the error message and code. + * + * Since: 2.20 + */ + +/** + * g_assert_error: + * @err: a #GError, possibly %NULL + * @dom: the expected error domain (a #GQuark) + * @c: the expected error code + * + * Debugging macro to check that a method has returned + * the correct #GError. + * + * The effect of `g_assert_error (err, dom, c)` is + * the same as `g_assert_true (err != NULL && err->domain + * == dom && err->code == c)`. The advantage of this + * macro is that it can produce a message that includes the incorrect + * error message and code. + * + * This can only be used to test for a specific error. If you want to + * test that @err is set, but don't care what it's set to, just use + * `g_assert (err != NULL)` + * + * Since: 2.20 + */ + +/** + * GTestCase: + * + * An opaque structure representing a test case. + */ + +/** + * GTestSuite: + * + * An opaque structure representing a test suite. + */ + - /* Global variable for storing assertion messages; this is the counterpart to * glibc's (private) __abort_msg variable, and allows developers and crash * analysis systems like Apport and ABRT to fish out assertion messages from - * core dumps, instead of having to catch them on screen output. */ + * core dumps, instead of having to catch them on screen output. + */ +GLIB_VAR char *__glib_assert_msg; char *__glib_assert_msg = NULL; +/* --- constants --- */ +#define G_TEST_STATUS_TIMED_OUT 1024 + /* --- structures --- */ struct GTestCase { @@ -91,6 +574,13 @@ static void gtest_default_log_handler (const gchar *log_domain, gpointer unused_data); +typedef enum { + G_TEST_RUN_SUCCESS, + G_TEST_RUN_SKIPPED, + G_TEST_RUN_FAILURE, + G_TEST_RUN_INCOMPLETE +} GTestResult; + /* --- variables --- */ static int test_log_fd = -1; static gboolean test_mode_fatal = TRUE; @@ -99,28 +589,44 @@ static gboolean test_run_list = FALSE; static gchar *test_run_seedstr = NULL; static GRand *test_run_rand = NULL; static gchar *test_run_name = ""; +static GSList **test_filename_free_list; static guint test_run_forks = 0; static guint test_run_count = 0; -static guint test_skip_count = 0; +static guint test_skipped_count = 0; +static GTestResult test_run_success = G_TEST_RUN_FAILURE; +static gchar *test_run_msg = NULL; +static guint test_startup_skip_count = 0; static GTimer *test_user_timer = NULL; static double test_user_stamp = 0; static GSList *test_paths = NULL; +static GSList *test_paths_skipped = NULL; static GTestSuite *test_suite_root = NULL; static int test_trap_last_status = 0; -static int test_trap_last_pid = 0; +static GPid test_trap_last_pid = 0; +static char *test_trap_last_subprocess = NULL; static char *test_trap_last_stdout = NULL; static char *test_trap_last_stderr = NULL; static char *test_uri_base = NULL; static gboolean test_debug_log = FALSE; +static gboolean test_tap_log = FALSE; +static gboolean test_nonfatal_assertions = FALSE; static DestroyEntry *test_destroy_queue = NULL; +static char *test_argv0 = NULL; +static char *test_argv0_dirname; +static const char *test_disted_files_dir; +static const char *test_built_files_dir; +static char *test_initial_cwd = NULL; +static gboolean test_in_subprocess = FALSE; static GTestConfig mutable_test_config_vars = { FALSE, /* 
test_initialized */ TRUE, /* test_quick */ FALSE, /* test_perf */ FALSE, /* test_verbose */ FALSE, /* test_quiet */ + TRUE, /* test_undefined */ }; const GTestConfig * const g_test_config_vars = &mutable_test_config_vars; +static gboolean no_g_set_prgname = FALSE; /* --- functions --- */ const char* @@ -138,6 +644,8 @@ g_test_log_type_name (GTestLogType log_type) case G_TEST_LOG_MIN_RESULT: return "minperf"; case G_TEST_LOG_MAX_RESULT: return "maxperf"; case G_TEST_LOG_MESSAGE: return "message"; + case G_TEST_LOG_START_SUITE: return "start suite"; + case G_TEST_LOG_STOP_SUITE: return "stop suite"; } return "???"; } @@ -171,7 +679,12 @@ g_test_log_send (guint n_bytes, { g_printerr (":("); for (ui = 0; ui < msg->n_nums; ui++) - g_printerr ("%s%.16Lg", ui ? ";" : "", msg->nums[ui]); + { + if ((long double) (long) msg->nums[ui] == msg->nums[ui]) + g_printerr ("%s%ld", ui ? ";" : "", (long) msg->nums[ui]); + else + g_printerr ("%s%.16g", ui ? ";" : "", (double) msg->nums[ui]); + } g_printerr (")"); } g_printerr (":LOG*}\n"); @@ -186,7 +699,7 @@ g_test_log (GTestLogType lbit, guint n_args, long double *largs) { - gboolean fail = lbit == G_TEST_LOG_STOP_CASE && largs[0] != 0; + gboolean fail; GTestLogMsg msg; gchar *astrings[3] = { NULL, NULL, NULL }; guint8 *dbuffer; @@ -195,27 +708,69 @@ g_test_log (GTestLogType lbit, switch (lbit) { case G_TEST_LOG_START_BINARY: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# random seed: %s\n", string2); + else if (g_test_verbose()) g_print ("GTest: random seed: %s\n", string2); break; + case G_TEST_LOG_START_SUITE: + if (test_tap_log) + { + if (string1[0] != 0) + g_print ("# Start of %s tests\n", string1); + } + break; + case G_TEST_LOG_STOP_SUITE: + if (test_tap_log) + { + if (string1[0] != 0) + g_print ("# End of %s tests\n", string1); + else + g_print ("1..%d\n", test_run_count); + } + break; case G_TEST_LOG_STOP_CASE: - if (g_test_verbose()) + fail = largs[0] != G_TEST_RUN_SUCCESS && largs[0] != G_TEST_RUN_SKIPPED; + if (test_tap_log) + { + g_print ("%s %d %s", fail ? "not ok" : "ok", test_run_count, string1); + if (largs[0] == G_TEST_RUN_INCOMPLETE) + g_print (" # TODO %s\n", string2 ? string2 : ""); + else if (largs[0] == G_TEST_RUN_SKIPPED) + g_print (" # SKIP %s\n", string2 ? string2 : ""); + else + g_print ("\n"); + } + else if (g_test_verbose()) g_print ("GTest: result: %s\n", fail ? "FAIL" : "OK"); else if (!g_test_quiet()) g_print ("%s\n", fail ? 
"FAIL" : "OK"); if (fail && test_mode_fatal) - abort(); + { + if (test_tap_log) + g_print ("Bail out!\n"); + abort(); + } + if (largs[0] == G_TEST_RUN_SKIPPED) + test_skipped_count++; break; case G_TEST_LOG_MIN_RESULT: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# min perf: %s\n", string1); + else if (g_test_verbose()) g_print ("(MINPERF:%s)\n", string1); break; case G_TEST_LOG_MAX_RESULT: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# max perf: %s\n", string1); + else if (g_test_verbose()) g_print ("(MAXPERF:%s)\n", string1); break; case G_TEST_LOG_MESSAGE: - if (g_test_verbose()) + case G_TEST_LOG_ERROR: + if (test_tap_log) + g_print ("# %s\n", string1); + else if (g_test_verbose()) g_print ("(MSG: %s)\n", string1); break; default: ; @@ -235,7 +790,9 @@ g_test_log (GTestLogType lbit, switch (lbit) { case G_TEST_LOG_START_CASE: - if (g_test_verbose()) + if (test_tap_log) + ; + else if (g_test_verbose()) g_print ("GTest: run: %s\n", string1); else if (!g_test_quiet()) g_print ("%s: ", string1); @@ -254,6 +811,10 @@ parse_args (gint *argc_p, guint argc = *argc_p; gchar **argv = *argv_p; guint i, e; + + test_argv0 = argv[0]; + test_initial_cwd = g_get_current_dir (); + /* parse known args */ for (i = 1; i < argc; i++) { @@ -275,6 +836,11 @@ parse_args (gint *argc_p, test_debug_log = TRUE; argv[i] = NULL; } + else if (strcmp (argv[i], "--tap") == 0) + { + test_tap_log = TRUE; + argv[i] = NULL; + } else if (strcmp ("--GTestLogFD", argv[i]) == 0 || strncmp ("--GTestLogFD=", argv[i], 13) == 0) { gchar *equal = argv[i] + 12; @@ -291,14 +857,29 @@ parse_args (gint *argc_p, { gchar *equal = argv[i] + 16; if (*equal == '=') - test_skip_count = g_ascii_strtoull (equal + 1, NULL, 0); + test_startup_skip_count = g_ascii_strtoull (equal + 1, NULL, 0); else if (i + 1 < argc) { argv[i++] = NULL; - test_skip_count = g_ascii_strtoull (argv[i], NULL, 0); + test_startup_skip_count = g_ascii_strtoull (argv[i], NULL, 0); } argv[i] = NULL; } + else if (strcmp ("--GTestSubprocess", argv[i]) == 0) + { + test_in_subprocess = TRUE; + /* We typically expect these child processes to crash, and some + * tests spawn a *lot* of them. Avoid spamming system crash + * collection programs such as systemd-coredump and abrt. 
+ */ +#ifdef HAVE_SYS_RESOURCE_H + { + struct rlimit limit = { 0, 0 }; + (void) setrlimit (RLIMIT_CORE, &limit); + } +#endif + argv[i] = NULL; + } else if (strcmp ("-p", argv[i]) == 0 || strncmp ("-p=", argv[i], 3) == 0) { gchar *equal = argv[i] + 2; @@ -311,6 +892,18 @@ parse_args (gint *argc_p, } argv[i] = NULL; } + else if (strcmp ("-s", argv[i]) == 0 || strncmp ("-s=", argv[i], 3) == 0) + { + gchar *equal = argv[i] + 2; + if (*equal == '=') + test_paths_skipped = g_slist_prepend (test_paths_skipped, equal + 1); + else if (i + 1 < argc) + { + argv[i++] = NULL; + test_paths_skipped = g_slist_prepend (test_paths_skipped, argv[i]); + } + argv[i] = NULL; + } else if (strcmp ("-m", argv[i]) == 0 || strncmp ("-m=", argv[i], 3) == 0) { gchar *equal = argv[i] + 2; @@ -333,6 +926,10 @@ parse_args (gint *argc_p, mutable_test_config_vars.test_quick = TRUE; mutable_test_config_vars.test_perf = FALSE; } + else if (strcmp (mode, "undefined") == 0) + mutable_test_config_vars.test_undefined = TRUE; + else if (strcmp (mode, "no-undefined") == 0) + mutable_test_config_vars.test_undefined = FALSE; else g_error ("unknown test mode: -m %s", mode); argv[i] = NULL; @@ -366,24 +963,25 @@ parse_args (gint *argc_p, } argv[i] = NULL; } - else if (strcmp ("-?", argv[i]) == 0 || strcmp ("--help", argv[i]) == 0) + else if (strcmp ("-?", argv[i]) == 0 || + strcmp ("-h", argv[i]) == 0 || + strcmp ("--help", argv[i]) == 0) { printf ("Usage:\n" " %s [OPTION...]\n\n" "Help Options:\n" - " -?, --help Show help options\n" + " -h, --help Show help options\n\n" "Test Options:\n" + " --g-fatal-warnings Make all warnings fatal\n" " -l List test cases available in a test executable\n" - " -seed=RANDOMSEED Provide a random seed to reproduce test\n" - " runs using random numbers\n" - " --verbose Run tests verbosely\n" - " -q, --quiet Run tests quietly\n" - " -p TESTPATH execute all tests matching TESTPATH\n" - " -m {perf|slow|thorough|quick} Execute tests according modes\n" + " -m {perf|slow|thorough|quick} Execute tests according to mode\n" + " -m {undefined|no-undefined} Execute tests according to mode\n" + " -p TESTPATH Only start test cases matching TESTPATH\n" + " -s TESTPATH Skip all tests matching TESTPATH\n" + " -seed=SEEDSTRING Start tests with random seed SEEDSTRING\n" " --debug-log debug test logging output\n" - " -k, --keep-going gtester-specific argument\n" - " --GTestLogFD=N gtester-specific argument\n" - " --GTestSkipCount=N gtester-specific argument\n", + " -q, --quiet Run tests quietly\n" + " --verbose Run tests verbosely\n", argv[0]); exit (0); } @@ -406,83 +1004,39 @@ parse_args (gint *argc_p, * Changed if any arguments were handled. * @argv: Address of the @argv parameter of main(). * Any parameters understood by g_test_init() stripped before return. - * @Varargs: Reserved for future extension. Currently, you must pass %NULL. + * @...: %NULL-terminated list of special options. Currently the only + * defined option is `"no_g_set_prgname"`, which + * will cause g_test_init() to not call g_set_prgname(). * * Initialize the GLib testing framework, e.g. by seeding the * test random number generator, the name for g_get_prgname() * and parsing test related command line args. + * * So far, the following arguments are understood: - * - * - * - * - * list test cases available in a test executable. - * - * - * - * - * - * provide a random seed to reproduce test runs using random numbers. - * - * - * - * - * run tests verbosely. - * - * - * , - * run tests quietly. - * - * - * - * - * execute all tests matching TESTPATH. 
- * - * - * - * - * - * execute tests according to these test modes: - * - * - * perf - * - * performance tests, may take long and report results. - * - * - * - * slow, thorough - * - * slow and thorough tests, may take quite long and - * maximize coverage. - * - * - * - * quick - * - * quick tests, should run really quickly and give good coverage. - * - * - * - * - * - * - * - * debug test logging output. - * - * - * , - * gtester-specific argument. - * - * - * - * gtester-specific argument. - * - * - * - * gtester-specific argument. - * - * + * + * - `-l`: List test cases available in a test executable. + * - `--seed=SEED`: Provide a random seed to reproduce test + * runs using random numbers. + * - `--verbose`: Run tests verbosely. + * - `-q`, `--quiet`: Run tests quietly. + * - `-p PATH`: Execute all tests matching the given path. + * This can also be used to force a test to run that would otherwise + * be skipped (ie, a test whose name contains "/subprocess"). + * - `-m {perf|slow|thorough|quick|undefined|no-undefined}`: Execute tests according to these test modes: + * + * `perf`: Performance tests, may take long and report results. + * + * `slow`, `thorough`: Slow and thorough tests, may take quite long and maximize coverage. + * + * `quick`: Quick tests, should run really quickly and give good coverage. + * + * `undefined`: Tests for undefined behaviour, may provoke programming errors + * under g_test_trap_subprocess() or g_test_expect_messages() to check + * that appropriate assertions or warnings are given + * + * `no-undefined`: Avoid tests for undefined behaviour + * + * - `--debug-log`: Debug test logging output. * * Since: 2.16 */ @@ -493,9 +1047,10 @@ g_test_init (int *argc, { static char seedstr[4 + 4 * 8 + 1]; va_list args; - gpointer vararg1; + gpointer option; /* make warnings and criticals fatal for all test programs */ GLogLevelFlags fatal_mask = (GLogLevelFlags) g_log_set_always_fatal ((GLogLevelFlags) G_LOG_FATAL_MASK); + fatal_mask = (GLogLevelFlags) (fatal_mask | G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL); g_log_set_always_fatal (fatal_mask); /* check caller args */ @@ -505,9 +1060,12 @@ g_test_init (int *argc, mutable_test_config_vars.test_initialized = TRUE; va_start (args, argv); - vararg1 = va_arg (args, gpointer); /* reserved for future extensions */ + while ((option = va_arg (args, char *))) + { + if (g_strcmp0 (option, "no_g_set_prgname") == 0) + no_g_set_prgname = TRUE; + } va_end (args); - g_return_if_fail (vararg1 == NULL); /* setup random seed string */ g_snprintf (seedstr, sizeof (seedstr), "R02S%08x%08x%08x%08x", g_random_int(), g_random_int(), g_random_int(), g_random_int()); @@ -515,7 +1073,8 @@ g_test_init (int *argc, /* parse args, sets up mode, changes seed, etc. 
*/ parse_args (argc, argv); - if (!g_get_prgname()) + + if (!g_get_prgname() && !no_g_set_prgname) g_set_prgname ((*argv)[0]); /* verify GRand reliability, needed for reliable seeds */ @@ -535,6 +1094,25 @@ g_test_init (int *argc, /* report program start */ g_log_set_default_handler (gtest_default_log_handler, NULL); g_test_log (G_TEST_LOG_START_BINARY, g_get_prgname(), test_run_seedstr, 0, NULL); + + test_argv0_dirname = g_path_get_dirname (test_argv0); + + /* Make sure we get the real dirname that the test was run from */ + if (g_str_has_suffix (test_argv0_dirname, "/.libs")) + { + gchar *tmp; + tmp = g_path_get_dirname (test_argv0_dirname); + g_free (test_argv0_dirname); + test_argv0_dirname = tmp; + } + + test_disted_files_dir = g_getenv ("G_TEST_SRCDIR"); + if (!test_disted_files_dir) + test_disted_files_dir = test_argv0_dirname; + + test_built_files_dir = g_getenv ("G_TEST_BUILDDIR"); + if (!test_built_files_dir) + test_built_files_dir = test_argv0_dirname; } static void @@ -704,7 +1282,7 @@ g_test_timer_last (void) * g_test_minimized_result: * @minimized_quantity: the reported value * @format: the format string of the report message - * @Varargs: arguments to pass to the printf() function + * @...: arguments to pass to the printf() function * * Report the result of a performance or measurement test. * The test should generally strive to minimize the reported @@ -722,9 +1300,11 @@ g_test_minimized_result (double minimized_quantity, long double largs = minimized_quantity; gchar *buffer; va_list args; + va_start (args, format); buffer = g_strdup_vprintf (format, args); va_end (args); + g_test_log (G_TEST_LOG_MIN_RESULT, buffer, NULL, 1, &largs); g_free (buffer); } @@ -733,7 +1313,7 @@ g_test_minimized_result (double minimized_quantity, * g_test_maximized_result: * @maximized_quantity: the reported value * @format: the format string of the report message - * @Varargs: arguments to pass to the printf() function + * @...: arguments to pass to the printf() function * * Report the result of a performance or measurement test. * The test should generally strive to maximize the reported @@ -751,9 +1331,11 @@ g_test_maximized_result (double maximized_quantity, long double largs = maximized_quantity; gchar *buffer; va_list args; + va_start (args, format); buffer = g_strdup_vprintf (format, args); va_end (args); + g_test_log (G_TEST_LOG_MAX_RESULT, buffer, NULL, 1, &largs); g_free (buffer); } @@ -773,9 +1355,11 @@ g_test_message (const char *format, { gchar *buffer; va_list args; + va_start (args, format); buffer = g_strdup_vprintf (format, args); va_end (args); + g_test_log (G_TEST_LOG_MESSAGE, buffer, NULL, 0, NULL); g_free (buffer); } @@ -794,7 +1378,7 @@ g_test_message (const char *format, * case only. * Bug URIs are constructed by appending a bug specific URI * portion to @uri_pattern, or by replacing the special string - * '%s' within @uri_pattern if that is present. + * '\%s' within @uri_pattern if that is present. * * Since: 2.16 */ @@ -820,8 +1404,10 @@ void g_test_bug (const char *bug_uri_snippet) { char *c; + g_return_if_fail (test_uri_base != NULL); g_return_if_fail (bug_uri_snippet != NULL); + c = strstr (test_uri_base, "%s"); if (c) { @@ -853,6 +1439,7 @@ g_test_get_root (void) g_free (test_suite_root->name); test_suite_root->name = g_strdup (""); } + return test_suite_root; } @@ -861,20 +1448,55 @@ g_test_get_root (void) * * Runs all tests under the toplevel suite which can be retrieved * with g_test_get_root(). 
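An illustrative aside on g_test_bug_base(), g_test_bug() and g_test_message(), documented above (the bug number, URI pattern and parsing check are invented; the usual g_test_init()/g_test_add_func()/g_test_run() scaffolding is assumed):
|[
static void
test_number_parsing (void)
{
  /* Requires g_test_bug_base() to have been called after g_test_init(),
   * e.g. g_test_bug_base ("http://bugzilla.example.org/show_bug.cgi?id=%s"); */
  g_test_bug ("12345");
  g_test_message ("checking the regression reported in bug 12345");
  g_assert_cmpint (g_ascii_strtoll ("42", NULL, 10), ==, 42);
}
]|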
Similar to g_test_run_suite(), the test - * cases to be run are filtered according to - * test path arguments (-p testpath) as - * parsed by g_test_init(). - * g_test_run_suite() or g_test_run() may only be called once - * in a program. - * - * Returns: 0 on success + * cases to be run are filtered according to test path arguments + * (`-p testpath`) as parsed by g_test_init(). g_test_run_suite() + * or g_test_run() may only be called once in a program. + * + * In general, the tests and sub-suites within each suite are run in + * the order in which they are defined. However, note that prior to + * GLib 2.36, there was a bug in the `g_test_add_*` + * functions which caused them to create multiple suites with the same + * name, meaning that if you created tests "/foo/simple", + * "/bar/simple", and "/foo/using-bar" in that order, they would get + * run in that order (since g_test_run() would run the first "/foo" + * suite, then the "/bar" suite, then the second "/foo" suite). As of + * 2.36, this bug is fixed, and adding the tests in that order would + * result in a running order of "/foo/simple", "/foo/using-bar", + * "/bar/simple". If this new ordering is sub-optimal (because it puts + * more-complicated tests before simpler ones, making it harder to + * figure out exactly what has failed), you can fix it by changing the + * test paths to group tests by suite in a way that will result in the + * desired running order. Eg, "/simple/foo", "/simple/bar", + * "/complex/foo-using-bar". + * + * However, you should never make the actual result of a test depend + * on the order that tests are run in. If you need to ensure that some + * particular code runs before or after a given test case, use + * g_test_add(), which lets you specify setup and teardown functions. + * + * If all tests are skipped, this function will return 0 if + * producing TAP output, or 77 (treated as "skip test" by Automake) otherwise. + * + * Returns: 0 on success, 1 on failure (assuming it returns at all), + * 0 or 77 if all tests were skipped with g_test_skip() * * Since: 2.16 */ int g_test_run (void) { - return g_test_run_suite (g_test_get_root()); + if (g_test_run_suite (g_test_get_root()) != 0) + return 1; + + /* 77 is special to Automake's default driver, but not Automake's TAP driver + * or Perl's prove(1) TAP driver. */ + if (test_tap_log) + return 0; + + if (test_run_count > 0 && test_run_count == test_skipped_count) + return 77; + else + return 0; } /** @@ -889,10 +1511,10 @@ g_test_run (void) * Create a new #GTestCase, named @test_name, this API is fairly * low level, calling g_test_add() or g_test_add_func() is preferable. * When this test is executed, a fixture structure of size @data_size - * will be allocated and filled with 0s. Then data_setup() is called + * will be allocated and filled with 0s. Then @data_setup is called * to initialize the fixture. After fixture setup, the actual test - * function data_test() is called. Once the test run completed, the - * fixture structure is torn down by calling data_teardown() and + * function @data_test is called. Once the test run completed, the + * fixture structure is torn down by calling @data_teardown and * after that the memory is released. 
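As an illustrative sketch (the fixture type and test path are invented), the fixture life cycle described here is normally driven through the g_test_add() convenience macro rather than g_test_create_case() directly:
|[
typedef struct
{
  GQueue *queue;
} MyFixture;

static void
my_fixture_set_up (MyFixture *fixture, gconstpointer user_data)
{
  fixture->queue = g_queue_new ();
}

static void
my_fixture_tear_down (MyFixture *fixture, gconstpointer user_data)
{
  g_queue_free (fixture->queue);
}

static void
test_queue_empty (MyFixture *fixture, gconstpointer user_data)
{
  g_assert_true (g_queue_is_empty (fixture->queue));
}

/* registered with:
 *   g_test_add ("/queue/empty", MyFixture, NULL,
 *               my_fixture_set_up, test_queue_empty, my_fixture_tear_down);
 */
]|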
* * Splitting up a test run into fixture setup, test function and @@ -914,10 +1536,12 @@ g_test_create_case (const char *test_name, GTestFixtureFunc data_teardown) { GTestCase *tc; + g_return_val_if_fail (test_name != NULL, NULL); g_return_val_if_fail (strchr (test_name, '/') == NULL, NULL); g_return_val_if_fail (test_name[0] != 0, NULL); g_return_val_if_fail (data_test != NULL, NULL); + tc = g_slice_new0 (GTestCase); tc->name = g_strdup (test_name); tc->test_data = (gpointer) test_data; @@ -925,9 +1549,19 @@ g_test_create_case (const char *test_name, tc->fixture_setup = (void*) data_setup; tc->fixture_test = (void*) data_test; tc->fixture_teardown = (void*) data_teardown; + return tc; } +static gint +find_suite (gconstpointer l, gconstpointer s) +{ + const GTestSuite *suite = l; + const gchar *str = s; + + return strcmp (suite->name, str); +} + /** * GTestFixtureFunc: * @fixture: the test fixture @@ -943,7 +1577,9 @@ g_test_create_case (const char *test_name, * @fixture will be a pointer to the area of memory allocated by the * test framework, of the size requested. If the requested size was * zero then @fixture will be equal to @user_data. - **/ + * + * Since: 2.28 + */ void g_test_add_vtable (const char *testpath, gsize data_size, @@ -957,9 +1593,12 @@ g_test_add_vtable (const char *testpath, GTestSuite *suite; g_return_if_fail (testpath != NULL); - g_return_if_fail (testpath[0] == '/'); + g_return_if_fail (g_path_is_absolute (testpath)); g_return_if_fail (fixture_test_func != NULL); + if (g_slist_find_custom (test_paths_skipped, testpath, (GCompareFunc)g_strcmp0)) + return; + suite = g_test_get_root(); segments = g_strsplit (testpath, "/", -1); for (ui = 0; segments[ui] != NULL; ui++) @@ -972,8 +1611,18 @@ g_test_add_vtable (const char *testpath, continue; /* initial or duplicate slash */ else if (!islast) { - GTestSuite *csuite = g_test_create_suite (seg); - g_test_suite_add_suite (suite, csuite); + GSList *l; + GTestSuite *csuite; + l = g_slist_find_custom (suite->suites, seg, find_suite); + if (l) + { + csuite = l->data; + } + else + { + csuite = g_test_create_suite (seg); + g_test_suite_add_suite (suite, csuite); + } suite = csuite; } else /* islast */ @@ -986,13 +1635,141 @@ g_test_add_vtable (const char *testpath, } /** + * g_test_fail: + * + * Indicates that a test failed. This function can be called + * multiple times from the same test. You can use this function + * if your test failed in a recoverable way. + * + * Do not use this function if the failure of a test could cause + * other tests to malfunction. + * + * Calling this function will not stop the test from running, you + * need to return from the test function yourself. So you can + * produce additional diagnostic messages or even continue running + * the test. + * + * If not called from inside a test, this function does nothing. + * + * Since: 2.30 + **/ +void +g_test_fail (void) +{ + test_run_success = G_TEST_RUN_FAILURE; +} + +/** + * g_test_incomplete: + * @msg: (allow-none): explanation + * + * Indicates that a test failed because of some incomplete + * functionality. This function can be called multiple times + * from the same test. + * + * Calling this function will not stop the test from running, you + * need to return from the test function yourself. So you can + * produce additional diagnostic messages or even continue running + * the test. + * + * If not called from inside a test, this function does nothing. 
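A brief illustrative sketch (the helper functions are hypothetical) of how g_test_fail(), together with g_test_skip() and g_test_incomplete() documented here, records a result without aborting the test function:
|[
static void
test_optional_feature (void)
{
  if (!feature_available ())          /* hypothetical helper */
    {
      g_test_skip ("feature not compiled in");
      return;
    }

  if (!run_feature_check ())          /* hypothetical helper */
    {
      /* Record the failure but keep going so later diagnostics still run. */
      g_test_fail ();
      g_test_message ("feature check failed; continuing for diagnostics");
    }
}
]|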
+ * + * Since: 2.38 + */ +void +g_test_incomplete (const gchar *msg) +{ + test_run_success = G_TEST_RUN_INCOMPLETE; + g_free (test_run_msg); + test_run_msg = g_strdup (msg); +} + +/** + * g_test_skip: + * @msg: (allow-none): explanation + * + * Indicates that a test was skipped. + * + * Calling this function will not stop the test from running, you + * need to return from the test function yourself. So you can + * produce additional diagnostic messages or even continue running + * the test. + * + * If not called from inside a test, this function does nothing. + * + * Since: 2.38 + */ +void +g_test_skip (const gchar *msg) +{ + test_run_success = G_TEST_RUN_SKIPPED; + g_free (test_run_msg); + test_run_msg = g_strdup (msg); +} + +/** + * g_test_failed: + * + * Returns whether a test has already failed. This will + * be the case when g_test_fail(), g_test_incomplete() + * or g_test_skip() have been called, but also if an + * assertion has failed. + * + * This can be useful to return early from a test if + * continuing after a failed assertion might be harmful. + * + * The return value of this function is only meaningful + * if it is called from inside a test function. + * + * Returns: %TRUE if the test has failed + * + * Since: 2.38 + */ +gboolean +g_test_failed (void) +{ + return test_run_success != G_TEST_RUN_SUCCESS; +} + +/** + * g_test_set_nonfatal_assertions: + * + * Changes the behaviour of g_assert_cmpstr(), g_assert_cmpint(), + * g_assert_cmpuint(), g_assert_cmphex(), g_assert_cmpfloat(), + * g_assert_true(), g_assert_false(), g_assert_null(), g_assert_no_error(), + * g_assert_error(), g_test_assert_expected_messages() and the various + * g_test_trap_assert_*() macros to not abort to program, but instead + * call g_test_fail() and continue. (This also changes the behavior of + * g_test_fail() so that it will not cause the test program to abort + * after completing the failed test.) + * + * Note that the g_assert_not_reached() and g_assert() are not + * affected by this. + * + * This function can only be called after g_test_init(). + * + * Since: 2.38 + */ +void +g_test_set_nonfatal_assertions (void) +{ + if (!g_test_config_vars->test_initialized) + g_error ("g_test_set_nonfatal_assertions called without g_test_init"); + test_nonfatal_assertions = TRUE; + test_mode_fatal = FALSE; +} + +/** * GTestFunc: * * The type used for test case functions. - **/ + * + * Since: 2.28 + */ + /** * g_test_add_func: - * @testpath: Slash-separated test case path name for the test. + * @testpath: /-separated test case path name for the test. * @test_func: The test function to invoke for this test. * * Create a new test case, similar to g_test_create_case(). However @@ -1000,6 +1777,10 @@ g_test_add_vtable (const char *testpath, * created on the fly and added to the root fixture, based on the * slash-separated portions of @testpath. * + * If @testpath includes the component "subprocess" anywhere in it, + * the test will be skipped by default, and only run if explicitly + * required via the `-p` command-line option or g_test_trap_subprocess(). + * * Since: 2.16 */ void @@ -1018,10 +1799,13 @@ g_test_add_func (const char *testpath, * * The type used for test case functions that take an extra pointer * argument. - **/ + * + * Since: 2.28 + */ + /** * g_test_add_data_func: - * @testpath: Slash-separated test case path name for the test. + * @testpath: /-separated test case path name for the test. * @test_data: Test data argument for the test function. * @test_func: The test function to invoke for this test. 
* @@ -1031,6 +1815,10 @@ g_test_add_func (const char *testpath, * slash-separated portions of @testpath. The @test_data argument * will be passed as first argument to @test_func. * + * If @testpath includes the component "subprocess" anywhere in it, + * the test will be skipped by default, and only run if explicitly + * required via the `-p` command-line option or g_test_trap_subprocess(). + * * Since: 2.16 */ void @@ -1041,10 +1829,73 @@ g_test_add_data_func (const char *testpath, g_return_if_fail (testpath != NULL); g_return_if_fail (testpath[0] == '/'); g_return_if_fail (test_func != NULL); + g_test_add_vtable (testpath, 0, test_data, NULL, (GTestFixtureFunc) test_func, NULL); } /** + * g_test_add_data_func_full: + * @testpath: /-separated test case path name for the test. + * @test_data: Test data argument for the test function. + * @test_func: The test function to invoke for this test. + * @data_free_func: #GDestroyNotify for @test_data. + * + * Create a new test case, as with g_test_add_data_func(), but freeing + * @test_data after the test run is complete. + * + * Since: 2.34 + */ +void +g_test_add_data_func_full (const char *testpath, + gpointer test_data, + GTestDataFunc test_func, + GDestroyNotify data_free_func) +{ + g_return_if_fail (testpath != NULL); + g_return_if_fail (testpath[0] == '/'); + g_return_if_fail (test_func != NULL); + + g_test_add_vtable (testpath, 0, test_data, NULL, + (GTestFixtureFunc) test_func, + (GTestFixtureFunc) data_free_func); +} + +static gboolean +g_test_suite_case_exists (GTestSuite *suite, + const char *test_path) +{ + GSList *iter; + char *slash; + GTestCase *tc; + + test_path++; + slash = strchr (test_path, '/'); + + if (slash) + { + for (iter = suite->suites; iter; iter = iter->next) + { + GTestSuite *child_suite = iter->data; + + if (!strncmp (child_suite->name, test_path, slash - test_path)) + if (g_test_suite_case_exists (child_suite, slash)) + return TRUE; + } + } + else + { + for (iter = suite->cases; iter; iter = iter->next) + { + tc = iter->data; + if (!strcmp (tc->name, test_path)) + return TRUE; + } + } + + return FALSE; +} + +/** * g_test_create_suite: * @suite_name: a name for the suite * @@ -1081,6 +1932,7 @@ g_test_suite_add (GTestSuite *suite, { g_return_if_fail (suite != NULL); g_return_if_fail (test_case != NULL); + suite->cases = g_slist_prepend (suite->cases, test_case); } @@ -1099,6 +1951,7 @@ g_test_suite_add_suite (GTestSuite *suite, { g_return_if_fail (suite != NULL); g_return_if_fail (nestedsuite != NULL); + suite->suites = g_slist_prepend (suite->suites, nestedsuite); } @@ -1124,7 +1977,7 @@ g_test_queue_free (gpointer gfree_pointer) * @destroy_func: Destroy callback for teardown phase. * @destroy_data: Destroy callback data. * - * This function enqueus a callback @destroy_func() to be executed + * This function enqueus a callback @destroy_func to be executed * during the next test case teardown phase. This is most useful * to auto destruct allocted test resources at the end of a test run. 
* Resources are released in reverse queue order, that means enqueueing @@ -1138,7 +1991,9 @@ g_test_queue_destroy (GDestroyNotify destroy_func, gpointer destroy_data) { DestroyEntry *dentry; + g_return_if_fail (destroy_func != NULL); + dentry = g_slice_new0 (DestroyEntry); dentry->destroy_func = destroy_func; dentry->destroy_data = destroy_data; @@ -1146,12 +2001,40 @@ g_test_queue_destroy (GDestroyNotify destroy_func, test_destroy_queue = dentry; } -static gboolean -test_case_run (GTestCase *tc) -{ - gchar *old_name = test_run_name, *old_base = g_strdup (test_uri_base); - test_run_name = g_strconcat (old_name, "/", tc->name, NULL); - if (++test_run_count <= test_skip_count) +static gboolean +test_case_run (GTestCase *tc) +{ + gchar *old_name = test_run_name, *old_base = g_strdup (test_uri_base); + GSList **old_free_list, *filename_free_list = NULL; + gboolean success = G_TEST_RUN_SUCCESS; + + old_free_list = test_filename_free_list; + test_filename_free_list = &filename_free_list; + + test_run_name = g_strconcat (old_name, "/", tc->name, NULL); + if (strstr (test_run_name, "/subprocess")) + { + GSList *iter; + gboolean found = FALSE; + + for (iter = test_paths; iter; iter = iter->next) + { + if (!strcmp (test_run_name, iter->data)) + { + found = TRUE; + break; + } + } + + if (!found) + { + if (g_test_verbose ()) + g_print ("GTest: skipping: %s\n", test_run_name); + goto out; + } + } + + if (++test_run_count <= test_startup_skip_count) g_test_log (G_TEST_LOG_SKIP_CASE, test_run_name, NULL, 0, NULL); else if (test_run_list) { @@ -1165,6 +2048,8 @@ test_case_run (GTestCase *tc) void *fixture; g_test_log (G_TEST_LOG_START_CASE, test_run_name, NULL, 0, NULL); test_run_forks = 0; + test_run_success = G_TEST_RUN_SUCCESS; + g_clear_pointer (&test_run_msg, g_free); g_test_log_set_fatal_handler (NULL, NULL); g_timer_start (test_run_timer); fixture = tc->fixture_size ? g_malloc0 (tc->fixture_size) : tc->test_data; @@ -1185,27 +2070,40 @@ test_case_run (GTestCase *tc) if (tc->fixture_size) g_free (fixture); g_timer_stop (test_run_timer); - largs[0] = 0; /* OK */ + success = test_run_success; + test_run_success = G_TEST_RUN_FAILURE; + largs[0] = success; /* OK */ largs[1] = test_run_forks; largs[2] = g_timer_elapsed (test_run_timer, NULL); - g_test_log (G_TEST_LOG_STOP_CASE, NULL, NULL, G_N_ELEMENTS (largs), largs); + g_test_log (G_TEST_LOG_STOP_CASE, test_run_name, test_run_msg, G_N_ELEMENTS (largs), largs); + g_clear_pointer (&test_run_msg, g_free); g_timer_destroy (test_run_timer); } + + out: + g_slist_free_full (filename_free_list, g_free); + test_filename_free_list = old_free_list; g_free (test_run_name); test_run_name = old_name; g_free (test_uri_base); test_uri_base = old_base; - return TRUE; + + return (success == G_TEST_RUN_SUCCESS || + success == G_TEST_RUN_SKIPPED); } static int g_test_run_suite_internal (GTestSuite *suite, const char *path) { - guint n_bad = 0, bad_suite = 0, l; + guint n_bad = 0, l; gchar *rest, *old_name = test_run_name; GSList *slist, *reversed; + g_return_val_if_fail (suite != NULL, -1); + + g_test_log (G_TEST_LOG_START_SUITE, suite->name, NULL, 0, NULL); + while (path[0] == '/') path++; l = strlen (path); @@ -1217,7 +2115,7 @@ g_test_run_suite_internal (GTestSuite *suite, { GTestCase *tc = slist->data; guint n = l ? 
strlen (tc->name) : 0; - if (l == n && strncmp (path, tc->name, n) == 0) + if (l == n && !rest && strncmp (path, tc->name, n) == 0) { if (!test_case_run (tc)) n_bad++; @@ -1230,12 +2128,15 @@ g_test_run_suite_internal (GTestSuite *suite, GTestSuite *ts = slist->data; guint n = l ? strlen (ts->name) : 0; if (l == n && strncmp (path, ts->name, n) == 0) - bad_suite += g_test_run_suite_internal (ts, rest ? rest : "") != 0; + n_bad += g_test_run_suite_internal (ts, rest ? rest : ""); } g_slist_free (reversed); g_free (test_run_name); test_run_name = old_name; - return n_bad || bad_suite; + + g_test_log (G_TEST_LOG_STOP_SUITE, suite->name, NULL, 0, NULL); + + return n_bad; } /** @@ -1244,8 +2145,10 @@ g_test_run_suite_internal (GTestSuite *suite, * * Execute the tests within @suite and all nested #GTestSuites. * The test suites to be executed are filtered according to - * test path arguments (-p testpath) - * as parsed by g_test_init(). + * test path arguments (`-p testpath`) as parsed by g_test_init(). + * See the g_test_run() documentation for more information on the + * order that tests are run in. + * * g_test_run_suite() or g_test_run() may only be called once * in a program. * @@ -1256,22 +2159,29 @@ g_test_run_suite_internal (GTestSuite *suite, int g_test_run_suite (GTestSuite *suite) { + GSList *my_test_paths; guint n_bad = 0; + g_return_val_if_fail (g_test_config_vars->test_initialized, -1); g_return_val_if_fail (g_test_run_once == TRUE, -1); + g_test_run_once = FALSE; - if (!test_paths) - test_paths = g_slist_prepend (test_paths, ""); - while (test_paths) + + if (test_paths) + my_test_paths = g_slist_copy (test_paths); + else + my_test_paths = g_slist_prepend (NULL, ""); + + while (my_test_paths) { - const char *rest, *path = test_paths->data; + const char *rest, *path = my_test_paths->data; guint l, n = strlen (suite->name); - test_paths = g_slist_delete_link (test_paths, test_paths); + my_test_paths = g_slist_delete_link (my_test_paths, my_test_paths); while (path[0] == '/') path++; if (!n) /* root suite, run unconditionally */ { - n_bad += 0 != g_test_run_suite_internal (suite, path); + n_bad += g_test_run_suite_internal (suite, path); continue; } /* regular suite, match path */ @@ -1279,8 +2189,9 @@ g_test_run_suite (GTestSuite *suite) l = strlen (path); l = rest ? MIN (l, rest - path) : l; if ((!l || l == n) && strncmp (path, suite->name, n) == 0) - n_bad += 0 != g_test_run_suite_internal (suite, rest ? rest : ""); + n_bad += g_test_run_suite_internal (suite, rest ? rest : ""); } + return n_bad; } @@ -1294,6 +2205,7 @@ gtest_default_log_handler (const gchar *log_domain, gboolean fatal = FALSE; gchar *msg; guint i = 0; + if (log_domain) { strv[i++] = log_domain; @@ -1321,9 +2233,11 @@ gtest_default_log_handler (const gchar *log_domain, strv[i++] = ": "; strv[i++] = message; strv[i++] = NULL; + msg = g_strjoinv ("", (gchar**) strv); g_test_log (fatal ? 
G_TEST_LOG_ERROR : G_TEST_LOG_MESSAGE, msg, NULL, 0, NULL); g_log_default_handler (log_domain, log_level, message, unused_data); + g_free (msg); } @@ -1336,6 +2250,7 @@ g_assertion_message (const char *domain, { char lstr[32]; char *s; + if (!message) message = "code should not be reached"; g_snprintf (lstr, 32, "%d", line); @@ -1345,17 +2260,35 @@ g_assertion_message (const char *domain, " ", message, NULL); g_printerr ("**\n%s\n", s); + g_test_log (G_TEST_LOG_ERROR, s, NULL, 0, NULL); + + if (test_nonfatal_assertions) + { + g_free (s); + g_test_fail (); + return; + } + /* store assertion message in global variable, so that it can be found in a * core dump */ if (__glib_assert_msg != NULL) - /* free the old one */ - free (__glib_assert_msg); + /* free the old one */ + free (__glib_assert_msg); __glib_assert_msg = (char*) malloc (strlen (s) + 1); strcpy (__glib_assert_msg, s); - g_test_log (G_TEST_LOG_ERROR, s, NULL, 0, NULL); g_free (s); - abort(); + + if (test_in_subprocess) + { + /* If this is a test case subprocess then it probably hit this + * assertion on purpose, so just exit() rather than abort()ing, + * to avoid triggering any system crash-reporting daemon. + */ + _exit (1); + } + else + abort (); } void @@ -1365,9 +2298,22 @@ g_assertion_message_expr (const char *domain, const char *func, const char *expr) { - char *s = g_strconcat ("assertion failed: (", expr, ")", NULL); + char *s; + if (!expr) + s = g_strdup ("code should not be reached"); + else + s = g_strconcat ("assertion failed: (", expr, ")", NULL); g_assertion_message (domain, file, line, func, s); g_free (s); + + /* Normally g_assertion_message() won't return, but we need this for + * when test_nonfatal_assertions is set, since + * g_assertion_message_expr() is used for always-fatal assertions. + */ + if (test_in_subprocess) + _exit (1); + else + abort (); } void @@ -1382,11 +2328,12 @@ g_assertion_message_cmpnum (const char *domain, char numtype) { char *s = NULL; + switch (numtype) { - case 'i': s = g_strdup_printf ("assertion failed (%s): (%.0Lf %s %.0Lf)", expr, arg1, cmp, arg2); break; + case 'i': s = g_strdup_printf ("assertion failed (%s): (%" G_GINT64_MODIFIER "i %s %" G_GINT64_MODIFIER "i)", expr, (gint64) arg1, cmp, (gint64) arg2); break; case 'x': s = g_strdup_printf ("assertion failed (%s): (0x%08" G_GINT64_MODIFIER "x %s 0x%08" G_GINT64_MODIFIER "x)", expr, (guint64) arg1, cmp, (guint64) arg2); break; - case 'f': s = g_strdup_printf ("assertion failed (%s): (%.9Lg %s %.9Lg)", expr, arg1, cmp, arg2); break; + case 'f': s = g_strdup_printf ("assertion failed (%s): (%.9g %s %.9g)", expr, (double) arg1, cmp, (double) arg2); break; /* ideally use: floats=%.7g double=%.17g */ } g_assertion_message (domain, file, line, func, s); @@ -1451,14 +2398,14 @@ g_assertion_message_error (const char *domain, /** * g_strcmp0: - * @str1: a C string or %NULL - * @str2: another C string or %NULL + * @str1: (allow-none): a C string or %NULL + * @str2: (allow-none): another C string or %NULL * - * Compares @str1 and @str2 like strcmp(). Handles %NULL + * Compares @str1 and @str2 like strcmp(). Handles %NULL * gracefully by sorting it before non-%NULL strings. * Comparing two %NULL pointers returns 0. * - * Returns: -1, 0 or 1, if @str1 is <, == or > than @str2. + * Returns: an integer less than, equal to, or greater than zero, if @str1 is <, == or > than @str2. 
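For illustration (the variable is invented), the %NULL handling described here lets g_strcmp0() be used directly on values that may be unset:
|[
static void
test_optional_setting (void)
{
  const gchar *configured = NULL;   /* an optional value that may be unset */

  g_assert_cmpint (g_strcmp0 (configured, NULL), ==, 0);      /* NULL == NULL */
  g_assert_cmpint (g_strcmp0 (configured, "default"), <, 0);  /* NULL sorts before non-NULL */
  g_assert_cmpint (g_strcmp0 ("zzz", "aaa"), >, 0);           /* otherwise like strcmp() */
}
]|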
* * Since: 2.16 */ @@ -1473,103 +2420,14 @@ g_strcmp0 (const char *str1, return strcmp (str1, str2); } -#ifdef G_OS_UNIX -static int /* 0 on success */ -kill_child (int pid, - int *status, - int patience) -{ - int wr; - if (patience >= 3) /* try graceful reap */ - { - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - } - if (patience >= 2) /* try SIGHUP */ - { - kill (pid, SIGHUP); - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - g_usleep (20 * 1000); /* give it some scheduling/shutdown time */ - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - g_usleep (50 * 1000); /* give it some scheduling/shutdown time */ - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - g_usleep (100 * 1000); /* give it some scheduling/shutdown time */ - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - } - if (patience >= 1) /* try SIGTERM */ - { - kill (pid, SIGTERM); - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - g_usleep (200 * 1000); /* give it some scheduling/shutdown time */ - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - g_usleep (400 * 1000); /* give it some scheduling/shutdown time */ - if (waitpid (pid, status, WNOHANG) > 0) - return 0; - } - /* finish it off */ - kill (pid, SIGKILL); - do - wr = waitpid (pid, status, 0); - while (wr < 0 && errno == EINTR); - return wr; -} -#endif - -static inline int -g_string_must_read (GString *gstring, - int fd) -{ -#define STRING_BUFFER_SIZE 4096 - char buf[STRING_BUFFER_SIZE]; - gssize bytes; - again: - bytes = read (fd, buf, sizeof (buf)); - if (bytes == 0) - return 0; /* EOF, calling this function assumes data is available */ - else if (bytes > 0) - { - g_string_append_len (gstring, buf, bytes); - return 1; - } - else if (bytes < 0 && errno == EINTR) - goto again; - else /* bytes < 0 */ - { - g_warning ("failed to read() from child process (%d): %s", test_trap_last_pid, g_strerror (errno)); - return 1; /* ignore error after warning */ - } -} - -static inline void -g_string_write_out (GString *gstring, - int outfd, - int *stringpos) -{ - if (*stringpos < gstring->len) - { - int r; - do - r = write (outfd, gstring->str + *stringpos, gstring->len - *stringpos); - while (r < 0 && errno == EINTR); - *stringpos += MAX (r, 0); - } -} - static void test_trap_clear (void) { test_trap_last_status = 0; test_trap_last_pid = 0; - g_free (test_trap_last_stdout); - test_trap_last_stdout = NULL; - g_free (test_trap_last_stderr); - test_trap_last_stderr = NULL; + g_clear_pointer (&test_trap_last_subprocess, g_free); + g_clear_pointer (&test_trap_last_stdout, g_free); + g_clear_pointer (&test_trap_last_stderr, g_free); } #ifdef G_OS_UNIX @@ -1585,27 +2443,191 @@ sane_dup2 (int fd1, return ret; } -static guint64 -test_time_stamp (void) +#endif + +typedef struct { + GPid pid; + GMainLoop *loop; + int child_status; + + GIOChannel *stdout_io; + gboolean echo_stdout; + GString *stdout_str; + + GIOChannel *stderr_io; + gboolean echo_stderr; + GString *stderr_str; +} WaitForChildData; + +static void +check_complete (WaitForChildData *data) +{ + if (data->child_status != -1 && data->stdout_io == NULL && data->stderr_io == NULL) + g_main_loop_quit (data->loop); +} + +static void +child_exited (GPid pid, + gint status, + gpointer user_data) { - GTimeVal tv; - guint64 stamp; - g_get_current_time (&tv); - stamp = tv.tv_sec; - stamp = stamp * 1000000 + tv.tv_usec; - return stamp; + WaitForChildData *data = user_data; + +#ifdef G_OS_UNIX + if (WIFEXITED (status)) /* normal exit */ + data->child_status = WEXITSTATUS (status); /* 0..255 */ + else if 
(WIFSIGNALED (status) && WTERMSIG (status) == SIGALRM) + data->child_status = G_TEST_STATUS_TIMED_OUT; + else if (WIFSIGNALED (status)) + data->child_status = (WTERMSIG (status) << 12); /* signalled */ + else /* WCOREDUMP (status) */ + data->child_status = 512; /* coredump */ +#else + data->child_status = status; +#endif + + check_complete (data); } +static gboolean +child_timeout (gpointer user_data) +{ + WaitForChildData *data = user_data; + +#ifdef G_OS_WIN32 + TerminateProcess (data->pid, G_TEST_STATUS_TIMED_OUT); +#else + kill (data->pid, SIGALRM); #endif + return FALSE; +} + +static gboolean +child_read (GIOChannel *io, GIOCondition cond, gpointer user_data) +{ + WaitForChildData *data = user_data; + GIOStatus status; + gsize nread, nwrote, total; + gchar buf[4096]; + FILE *echo_file = NULL; + + status = g_io_channel_read_chars (io, buf, sizeof (buf), &nread, NULL); + if (status == G_IO_STATUS_ERROR || status == G_IO_STATUS_EOF) + { + // FIXME data->error = (status == G_IO_STATUS_ERROR); + if (io == data->stdout_io) + g_clear_pointer (&data->stdout_io, g_io_channel_unref); + else + g_clear_pointer (&data->stderr_io, g_io_channel_unref); + + check_complete (data); + return FALSE; + } + else if (status == G_IO_STATUS_AGAIN) + return TRUE; + + if (io == data->stdout_io) + { + g_string_append_len (data->stdout_str, buf, nread); + if (data->echo_stdout) + echo_file = stdout; + } + else + { + g_string_append_len (data->stderr_str, buf, nread); + if (data->echo_stderr) + echo_file = stderr; + } + + if (echo_file) + { + for (total = 0; total < nread; total += nwrote) + { + nwrote = fwrite (buf + total, 1, nread - total, echo_file); + if (nwrote == 0) + g_error ("write failed: %s", g_strerror (errno)); + } + } + + return TRUE; +} + +static void +wait_for_child (GPid pid, + int stdout_fd, gboolean echo_stdout, + int stderr_fd, gboolean echo_stderr, + guint64 timeout) +{ + WaitForChildData data; + GMainContext *context; + GSource *source; + + data.pid = pid; + data.child_status = -1; + + context = g_main_context_new (); + data.loop = g_main_loop_new (context, FALSE); + + source = g_child_watch_source_new (pid); + g_source_set_callback (source, (GSourceFunc) child_exited, &data, NULL); + g_source_attach (source, context); + g_source_unref (source); + + data.echo_stdout = echo_stdout; + data.stdout_str = g_string_new (NULL); + data.stdout_io = g_io_channel_unix_new (stdout_fd); + g_io_channel_set_close_on_unref (data.stdout_io, TRUE); + g_io_channel_set_encoding (data.stdout_io, NULL, NULL); + g_io_channel_set_buffered (data.stdout_io, FALSE); + source = g_io_create_watch (data.stdout_io, G_IO_IN | G_IO_ERR | G_IO_HUP); + g_source_set_callback (source, (GSourceFunc) child_read, &data, NULL); + g_source_attach (source, context); + g_source_unref (source); + + data.echo_stderr = echo_stderr; + data.stderr_str = g_string_new (NULL); + data.stderr_io = g_io_channel_unix_new (stderr_fd); + g_io_channel_set_close_on_unref (data.stderr_io, TRUE); + g_io_channel_set_encoding (data.stderr_io, NULL, NULL); + g_io_channel_set_buffered (data.stderr_io, FALSE); + source = g_io_create_watch (data.stderr_io, G_IO_IN | G_IO_ERR | G_IO_HUP); + g_source_set_callback (source, (GSourceFunc) child_read, &data, NULL); + g_source_attach (source, context); + g_source_unref (source); + + if (timeout) + { + source = g_timeout_source_new (0); + g_source_set_ready_time (source, g_get_monotonic_time () + timeout); + g_source_set_callback (source, (GSourceFunc) child_timeout, &data, NULL); + g_source_attach (source, context); 
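+      /* The source has a 0 interval but a ready time of @timeout microseconds
+       * from now, so child_timeout() fires once the deadline passes (it
+       * returns FALSE, removing the source).  After attaching, the context
+       * owns its own reference, so the local one is dropped below. */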
+ g_source_unref (source); + } + + g_main_loop_run (data.loop); + g_main_loop_unref (data.loop); + g_main_context_unref (context); + + test_trap_last_pid = pid; + test_trap_last_status = data.child_status; + test_trap_last_stdout = g_string_free (data.stdout_str, FALSE); + test_trap_last_stderr = g_string_free (data.stderr_str, FALSE); + + g_clear_pointer (&data.stdout_io, g_io_channel_unref); + g_clear_pointer (&data.stderr_io, g_io_channel_unref); +} + /** * g_test_trap_fork: * @usec_timeout: Timeout for the forked test in micro seconds. * @test_trap_flags: Flags to modify forking behaviour. * * Fork the current test program to execute a test case that might - * not return or that might abort. The forked test case is aborted - * and considered failing if its run time exceeds @usec_timeout. + * not return or that might abort. + * + * If @usec_timeout is non-0, the forked test case is aborted and + * considered failing if its run time exceeds it. * * The forking behavior can be configured with the #GTestTrapFlags flags. * @@ -1614,7 +2636,7 @@ test_time_stamp (void) * The forking parent process then asserts successful child program * termination and validates child program outputs. * - * |[ + * |[ * static void * test_fork_patterns (void) * { @@ -1622,33 +2644,33 @@ test_time_stamp (void) * { * g_print ("some stdout text: somagic17\n"); * g_printerr ("some stderr text: semagic43\n"); - * exit (0); /* successful test run */ + * exit (0); // successful test run * } - * g_test_trap_assert_passed(); + * g_test_trap_assert_passed (); * g_test_trap_assert_stdout ("*somagic17*"); * g_test_trap_assert_stderr ("*semagic43*"); * } * ]| * - * This function is implemented only on Unix platforms. - * * Returns: %TRUE for the forked child and %FALSE for the executing parent process. * * Since: 2.16 + * + * Deprecated: This function is implemented only on Unix platforms, + * and is not always reliable due to problems inherent in + * fork-without-exec. Use g_test_trap_subprocess() instead. 
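+ *
+ * As a sketch of the suggested migration (reusing the hypothetical test
+ * above), roughly the same pattern can be expressed with
+ * g_test_trap_subprocess() and g_test_subprocess():
+ * |[
+ * static void
+ * test_fork_patterns (void)
+ * {
+ *   if (g_test_subprocess ())
+ *     {
+ *       g_print ("some stdout text: somagic17\n");
+ *       g_printerr ("some stderr text: semagic43\n");
+ *       exit (0); // successful test run
+ *     }
+ *   g_test_trap_subprocess (NULL, 0, 0);
+ *   g_test_trap_assert_passed ();
+ *   g_test_trap_assert_stdout ("*somagic17*");
+ *   g_test_trap_assert_stderr ("*semagic43*");
+ * }
+ * ]|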
*/ gboolean g_test_trap_fork (guint64 usec_timeout, GTestTrapFlags test_trap_flags) { #ifdef G_OS_UNIX - gboolean pass_on_forked_log = FALSE; int stdout_pipe[2] = { -1, -1 }; int stderr_pipe[2] = { -1, -1 }; - int stdtst_pipe[2] = { -1, -1 }; + test_trap_clear(); - if (pipe (stdout_pipe) < 0 || pipe (stderr_pipe) < 0 || pipe (stdtst_pipe) < 0) + if (pipe (stdout_pipe) < 0 || pipe (stderr_pipe) < 0) g_error ("failed to create pipes to fork test program: %s", g_strerror (errno)); - signal (SIGCHLD, SIG_DFL); test_trap_last_pid = fork (); if (test_trap_last_pid < 0) g_error ("failed to fork test program: %s", g_strerror (errno)); @@ -1657,9 +2679,12 @@ g_test_trap_fork (guint64 usec_timeout, int fd0 = -1; close (stdout_pipe[0]); close (stderr_pipe[0]); - close (stdtst_pipe[0]); if (!(test_trap_flags & G_TEST_TRAP_INHERIT_STDIN)) - fd0 = open ("/dev/null", O_RDONLY); + { + fd0 = g_open ("/dev/null", O_RDONLY, 0); + if (fd0 < 0) + g_error ("failed to open /dev/null for stdin redirection"); + } if (sane_dup2 (stdout_pipe[1], 1) < 0 || sane_dup2 (stderr_pipe[1], 2) < 0 || (fd0 >= 0 && sane_dup2 (fd0, 0) < 0)) g_error ("failed to dup2() in forked test program: %s", g_strerror (errno)); if (fd0 >= 3) @@ -1668,106 +2693,18 @@ g_test_trap_fork (guint64 usec_timeout, close (stdout_pipe[1]); if (stderr_pipe[1] >= 3) close (stderr_pipe[1]); - test_log_fd = stdtst_pipe[1]; return TRUE; } else /* parent */ { - GString *sout = g_string_new (NULL); - GString *serr = g_string_new (NULL); - guint64 sstamp; - int soutpos = 0, serrpos = 0, wr, need_wait = TRUE; test_run_forks++; close (stdout_pipe[1]); close (stderr_pipe[1]); - close (stdtst_pipe[1]); - sstamp = test_time_stamp(); - /* read data until we get EOF on all pipes */ - while (stdout_pipe[0] >= 0 || stderr_pipe[0] >= 0 || stdtst_pipe[0] > 0) - { - fd_set fds; - struct timeval tv; - int ret; - FD_ZERO (&fds); - if (stdout_pipe[0] >= 0) - FD_SET (stdout_pipe[0], &fds); - if (stderr_pipe[0] >= 0) - FD_SET (stderr_pipe[0], &fds); - if (stdtst_pipe[0] >= 0) - FD_SET (stdtst_pipe[0], &fds); - tv.tv_sec = 0; - tv.tv_usec = MIN (usec_timeout ? usec_timeout : 1000000, 100 * 1000); /* sleep at most 0.5 seconds to catch clock skews, etc. */ - ret = select (MAX (MAX (stdout_pipe[0], stderr_pipe[0]), stdtst_pipe[0]) + 1, &fds, NULL, NULL, &tv); - if (ret < 0 && errno != EINTR) - { - g_warning ("Unexpected error in select() while reading from child process (%d): %s", test_trap_last_pid, g_strerror (errno)); - break; - } - if (stdout_pipe[0] >= 0 && FD_ISSET (stdout_pipe[0], &fds) && - g_string_must_read (sout, stdout_pipe[0]) == 0) - { - close (stdout_pipe[0]); - stdout_pipe[0] = -1; - } - if (stderr_pipe[0] >= 0 && FD_ISSET (stderr_pipe[0], &fds) && - g_string_must_read (serr, stderr_pipe[0]) == 0) - { - close (stderr_pipe[0]); - stderr_pipe[0] = -1; - } - if (stdtst_pipe[0] >= 0 && FD_ISSET (stdtst_pipe[0], &fds)) - { - guint8 buffer[4096]; - gint l, r = read (stdtst_pipe[0], buffer, sizeof (buffer)); - if (r > 0 && test_log_fd > 0) - do - l = write (pass_on_forked_log ? 
test_log_fd : -1, buffer, r); - while (l < 0 && errno == EINTR); - if (r == 0 || (r < 0 && errno != EINTR && errno != EAGAIN)) - { - close (stdtst_pipe[0]); - stdtst_pipe[0] = -1; - } - } - if (!(test_trap_flags & G_TEST_TRAP_SILENCE_STDOUT)) - g_string_write_out (sout, 1, &soutpos); - if (!(test_trap_flags & G_TEST_TRAP_SILENCE_STDERR)) - g_string_write_out (serr, 2, &serrpos); - if (usec_timeout) - { - guint64 nstamp = test_time_stamp(); - int status = 0; - sstamp = MIN (sstamp, nstamp); /* guard against backwards clock skews */ - if (usec_timeout < nstamp - sstamp) - { - /* timeout reached, need to abort the child now */ - kill_child (test_trap_last_pid, &status, 3); - test_trap_last_status = 1024; /* timeout */ - if (0 && WIFSIGNALED (status)) - g_printerr ("%s: child timed out and received: %s\n", G_STRFUNC, g_strsignal (WTERMSIG (status))); - need_wait = FALSE; - break; - } - } - } - close (stdout_pipe[0]); - close (stderr_pipe[0]); - close (stdtst_pipe[0]); - if (need_wait) - { - int status = 0; - do - wr = waitpid (test_trap_last_pid, &status, 0); - while (wr < 0 && errno == EINTR); - if (WIFEXITED (status)) /* normal exit */ - test_trap_last_status = WEXITSTATUS (status); /* 0..255 */ - else if (WIFSIGNALED (status)) - test_trap_last_status = (WTERMSIG (status) << 12); /* signalled */ - else /* WCOREDUMP (status) */ - test_trap_last_status = 512; /* coredump */ - } - test_trap_last_stdout = g_string_free (sout, FALSE); - test_trap_last_stderr = g_string_free (serr, FALSE); + + wait_for_child (test_trap_last_pid, + stdout_pipe[0], !(test_trap_flags & G_TEST_TRAP_SILENCE_STDOUT), + stderr_pipe[0], !(test_trap_flags & G_TEST_TRAP_SILENCE_STDERR), + usec_timeout); return FALSE; } #else @@ -1778,11 +2715,166 @@ g_test_trap_fork (guint64 usec_timeout, } /** + * g_test_trap_subprocess: + * @test_path: (allow-none): Test to run in a subprocess + * @usec_timeout: Timeout for the subprocess test in micro seconds. + * @test_flags: Flags to modify subprocess behaviour. + * + * Respawns the test program to run only @test_path in a subprocess. + * This can be used for a test case that might not return, or that + * might abort. + * + * If @test_path is %NULL then the same test is re-run in a subprocess. + * You can use g_test_subprocess() to determine whether the test is in + * a subprocess or not. + * + * @test_path can also be the name of the parent test, followed by + * "`/subprocess/`" and then a name for the specific subtest (or just + * ending with "`/subprocess`" if the test only has one child test); + * tests with names of this form will automatically be skipped in the + * parent process. + * + * If @usec_timeout is non-0, the test subprocess is aborted and + * considered failing if its run time exceeds it. + * + * The subprocess behavior can be configured with the + * #GTestSubprocessFlags flags. + * + * You can use methods such as g_test_trap_assert_passed(), + * g_test_trap_assert_failed(), and g_test_trap_assert_stderr() to + * check the results of the subprocess. (But note that + * g_test_trap_assert_stdout() and g_test_trap_assert_stderr() + * cannot be used if @test_flags specifies that the child should + * inherit the parent stdout/stderr.) + * + * If your `main ()` needs to behave differently in + * the subprocess, you can call g_test_subprocess() (after calling + * g_test_init()) to see whether you are in a subprocess. + * + * The following example tests that calling + * `my_object_new(1000000)` will abort with an error + * message. 
+ * + * |[ + * static void + * test_create_large_object_subprocess (void) + * { + * if (g_test_subprocess ()) + * { + * my_object_new (1000000); + * return; + * } + * + * // Reruns this same test in a subprocess + * g_test_trap_subprocess (NULL, 0, 0); + * g_test_trap_assert_failed (); + * g_test_trap_assert_stderr ("*ERROR*too large*"); + * } + * + * int + * main (int argc, char **argv) + * { + * g_test_init (&argc, &argv, NULL); + * + * g_test_add_func ("/myobject/create_large_object", + * test_create_large_object); + * return g_test_run (); + * } + * ]| + * + * Since: 2.38 + */ +void +g_test_trap_subprocess (const char *test_path, + guint64 usec_timeout, + GTestSubprocessFlags test_flags) +{ + GError *error = NULL; + GPtrArray *argv; + GSpawnFlags flags; + int stdout_fd, stderr_fd; + GPid pid; + + /* Sanity check that they used GTestSubprocessFlags, not GTestTrapFlags */ + g_assert ((test_flags & (G_TEST_TRAP_INHERIT_STDIN | G_TEST_TRAP_SILENCE_STDOUT | G_TEST_TRAP_SILENCE_STDERR)) == 0); + + if (test_path) + { + if (!g_test_suite_case_exists (g_test_get_root (), test_path)) + g_error ("g_test_trap_subprocess: test does not exist: %s", test_path); + } + else + { + test_path = test_run_name; + } + + if (g_test_verbose ()) + g_print ("GTest: subprocess: %s\n", test_path); + + test_trap_clear (); + test_trap_last_subprocess = g_strdup (test_path); + + argv = g_ptr_array_new (); + g_ptr_array_add (argv, test_argv0); + g_ptr_array_add (argv, "-q"); + g_ptr_array_add (argv, "-p"); + g_ptr_array_add (argv, (char *)test_path); + g_ptr_array_add (argv, "--GTestSubprocess"); + if (test_log_fd != -1) + { + char log_fd_buf[128]; + + g_ptr_array_add (argv, "--GTestLogFD"); + g_snprintf (log_fd_buf, sizeof (log_fd_buf), "%d", test_log_fd); + g_ptr_array_add (argv, log_fd_buf); + } + g_ptr_array_add (argv, NULL); + + flags = G_SPAWN_DO_NOT_REAP_CHILD; + if (test_flags & G_TEST_TRAP_INHERIT_STDIN) + flags |= G_SPAWN_CHILD_INHERITS_STDIN; + + if (!g_spawn_async_with_pipes (test_initial_cwd, + (char **)argv->pdata, + NULL, flags, + NULL, NULL, + &pid, NULL, &stdout_fd, &stderr_fd, + &error)) + { + g_error ("g_test_trap_subprocess() failed: %s\n", + error->message); + } + g_ptr_array_free (argv, TRUE); + + wait_for_child (pid, + stdout_fd, !!(test_flags & G_TEST_SUBPROCESS_INHERIT_STDOUT), + stderr_fd, !!(test_flags & G_TEST_SUBPROCESS_INHERIT_STDERR), + usec_timeout); +} + +/** + * g_test_subprocess: + * + * Returns %TRUE (after g_test_init() has been called) if the test + * program is running under g_test_trap_subprocess(). + * + * Returns: %TRUE if the test program is running under + * g_test_trap_subprocess(). + * + * Since: 2.38 + */ +gboolean +g_test_subprocess (void) +{ + return test_in_subprocess; +} + +/** * g_test_trap_has_passed: * - * Check the result of the last g_test_trap_fork() call. + * Check the result of the last g_test_trap_subprocess() call. * - * Returns: %TRUE if the last forked child terminated successfully. + * Returns: %TRUE if the last test subprocess terminated successfully. * * Since: 2.16 */ @@ -1795,16 +2887,16 @@ g_test_trap_has_passed (void) /** * g_test_trap_reached_timeout: * - * Check the result of the last g_test_trap_fork() call. + * Check the result of the last g_test_trap_subprocess() call. * - * Returns: %TRUE if the last forked child got killed due to a fork timeout. + * Returns: %TRUE if the last test subprocess got killed due to a timeout. 
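+ *
+ * A minimal sketch of how a timeout is detected (infinite_loop() is a
+ * hypothetical helper and the half-second limit is arbitrary):
+ * |[
+ * static void
+ * test_hang (void)
+ * {
+ *   if (g_test_subprocess ())
+ *     {
+ *       infinite_loop ();  // never returns
+ *       return;
+ *     }
+ *   g_test_trap_subprocess (NULL, 500000, 0);
+ *   g_test_trap_assert_failed ();
+ *   g_assert (g_test_trap_reached_timeout ());
+ * }
+ * ]|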
* * Since: 2.16 */ gboolean g_test_trap_reached_timeout (void) { - return 0 != (test_trap_last_status & 1024); /* timeout flag */ + return test_trap_last_status == G_TEST_STATUS_TIMED_OUT; } void @@ -1815,40 +2907,54 @@ g_test_trap_assertions (const char *domain, guint64 assertion_flags, /* 0-pass, 1-fail, 2-outpattern, 4-errpattern */ const char *pattern) { -#ifdef G_OS_UNIX gboolean must_pass = assertion_flags == 0; gboolean must_fail = assertion_flags == 1; gboolean match_result = 0 == (assertion_flags & 1); const char *stdout_pattern = (assertion_flags & 2) ? pattern : NULL; const char *stderr_pattern = (assertion_flags & 4) ? pattern : NULL; const char *match_error = match_result ? "failed to match" : "contains invalid match"; - if (test_trap_last_pid == 0) - g_error ("child process failed to exit after g_test_trap_fork() and before g_test_trap_assert*()"); + char *process_id; + +#ifdef G_OS_UNIX + if (test_trap_last_subprocess != NULL) + { + process_id = g_strdup_printf ("%s [%d]", test_trap_last_subprocess, + test_trap_last_pid); + } + else if (test_trap_last_pid != 0) + process_id = g_strdup_printf ("%d", test_trap_last_pid); +#else + if (test_trap_last_subprocess != NULL) + process_id = g_strdup (test_trap_last_subprocess); +#endif + else + g_error ("g_test_trap_ assertion with no trapped test"); + if (must_pass && !g_test_trap_has_passed()) { - char *msg = g_strdup_printf ("child process (%d) of test trap failed unexpectedly", test_trap_last_pid); + char *msg = g_strdup_printf ("child process (%s) failed unexpectedly", process_id); g_assertion_message (domain, file, line, func, msg); g_free (msg); } if (must_fail && g_test_trap_has_passed()) { - char *msg = g_strdup_printf ("child process (%d) did not fail as expected", test_trap_last_pid); + char *msg = g_strdup_printf ("child process (%s) did not fail as expected", process_id); g_assertion_message (domain, file, line, func, msg); g_free (msg); } if (stdout_pattern && match_result == !g_pattern_match_simple (stdout_pattern, test_trap_last_stdout)) { - char *msg = g_strdup_printf ("stdout of child process (%d) %s: %s", test_trap_last_pid, match_error, stdout_pattern); + char *msg = g_strdup_printf ("stdout of child process (%s) %s: %s", process_id, match_error, stdout_pattern); g_assertion_message (domain, file, line, func, msg); g_free (msg); } if (stderr_pattern && match_result == !g_pattern_match_simple (stderr_pattern, test_trap_last_stderr)) { - char *msg = g_strdup_printf ("stderr of child process (%d) %s: %s", test_trap_last_pid, match_error, stderr_pattern); + char *msg = g_strdup_printf ("stderr of child process (%s) %s: %s", process_id, match_error, stderr_pattern); g_assertion_message (domain, file, line, func, msg); g_free (msg); } -#endif + g_free (process_id); } static void @@ -1955,9 +3061,11 @@ g_test_log_extract (GTestLogBuffer *tbuffer) tbuffer->msgs = g_slist_prepend (tbuffer->msgs, g_memdup (&msg, sizeof (msg))); return TRUE; } + + g_free (msg.nums); + g_strfreev (msg.strings); } - g_free (msg.nums); - g_strfreev (msg.strings); + g_error ("corrupt log stream from test program"); return FALSE; } @@ -1976,7 +3084,7 @@ g_test_log_buffer_new (void) } /** - * g_test_log_buffer_free + * g_test_log_buffer_free: * * Internal function for gtester to free test log messages, no ABI guarantees provided. 
*/ @@ -1991,7 +3099,7 @@ g_test_log_buffer_free (GTestLogBuffer *tbuffer) } /** - * g_test_log_buffer_push + * g_test_log_buffer_push: * * Internal function for gtester to decode test log messages, no ABI guarantees provided. */ @@ -2045,6 +3153,184 @@ g_test_log_msg_free (GTestLogMsg *tmsg) g_free (tmsg); } +static gchar * +g_test_build_filename_va (GTestFileType file_type, + const gchar *first_path, + va_list ap) +{ + const gchar *pathv[16]; + gint num_path_segments; + + if (file_type == G_TEST_DIST) + pathv[0] = test_disted_files_dir; + else if (file_type == G_TEST_BUILT) + pathv[0] = test_built_files_dir; + else + g_assert_not_reached (); + + pathv[1] = first_path; + + for (num_path_segments = 2; num_path_segments < G_N_ELEMENTS (pathv); num_path_segments++) + { + pathv[num_path_segments] = va_arg (ap, const char *); + if (pathv[num_path_segments] == NULL) + break; + } + + g_assert_cmpint (num_path_segments, <, G_N_ELEMENTS (pathv)); + + return g_build_filenamev ((gchar **) pathv); +} + +/** + * g_test_build_filename: + * @file_type: the type of file (built vs. distributed) + * @first_path: the first segment of the pathname + * @...: %NULL-terminated additional path segments + * + * Creates the pathname to a data file that is required for a test. + * + * This function is conceptually similar to g_build_filename() except + * that the first argument has been replaced with a #GTestFileType + * argument. + * + * The data file should either have been distributed with the module + * containing the test (%G_TEST_DIST) or built as part of the build + * system of that module (%G_TEST_BUILT). + * + * In order for this function to work in srcdir != builddir situations, + * the G_TEST_SRCDIR and G_TEST_BUILDDIR environment variables need to + * have been defined. As of 2.38, this is done by the glib.mk + * included in GLib. Please ensure that your copy is up to date before + * using this function. + * + * In case neither variable is set, this function will fall back to + * using the dirname portion of argv[0], possibly removing ".libs". + * This allows for casual running of tests directly from the commandline + * in the srcdir == builddir case and should also support running of + * installed tests, assuming the data files have been installed in the + * same relative path as the test binary. + * + * Returns: the path of the file, to be freed using g_free() + * + * Since: 2.38 + **/ +/** + * GTestFileType: + * @G_TEST_DIST: a file that was included in the distribution tarball + * @G_TEST_BUILT: a file that was built on the compiling machine + * + * The type of file to return the filename for, when used with + * g_test_build_filename(). + * + * These two options correspond rather directly to the 'dist' and + * 'built' terminology that automake uses and are explicitly used to + * distinguish between the 'srcdir' and 'builddir' being separate. All + * files in your project should either be dist (in the + * `DIST_EXTRA` or `dist_schema_DATA` + * sense, in which case they will always be in the srcdir) or built (in + * the `BUILT_SOURCES` sense, in which case they will + * always be in the builddir). + * + * Note: as a general rule of automake, files that are generated only as + * part of the build-from-git process (but then are distributed with the + * tarball) always go in srcdir (even if doing a srcdir != builddir + * build from git) and are considered as distributed files. + * + * Since: 2.38 + **/ +gchar * +g_test_build_filename (GTestFileType file_type, + const gchar *first_path, + ...) 
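+/* Illustrative call (a sketch; "data" and "widget.xml" are hypothetical
+ * file names, not part of this source):
+ *
+ *   gchar *path = g_test_build_filename (G_TEST_DIST, "data", "widget.xml", NULL);
+ *   ...
+ *   g_free (path);
+ */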
+{
+  gchar *result;
+  va_list ap;
+
+  g_assert (g_test_initialized ());
+
+  va_start (ap, first_path);
+  result = g_test_build_filename_va (file_type, first_path, ap);
+  va_end (ap);
+
+  return result;
+}
+
+/**
+ * g_test_get_dir:
+ * @file_type: the type of file (built vs. distributed)
+ *
+ * Gets the pathname of the directory containing test files of the type
+ * specified by @file_type.
+ *
+ * This is approximately the same as calling g_test_build_filename("."),
+ * but you don't need to free the return value.
+ *
+ * Returns: the path of the directory, owned by GLib
+ *
+ * Since: 2.38
+ **/
+const gchar *
+g_test_get_dir (GTestFileType file_type)
+{
+  g_assert (g_test_initialized ());
+
+  if (file_type == G_TEST_DIST)
+    return test_disted_files_dir;
+  else if (file_type == G_TEST_BUILT)
+    return test_built_files_dir;
+
+  g_assert_not_reached ();
+}
+
+/**
+ * g_test_get_filename:
+ * @file_type: the type of file (built vs. distributed)
+ * @first_path: the first segment of the pathname
+ * @...: %NULL-terminated additional path segments
+ *
+ * Gets the pathname to a data file that is required for a test.
+ *
+ * This is the same as g_test_build_filename() with two differences.
+ * The first difference is that you must only use this function from within
+ * a testcase function. The second difference is that you need not free
+ * the return value -- it will be automatically freed when the testcase
+ * finishes running.
+ *
+ * It is safe to use this function from a thread inside of a testcase
+ * but you must ensure that all such uses occur before the main testcase
+ * function returns (i.e. it is best to ensure that all threads have been
+ * joined).
+ *
+ * Returns: the path, automatically freed at the end of the testcase
+ *
+ * Since: 2.38
+ **/
+const gchar *
+g_test_get_filename (GTestFileType file_type,
+                     const gchar *first_path,
+                     ...)
+{
+  gchar *result;
+  GSList *node;
+  va_list ap;
+
+  g_assert (g_test_initialized ());
+  if (test_filename_free_list == NULL)
+    g_error ("g_test_get_filename() can only be used within testcase functions");
+
+  va_start (ap, first_path);
+  result = g_test_build_filename_va (file_type, first_path, ap);
+  va_end (ap);
+
+  node = g_slist_prepend (NULL, result);
+  do
+    node->next = *test_filename_free_list;
+  while (!g_atomic_pointer_compare_and_exchange (test_filename_free_list, node->next, node));
+
+  return result;
+}
+
 /* --- macros docs START --- */
 /**
  * g_test_add: