X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;f=glib%2Fgtestutils.c;h=dede38ac9e4f9f879f67f1aa50aaf30ba826ba6e;hb=13e15733f38a40c6ef6a1baede91cce81c86ebaa;hp=b5cb321d3f22096c8c3399404809f86f7a9bd5e7;hpb=714cbbea52f0d08b6a4a4588329128a5172e8e92;p=platform%2Fupstream%2Fglib.git diff --git a/glib/gtestutils.c b/glib/gtestutils.c index b5cb321..dede38a 100644 --- a/glib/gtestutils.c +++ b/glib/gtestutils.c @@ -13,15 +13,12 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the - * Free Software Foundation, Inc., 59 Temple Place - Suite 330, - * Boston, MA 02111-1307, USA. + * License along with this library; if not, see . */ #include "config.h" #include "gtestutils.h" -#include "gmessages-private.h" #include "gfileutils.h" #include @@ -29,14 +26,12 @@ #include #include #include +#include #include #endif #include #include #include -#ifdef HAVE_UNISTD_H -#include -#endif #ifdef HAVE_SYS_RESOURCE_H #include #endif @@ -58,45 +53,35 @@ #include "gslice.h" #include "gspawn.h" #include "glib-private.h" -#include "gmessages-private.h" /** * SECTION:testing * @title: Testing * @short_description: a test framework - * @see_also: gtester, - * gtester-report + * @see_also: [gtester][gtester], [gtester-report][gtester-report] * * GLib provides a framework for writing and maintaining unit tests * in parallel to the code they are testing. The API is designed according * to established concepts found in the other test frameworks (JUnit, NUnit, * RUnit), which in turn is based on smalltalk unit testing concepts. * - * - * - * Test case - * Tests (test methods) are grouped together with their - * fixture into test cases. - * - * - * Fixture - * A test fixture consists of fixture data and setup and - * teardown methods to establish the environment for the test - * functions. We use fresh fixtures, i.e. 
fixtures are newly set - * up and torn down around each test invocation to avoid dependencies - * between tests. - * - * - * Test suite - * Test cases can be grouped into test suites, to allow - * subsets of the available tests to be run. Test suites can be - * grouped into other test suites as well. - * - * + * - Test case: Tests (test methods) are grouped together with their + * fixture into test cases. + * + * - Fixture: A test fixture consists of fixture data and setup and + * teardown methods to establish the environment for the test + * functions. We use fresh fixtures, i.e. fixtures are newly set + * up and torn down around each test invocation to avoid dependencies + * between tests. + * + * - Test suite: Test cases can be grouped into test suites, to allow + * subsets of the available tests to be run. Test suites can be + * grouped into other test suites as well. + * * The API is designed to handle creation and registration of test suites * and test cases implicitly. A simple call like - * |[ + * |[ * g_test_add_func ("/misc/assertions", test_assertions); * ]| * creates a test suite called "misc" with a single test case named @@ -202,16 +187,16 @@ /** * GTestTrapFlags: * @G_TEST_TRAP_SILENCE_STDOUT: Redirect stdout of the test child to - * /dev/null so it cannot be observed on the - * console during test runs. The actual output is still captured - * though to allow later tests with g_test_trap_assert_stdout(). + * `/dev/null` so it cannot be observed on the console during test + * runs. The actual output is still captured though to allow later + * tests with g_test_trap_assert_stdout(). * @G_TEST_TRAP_SILENCE_STDERR: Redirect stderr of the test child to - * /dev/null so it cannot be observed on the - * console during test runs. The actual output is still captured - * though to allow later tests with g_test_trap_assert_stderr(). + * `/dev/null` so it cannot be observed on the console during test + * runs. 
The actual output is still captured though to allow later + * tests with g_test_trap_assert_stderr(). * @G_TEST_TRAP_INHERIT_STDIN: If this flag is given, stdin of the * child process is shared with stdin of its parent process. - * It is redirected to /dev/null otherwise. + * It is redirected to `/dev/null` otherwise. * * Test traps are guards around forked tests. * These flags determine what traps to set. @@ -225,7 +210,7 @@ * GTestSubprocessFlags: * @G_TEST_SUBPROCESS_INHERIT_STDIN: If this flag is given, the child * process will inherit the parent's stdin. Otherwise, the child's - * stdin is redirected to /dev/null. + * stdin is redirected to `/dev/null`. * @G_TEST_SUBPROCESS_INHERIT_STDOUT: If this flag is given, the child * process will inherit the parent's stdout. Otherwise, the child's * stdout will not be visible, but it will be captured to allow @@ -267,8 +252,7 @@ /** * g_test_trap_assert_stdout: - * @soutpattern: a glob-style - * pattern + * @soutpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] * * Assert that the stdout output of the last test subprocess matches * @soutpattern. See g_test_trap_subprocess(). @@ -278,8 +262,7 @@ /** * g_test_trap_assert_stdout_unmatched: - * @soutpattern: a glob-style - * pattern + * @soutpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] * * Assert that the stdout output of the last test subprocess * does not match @soutpattern. See g_test_trap_subprocess(). @@ -289,8 +272,7 @@ /** * g_test_trap_assert_stderr: - * @serrpattern: a glob-style - * pattern + * @serrpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] * * Assert that the stderr output of the last test subprocess * matches @serrpattern. See g_test_trap_subprocess(). 
@@ -307,8 +289,7 @@ /** * g_test_trap_assert_stderr_unmatched: - * @serrpattern: a glob-style - * pattern + * @serrpattern: a glob-style [pattern][glib-Glob-style-pattern-matching] * * Assert that the stderr output of the last test subprocess * does not match @serrpattern. See g_test_trap_subprocess(). @@ -334,7 +315,26 @@ * an error message is logged and the application is terminated. * * The macro can be turned off in final releases of code by defining - * G_DISABLE_ASSERT when compiling the application. + * `G_DISABLE_ASSERT` when compiling the application. + * + * For a version which is guaranteed to evaluate side effects in @expr, + * see g_assert_se(). + */ + +/** + * g_assert_se: + * @expr: the expression to check + * + * Debugging macro to terminate the application if the assertion + * fails. If the assertion fails (i.e. the expression is not true), + * an error message is logged and the application is terminated. + * + * The check can be turned off in final releases of code by defining + * `G_DISABLE_ASSERT` when compiling the application. + * + * Unlike g_assert(), this macro is guaranteed to evaluate side effects + * of @expr, even if checks are disabled. It is still undefined if the + * program will actually be aborted or not. */ /** @@ -345,26 +345,87 @@ * application is terminated. * * The macro can be turned off in final releases of code by defining - * G_DISABLE_ASSERT when compiling the application. + * `G_DISABLE_ASSERT` when compiling the application. + */ + +/** + * g_assert_true: + * @expr: the expression to check + * + * Debugging macro to check that an expression is true. + * + * If the assertion fails (i.e. the expression is not true), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.38 + */ + +/** + * g_assert_false: + * @expr: the expression to check + * + * Debugging macro to check an expression is false. 
+ * + * If the assertion fails (i.e. the expression is not false), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.38 + */ + +/** + * g_assert_null: + * @expr: the expression to check + * + * Debugging macro to check an expression is %NULL. + * + * If the assertion fails (i.e. the expression is not %NULL), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.38 + */ + +/** + * g_assert_nonnull: + * @expr: the expression to check + * + * Debugging macro to check an expression is not %NULL. + * + * If the assertion fails (i.e. the expression is %NULL), + * an error message is logged and the application is either + * terminated or the testcase marked as failed. + * + * See g_test_set_nonfatal_assertions(). + * + * Since: 2.40 */ /** * g_assert_cmpstr: * @s1: a string (may be %NULL) * @cmp: The comparison operator to use. - * One of ==, !=, <, >, <=, >=. + * One of ==, !=, <, >, <=, >=. * @s2: another string (may be %NULL) * - * Debugging macro to terminate the application with a warning - * message if a string comparison fails. The strings are compared - * using g_strcmp0(). + * Debugging macro to compare two strings. If the comparison fails, + * an error message is logged and the application is either terminated + * or the testcase marked as failed. + * The strings are compared using g_strcmp0(). * - * The effect of g_assert_cmpstr (s1, op, s2) is - * the same as g_assert (g_strcmp0 (s1, s2) op 0). + * The effect of `g_assert_cmpstr (s1, op, s2)` is + * the same as `g_assert_true (g_strcmp0 (s1, s2) op 0)`. * The advantage of this macro is that it can produce a message that * includes the actual values of @s1 and @s2. 
* - * |[ + * |[ * g_assert_cmpstr (mystring, ==, "fubar"); * ]| * @@ -375,14 +436,13 @@ * g_assert_cmpint: * @n1: an integer * @cmp: The comparison operator to use. - * One of ==, !=, <, >, <=, >=. + * One of ==, !=, <, >, <=, >=. * @n2: another integer * - * Debugging macro to terminate the application with a warning - * message if an integer comparison fails. + * Debugging macro to compare two integers. * - * The effect of g_assert_cmpint (n1, op, n2) is - * the same as g_assert (n1 op n2). The advantage + * The effect of `g_assert_cmpint (n1, op, n2)` is + * the same as `g_assert_true (n1 op n2)`. The advantage * of this macro is that it can produce a message that includes the * actual values of @n1 and @n2. * @@ -393,14 +453,13 @@ * g_assert_cmpuint: * @n1: an unsigned integer * @cmp: The comparison operator to use. - * One of ==, !=, <, >, <=, >=. + * One of ==, !=, <, >, <=, >=. * @n2: another unsigned integer * - * Debugging macro to terminate the application with a warning - * message if an unsigned integer comparison fails. + * Debugging macro to compare two unsigned integers. * - * The effect of g_assert_cmpuint (n1, op, n2) is - * the same as g_assert (n1 op n2). The advantage + * The effect of `g_assert_cmpuint (n1, op, n2)` is + * the same as `g_assert_true (n1 op n2)`. The advantage * of this macro is that it can produce a message that includes the * actual values of @n1 and @n2. * @@ -411,11 +470,10 @@ * g_assert_cmphex: * @n1: an unsigned integer * @cmp: The comparison operator to use. - * One of ==, !=, <, >, <=, >=. + * One of ==, !=, <, >, <=, >=. * @n2: another unsigned integer * - * Debugging macro to terminate the application with a warning - * message if an unsigned integer comparison fails. + * Debugging macro to compare to unsigned integers. * * This is a variant of g_assert_cmpuint() that displays the numbers * in hexadecimal notation in the message. 
@@ -427,14 +485,13 @@ * g_assert_cmpfloat: * @n1: an floating point number * @cmp: The comparison operator to use. - * One of ==, !=, <, >, <=, >=. + * One of ==, !=, <, >, <=, >=. * @n2: another floating point number * - * Debugging macro to terminate the application with a warning - * message if a floating point number comparison fails. + * Debugging macro to compare two floating point numbers. * - * The effect of g_assert_cmpfloat (n1, op, n2) is - * the same as g_assert (n1 op n2). The advantage + * The effect of `g_assert_cmpfloat (n1, op, n2)` is + * the same as `g_assert_true (n1 op n2)`. The advantage * of this macro is that it can produce a message that includes the * actual values of @n1 and @n2. * @@ -445,11 +502,10 @@ * g_assert_no_error: * @err: a #GError, possibly %NULL * - * Debugging macro to terminate the application with a warning - * message if a method has returned a #GError. + * Debugging macro to check that a #GError is not set. * - * The effect of g_assert_no_error (err) is - * the same as g_assert (err == NULL). The advantage + * The effect of `g_assert_no_error (err)` is + * the same as `g_assert_true (err == NULL)`. The advantage * of this macro is that it can produce a message that includes * the error message and code. * @@ -462,18 +518,18 @@ * @dom: the expected error domain (a #GQuark) * @c: the expected error code * - * Debugging macro to terminate the application with a warning - * message if a method has not returned the correct #GError. + * Debugging macro to check that a method has returned + * the correct #GError. * - * The effect of g_assert_error (err, dom, c) is - * the same as g_assert (err != NULL && err->domain - * == dom && err->code == c). The advantage of this + * The effect of `g_assert_error (err, dom, c)` is + * the same as `g_assert_true (err != NULL && err->domain + * == dom && err->code == c)`. The advantage of this * macro is that it can produce a message that includes the incorrect * error message and code. 
* * This can only be used to test for a specific error. If you want to * test that @err is set, but don't care what it's set to, just use - * g_assert (err != NULL) + * `g_assert (err != NULL)` * * Since: 2.20 */ @@ -496,6 +552,7 @@ * analysis systems like Apport and ABRT to fish out assertion messages from * core dumps, instead of having to catch them on screen output. */ +GLIB_VAR char *__glib_assert_msg; char *__glib_assert_msg = NULL; /* --- constants --- */ @@ -536,6 +593,13 @@ static void gtest_default_log_handler (const gchar *log_domain, gpointer unused_data); +typedef enum { + G_TEST_RUN_SUCCESS, + G_TEST_RUN_SKIPPED, + G_TEST_RUN_FAILURE, + G_TEST_RUN_INCOMPLETE +} GTestResult; + /* --- variables --- */ static int test_log_fd = -1; static gboolean test_mode_fatal = TRUE; @@ -544,10 +608,13 @@ static gboolean test_run_list = FALSE; static gchar *test_run_seedstr = NULL; static GRand *test_run_rand = NULL; static gchar *test_run_name = ""; +static GSList **test_filename_free_list; static guint test_run_forks = 0; static guint test_run_count = 0; -static guint test_run_success = FALSE; -static guint test_skip_count = 0; +static guint test_skipped_count = 0; +static GTestResult test_run_success = G_TEST_RUN_FAILURE; +static gchar *test_run_msg = NULL; +static guint test_startup_skip_count = 0; static GTimer *test_user_timer = NULL; static double test_user_stamp = 0; static GSList *test_paths = NULL; @@ -560,8 +627,13 @@ static char *test_trap_last_stdout = NULL; static char *test_trap_last_stderr = NULL; static char *test_uri_base = NULL; static gboolean test_debug_log = FALSE; +static gboolean test_tap_log = FALSE; +static gboolean test_nonfatal_assertions = FALSE; static DestroyEntry *test_destroy_queue = NULL; static char *test_argv0 = NULL; +static char *test_argv0_dirname; +static const char *test_disted_files_dir; +static const char *test_built_files_dir; static char *test_initial_cwd = NULL; static gboolean test_in_subprocess = FALSE; static 
GTestConfig mutable_test_config_vars = { @@ -573,6 +645,7 @@ static GTestConfig mutable_test_config_vars = { TRUE, /* test_undefined */ }; const GTestConfig * const g_test_config_vars = &mutable_test_config_vars; +static gboolean no_g_set_prgname = FALSE; /* --- functions --- */ const char* @@ -590,6 +663,8 @@ g_test_log_type_name (GTestLogType log_type) case G_TEST_LOG_MIN_RESULT: return "minperf"; case G_TEST_LOG_MAX_RESULT: return "maxperf"; case G_TEST_LOG_MESSAGE: return "message"; + case G_TEST_LOG_START_SUITE: return "start suite"; + case G_TEST_LOG_STOP_SUITE: return "stop suite"; } return "???"; } @@ -643,7 +718,7 @@ g_test_log (GTestLogType lbit, guint n_args, long double *largs) { - gboolean fail = lbit == G_TEST_LOG_STOP_CASE && largs[0] != 0; + gboolean fail; GTestLogMsg msg; gchar *astrings[3] = { NULL, NULL, NULL }; guint8 *dbuffer; @@ -652,28 +727,69 @@ g_test_log (GTestLogType lbit, switch (lbit) { case G_TEST_LOG_START_BINARY: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# random seed: %s\n", string2); + else if (g_test_verbose()) g_print ("GTest: random seed: %s\n", string2); break; + case G_TEST_LOG_START_SUITE: + if (test_tap_log) + { + if (string1[0] != 0) + g_print ("# Start of %s tests\n", string1); + } + break; + case G_TEST_LOG_STOP_SUITE: + if (test_tap_log) + { + if (string1[0] != 0) + g_print ("# End of %s tests\n", string1); + else + g_print ("1..%d\n", test_run_count); + } + break; case G_TEST_LOG_STOP_CASE: - if (g_test_verbose()) + fail = largs[0] != G_TEST_RUN_SUCCESS && largs[0] != G_TEST_RUN_SKIPPED; + if (test_tap_log) + { + g_print ("%s %d %s", fail ? "not ok" : "ok", test_run_count, string1); + if (largs[0] == G_TEST_RUN_INCOMPLETE) + g_print (" # TODO %s\n", string2 ? string2 : ""); + else if (largs[0] == G_TEST_RUN_SKIPPED) + g_print (" # SKIP %s\n", string2 ? string2 : ""); + else + g_print ("\n"); + } + else if (g_test_verbose()) g_print ("GTest: result: %s\n", fail ? 
"FAIL" : "OK"); else if (!g_test_quiet()) g_print ("%s\n", fail ? "FAIL" : "OK"); if (fail && test_mode_fatal) - abort(); + { + if (test_tap_log) + g_print ("Bail out!\n"); + abort(); + } + if (largs[0] == G_TEST_RUN_SKIPPED) + test_skipped_count++; break; case G_TEST_LOG_MIN_RESULT: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# min perf: %s\n", string1); + else if (g_test_verbose()) g_print ("(MINPERF:%s)\n", string1); break; case G_TEST_LOG_MAX_RESULT: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# max perf: %s\n", string1); + else if (g_test_verbose()) g_print ("(MAXPERF:%s)\n", string1); break; case G_TEST_LOG_MESSAGE: case G_TEST_LOG_ERROR: - if (g_test_verbose()) + if (test_tap_log) + g_print ("# %s\n", string1); + else if (g_test_verbose()) g_print ("(MSG: %s)\n", string1); break; default: ; @@ -693,7 +809,9 @@ g_test_log (GTestLogType lbit, switch (lbit) { case G_TEST_LOG_START_CASE: - if (g_test_verbose()) + if (test_tap_log) + ; + else if (g_test_verbose()) g_print ("GTest: run: %s\n", string1); else if (!g_test_quiet()) g_print ("%s: ", string1); @@ -737,6 +855,11 @@ parse_args (gint *argc_p, test_debug_log = TRUE; argv[i] = NULL; } + else if (strcmp (argv[i], "--tap") == 0) + { + test_tap_log = TRUE; + argv[i] = NULL; + } else if (strcmp ("--GTestLogFD", argv[i]) == 0 || strncmp ("--GTestLogFD=", argv[i], 13) == 0) { gchar *equal = argv[i] + 12; @@ -753,11 +876,11 @@ parse_args (gint *argc_p, { gchar *equal = argv[i] + 16; if (*equal == '=') - test_skip_count = g_ascii_strtoull (equal + 1, NULL, 0); + test_startup_skip_count = g_ascii_strtoull (equal + 1, NULL, 0); else if (i + 1 < argc) { argv[i++] = NULL; - test_skip_count = g_ascii_strtoull (argv[i], NULL, 0); + test_startup_skip_count = g_ascii_strtoull (argv[i], NULL, 0); } argv[i] = NULL; } @@ -774,7 +897,6 @@ parse_args (gint *argc_p, (void) setrlimit (RLIMIT_CORE, &limit); } #endif - _g_log_set_exit_on_fatal (); argv[i] = NULL; } else if (strcmp ("-p", argv[i]) == 0 || 
strncmp ("-p=", argv[i], 3) == 0) @@ -901,87 +1023,39 @@ parse_args (gint *argc_p, * Changed if any arguments were handled. * @argv: Address of the @argv parameter of main(). * Any parameters understood by g_test_init() stripped before return. - * @...: Reserved for future extension. Currently, you must pass %NULL. + * @...: %NULL-terminated list of special options. Currently the only + * defined option is `"no_g_set_prgname"`, which + * will cause g_test_init() to not call g_set_prgname(). * * Initialize the GLib testing framework, e.g. by seeding the * test random number generator, the name for g_get_prgname() * and parsing test related command line args. + * * So far, the following arguments are understood: - * - * - * - * - * List test cases available in a test executable. - * - * - * - * - * - * Provide a random seed to reproduce test runs using random numbers. - * - * - * - * - * Run tests verbosely. - * - * - * , - * Run tests quietly. - * - * - * - * - * Execute all tests matching TESTPATH. - * This can also be used to force a test to run that would otherwise - * be skipped (ie, a test whose name contains "/subprocess"). - * - * - * - * - * - * Execute tests according to these test modes: - * - * - * perf - * - * Performance tests, may take long and report results. - * - * - * - * slow, thorough - * - * Slow and thorough tests, may take quite long and - * maximize coverage. - * - * - * - * quick - * - * Quick tests, should run really quickly and give good coverage. - * - * - * - * undefined - * - * Tests for undefined behaviour, may provoke programming errors - * under g_test_trap_subprocess() or g_test_expect_messages() to check - * that appropriate assertions or warnings are given - * - * - * - * no-undefined - * - * Avoid tests for undefined behaviour - * - * - * - * - * - * - * - * Debug test logging output. - * - * + * + * - `-l`: List test cases available in a test executable. 
+ * - `--seed=SEED`: Provide a random seed to reproduce test + * runs using random numbers. + * - `--verbose`: Run tests verbosely. + * - `-q`, `--quiet`: Run tests quietly. + * - `-p PATH`: Execute all tests matching the given path. + * This can also be used to force a test to run that would otherwise + * be skipped (ie, a test whose name contains "/subprocess"). + * - `-m {perf|slow|thorough|quick|undefined|no-undefined}`: Execute tests according to these test modes: + * + * `perf`: Performance tests, may take long and report results. + * + * `slow`, `thorough`: Slow and thorough tests, may take quite long and maximize coverage. + * + * `quick`: Quick tests, should run really quickly and give good coverage. + * + * `undefined`: Tests for undefined behaviour, may provoke programming errors + * under g_test_trap_subprocess() or g_test_expect_messages() to check + * that appropriate assertions or warnings are given + * + * `no-undefined`: Avoid tests for undefined behaviour + * + * - `--debug-log`: Debug test logging output. 
* * Since: 2.16 */ @@ -992,9 +1066,10 @@ g_test_init (int *argc, { static char seedstr[4 + 4 * 8 + 1]; va_list args; - gpointer vararg1; + gpointer option; /* make warnings and criticals fatal for all test programs */ GLogLevelFlags fatal_mask = (GLogLevelFlags) g_log_set_always_fatal ((GLogLevelFlags) G_LOG_FATAL_MASK); + fatal_mask = (GLogLevelFlags) (fatal_mask | G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL); g_log_set_always_fatal (fatal_mask); /* check caller args */ @@ -1004,9 +1079,12 @@ g_test_init (int *argc, mutable_test_config_vars.test_initialized = TRUE; va_start (args, argv); - vararg1 = va_arg (args, gpointer); /* reserved for future extensions */ + while ((option = va_arg (args, char *))) + { + if (g_strcmp0 (option, "no_g_set_prgname") == 0) + no_g_set_prgname = TRUE; + } va_end (args); - g_return_if_fail (vararg1 == NULL); /* setup random seed string */ g_snprintf (seedstr, sizeof (seedstr), "R02S%08x%08x%08x%08x", g_random_int(), g_random_int(), g_random_int(), g_random_int()); @@ -1014,7 +1092,8 @@ g_test_init (int *argc, /* parse args, sets up mode, changes seed, etc. 
*/ parse_args (argc, argv); - if (!g_get_prgname()) + + if (!g_get_prgname() && !no_g_set_prgname) g_set_prgname ((*argv)[0]); /* verify GRand reliability, needed for reliable seeds */ @@ -1034,6 +1113,25 @@ g_test_init (int *argc, /* report program start */ g_log_set_default_handler (gtest_default_log_handler, NULL); g_test_log (G_TEST_LOG_START_BINARY, g_get_prgname(), test_run_seedstr, 0, NULL); + + test_argv0_dirname = g_path_get_dirname (test_argv0); + + /* Make sure we get the real dirname that the test was run from */ + if (g_str_has_suffix (test_argv0_dirname, "/.libs")) + { + gchar *tmp; + tmp = g_path_get_dirname (test_argv0_dirname); + g_free (test_argv0_dirname); + test_argv0_dirname = tmp; + } + + test_disted_files_dir = g_getenv ("G_TEST_SRCDIR"); + if (!test_disted_files_dir) + test_disted_files_dir = test_argv0_dirname; + + test_built_files_dir = g_getenv ("G_TEST_BUILDDIR"); + if (!test_built_files_dir) + test_built_files_dir = test_argv0_dirname; } static void @@ -1369,20 +1467,55 @@ g_test_get_root (void) * * Runs all tests under the toplevel suite which can be retrieved * with g_test_get_root(). Similar to g_test_run_suite(), the test - * cases to be run are filtered according to - * test path arguments (-p testpath) as - * parsed by g_test_init(). - * g_test_run_suite() or g_test_run() may only be called once - * in a program. - * - * Returns: 0 on success + * cases to be run are filtered according to test path arguments + * (`-p testpath`) as parsed by g_test_init(). g_test_run_suite() + * or g_test_run() may only be called once in a program. + * + * In general, the tests and sub-suites within each suite are run in + * the order in which they are defined. 
However, note that prior to + * GLib 2.36, there was a bug in the `g_test_add_*` + * functions which caused them to create multiple suites with the same + * name, meaning that if you created tests "/foo/simple", + * "/bar/simple", and "/foo/using-bar" in that order, they would get + * run in that order (since g_test_run() would run the first "/foo" + * suite, then the "/bar" suite, then the second "/foo" suite). As of + * 2.36, this bug is fixed, and adding the tests in that order would + * result in a running order of "/foo/simple", "/foo/using-bar", + * "/bar/simple". If this new ordering is sub-optimal (because it puts + * more-complicated tests before simpler ones, making it harder to + * figure out exactly what has failed), you can fix it by changing the + * test paths to group tests by suite in a way that will result in the + * desired running order. Eg, "/simple/foo", "/simple/bar", + * "/complex/foo-using-bar". + * + * However, you should never make the actual result of a test depend + * on the order that tests are run in. If you need to ensure that some + * particular code runs before or after a given test case, use + * g_test_add(), which lets you specify setup and teardown functions. + * + * If all tests are skipped, this function will return 0 if + * producing TAP output, or 77 (treated as "skip test" by Automake) otherwise. + * + * Returns: 0 on success, 1 on failure (assuming it returns at all), + * 0 or 77 if all tests were skipped with g_test_skip() * * Since: 2.16 */ int g_test_run (void) { - return g_test_run_suite (g_test_get_root()); + if (g_test_run_suite (g_test_get_root()) != 0) + return 1; + + /* 77 is special to Automake's default driver, but not Automake's TAP driver + * or Perl's prove(1) TAP driver. 
*/ + if (test_tap_log) + return 0; + + if (test_run_count > 0 && test_run_count == test_skipped_count) + return 77; + else + return 0; } /** @@ -1542,7 +1675,107 @@ g_test_add_vtable (const char *testpath, void g_test_fail (void) { - test_run_success = FALSE; + test_run_success = G_TEST_RUN_FAILURE; +} + +/** + * g_test_incomplete: + * @msg: (allow-none): explanation + * + * Indicates that a test failed because of some incomplete + * functionality. This function can be called multiple times + * from the same test. + * + * Calling this function will not stop the test from running, you + * need to return from the test function yourself. So you can + * produce additional diagnostic messages or even continue running + * the test. + * + * If not called from inside a test, this function does nothing. + * + * Since: 2.38 + */ +void +g_test_incomplete (const gchar *msg) +{ + test_run_success = G_TEST_RUN_INCOMPLETE; + g_free (test_run_msg); + test_run_msg = g_strdup (msg); +} + +/** + * g_test_skip: + * @msg: (allow-none): explanation + * + * Indicates that a test was skipped. + * + * Calling this function will not stop the test from running, you + * need to return from the test function yourself. So you can + * produce additional diagnostic messages or even continue running + * the test. + * + * If not called from inside a test, this function does nothing. + * + * Since: 2.38 + */ +void +g_test_skip (const gchar *msg) +{ + test_run_success = G_TEST_RUN_SKIPPED; + g_free (test_run_msg); + test_run_msg = g_strdup (msg); +} + +/** + * g_test_failed: + * + * Returns whether a test has already failed. This will + * be the case when g_test_fail(), g_test_incomplete() + * or g_test_skip() have been called, but also if an + * assertion has failed. + * + * This can be useful to return early from a test if + * continuing after a failed assertion might be harmful. + * + * The return value of this function is only meaningful + * if it is called from inside a test function. 
+ * + * Returns: %TRUE if the test has failed + * + * Since: 2.38 + */ +gboolean +g_test_failed (void) +{ + return test_run_success != G_TEST_RUN_SUCCESS; +} + +/** + * g_test_set_nonfatal_assertions: + * + * Changes the behaviour of g_assert_cmpstr(), g_assert_cmpint(), + * g_assert_cmpuint(), g_assert_cmphex(), g_assert_cmpfloat(), + * g_assert_true(), g_assert_false(), g_assert_null(), g_assert_no_error(), + * g_assert_error(), g_test_assert_expected_messages() and the various + * g_test_trap_assert_*() macros to not abort to program, but instead + * call g_test_fail() and continue. (This also changes the behavior of + * g_test_fail() so that it will not cause the test program to abort + * after completing the failed test.) + * + * Note that the g_assert_not_reached() and g_assert() are not + * affected by this. + * + * This function can only be called after g_test_init(). + * + * Since: 2.38 + */ +void +g_test_set_nonfatal_assertions (void) +{ + if (!g_test_config_vars->test_initialized) + g_error ("g_test_set_nonfatal_assertions called without g_test_init"); + test_nonfatal_assertions = TRUE; + test_mode_fatal = FALSE; } /** @@ -1565,8 +1798,7 @@ g_test_fail (void) * * If @testpath includes the component "subprocess" anywhere in it, * the test will be skipped by default, and only run if explicitly - * required via the command-line option or - * g_test_trap_subprocess(). + * required via the `-p` command-line option or g_test_trap_subprocess(). * * Since: 2.16 */ @@ -1604,8 +1836,7 @@ g_test_add_func (const char *testpath, * * If @testpath includes the component "subprocess" anywhere in it, * the test will be skipped by default, and only run if explicitly - * required via the command-line option or - * g_test_trap_subprocess(). + * required via the `-p` command-line option or g_test_trap_subprocess(). 
* * Since: 2.16 */ @@ -1793,7 +2024,11 @@ static gboolean test_case_run (GTestCase *tc) { gchar *old_name = test_run_name, *old_base = g_strdup (test_uri_base); - gboolean success = TRUE; + GSList **old_free_list, *filename_free_list = NULL; + gboolean success = G_TEST_RUN_SUCCESS; + + old_free_list = test_filename_free_list; + test_filename_free_list = &filename_free_list; test_run_name = g_strconcat (old_name, "/", tc->name, NULL); if (strstr (test_run_name, "/subprocess")) @@ -1818,7 +2053,7 @@ test_case_run (GTestCase *tc) } } - if (++test_run_count <= test_skip_count) + if (++test_run_count <= test_startup_skip_count) g_test_log (G_TEST_LOG_SKIP_CASE, test_run_name, NULL, 0, NULL); else if (test_run_list) { @@ -1832,7 +2067,8 @@ test_case_run (GTestCase *tc) void *fixture; g_test_log (G_TEST_LOG_START_CASE, test_run_name, NULL, 0, NULL); test_run_forks = 0; - test_run_success = TRUE; + test_run_success = G_TEST_RUN_SUCCESS; + g_clear_pointer (&test_run_msg, g_free); g_test_log_set_fatal_handler (NULL, NULL); g_timer_start (test_run_timer); fixture = tc->fixture_size ? g_malloc0 (tc->fixture_size) : tc->test_data; @@ -1854,21 +2090,25 @@ test_case_run (GTestCase *tc) g_free (fixture); g_timer_stop (test_run_timer); success = test_run_success; - test_run_success = FALSE; - largs[0] = success ? 
0 : 1; /* OK */ + test_run_success = G_TEST_RUN_FAILURE; + largs[0] = success; /* OK */ largs[1] = test_run_forks; largs[2] = g_timer_elapsed (test_run_timer, NULL); - g_test_log (G_TEST_LOG_STOP_CASE, NULL, NULL, G_N_ELEMENTS (largs), largs); + g_test_log (G_TEST_LOG_STOP_CASE, test_run_name, test_run_msg, G_N_ELEMENTS (largs), largs); + g_clear_pointer (&test_run_msg, g_free); g_timer_destroy (test_run_timer); } out: + g_slist_free_full (filename_free_list, g_free); + test_filename_free_list = old_free_list; g_free (test_run_name); test_run_name = old_name; g_free (test_uri_base); test_uri_base = old_base; - return success; + return (success == G_TEST_RUN_SUCCESS || + success == G_TEST_RUN_SKIPPED); } static int @@ -1881,6 +2121,8 @@ g_test_run_suite_internal (GTestSuite *suite, g_return_val_if_fail (suite != NULL, -1); + g_test_log (G_TEST_LOG_START_SUITE, suite->name, NULL, 0, NULL); + while (path[0] == '/') path++; l = strlen (path); @@ -1911,6 +2153,8 @@ g_test_run_suite_internal (GTestSuite *suite, g_free (test_run_name); test_run_name = old_name; + g_test_log (G_TEST_LOG_STOP_SUITE, suite->name, NULL, 0, NULL); + return n_bad; } @@ -1920,8 +2164,10 @@ g_test_run_suite_internal (GTestSuite *suite, * * Execute the tests within @suite and all nested #GTestSuites. * The test suites to be executed are filtered according to - * test path arguments (-p testpath) - * as parsed by g_test_init(). + * test path arguments (`-p testpath`) as parsed by g_test_init(). + * See the g_test_run() documentation for more information on the + * order that tests are run in. + * * g_test_run_suite() or g_test_run() may only be called once * in a program. 
* @@ -2033,17 +2279,35 @@ g_assertion_message (const char *domain, " ", message, NULL); g_printerr ("**\n%s\n", s); + g_test_log (G_TEST_LOG_ERROR, s, NULL, 0, NULL); + + if (test_nonfatal_assertions) + { + g_free (s); + g_test_fail (); + return; + } + /* store assertion message in global variable, so that it can be found in a * core dump */ if (__glib_assert_msg != NULL) - /* free the old one */ - free (__glib_assert_msg); + /* free the old one */ + free (__glib_assert_msg); __glib_assert_msg = (char*) malloc (strlen (s) + 1); strcpy (__glib_assert_msg, s); - g_test_log (G_TEST_LOG_ERROR, s, NULL, 0, NULL); g_free (s); - _g_log_abort (); + + if (test_in_subprocess) + { + /* If this is a test case subprocess then it probably hit this + * assertion on purpose, so just exit() rather than abort()ing, + * to avoid triggering any system crash-reporting daemon. + */ + _exit (1); + } + else + abort (); } void @@ -2053,9 +2317,22 @@ g_assertion_message_expr (const char *domain, const char *func, const char *expr) { - char *s = g_strconcat ("assertion failed: (", expr, ")", NULL); + char *s; + if (!expr) + s = g_strdup ("code should not be reached"); + else + s = g_strconcat ("assertion failed: (", expr, ")", NULL); g_assertion_message (domain, file, line, func, s); g_free (s); + + /* Normally g_assertion_message() won't return, but we need this for + * when test_nonfatal_assertions is set, since + * g_assertion_message_expr() is used for always-fatal assertions. 
+ */ + if (test_in_subprocess) + _exit (1); + else + abort (); } void @@ -2252,7 +2529,7 @@ child_read (GIOChannel *io, GIOCondition cond, gpointer user_data) GIOStatus status; gsize nread, nwrote, total; gchar buf[4096]; - int echo_fd = -1; + FILE *echo_file = NULL; status = g_io_channel_read_chars (io, buf, sizeof (buf), &nread, NULL); if (status == G_IO_STATUS_ERROR || status == G_IO_STATUS_EOF) @@ -2273,25 +2550,22 @@ child_read (GIOChannel *io, GIOCondition cond, gpointer user_data) { g_string_append_len (data->stdout_str, buf, nread); if (data->echo_stdout) - echo_fd = STDOUT_FILENO; + echo_file = stdout; } else { g_string_append_len (data->stderr_str, buf, nread); if (data->echo_stderr) - echo_fd = STDERR_FILENO; + echo_file = stderr; } - if (echo_fd != -1) + if (echo_file) { for (total = 0; total < nread; total += nwrote) { - do - nwrote = write (echo_fd, buf + total, nread - total); - while (nwrote == -1 && errno == EINTR); - if (nwrote == -1) + nwrote = fwrite (buf + total, 1, nread - total, echo_file); + if (nwrote == 0) g_error ("write failed: %s", g_strerror (errno)); - total += nwrote; } } @@ -2381,7 +2655,7 @@ wait_for_child (GPid pid, * The forking parent process then asserts successful child program * termination and validates child program outputs. 
* - * |[ + * |[ * static void * test_fork_patterns (void) * { @@ -2389,7 +2663,7 @@ wait_for_child (GPid pid, * { * g_print ("some stdout text: somagic17\n"); * g_printerr ("some stderr text: semagic43\n"); - * exit (0); /* successful test run */ + * exit (0); // successful test run * } * g_test_trap_assert_passed (); * g_test_trap_assert_stdout ("*somagic17*"); @@ -2425,7 +2699,11 @@ g_test_trap_fork (guint64 usec_timeout, close (stdout_pipe[0]); close (stderr_pipe[0]); if (!(test_trap_flags & G_TEST_TRAP_INHERIT_STDIN)) - fd0 = g_open ("/dev/null", O_RDONLY, 0); + { + fd0 = g_open ("/dev/null", O_RDONLY, 0); + if (fd0 < 0) + g_error ("failed to open /dev/null for stdin redirection"); + } if (sane_dup2 (stdout_pipe[1], 1) < 0 || sane_dup2 (stderr_pipe[1], 2) < 0 || (fd0 >= 0 && sane_dup2 (fd0, 0) < 0)) g_error ("failed to dup2() in forked test program: %s", g_strerror (errno)); if (fd0 >= 3) @@ -2457,18 +2735,23 @@ g_test_trap_fork (guint64 usec_timeout, /** * g_test_trap_subprocess: - * @test_name: Test to run in a subprocess + * @test_path: (allow-none): Test to run in a subprocess * @usec_timeout: Timeout for the subprocess test in micro seconds. * @test_flags: Flags to modify subprocess behaviour. * - * Respawns the test program to run only @test_name in a subprocess. + * Respawns the test program to run only @test_path in a subprocess. * This can be used for a test case that might not return, or that - * might abort. @test_name will normally be the name of the parent - * test, followed by "/subprocess/" and then a name - * for the specific subtest (or just ending with - * "/subprocess" if the test only has one child - * test); tests with names of this form will automatically be skipped - * in the parent process. + * might abort. + * + * If @test_path is %NULL then the same test is re-run in a subprocess. + * You can use g_test_subprocess() to determine whether the test is in + * a subprocess or not. 
+ * + * @test_path can also be the name of the parent test, followed by + * "`/subprocess/`" and then a name for the specific subtest (or just + * ending with "`/subprocess`" if the test only has one child test); + * tests with names of this form will automatically be skipped in the + * parent process. * * If @usec_timeout is non-0, the test subprocess is aborted and * considered failing if its run time exceeds it. @@ -2483,25 +2766,26 @@ g_test_trap_fork (guint64 usec_timeout, * cannot be used if @test_flags specifies that the child should * inherit the parent stdout/stderr.) * - * If your main () needs to behave differently in + * If your `main ()` needs to behave differently in * the subprocess, you can call g_test_subprocess() (after calling * g_test_init()) to see whether you are in a subprocess. * * The following example tests that calling - * my_object_new(1000000) will abort with an error + * `my_object_new(1000000)` will abort with an error * message. * - * |[ + * |[ * static void * test_create_large_object_subprocess (void) * { - * my_object_new (1000000); - * } + * if (g_test_subprocess ()) + * { + * my_object_new (1000000); + * return; + * } * - * static void - * test_create_large_object (void) - * { - * g_test_trap_subprocess ("/myobject/create_large_object/subprocess", 0, 0); + * // Reruns this same test in a subprocess + * g_test_trap_subprocess (NULL, 0, 0); * g_test_trap_assert_failed (); * g_test_trap_assert_stderr ("*ERROR*too large*"); * } @@ -2513,12 +2797,6 @@ g_test_trap_fork (guint64 usec_timeout, * * g_test_add_func ("/myobject/create_large_object", * test_create_large_object); - * /* Because of the '/subprocess' in the name, this test will - * * not be run by the g_test_run () call below. 
- * */ - * g_test_add_func ("/myobject/create_large_object/subprocess", - * test_create_large_object_subprocess); - * * return g_test_run (); * } * ]| @@ -2539,8 +2817,15 @@ g_test_trap_subprocess (const char *test_path, /* Sanity check that they used GTestSubprocessFlags, not GTestTrapFlags */ g_assert ((test_flags & (G_TEST_TRAP_INHERIT_STDIN | G_TEST_TRAP_SILENCE_STDOUT | G_TEST_TRAP_SILENCE_STDERR)) == 0); - if (!g_test_suite_case_exists (g_test_get_root (), test_path)) - g_error ("g_test_trap_subprocess: test does not exist: %s", test_path); + if (test_path) + { + if (!g_test_suite_case_exists (g_test_get_root (), test_path)) + g_error ("g_test_trap_subprocess: test does not exist: %s", test_path); + } + else + { + test_path = test_run_name; + } if (g_test_verbose ()) g_print ("GTest: subprocess: %s\n", test_path); @@ -2630,7 +2915,7 @@ g_test_trap_has_passed (void) gboolean g_test_trap_reached_timeout (void) { - return test_trap_last_status != G_TEST_STATUS_TIMED_OUT; + return test_trap_last_status == G_TEST_STATUS_TIMED_OUT; } void @@ -2688,6 +2973,7 @@ g_test_trap_assertions (const char *domain, g_assertion_message (domain, file, line, func, msg); g_free (msg); } + g_free (process_id); } static void @@ -2794,9 +3080,11 @@ g_test_log_extract (GTestLogBuffer *tbuffer) tbuffer->msgs = g_slist_prepend (tbuffer->msgs, g_memdup (&msg, sizeof (msg))); return TRUE; } + + g_free (msg.nums); + g_strfreev (msg.strings); } - g_free (msg.nums); - g_strfreev (msg.strings); + g_error ("corrupt log stream from test program"); return FALSE; } @@ -2884,6 +3172,184 @@ g_test_log_msg_free (GTestLogMsg *tmsg) g_free (tmsg); } +static gchar * +g_test_build_filename_va (GTestFileType file_type, + const gchar *first_path, + va_list ap) +{ + const gchar *pathv[16]; + gint num_path_segments; + + if (file_type == G_TEST_DIST) + pathv[0] = test_disted_files_dir; + else if (file_type == G_TEST_BUILT) + pathv[0] = test_built_files_dir; + else + g_assert_not_reached (); + + pathv[1] = 
first_path; + + for (num_path_segments = 2; num_path_segments < G_N_ELEMENTS (pathv); num_path_segments++) + { + pathv[num_path_segments] = va_arg (ap, const char *); + if (pathv[num_path_segments] == NULL) + break; + } + + g_assert_cmpint (num_path_segments, <, G_N_ELEMENTS (pathv)); + + return g_build_filenamev ((gchar **) pathv); +} + +/** + * g_test_build_filename: + * @file_type: the type of file (built vs. distributed) + * @first_path: the first segment of the pathname + * @...: %NULL-terminated additional path segments + * + * Creates the pathname to a data file that is required for a test. + * + * This function is conceptually similar to g_build_filename() except + * that the first argument has been replaced with a #GTestFileType + * argument. + * + * The data file should either have been distributed with the module + * containing the test (%G_TEST_DIST) or built as part of the build + * system of that module (%G_TEST_BUILT). + * + * In order for this function to work in srcdir != builddir situations, + * the G_TEST_SRCDIR and G_TEST_BUILDDIR environment variables need to + * have been defined. As of 2.38, this is done by the glib.mk + * included in GLib. Please ensure that your copy is up to date before + * using this function. + * + * In case neither variable is set, this function will fall back to + * using the dirname portion of argv[0], possibly removing ".libs". + * This allows for casual running of tests directly from the commandline + * in the srcdir == builddir case and should also support running of + * installed tests, assuming the data files have been installed in the + * same relative path as the test binary. 
+ * + * Returns: the path of the file, to be freed using g_free() + * + * Since: 2.38 + **/ +/** + * GTestFileType: + * @G_TEST_DIST: a file that was included in the distribution tarball + * @G_TEST_BUILT: a file that was built on the compiling machine + * + * The type of file to return the filename for, when used with + * g_test_build_filename(). + * + * These two options correspond rather directly to the 'dist' and + * 'built' terminology that automake uses and are explicitly used to + * distinguish between the 'srcdir' and 'builddir' being separate. All + * files in your project should either be dist (in the + * `DIST_EXTRA` or `dist_schema_DATA` + * sense, in which case they will always be in the srcdir) or built (in + * the `BUILT_SOURCES` sense, in which case they will + * always be in the builddir). + * + * Note: as a general rule of automake, files that are generated only as + * part of the build-from-git process (but then are distributed with the + * tarball) always go in srcdir (even if doing a srcdir != builddir + * build from git) and are considered as distributed files. + * + * Since: 2.38 + **/ +gchar * +g_test_build_filename (GTestFileType file_type, + const gchar *first_path, + ...) +{ + gchar *result; + va_list ap; + + g_assert (g_test_initialized ()); + + va_start (ap, first_path); + result = g_test_build_filename_va (file_type, first_path, ap); + va_end (ap); + + return result; +} + +/** + * g_test_get_dir: + * @file_type: the type of file (built vs. distributed) + * + * Gets the pathname of the directory containing test files of the type + * specified by @file_type. + * + * This is approximately the same as calling g_test_build_filename("."), + * but you don't need to free the return value. 
+ * + * Returns: the path of the directory, owned by GLib + * + * Since: 2.38 + **/ +const gchar * +g_test_get_dir (GTestFileType file_type) +{ + g_assert (g_test_initialized ()); + + if (file_type == G_TEST_DIST) + return test_disted_files_dir; + else if (file_type == G_TEST_BUILT) + return test_built_files_dir; + + g_assert_not_reached (); +} + +/** + * g_test_get_filename: + * @file_type: the type of file (built vs. distributed) + * @first_path: the first segment of the pathname + * @...: %NULL-terminated additional path segments + * + * Gets the pathname to a data file that is required for a test. + * + * This is the same as g_test_build_filename() with two differences. + * The first difference is that you must only use this function from within + * a testcase function. The second difference is that you need not free + * the return value -- it will be automatically freed when the testcase + * finishes running. + * + * It is safe to use this function from a thread inside of a testcase + * but you must ensure that all such uses occur before the main testcase + * function returns (ie: it is best to ensure that all threads have been + * joined). + * + * Returns: the path, automatically freed at the end of the testcase + * + * Since: 2.38 + **/ +const gchar * +g_test_get_filename (GTestFileType file_type, + const gchar *first_path, + ...) +{ + gchar *result; + GSList *node; + va_list ap; + + g_assert (g_test_initialized ()); + if (test_filename_free_list == NULL) + g_error ("g_test_get_filename() can only be used within testcase functions"); + + va_start (ap, first_path); + result = g_test_build_filename_va (file_type, first_path, ap); + va_end (ap); + + node = g_slist_prepend (NULL, result); + do + node->next = *test_filename_free_list; + while (!g_atomic_pointer_compare_and_exchange (test_filename_free_list, node->next, node)); + + return result; +} + /* --- macros docs START --- */ /** * g_test_add: