From: Parichay Kapoor
Date: Tue, 17 Sep 2019 10:47:04 +0000 (+0900)
Subject: [Single] Added unittests
X-Git-Tag: accepted/tizen/unified/20190925.220436~17
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4777fa746ec55330d2bf31fe23bc2c5988bb0580;p=platform%2Fupstream%2Fnnstreamer.git

[Single] Added unittests

Added more unittests for single-shot API testing:
1. Multiple runs in parallel with varying timeouts
2. Changing the timeout/closing the handle while the API is being called from a different thread
3. Closing the handle multiple times
4. Checking for a valid result after increasing the timeout, when the earlier invocation had timed out

Signed-off-by: Parichay Kapoor
---

diff --git a/api/capi/src/nnstreamer-capi-single-new.c b/api/capi/src/nnstreamer-capi-single-new.c
index d029f2a..d0714ba 100644
--- a/api/capi/src/nnstreamer-capi-single-new.c
+++ b/api/capi/src/nnstreamer-capi-single-new.c
@@ -33,6 +33,8 @@
 
 #include "tensor_filter_single.h"
 
+#define ML_SINGLE_MAGIC 0xfeedfeed
+
 /**
  * @brief Default time to wait for an output in appsink (3 seconds).
  */
@@ -44,18 +46,27 @@
   (ts).tv_nsec = ((msec) % 1000) * 1000000; \
 } while (0)
 
+/** verify the magic number for ml_single obj */
+#define ML_SINGLE_MAGIC_CHECK(arg) do { \
+  if (arg->magic != ML_SINGLE_MAGIC) { \
+    ml_loge ("The given param, single is invalid."); \
+    return ML_ERROR_INVALID_PARAMETER; \
+  } \
+} while (0)
+
 /* ML single api data structure for handle */
 typedef struct
 {
   GTensorFilterSingle *filter;  /**< tensor filter element */
   ml_tensors_info_s in_info;    /**< info about input */
   ml_tensors_info_s out_info;   /**< info about output */
+  guint magic;                  /**< code to verify valid handle */
 
   pthread_t thread;             /**< thread for invoking */
   pthread_mutex_t mutex;        /**< mutex for synchronization */
   pthread_cond_t cond;          /**< condition for synchronization */
   ml_tensors_data_h input;      /**< input received from user */
-  ml_tensors_data_h * output;   /**< output to be sent back to user */
+  ml_tensors_data_h *output;    /**< output to be sent back to user */
   struct timespec timeout;      /**< timeout for invoking */
   gboolean data_ready;          /**< data is ready to be processed */
   gboolean join;                /**< thread should be joined */
@@ -66,7 +77,7 @@ typedef struct
  * @brief thread to execute calls to invoke
  */
 static void *
-invoke_thread (void * arg)
+invoke_thread (void *arg)
 {
   ml_single *single_h;
   GTensorFilterSingleClass *klass;
@@ -108,7 +119,8 @@ invoke_thread (void * arg)
     for (i = 0; i < single_h->out_info.num_tensors; i++) {
       /** memory will be allocated by tensor_filter_single */
       out_tensors[i].data = NULL;
-      out_tensors[i].size = ml_tensor_info_get_size (&single_h->out_info.info[i]);
+      out_tensors[i].size =
+          ml_tensor_info_get_size (&single_h->out_info.info[i]);
       out_tensors[i].type = single_h->out_info.info[i].type;
     }
     pthread_mutex_unlock (&single_h->mutex);
@@ -139,7 +151,7 @@ invoke_thread (void * arg)
     }
 
     /** loop over to wait for the next element */
-wait_for_next:
+  wait_for_next:
     single_h->status = status;
     single_h->data_ready = FALSE;
     pthread_cond_broadcast (&single_h->cond);
@@ -147,7 +159,6 @@ wait_for_next:
 
 exit:
   single_h->data_ready = FALSE;
-  pthread_cond_broadcast (&single_h->cond);
   pthread_mutex_unlock (&single_h->mutex);
   return NULL;
 }
@@ -156,8 +167,8 @@ exit:
  * @brief Set the info for input/output tensors
  */
 static int
-ml_single_set_inout_tensors_info (GObject *object,
-    const gchar *prefix, ml_tensors_info_s *tensors_info)
+ml_single_set_inout_tensors_info (GObject * object,
+    const gchar * prefix, ml_tensors_info_s * tensors_info)
 {
   int status = ML_ERROR_NONE;
   GstTensorsInfo info;
@@ -332,6 +343,8 @@ ml_single_open (ml_single_h * single, const char *model,
   /** Create ml_single object */
   single_h = g_new0 (ml_single, 1);
   g_assert (single_h);
+  single_h->magic = ML_SINGLE_MAGIC;
+
   single_h->filter = g_object_new (G_TYPE_TENSOR_FILTER_SINGLE, NULL);
   MSEC_TO_TIMESPEC (single_h->timeout, SINGLE_DEFAULT_TIMEOUT);
   if (single_h->filter == NULL) {
@@ -385,7 +398,7 @@ ml_single_open (ml_single_h * single, const char *model,
   ml_tensors_info_initialize (&single_h->in_info);
   ml_tensors_info_initialize (&single_h->out_info);
 
-  /* 5. Start the nnfw to egt inout configurations if needed */
+  /* 5. Start the nnfw to get inout configurations if needed */
   klass = g_type_class_peek (G_TYPE_TENSOR_FILTER_SINGLE);
   if (!klass) {
     status = ML_ERROR_INVALID_PARAMETER;
@@ -399,7 +412,7 @@ ml_single_open (ml_single_h * single, const char *model,
   /* 6. Set in/out configs and metadata */
   if (in_tensors_info) {
     /** set the tensors info here */
-    if (!klass->input_configured(single_h->filter)) {
+    if (!klass->input_configured (single_h->filter)) {
       status = ml_single_set_inout_tensors_info (filter_obj, "input",
           in_tensors_info);
       if (status != ML_ERROR_NONE)
@@ -438,7 +451,7 @@ ml_single_open (ml_single_h * single, const char *model,
 
   if (out_tensors_info) {
     /** set the tensors info here */
-    if (!klass->output_configured(single_h->filter)) {
+    if (!klass->output_configured (single_h->filter)) {
       status = ml_single_set_inout_tensors_info (filter_obj, "output",
           out_tensors_info);
       if (status != ML_ERROR_NONE)
@@ -480,7 +493,8 @@ ml_single_open (ml_single_h * single, const char *model,
   single_h->data_ready = FALSE;
   single_h->join = FALSE;
 
-  if (pthread_create (&single_h->thread, NULL, invoke_thread, (void *)single_h) < 0) {
+  if (pthread_create (&single_h->thread, NULL, invoke_thread,
+          (void *) single_h) < 0) {
     ml_loge ("Failed to create the invoke thread.");
     status = ML_ERROR_UNKNOWN;
     goto error;
@@ -510,13 +524,16 @@ ml_single_close (ml_single_h single)
   }
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
+  single_h->magic = 0;
 
   pthread_mutex_lock (&single_h->mutex);
   single_h->join = TRUE;
   pthread_cond_broadcast (&single_h->cond);
   pthread_mutex_unlock (&single_h->mutex);
   pthread_join (single_h->thread, NULL);
 
+  /** locking ensures correctness with parallel calls on close */
   if (single_h->filter) {
     GTensorFilterSingleClass *klass;
     klass = g_type_class_peek (G_TYPE_TENSOR_FILTER_SINGLE);
@@ -557,6 +574,7 @@ ml_single_invoke (ml_single_h single,
   in_data = (ml_tensors_data_s *) input;
   *output = NULL;
 
+  ML_SINGLE_MAGIC_CHECK (single_h);
   if (!single_h->filter || single_h->join) {
     ml_loge ("The given param is invalid, model is missing.");
     return ML_ERROR_INVALID_PARAMETER;
@@ -602,8 +620,7 @@ ml_single_invoke (ml_single_h single,
       status = ML_ERROR_TIMED_OUT;
       /** This is set to notify invoke_thread to not process if timedout */
       single_h->data_ready = FALSE;
-    }
-    else if (err == EPERM)
+    } else if (err == EPERM)
       status = ML_ERROR_PERMISSION_DENIED;
     else
       status = ML_ERROR_INVALID_PARAMETER;
@@ -632,6 +649,7 @@ ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
     return ML_ERROR_INVALID_PARAMETER;
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
 
   /* allocate handle for tensors info */
   ml_tensors_info_create (info);
@@ -686,6 +704,7 @@ ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
     return ML_ERROR_INVALID_PARAMETER;
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
 
   /* allocate handle for tensors info */
   ml_tensors_info_create (info);
@@ -735,7 +754,11 @@ ml_single_set_timeout (ml_single_h single, unsigned int timeout)
     return ML_ERROR_INVALID_PARAMETER;
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
 
+  pthread_mutex_lock (&single_h->mutex);
   MSEC_TO_TIMESPEC (single_h->timeout, timeout);
+  pthread_mutex_unlock (&single_h->mutex);
+
   return ML_ERROR_NONE;
 }
diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c
index f01bc92..1ccec58 100644
--- a/api/capi/src/nnstreamer-capi-single.c
+++ b/api/capi/src/nnstreamer-capi-single.c
@@ -35,6 +35,16 @@
 #define ML_SINGLE_SUPPORT_TIMEOUT
 #endif
 
+#define ML_SINGLE_MAGIC 0xfeedfeed
+
+/** verify the magic number for ml_single obj */
+#define ML_SINGLE_MAGIC_CHECK(arg) do { \
+  if (arg->magic != ML_SINGLE_MAGIC) { \
+    ml_loge ("The given param, single is invalid."); \
+    return ML_ERROR_INVALID_PARAMETER; \
+  } \
+} while (0)
+
 /**
  * @brief Default time to wait for an output in appsink (3 seconds).
  */
@@ -51,9 +61,9 @@ typedef struct
 
   ml_tensors_info_s in_info;
   ml_tensors_info_s out_info;
+  guint magic;      /**< code to verify valid handle */
 
   GMutex lock;      /**< Lock for internal values */
   gboolean clear_previous_buffer;  /**< Previous buffer was timed out, need to clear old buffer */
-  gboolean is_valid;  /**< True if the single handle is valid */
   guint timeout;    /**< Timeout in milliseconds */
 } ml_single;
@@ -331,13 +341,13 @@ ml_single_open (ml_single_h * single, const char *model,
   single_h = g_new0 (ml_single, 1);
   g_assert (single_h);
 
+  single_h->magic = ML_SINGLE_MAGIC;
   single_h->pipe = pipe;
   single_h->src = appsrc;
   single_h->sink = appsink;
   single_h->filter = filter;
   single_h->timeout = SINGLE_DEFAULT_TIMEOUT;
   single_h->clear_previous_buffer = FALSE;
-  single_h->is_valid = FALSE;
   ml_tensors_info_initialize (&single_h->in_info);
   ml_tensors_info_initialize (&single_h->out_info);
   g_mutex_init (&single_h->lock);
@@ -400,10 +410,6 @@ ml_single_open (ml_single_h * single, const char *model,
     goto error;
   }
 
-  g_mutex_lock (&single_h->lock);
-  single_h->is_valid = TRUE;
-  g_mutex_unlock (&single_h->lock);
-
 error:
   if (status != ML_ERROR_NONE) {
     ml_single_close (single_h);
@@ -431,9 +437,10 @@ ml_single_close (ml_single_h single)
   }
 
   single_h = (ml_single *) single;
-  g_mutex_lock (&single_h->lock);
+  ML_SINGLE_MAGIC_CHECK (single_h);
 
-  single_h->is_valid = FALSE;
+  single_h->magic = 0;
+  g_mutex_lock (&single_h->lock);
 
   if (single_h->src) {
     gst_object_unref (single_h->src);
@@ -486,17 +493,12 @@ ml_single_invoke (ml_single_h single,
   }
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
   g_mutex_lock (&single_h->lock);
 
   in_data = (ml_tensors_data_s *) input;
   *output = NULL;
 
-  if (!single_h->is_valid) {
-    ml_loge ("The single handle is invalid.");
-    status = ML_ERROR_INVALID_PARAMETER;
-    goto done;
-  }
-
   /* Validate input data */
   if (in_data->num_tensors != single_h->in_info.num_tensors) {
     ml_loge ("The given param input is invalid, different number of memory blocks.");
@@ -558,8 +560,12 @@ ml_single_invoke (ml_single_h single,
 
   if (!sample) {
     ml_loge ("Failed to get the result from sink element.");
+#ifdef ML_SINGLE_SUPPORT_TIMEOUT
     single_h->clear_previous_buffer = TRUE;
     status = ML_ERROR_TIMED_OUT;
+#else
+    status = ML_ERROR_UNKNOWN;
+#endif
     goto done;
   }
 
@@ -607,14 +613,10 @@ ml_single_get_tensors_info (ml_single_h single, gboolean is_input,
     return ML_ERROR_INVALID_PARAMETER;
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
   g_mutex_lock (&single_h->lock);
 
-  if (!single_h->is_valid) {
-    ml_loge ("The single handle is invalid.");
-    status = ML_ERROR_INVALID_PARAMETER;
-  } else {
-    ml_single_get_tensors_info_from_filter (single_h->filter, is_input, info);
-  }
+  ml_single_get_tensors_info_from_filter (single_h->filter, is_input, info);
 
   g_mutex_unlock (&single_h->lock);
   return status;
@@ -654,14 +656,10 @@ ml_single_set_timeout (ml_single_h single, unsigned int timeout)
     return ML_ERROR_INVALID_PARAMETER;
 
   single_h = (ml_single *) single;
+  ML_SINGLE_MAGIC_CHECK (single_h);
   g_mutex_lock (&single_h->lock);
 
-  if (single_h->is_valid) {
-    single_h->timeout = (guint) timeout;
-  } else {
-    ml_loge ("The single handle is invalid.");
-    status = ML_ERROR_INVALID_PARAMETER;
-  }
+  single_h->timeout = (guint) timeout;
 
   g_mutex_unlock (&single_h->lock);
   return status;
diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp
index e48cf73..7513231 100644
--- a/tests/tizen_capi/unittest_tizen_capi.cpp
+++ b/tests/tizen_capi/unittest_tizen_capi.cpp
@@ -1876,6 +1876,91 @@ TEST (nnstreamer_capi_singleshot, failure_01_n)
 }
 
 /**
+ * @brief Structure containing values to run single shot
+ */
+typedef struct {
+  gchar *test_model;
+  guint num_runs;
+  guint timeout;
+  guint min_time_to_run;
+  gboolean expect;
+  ml_single_h *single;
+} single_shot_thread_data;
+
+/**
+ * @brief Open and run on single shot API with provided data
+ */
+void * single_shot_loop_test (void * arg)
+{
+  guint i;
+  int status = ML_ERROR_NONE;
+  ml_single_h single;
+  single_shot_thread_data *ss_data = (single_shot_thread_data *) arg;
+
+  status = ml_single_open (&single, ss_data->test_model, NULL, NULL,
+      ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+  if (ss_data->expect)
+    EXPECT_EQ (status, ML_ERROR_NONE);
+  ss_data->single = &single;
+
+  /* set timeout */
+  if (ss_data->timeout != 0) {
+    status = ml_single_set_timeout (single, ss_data->timeout);
+    if (ss_data->expect)
+      EXPECT_NE (status, ML_ERROR_INVALID_PARAMETER);
+    if (status == ML_ERROR_NOT_SUPPORTED)
+      ss_data->timeout = 0;
+  }
+
+  ml_tensors_info_h in_info;
+  ml_tensors_data_h input, output;
+  ml_tensor_dimension in_dim;
+
+  ml_tensors_info_create (&in_info);
+
+  in_dim[0] = 3;
+  in_dim[1] = 224;
+  in_dim[2] = 224;
+  in_dim[3] = 1;
+  ml_tensors_info_set_count (in_info, 1);
+  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
+
+  input = output = NULL;
+
+  /* generate dummy data */
+  status = ml_tensors_data_create (in_info, &input);
+  if (ss_data->expect) {
+    EXPECT_EQ (status, ML_ERROR_NONE);
+    EXPECT_TRUE (input != NULL);
+  }
+
+  for (i=0; i<ss_data->num_runs; i++) {
+    status = ml_single_invoke (single, input, &output);
+    if (ss_data->expect) {
+      if (ss_data->timeout != 0 && ss_data->timeout < ss_data->min_time_to_run) {
+        EXPECT_EQ (status, ML_ERROR_TIMED_OUT);
+        EXPECT_TRUE (output == NULL);
+      } else {
+        EXPECT_EQ (status, ML_ERROR_NONE);
+        EXPECT_TRUE (output != NULL);
+      }
+    }
+    output = NULL;
+  }
+
+  ml_tensors_data_destroy (output);
+  ml_tensors_data_destroy (input);
+  ml_tensors_info_destroy (in_info);
+
+  status = ml_single_close (single);
+  if (ss_data->expect)
+    EXPECT_EQ (status, ML_ERROR_NONE);
+
+  return NULL;
+}
+
+/**
  * @brief Test NNStreamer single shot (tensorflow-lite)
  * @detail Testcase with timeout.
  */
@@ -1933,6 +2018,13 @@ TEST (nnstreamer_capi_singleshot, invoke_timeout)
   EXPECT_EQ (status, ML_ERROR_TIMED_OUT);
   EXPECT_TRUE (output == NULL);
 
+  /* set timeout 5 s */
+  status = ml_single_set_timeout (single, 5000);
+
+  status = ml_single_invoke (single, input, &output);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_TRUE (output != NULL);
+
   ml_tensors_data_destroy (output);
   ml_tensors_data_destroy (input);
   ml_tensors_info_destroy (in_info);
@@ -1943,6 +2035,109 @@
   g_free (test_model);
 }
+
+/**
+ * @brief Test NNStreamer single shot (tensorflow-lite)
+ * @detail Testcase with multiple runs in parallel. Some of the
+ *         running instances will timeout, however others will not.
+ */
+TEST (nnstreamer_capi_singleshot, parallel_runs)
+{
+  const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+  gchar *test_model;
+  const gint num_threads = 3;
+  const gint num_cases = 3;
+  pthread_t thread[num_threads * num_cases];
+  single_shot_thread_data ss_data[num_cases];
+  guint i, j;
+
+  /* supposed to run test in build directory */
+  if (root_path == NULL)
+    root_path = "..";
+
+  test_model = g_build_filename (root_path, "tests", "test_models", "models",
+      "mobilenet_v1_1.0_224_quant.tflite", NULL);
+  ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
+
+  for (i=0; i
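
For reference, the timeout-recovery behaviour exercised above (an invoke that times out, followed by a successful invoke after the timeout is raised) looks roughly like the following from the caller's side. This is a minimal sketch, not part of the patch: the header names, model path, tensor shape, and concrete timeout values are assumptions for illustration only.

/* Minimal, hypothetical caller of the single-shot C API (not from this patch). */
#include <nnstreamer.h>           /* assumed public header: tensors info/data API */
#include <nnstreamer-single.h>    /* assumed public header: single-shot API */

int
main (void)
{
  ml_single_h single;
  ml_tensors_info_h in_info;
  ml_tensors_data_h input = NULL, output = NULL;
  ml_tensor_dimension in_dim;
  int status;

  /* Open the model; in/out info is auto-configured by the nnfw. */
  status = ml_single_open (&single, "mobilenet_v1_1.0_224_quant.tflite",
      NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
  if (status != ML_ERROR_NONE)
    return status;

  /* Describe a single uint8 input tensor (assumed 3x224x224x1) and
   * allocate a dummy input buffer. */
  in_dim[0] = 3;
  in_dim[1] = 224;
  in_dim[2] = 224;
  in_dim[3] = 1;
  ml_tensors_info_create (&in_info);
  ml_tensors_info_set_count (in_info, 1);
  ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
  ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
  ml_tensors_data_create (in_info, &input);

  /* A timeout far shorter than one inference should fail with
   * ML_ERROR_TIMED_OUT and leave output as NULL (on builds without
   * timeout support this call may return ML_ERROR_NOT_SUPPORTED). */
  ml_single_set_timeout (single, 1);
  status = ml_single_invoke (single, input, &output);

  if (status == ML_ERROR_TIMED_OUT) {
    /* Raise the timeout and retry: the handle stays usable and the
     * next invocation is expected to return a valid output. */
    ml_single_set_timeout (single, 5000);
    status = ml_single_invoke (single, input, &output);
  }

  if (status == ML_ERROR_NONE)
    ml_tensors_data_destroy (output);

  ml_tensors_data_destroy (input);
  ml_tensors_info_destroy (in_info);
  return ml_single_close (single);
}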