From c89a2603412cca3d5f12f55d7bedccdbb7e035a5 Mon Sep 17 00:00:00 2001
From: Jaeyun
Date: Fri, 2 Aug 2019 19:57:25 +0900
Subject: [PATCH] [Api/Single] set timeout

Add a new function to set the timeout to wait for the output.
gst_app_sink_try_pull_sample() is supported from GStreamer 1.10, so check
the GStreamer version and return an error code if it is not supported.

Signed-off-by: Jaeyun Jung
---
 api/capi/include/nnstreamer-single.h     | 14 ++++++-
 api/capi/src/nnstreamer-capi-single.c    | 40 +++++++++++++++++++-
 tests/tizen_capi/unittest_tizen_capi.cpp | 64 ++++++++++++++++++++++++++++++++
 3 files changed, 115 insertions(+), 3 deletions(-)

diff --git a/api/capi/include/nnstreamer-single.h b/api/capi/include/nnstreamer-single.h
index 8991382..3c3932d 100644
--- a/api/capi/include/nnstreamer-single.h
+++ b/api/capi/include/nnstreamer-single.h
@@ -91,7 +91,7 @@ int ml_single_close (ml_single_h single);
  * @brief Invokes the model with the given input data.
  * @details Even if the model has flexible input data dimensions,
  *          input data frames of an instance of a model should share the same dimension.
- *          Note that this has a timeout of 3 seconds.
+ *          Note that this has a default timeout of 3 seconds. If an application wants to change the time to wait for an output, set the timeout using ml_single_set_timeout().
  * @since_tizen 5.5
  * @param[in] single The model handle to be inferred.
  * @param[in] input The input data to be inferred.
@@ -139,6 +139,18 @@ int ml_single_get_input_info (ml_single_h single, ml_tensors_info_h *info);
 int ml_single_get_output_info (ml_single_h single, ml_tensors_info_h *info);
 
 /**
+ * @brief Sets the maximum amount of time to wait for an output, in milliseconds.
+ * @since_tizen 5.5
+ * @param[in] single The model handle.
+ * @param[in] timeout The time to wait for an output, in milliseconds (should be greater than 0).
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_single_set_timeout (ml_single_h single, unsigned int timeout);
+
+/**
  * @}
  */
 #ifdef __cplusplus
diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c
index eabb799..8c0656c 100644
--- a/api/capi/src/nnstreamer-capi-single.c
+++ b/api/capi/src/nnstreamer-capi-single.c
@@ -29,6 +29,16 @@
 #include "nnstreamer-capi-private.h"
 #include "nnstreamer_plugin_api.h"
 
+#undef ML_SINGLE_SUPPORT_TIMEOUT
+#if (GST_VERSION_MAJOR > 1 || (GST_VERSION_MAJOR == 1 && GST_VERSION_MINOR >= 10))
+#define ML_SINGLE_SUPPORT_TIMEOUT
+#endif
+
+/**
+ * @brief Default time to wait for an output in appsink (3 seconds).
+ */
+#define SINGLE_DEFAULT_TIMEOUT 3000
+
 typedef struct
 {
   ml_pipeline_h pipe;
@@ -39,6 +49,8 @@ typedef struct
 
   ml_tensors_info_s in_info;
   ml_tensors_info_s out_info;
+
+  guint timeout; /* milliseconds */
 } ml_single;
 
 /**
@@ -254,6 +266,7 @@ ml_single_open (ml_single_h * single, const char *model,
   single_h->src = appsrc;
   single_h->sink = appsink;
   single_h->filter = filter;
+  single_h->timeout = SINGLE_DEFAULT_TIMEOUT;
 
   ml_tensors_info_initialize (&single_h->in_info);
   ml_tensors_info_initialize (&single_h->out_info);
@@ -422,10 +435,10 @@ ml_single_invoke (ml_single_h single,
   }
 
   /* Try to get the result */
-#if (GST_VERSION_MAJOR > 1 || (GST_VERSION_MAJOR == 1 && GST_VERSION_MINOR >= 10))
+#ifdef ML_SINGLE_SUPPORT_TIMEOUT
   /* gst_app_sink_try_pull_sample() is available at gstreamer-1.10 */
   sample =
-      gst_app_sink_try_pull_sample (GST_APP_SINK (single_h->sink), GST_SECOND * 3);
+      gst_app_sink_try_pull_sample (GST_APP_SINK (single_h->sink), GST_MSECOND * single_h->timeout);
 #else
   sample = gst_app_sink_pull_sample (GST_APP_SINK (single_h->sink));
 #endif
@@ -566,3 +579,26 @@ ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
   gst_tensors_info_free (&gst_info);
   return ML_ERROR_NONE;
 }
+
+/**
+ * @brief Sets the maximum amount of time to wait for an output, in milliseconds.
+ */
+int
+ml_single_set_timeout (ml_single_h single, unsigned int timeout)
+{
+#ifdef ML_SINGLE_SUPPORT_TIMEOUT
+  ml_single *single_h;
+
+  check_feature_state ();
+
+  if (!single || timeout == 0)
+    return ML_ERROR_INVALID_PARAMETER;
+
+  single_h = (ml_single *) single;
+
+  single_h->timeout = (guint) timeout;
+  return ML_ERROR_NONE;
+#else
+  return ML_ERROR_NOT_SUPPORTED;
+#endif
+}
diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp
index e724ba2..4b89155 100644
--- a/tests/tizen_capi/unittest_tizen_capi.cpp
+++ b/tests/tizen_capi/unittest_tizen_capi.cpp
@@ -1713,6 +1713,70 @@ TEST (nnstreamer_capi_singleshot, failure_01_n)
   ml_tensors_info_destroy (in_info);
   ml_tensors_info_destroy (out_info);
 }
+
+/**
+ * @brief Test NNStreamer single shot (tensorflow-lite)
+ * @detail Testcase with timeout.
+ */
+TEST (nnstreamer_capi_singleshot, invoke_timeout)
+{
+  ml_single_h single;
+  int status;
+
+  const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+  gchar *test_model;
+
+  /* supposed to run test in build directory */
+  if (root_path == NULL)
+    root_path = "..";
+
+  test_model = g_build_filename (root_path, "tests", "test_models", "models",
+      "mobilenet_v1_1.0_224_quant.tflite", NULL);
+  ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
+
+  status = ml_single_open (&single, test_model, NULL, NULL,
+      ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  /* set timeout to 5 ms */
+  status = ml_single_set_timeout (single, 5);
+  /* test timeout if supported (gstreamer ver >= 1.10) */
+  if (status == ML_ERROR_NONE) {
+    ml_tensors_info_h in_info;
+    ml_tensors_data_h input, output;
+    ml_tensor_dimension in_dim;
+
+    ml_tensors_info_create (&in_info);
+
+    in_dim[0] = 3;
+    in_dim[1] = 224;
+    in_dim[2] = 224;
+    in_dim[3] = 1;
+    ml_tensors_info_set_count (in_info, 1);
+    ml_tensors_info_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+    ml_tensors_info_set_tensor_dimension (in_info, 0, in_dim);
+
+    input = output = NULL;
+
+    /* generate dummy data */
+    status = ml_tensors_data_create (in_info, &input);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+    EXPECT_TRUE (input != NULL);
+
+    status = ml_single_invoke (single, input, &output);
+    EXPECT_EQ (status, ML_ERROR_TIMED_OUT);
+    EXPECT_TRUE (output == NULL);
+
+    ml_tensors_data_destroy (output);
+    ml_tensors_data_destroy (input);
+    ml_tensors_info_destroy (in_info);
+  }
+
+  status = ml_single_close (single);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  g_free (test_model);
+}
 #endif /* ENABLE_TENSORFLOW_LITE */
 
 /**
-- 
2.7.4
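Reviewer note (not part of the patch): a minimal sketch of how an application would use the new call. The helper name infer_with_timeout, the include paths, and the way the caller supplies the model path are assumptions for illustration; error handling is abbreviated. All API calls are the ones added or exercised by this patch.

/* Sketch: run single-shot inference with a caller-chosen timeout.
 * Include paths and helper name are assumptions, not from the patch. */
#include <nnstreamer.h>          /* ml_tensors_* utilities (assumed path) */
#include <nnstreamer-single.h>   /* ml_single_* API (assumed path) */

static int
infer_with_timeout (const char *model_path)
{
  ml_single_h single;
  ml_tensors_info_h in_info;
  ml_tensors_data_h input = NULL, output = NULL;
  int status;

  status = ml_single_open (&single, model_path, NULL, NULL,
      ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
  if (status != ML_ERROR_NONE)
    return status;

  /* Wait at most 100 ms for an output instead of the 3-second default.
   * Returns ML_ERROR_NOT_SUPPORTED when built against GStreamer < 1.10;
   * in that case ml_single_invoke () keeps the blocking pull behavior. */
  status = ml_single_set_timeout (single, 100);

  /* Prepare a dummy input buffer matching the model's input dimension. */
  ml_single_get_input_info (single, &in_info);
  ml_tensors_data_create (in_info, &input);

  status = ml_single_invoke (single, input, &output);
  if (status == ML_ERROR_TIMED_OUT) {
    /* The model did not produce an output in time; output stays NULL. */
  }

  ml_tensors_data_destroy (output);
  ml_tensors_data_destroy (input);
  ml_tensors_info_destroy (in_info);
  ml_single_close (single);
  return status;
}

Checking the return value of ml_single_set_timeout () matters: on GStreamer older than 1.10 the setter reports ML_ERROR_NOT_SUPPORTED and ml_single_invoke () falls back to the blocking gst_app_sink_pull_sample () path, exactly as the unit test above does before exercising the timeout.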