* @ingroup itc
* @{
*/
-
+bool g_bCustomCallbackHit;
//& set: nnstreamer
/**
return 0;
}
+/**
+ * @function MlPipelineSinkCallbackTensor
+ * @description A tensor-sink callback for sink handle in a pipeline
+ */
+static void MlPipelineSinkCallbackTensor(const ml_tensors_data_h data, const ml_tensors_info_h info, void *user_data)
+{
+ FPRINTF("[%s:%d] MlPipelineSinkCallbackTensor callback hit\\n", __FILE__, __LINE__);
+ /* user_data is a caller-owned guint counter tracking how many buffers reached this sink pad. */
+ guint *count = (guint *) user_data;
+
+ *count = *count + 1;
+ /* Signal the test's polling loop that the sink callback fired at least once. */
+ g_bCallbackHit = true;
+}
+
+/**
+ * @function TestIfCustomCallback
+ * @description Invoke callback for tensor_if custom condition.
+ */
+static int TestIfCustomCallback (const ml_tensors_data_h data, const ml_tensors_info_h info, int *result, void *user_data)
+{
+ FPRINTF("[%s:%d] TestIfCustomCallback callback hit\\n", __FILE__, __LINE__);
+ void *data_ptr = NULL;
+ guint sum = 0, i;
+ size_t data_size = 0;
+
+ int nRet = ml_tensors_data_get_tensor_data (data, 0, &data_ptr, &data_size);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet));
+
+ /* PRINT_RESULT_NORETURN only logs; guard so a failed fetch cannot leave
+ * data_ptr/data_size uninitialized and make the loop below read garbage.
+ * On failure sum stays 0, which takes the default (*result = 1) path. */
+ if (nRet == ML_ERROR_NONE && data_ptr != NULL)
+ {
+ for (i = 0; i < data_size; i++)
+ sum += ((guint8 *) data_ptr)[i];
+ }
+
+ /* Sum value 30 means that the sixth buffer has arrived.*/
+ if (sum >= 30)
+ *result = 0;
+ else
+ *result = 1;
+ g_bCustomCallbackHit = true;
+ return 0;
+}
+
/**
* @function ITs_nnstreamer_capi_startup
* @description Called before each test, set the service boolean true
return 0;
}
+/**
+* @testcase ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p
+* @since_tizen 6.5
+* @author SRID(nibha.sharma)
+* @reviewer SRID(j.abhishek)
+* @type auto
+* @description Registers a tensor custom condition and unregisters it.
+* @scenario Registers a tensor custom condition and unregisters it.
+* @apicovered ml_pipeline_tensor_if_custom_register, ml_pipeline_tensor_if_custom_unregister
+* @passcase When ml_pipeline_tensor_if_custom_register, ml_pipeline_tensor_if_custom_unregister is successful.
+* @failcase If target API fails or any precondition API fails
+* @precondition None
+* @postcondition None
+*/
+//& purpose: Registers a tensor custom condition and unregisters it.
+//& type: auto
+int ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p(void)
+{
+ START_TEST;
+ int nRet = -1,nTimeoutId = 0;
+ const gchar *psz_tmpdir = g_get_tmp_dir ();
+ const gchar *psz_dirname = "nns-tizen-XXXXXX";
+ gchar *psz_fullpath = g_build_path ("/", psz_tmpdir, psz_dirname, NULL);
+ gchar *pszdir = g_mkdtemp ((gchar *)psz_fullpath);
+ gchar *psz_file = g_build_path ("/", pszdir, "output", NULL);
+ ml_pipeline_h hPipelinehandle;
+ ml_pipeline_src_h hPipeSrcHandle;
+ ml_pipeline_sink_h hPipeSinkHandle;
+ ml_pipeline_if_h hCustom;
+ ml_tensors_info_h hTensorinfo;
+ ml_tensors_data_h hTensorData;
+ unsigned int nGetCount = 0;
+ ml_tensor_type_e eTensorType = ML_TENSOR_TYPE_UNKNOWN;
+ uint8_t *nUIntArray[LIMIT];
+ uint8_t *nContent = NULL;
+ guint i;
+ gsize nLength;
+
+ gchar *pszNnpipeline = g_strdup_printf (PIPELINE_STREAM, psz_file);
+ guint *unsinkCount = (guint *)g_malloc0 (sizeof (guint));
+ CHECK_HANDLE(unsinkCount, "unsinkCount");
+ *unsinkCount = 0;
+
+ /* NOTE(review): hCustom is only unregistered on the success path; the
+ * error-path cleanups below leak the custom-condition registration. */
+ g_bCustomCallbackHit = false;
+ nRet = ml_pipeline_tensor_if_custom_register ("tif_custom_cb_name", TestIfCustomCallback, NULL, &hCustom);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_tensor_if_custom_register", NnStreamerGetError(nRet), FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ /* Validate the handle this call actually produced (hCustom), not the
+ * still-uninitialized sink handle. */
+ CHECK_HANDLE_CLEANUP(hCustom, "ml_pipeline_tensor_if_custom_register", FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_construct (pszNnpipeline, NULL, NULL, &hPipelinehandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_construct", NnStreamerGetError(nRet), FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ /* Validate the pipeline handle produced by ml_pipeline_construct. */
+ CHECK_HANDLE_CLEANUP(hPipelinehandle, "ml_pipeline_construct", FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ g_bCallbackHit = false;
+ nRet = ml_pipeline_sink_register (hPipelinehandle, "sink_false", MlPipelineSinkCallbackTensor, unsinkCount, &hPipeSinkHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_sink_register", NnStreamerGetError(nRet), ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ CHECK_HANDLE_CLEANUP(hPipeSinkHandle, "ml_pipeline_sink_register", ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_src_get_handle (hPipelinehandle, "appsrc", &hPipeSrcHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_handle", NnStreamerGetError(nRet),ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ CHECK_HANDLE_CLEANUP(hPipeSrcHandle, "ml_pipeline_src_get_handle", ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_start (hPipelinehandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_start", NnStreamerGetError(nRet),ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ RUN_POLLING_LOOP;
+ /* Prepare LIMIT 4-byte uint8 buffers with a per-buffer recognizable pattern. */
+ for (i = 0; i < LIMIT; i++) {
+ nUIntArray[i] = (uint8_t *)g_malloc (4);
+ CHECK_HANDLE(nUIntArray[i], "nUIntArray[i]");
+ nUIntArray[i][0] = i + 4;
+ nUIntArray[i][1] = i + 1;
+ nUIntArray[i][2] = i + 3;
+ nUIntArray[i][3] = i + 2;
+ }
+
+ nRet = ml_pipeline_src_get_tensors_info (hPipeSrcHandle, &hTensorinfo);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_tensors_info", NnStreamerGetError(nRet),ml_pipeline_stop (hPipelinehandle);ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ CHECK_HANDLE_CLEANUP(hTensorinfo, "ml_pipeline_src_get_tensors_info",ml_pipeline_stop (hPipelinehandle); ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_tensors_info_get_count (hTensorinfo, &nGetCount);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet),ml_pipeline_stop (hPipelinehandle);ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ if (nGetCount != 1U)
+ {
+ FPRINTF("[%s:%d] ml_tensors_info_get_count value mismatch for nGetCount,nGetCount returned = (%u)\\n", __FILE__, __LINE__,nGetCount);
+ TensorInfoPipelineStop(hPipelinehandle,hPipeSrcHandle,hPipeSinkHandle,hTensorinfo);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+
+ nRet = ml_tensors_info_get_tensor_type (hTensorinfo, 0, &eTensorType);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet),ml_pipeline_stop (hPipelinehandle);ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ if (eTensorType != ML_TENSOR_TYPE_UINT8)
+ {
+ FPRINTF("[%s:%d] ml_tensors_info_get_tensor_type value mismatch for eTensorType, expected ML_TENSOR_TYPE_UINT8, returned = (%d)\\n", __FILE__, __LINE__,eTensorType);
+ TensorInfoPipelineStop(hPipelinehandle,hPipeSrcHandle,hPipeSinkHandle,hTensorinfo);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+
+ nRet = ml_tensors_data_create (hTensorinfo, &hTensorData);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_pipeline_stop (hPipelinehandle);ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ /* Set tensor data and push buffers to source pad */
+ for (i = 0; i < LIMIT; i++) {
+ nRet = ml_tensors_data_set_tensor_data (hTensorData, 0, nUIntArray[i], 4);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet),ml_pipeline_stop (hPipelinehandle);ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_src_input_data (hPipeSrcHandle, hTensorData, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_input_data", NnStreamerGetError(nRet),ml_pipeline_stop (hPipelinehandle);ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ g_usleep (50000); /* 50ms. Wait a bit. */
+ }
+ PRINT_RESULT_CLEANUP(true, g_bCallbackHit, "ml_pipeline_custom_easy_filter_register", NnStreamerGetError(nRet), ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ PRINT_RESULT_CLEANUP(true, g_bCustomCallbackHit, "ml_pipeline_tensor_if_custom_register", NnStreamerGetError(nRet), ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_stop (hPipelinehandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_stop", NnStreamerGetError(nRet),ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+ PRINT_RESULT_CLEANUP(true, g_bCallbackHit, "ml_pipeline_stop", NnStreamerGetError(nRet), ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_src_release_handle (hPipeSrcHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_release_handle", NnStreamerGetError(nRet),ml_pipeline_sink_unregister (hPipeSinkHandle); ml_pipeline_destroy (hPipelinehandle); ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_sink_unregister (hPipeSinkHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_sink_unregister", NnStreamerGetError(nRet), ml_pipeline_destroy (hPipelinehandle);ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_destroy (hPipelinehandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_destroy", NnStreamerGetError(nRet),ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ nRet = ml_pipeline_tensor_if_custom_unregister (hCustom);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_tensor_if_custom_unregister", NnStreamerGetError(nRet),ml_tensors_info_destroy (hTensorinfo);ml_tensors_data_destroy (hTensorData);FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file));
+
+ if( (g_file_get_contents (psz_file, (gchar **)&nContent, &nLength, NULL) != TRUE))
+ {
+ FPRINTF("[%s:%d] g_file_get_contents returned false\\n", __FILE__, __LINE__);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+ if( nLength != (4U * 5))
+ {
+ FPRINTF("[%s:%d] g_file_get_contents value mismatch for nLength,nLength returned = (%u)\\n", __FILE__, __LINE__,(unsigned int)nLength);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+ CHECK_HANDLE(nContent, "nContent");
+
+ /* Check if the TRUE path data is received correctly. */
+ if (nContent && nLength == 20) {
+ for (i = 0; i < 5; i++) {
+ if( nContent[i * 4 + 0] != i + 4)
+ {
+ FPRINTF("[%s:%d] nContent[i * 4 + 0] value mismatch\\n", __FILE__, __LINE__);
+ g_free (nContent);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+ if( nContent[i * 4 + 1] != i + 1)
+ {
+ FPRINTF("[%s:%d] nContent[i * 4 + 1] value mismatch\\n", __FILE__, __LINE__);
+ g_free (nContent);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+ if( nContent[i * 4 + 2] != i + 3)
+ {
+ FPRINTF("[%s:%d] nContent[i * 4 + 2] value mismatch\\n", __FILE__, __LINE__);
+ g_free (nContent);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+ if( nContent[i * 4 + 3] != i + 2)
+ {
+ FPRINTF("[%s:%d] nContent[i * 4 + 3] value mismatch\\n", __FILE__, __LINE__);
+ g_free (nContent);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+ }
+ }
+ g_free (nContent);
+
+ /* The FALSE path receives 5 buffers. */
+ if( *unsinkCount != 5U)
+ {
+ FPRINTF("[%s:%d] value mismatch for unsinkCount,unsinkCount returned = (%u)\\n", __FILE__, __LINE__,*unsinkCount);
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+ return 1;
+ }
+
+ for (i = 0; i < LIMIT; i++) {
+ g_free (nUIntArray[i]);
+ }
+ TensorInfoDataDestroy(hTensorinfo,hTensorData);
+ FreeCustomTensorMemory(&pszNnpipeline,&unsinkCount,&psz_fullpath,&psz_file);
+
+ return 0;
+}
+
/** @} */
/** @} */
return szErrorVal;
}
+/**
+ * @function FreeCustomTensorMemory
+ * @description Releases the heap allocations made by the tensor_if custom test
+ * (pipeline string, sink counter, temp dir path, output file path).
+ * NOTE(review): callers pass &psz_fullpath (gchar **) for t_psz_fullpath while the
+ * parameter is declared gchar *** and dereferenced only once — the extra level of
+ * indirection looks like a typo; verify against the prototype before changing.
+ */
+void FreeCustomTensorMemory(gchar **t_pszNnpipeline,guint **t_unsinkCount,gchar ***t_psz_fullpath,gchar **t_psz_file)
+{
+ g_free (*t_pszNnpipeline);
+ g_free (*t_unsinkCount);
+ g_free (*t_psz_fullpath);
+ g_free (*t_psz_file);
+}
+/**
+ * @function TensorInfoDataDestroy
+ * @description Destroys a tensors-info handle and a tensors-data handle;
+ * failures are logged via PRINT_RESULT_NORETURN but not propagated.
+ */
+void TensorInfoDataDestroy(ml_tensors_info_h hTensorinfo,ml_tensors_data_h hTensorData)
+{
+ int nRet = -1;
+ nRet = ml_tensors_info_destroy (hTensorinfo);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+ nRet = ml_tensors_data_destroy (hTensorData);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+}
+
+/**
+ * @function TensorInfoPipelineStop
+ * @description Best-effort teardown for the pipeline test: stops the pipeline,
+ * releases the src handle, unregisters the sink, and destroys the tensors-info
+ * handle, in that order. Failures are logged but not propagated.
+ */
+void TensorInfoPipelineStop(ml_pipeline_h hPipelinehandle,ml_pipeline_src_h hPipeSrcHandle, ml_pipeline_sink_h hPipeSinkHandle,ml_tensors_info_h hTensorinfo)
+{
+ int nRet = -1;
+ nRet = ml_pipeline_stop (hPipelinehandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_stop", NnStreamerGetError(nRet));
+ nRet = ml_pipeline_src_release_handle (hPipeSrcHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_src_release_handle", NnStreamerGetError(nRet));
+ nRet = ml_pipeline_sink_unregister (hPipeSinkHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_sink_unregister", NnStreamerGetError(nRet));
+ nRet = ml_tensors_info_destroy (hTensorinfo);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+}
/** @} */
char* NnStreamerGetError(int nRet);
gboolean Timeout(gpointer data);
+void FreeCustomTensorMemory(gchar **t_pszNnpipeline,guint **t_unsinkCount,gchar ***t_psz_fullpath,gchar **t_psz_file);
+void TensorInfoDataDestroy(ml_tensors_info_h hTensorinfo,ml_tensors_data_h hTensorData);
+void TensorInfoPipelineStop(ml_pipeline_h hPipelinehandle,ml_pipeline_src_h hPipeSrcHandle, ml_pipeline_sink_h hPipeSinkHandle,ml_tensors_info_h hTensorinfo);
#define API_NAMESPACE "NNSTREAMER_ITC"
#define SETENUM 1
#define GETENUM 0
#define TIMEOUT_CB 10000
+#define LIMIT 10
+#define PIPELINE_STREAM "appsrc name=appsrc ! other/tensor,dimension=(string)4:1:1:1, type=(string)uint8,framerate=(fraction)0/1 ! " "tensor_if name=tif compared-value=CUSTOM compared-value-option=tif_custom_cb_name then=PASSTHROUGH else=PASSTHROUGH " "tif.src_0 ! queue ! filesink location=\"%s\" buffer-mode=unbuffered " "tif.src_1 ! queue ! tensor_sink name=sink_false sync=false async=false"
#define FEATURE_ML "http://tizen.org/feature/machine_learning"
#define FEATURE_ML_INTERFACE "http://tizen.org/feature/machine_learning.inference"
return 0;
}
+
+/**
+* @testcase ITc_nnstreamer_single_ml_single_open_full_p
+* @since_tizen 6.5
+* @author SRID(nibha.sharma)
+* @reviewer SRID(j.abhishek)
+* @type auto
+* @description To check NNStreamer single open with custom options
+* @scenario To check NNStreamer single open with custom options
+* @apicovered ml_single_open_full
+* @passcase When ml_single_open_full and precondition API is successful.
+* @failcase If target API fails or any precondition API fails
+* @precondition None
+* @postcondition None
+*/
+//& purpose: To check NNStreamer single open with custom options
+//& type: auto
+int ITc_nnstreamer_single_ml_single_open_full_p(void)
+{
+ START_TEST;
+ /* Custom option string passed through to the framework backend (here "NumThreads:2"). */
+ const char * pszCutomOpt = "NumThreads:2";
+ int nRet = -1;
+ ml_single_h hSingleHandle = NULL;
+
+ /* Open the bundled test model with NULL in/out info (auto-detected) and custom options. */
+ nRet = ml_single_open_full(&hSingleHandle, MlTestModel, NULL, NULL,ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY, pszCutomOpt);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open_full", NnStreamerGetError(nRet));
+ CHECK_HANDLE(hSingleHandle, "ml_single_open_full");
+
+ nRet = ml_single_close(hSingleHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+
+ return 0;
+}
+
+/**
+* @testcase ITc_nnstreamer_single_ml_single_invoke_fast_p
+* @since_tizen 6.5
+* @author SRID(nibha.sharma)
+* @reviewer SRID(j.abhishek)
+* @type auto
+* @description To check NNStreamer invoke model with preallocated output
+* @scenario To check NNStreamer invoke model with preallocated output
+* @apicovered ml_single_invoke_fast
+* @passcase When ml_single_invoke_fast and precondition API is successful.
+* @failcase If target API fails or any precondition API fails
+* @precondition None
+* @postcondition None
+*/
+//& purpose: To check NNStreamer invoke model with preallocated output
+//& type: auto
+int ITc_nnstreamer_single_ml_single_invoke_fast_p (void)
+{
+ START_TEST;
+
+ int nRet = -1;
+ ml_single_h hSingleHandle = NULL;
+ ml_tensors_info_h hInputTensorsInfoHandle = NULL;
+ ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
+ ml_tensor_dimension inputTensorDimension;
+ ml_tensor_dimension outputTensorDimension;
+ /* Dimensions matching the bundled test model: 3x224x224x1 uint8 in, 1001x1x1x1 uint8 out. */
+ inputTensorDimension[0] = 3;
+ inputTensorDimension[1] = 224;
+ inputTensorDimension[2] = 224;
+ inputTensorDimension[3] = 1;
+ outputTensorDimension[0] = 1001;
+ outputTensorDimension[1] = 1;
+ outputTensorDimension[2] = 1;
+ outputTensorDimension[3] = 1;
+
+ ml_tensors_data_h hInputDataHandle = NULL;
+ ml_tensors_data_h hOutputDataHandle = NULL;
+
+ nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
+ CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
+
+ nRet = ml_tensors_info_set_count (hInputTensorsInfoHandle, 1);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_set_tensor_type (hInputTensorsInfoHandle, 0, ML_TENSOR_TYPE_UINT8);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_set_tensor_dimension (hInputTensorsInfoHandle, 0, inputTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_create (&hOutputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_tensors_info_create", ml_tensors_info_destroy (hInputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_set_count (hOutputTensorsInfoHandle, 1);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_set_tensor_type (hOutputTensorsInfoHandle, 0, ML_TENSOR_TYPE_UINT8);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ /* Validate the handle ml_single_open produced (hSingleHandle), not the input info handle. */
+ CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ /* generate dummy data */
+ nRet = ml_tensors_data_create (hInputTensorsInfoHandle, &hInputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ /* Also release the already-created input data handle on failure to avoid a leak. */
+ nRet = ml_tensors_data_create (hOutputTensorsInfoHandle, &hOutputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_tensors_data_destroy (hInputDataHandle);ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_tensors_data_create",ml_tensors_data_destroy (hInputDataHandle);ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ /* Target API: invoke with a caller-preallocated output buffer. */
+ nRet = ml_single_invoke_fast (hSingleHandle, hInputDataHandle, hOutputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_fast", NnStreamerGetError(nRet),ml_tensors_data_destroy (hInputDataHandle);ml_tensors_data_destroy (hOutputDataHandle);ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_fast",ml_tensors_data_destroy (hInputDataHandle);ml_tensors_data_destroy (hOutputDataHandle);ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ nRet = ml_tensors_data_destroy (hInputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_data_destroy (hOutputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_single_close (hSingleHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+
+ nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_info_destroy (hOutputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+ return 0;
+}
int ITc_nnstreamer_pipeline_ml_pipeline_sink_register_unregister_p(void)
{
START_TEST;
- int nRet = -1 ,nTimeoutId = 0;;
+ int nRet = -1 ,nTimeoutId = 0;
static ml_pipeline_sink_h hPipeSinkHandle = NULL;
g_bCallbackHit = false;
return 0;
}
+/**
+* @testcase ITc_nnstreamer_pipeline_ml_check_element_availability_p
+* @since_tizen 6.5
+* @author SRID(nibha.sharma)
+* @reviewer SRID(j.abhishek)
+* @type auto
+* @description Checks the NNStreamer utility API that reports whether a given GStreamer element is available.
+* @scenario Calls ml_check_element_availability for a set of known elements and verifies each is reported available.
+* @apicovered ml_check_element_availability
+* @passcase When ml_check_element_availability and Precondition API is successful.
+* @failcase If target API fails or any precondition API fails
+* @precondition None
+* @postcondition None
+*/
+//& purpose: To check NNStreamer Utility for checking an element availability.
+//& type: auto
+int ITc_nnstreamer_pipeline_ml_check_element_availability_p(void)
+{
+ START_TEST;
+ int nRet = -1, nIndex;
+ bool b_isAvailable = false;
+ /* const-correct: ml_check_element_availability() takes a const char * element name */
+ const char *psz_testString[] = {"tensor_converter", "tensor_filter", "appsrc"};
+ /* derive the count from the array itself so adding an element cannot desync the bound */
+ int nArrSize = (int)(sizeof(psz_testString) / sizeof(psz_testString[0]));
+
+ for (nIndex = 0; nIndex < nArrSize; nIndex++)
+ {
+  /* reset the out-parameter before each call so a stale true cannot mask a failure */
+  b_isAvailable = false;
+  nRet = ml_check_element_availability (psz_testString[nIndex], &b_isAvailable);
+  PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_check_element_availability", NnStreamerGetError(nRet));
+  if (b_isAvailable == false)
+  {
+   FPRINTF("[%s:%d] ml_check_element_availability api is failed, bool value b_isAvailable is false\\n", __FILE__, __LINE__);
+   return 1;
+  }
+ }
+
+ return 0;
+}
+
/** @} */
/** @} */
extern int ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p (void);
extern int ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p (void);
extern int ITc_nnstreamer_pipeline_tizensensor_p (void);
+extern int ITc_nnstreamer_pipeline_ml_check_element_availability_p(void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p (void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_clone_p (void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_validate_p (void);
extern int ITc_nnstreamer_single_ml_get_output_info_p (void);
extern int ITc_nnstreamer_single_ml_set_get_property_p(void);
extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void);
+extern int ITc_nnstreamer_single_ml_single_invoke_fast_p (void);
+extern int ITc_nnstreamer_single_ml_single_open_full_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_get_release_handle_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p(void);
extern int ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p(void);
+extern int ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p(void);
testcase tc_array[] = {
{"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p", ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p", ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_tizensensor_p", ITc_nnstreamer_pipeline_tizensensor_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
+{"ITc_nnstreamer_pipeline_ml_check_element_availability_p", ITc_nnstreamer_pipeline_ml_check_element_availability_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_clone_p", ITc_nnstreamer_tensors_ml_tensors_info_clone_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_validate_p", ITc_nnstreamer_tensors_ml_tensors_info_validate_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_single_open_full_p", ITc_nnstreamer_single_ml_single_open_full_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_single_invoke_fast_p", ITc_nnstreamer_single_ml_single_invoke_fast_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_get_release_handle_p", ITc_nnstreamer_ml_pipeline_element_get_release_handle_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p", ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
+{"ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p", ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{NULL, NULL}
};
extern int ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p (void);
extern int ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p (void);
extern int ITc_nnstreamer_pipeline_tizensensor_p (void);
+extern int ITc_nnstreamer_pipeline_ml_check_element_availability_p(void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p (void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_clone_p (void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_validate_p (void);
extern int ITc_nnstreamer_single_ml_set_timeout_p (void);
extern int ITc_nnstreamer_single_ml_set_get_property_p(void);
extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void);
+extern int ITc_nnstreamer_single_ml_single_invoke_fast_p (void);
+extern int ITc_nnstreamer_single_ml_single_open_full_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_get_release_handle_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p(void);
extern int ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p(void);
+extern int ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p(void);
testcase tc_array[] = {
{"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p", ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p", ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_tizensensor_p", ITc_nnstreamer_pipeline_tizensensor_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
+{"ITc_nnstreamer_pipeline_ml_check_element_availability_p", ITc_nnstreamer_pipeline_ml_check_element_availability_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_clone_p", ITc_nnstreamer_tensors_ml_tensors_info_clone_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_validate_p", ITc_nnstreamer_tensors_ml_tensors_info_validate_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_single_open_full_p", ITc_nnstreamer_single_ml_single_open_full_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_single_invoke_fast_p", ITc_nnstreamer_single_ml_single_invoke_fast_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_get_release_handle_p", ITc_nnstreamer_ml_pipeline_element_get_release_handle_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p", ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
+{"ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p", ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{NULL, NULL}
};
extern int ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p (void);
extern int ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p (void);
extern int ITc_nnstreamer_pipeline_tizensensor_p (void);
+extern int ITc_nnstreamer_pipeline_ml_check_element_availability_p(void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p (void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_clone_p (void);
extern int ITc_nnstreamer_tensors_ml_tensors_info_validate_p (void);
extern int ITc_nnstreamer_single_ml_set_timeout_p (void);
extern int ITc_nnstreamer_single_ml_set_get_property_p(void);
extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void);
+extern int ITc_nnstreamer_single_ml_single_invoke_fast_p (void);
+extern int ITc_nnstreamer_single_ml_single_open_full_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_get_release_handle_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p(void);
extern int ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p(void);
extern int ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p(void);
+extern int ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p(void);
testcase tc_array[] = {
{"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p", ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p", ITc_nnstreamer_pipeline_ml_pipeline_valve_set_open_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_pipeline_tizensensor_p", ITc_nnstreamer_pipeline_tizensensor_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
+{"ITc_nnstreamer_pipeline_ml_check_element_availability_p", ITc_nnstreamer_pipeline_ml_check_element_availability_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_info_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_clone_p", ITc_nnstreamer_tensors_ml_tensors_info_clone_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_info_validate_p", ITc_nnstreamer_tensors_ml_tensors_info_validate_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_single_open_full_p", ITc_nnstreamer_single_ml_single_open_full_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_single_invoke_fast_p", ITc_nnstreamer_single_ml_single_invoke_fast_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_get_release_handle_p", ITc_nnstreamer_ml_pipeline_element_get_release_handle_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_bool_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_string_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_double_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p", ITc_nnstreamer_ml_pipeline_element_set_get_property_enum_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{"ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p", ITc_nnstreamer_ml_pipeline_custom_easy_filter_register_unregister_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
+{"ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p", ITc_nnstreamer_ml_pipeline_tensor_if_custom_register_unregister_p, ITs_nnstreamer_capi_startup, ITs_nnstreamer_capi_cleanup},
{NULL, NULL}
};