}
/*
-* @testcase ITc_nnstreamer_single_ml_get_input_info_p
+* @testcase ITc_nnstreamer_single_ml_get_set_input_info_p
* @since_tizen 5.5
* @author SRID(manoj.g2)
* @reviewer SRID(shobhit.v)
* @type auto
-* @description Get tensor input info from ml single
-* @scenario Get tensor input info from ml single
-* @apicovered ml_single_get_input_info
-* @passcase When ml_single_get_input_info and precondition API is successful.
+* @description Get and Set tensor input info from ml single
+* @scenario Get tensor input info from ml single, modify it and set it back
+* @apicovered ml_single_set_input_info, ml_single_get_input_info
+* @passcase When ml_single_set_input_info, ml_single_get_input_info and precondition APIs are successful.
* @failcase If target API fails or any precondition API fails
* @precondition None
* @postcondition None
*/
-//& purpose: API To Get tensor input info from ml single
+//& purpose: API To Get and Set tensor input info from ml single
//& type: auto
-int ITc_nnstreamer_single_ml_get_input_info_p(void)
+int ITc_nnstreamer_single_ml_get_set_input_info_p(void)
{
START_TEST;
+
int nRet = -1;
ml_single_h hSingleHandle = NULL;
ml_tensors_info_h hInputTensorsInfoHandle = NULL;
ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
ml_tensors_info_h hGetInputTensorsInfoHandle = NULL;
+
ml_tensor_dimension inputTensorDimension;
ml_tensor_dimension outputTensorDimension;
inputTensorDimension[0] = 3;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ //Target API
nRet = ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- nRet = ml_single_close (hSingleHandle);
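+ /* Read the current dimension of the first input tensor */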
+ nRet = ml_tensors_info_get_tensor_dimension (hGetInputTensorsInfoHandle, 0, outputTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle));
+
+ /* Dimension change */
+ outputTensorDimension[3] += 1;
+ nRet = ml_tensors_info_set_tensor_dimension(hGetInputTensorsInfoHandle, 0, outputTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle));
+
+ //Target API
+ nRet = ml_single_set_input_info(hSingleHandle, hGetInputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle));
+
+ nRet = ml_single_close(hSingleHandle);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
nRet = ml_tensors_info_destroy (hOutputTensorsInfoHandle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+ nRet = ml_tensors_info_destroy (hGetInputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
return 0;
}
return 0;
}
+
+/*
+* @testcase ITc_nnstreamer_single_ml_set_get_property_p
+* @since_tizen 6.0
+* @author SRID(j.abhishek)
+* @reviewer SRID(shobhit.v)
+* @type auto
+* @description To Set and Get ML single property value
+* @scenario To Set and Get ML single property value
+* @apicovered ml_single_set_property, ml_single_get_property
+* @passcase When ml_single_set_property, ml_single_get_property and precondition APIs are successful.
+* @failcase If target API fails or any precondition API fails
+* @precondition None
+* @postcondition None
+*/
+//& purpose: API To Set and Get ML single property value
+//& type: auto
+int ITc_nnstreamer_single_ml_set_get_property_p(void)
+{
+ START_TEST;
+
+ int nRet = -1;
+ ml_single_h hSingleHandle = NULL;
+ const char *pszPropertyName = "input";
+ const char *pszSetProperty = "3:224:224:1";
+ char *pszGetProperty = NULL;
+
+ nRet = ml_single_open(&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+ CHECK_HANDLE(hSingleHandle, "ml_single_open");
+
+ //Target API
+ nRet = ml_single_set_property(hSingleHandle, pszPropertyName, pszSetProperty);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+
+ //Target API
+ nRet = ml_single_get_property(hSingleHandle, pszPropertyName, &pszGetProperty);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(pszGetProperty, "ml_single_get_property", ml_single_close(hSingleHandle));
+
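+ // Verify that the property value read back matches the value that was set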
+ if ( 0 != strncmp(pszSetProperty, pszGetProperty, strlen(pszSetProperty)) )
+ {
+ FPRINTF("[%s:%d] TC Failed Reason; set property value %s and get property value %s are not the same\\n", __FILE__, __LINE__, pszSetProperty, pszGetProperty);
+ nRet = ml_single_close(hSingleHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+ return 1;
+ }
+
+ nRet = ml_single_close(hSingleHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+
+ return 0;
+}
+
+/*
+* @testcase ITc_nnstreamer_single_ml_invoke_dynamic_p
+* @since_tizen 6.0
+* @author SRID(j.abhishek)
+* @reviewer SRID(shobhit.v)
+* @type auto
+* @description To invoke the model with the given input data and the given tensors information
+* @scenario To invoke the model with the given input data and the given tensors information
+* @apicovered ml_single_invoke_dynamic
+* @passcase When ml_single_invoke_dynamic and precondition API is successful.
+* @failcase If target API fails or any precondition API fails
+* @precondition None
+* @postcondition None
+*/
+//& purpose: Invokes the model with the given input data and the given tensors information.
+//& type: auto
+int ITc_nnstreamer_single_ml_invoke_dynamic_p(void)
+{
+ START_TEST;
+
+ int nRet = -1;
+ ml_single_h hSingleHandle = NULL;
+ ml_tensors_info_h hInputTensorsInfoHandle = NULL;
+ ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
+ ml_tensors_data_h hInputDataHandle = NULL;
+ ml_tensors_data_h hOutputDataHandle = NULL;
+
+ ml_tensor_type_e eTensorType = ML_TENSOR_TYPE_UNKNOWN;
+ ml_tensor_dimension nTmpTensorDimension;
+
+ const char *pszPropertyName = "input";
+ const char *pszSetProperty = "3:224:224:1";
+ float fTmpInputArr[] = {1.0};
+ unsigned int nTmpCnt = 0;
+ float *fOutBuf;
+ size_t nDataSize;
+
+ nRet = ml_single_open(&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+ CHECK_HANDLE(hSingleHandle, "ml_single_open");
+
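+ /* Fetch the model's input info and prepare input data for the first invocation */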
+ nRet = ml_single_get_input_info(hSingleHandle, &hInputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_get_input_info", ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_data_create(hInputTensorsInfoHandle, &hInputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create", ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_data_set_tensor_data(hInputDataHandle, 0, fTmpInputArr, 1 * sizeof(float));
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_count(hInputTensorsInfoHandle, &nTmpCnt);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_type(hInputTensorsInfoHandle, 0, &eTensorType);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_dimension(hInputTensorsInfoHandle, 0, nTmpTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ //Target API
+ nRet = ml_single_invoke_dynamic(hSingleHandle, hInputDataHandle, hInputTensorsInfoHandle, &hOutputDataHandle, &hOutputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_dynamic", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
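+ /* Check the output data and the returned output tensors information */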
+ nRet = ml_tensors_data_get_tensor_data(hOutputDataHandle, 0, (void **)&fOutBuf, &nDataSize);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_count(hOutputTensorsInfoHandle, &nTmpCnt);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_type(hOutputTensorsInfoHandle, 0, &eTensorType);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_dimension(hOutputTensorsInfoHandle, 0, nTmpTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
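+ /* Release the data and info handles from the first invocation */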
+ nRet = ml_tensors_data_destroy(hInputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_data_destroy(hOutputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_info_destroy(hInputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_info_destroy(hOutputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
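+ /* Change the input layout via the "input" property and run a second dynamic invocation */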
+ nRet = ml_single_set_property(hSingleHandle, pszPropertyName, pszSetProperty);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+
+ nRet = ml_single_get_input_info(hSingleHandle, &hInputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_get_input_info", ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_data_create(hInputTensorsInfoHandle, &hInputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create", ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ float fTmpInput2[] = {1.0, 2.0, 3.0, 4.0, 5.0};
+ float *fOutBuf2;
+
+ nRet = ml_tensors_data_set_tensor_data(hInputDataHandle, 0, fTmpInput2, 5 * sizeof(float));
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_count(hInputTensorsInfoHandle, &nTmpCnt);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_type(hInputTensorsInfoHandle, 0, &eTensorType);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_dimension(hInputTensorsInfoHandle, 0, nTmpTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ //Target API
+ nRet = ml_single_invoke_dynamic(hSingleHandle, hInputDataHandle, hInputTensorsInfoHandle, &hOutputDataHandle, &hOutputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_dynamic", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+ CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
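+ /* Check the output of the second invocation */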
+ nRet = ml_tensors_data_get_tensor_data(hOutputDataHandle, 0, (void **) &fOutBuf2, &nDataSize);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_count(hOutputTensorsInfoHandle, &nTmpCnt);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_type(hOutputTensorsInfoHandle, 0, &eTensorType);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_tensors_info_get_tensor_dimension(hOutputTensorsInfoHandle, 0, nTmpTensorDimension);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+ nRet = ml_single_close(hSingleHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_data_destroy(hInputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_data_destroy(hOutputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_info_destroy(hInputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+ nRet = ml_tensors_info_destroy(hOutputTensorsInfoHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+ return 0;
+}