gboolean playing;
} TestPipeState;
-static gchar * pszNnpipeline;
-static ml_pipeline_h g_hPipelinehandle;
-static ml_pipeline_state_e state;
-static guint * g_unsinkCount;
-static TestPipeState * g_pipePipeLineState;
+gchar * pszNnpipeline;
+ml_pipeline_h g_hPipelinehandle;
+ml_pipeline_state_e state;
+guint * g_unsinkCount;
+TestPipeState * g_pipePipeLineState;
char* NnStreamerGetError(int nRet);
gboolean Timeout(gpointer data);
-ml_nnfw_hw_e g_eMlNnHardwareType[5];
#define API_NAMESPACE "NNSTREAMER_ITC"
#define FEATURE_ML_INTERFACE "http://tizen.org/feature/machine_learning.inference"
#define START_TEST {\
- FPRINTF("[Line:%d][%s] Starting test check initialize : %s\\n", __LINE__, API_NAMESPACE, __FUNCTION__);\
- dlog_print(DLOG_INFO, "NativeTCT", "[Line:%d][%s] Starting test check initialize : %s", __LINE__, API_NAMESPACE, __FUNCTION__);\
- if ( g_bIsFeatureMismatched ) {\
- FPRINTF("[Line:%d][%s] Feature Mismatch Error\\n", __LINE__, API_NAMESPACE);\
- dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Mismatch Error", __LINE__, API_NAMESPACE);\
- return 1;\
- }\
- if ( !g_bFeatureIsSupported) {\
- FPRINTF("[Line : %d][%s] Feature Unsupported\\n", __LINE__, API_NAMESPACE);\
- dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Unsupported", __LINE__, API_NAMESPACE);\
- return 0;\
- }\
- if ( !g_bNnstreamerCreation ) {\
- FPRINTF("[Line:%d][%s] Precondition Failed\\n", __LINE__, API_NAMESPACE);\
- dlog_print(DLOG_ERROR,"NativeTCT", "[Line:%d][%s] Precondition Failed", __LINE__, API_NAMESPACE);\
- return 1;\
- }\
+ FPRINTF("[Line:%d][%s] Starting test check initialize : %s\\n", __LINE__, API_NAMESPACE, __FUNCTION__);\
+ dlog_print(DLOG_INFO, "NativeTCT", "[Line:%d][%s] Starting test check initialize : %s", __LINE__, API_NAMESPACE, __FUNCTION__);\
+ if ( g_bIsFeatureMismatched ) {\
+ FPRINTF("[Line:%d][%s] Feature Mismatch Error\\n", __LINE__, API_NAMESPACE);\
+ dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Mismatch Error", __LINE__, API_NAMESPACE);\
+ return 1;\
+ }\
+ if ( !g_bFeatureIsSupported) {\
+ FPRINTF("[Line : %d][%s] Feature Unsupported\\n", __LINE__, API_NAMESPACE);\
+ dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Unsupported", __LINE__, API_NAMESPACE);\
+ return 0;\
+ }\
+ if ( !g_bNnstreamerCreation ) {\
+ FPRINTF("[Line:%d][%s] Precondition Failed\\n", __LINE__, API_NAMESPACE);\
+ dlog_print(DLOG_ERROR,"NativeTCT", "[Line:%d][%s] Precondition Failed", __LINE__, API_NAMESPACE);\
+ return 1;\
+ }\
}
#define CHECK_HANDLE_CLEANUP(Handle, API, FreeResource) {\
START_TEST;
int nRet = -1;
ml_single_h hSingleHandle = NULL;
- ml_tensors_info_h hInputTensorsInfoHandle = NULL;
- ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
- int nEnumCounter = 0;
- int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
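+ /* ML_NNFW_HW_ANY lets the framework pick any available hardware backend for the model */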
+ nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+ CHECK_HANDLE(hSingleHandle, "ml_single_open");
- for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
- {
- nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, g_eMlNnHardwareType[nEnumCounter]);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
- CHECK_HANDLE(hSingleHandle, "ml_single_open");
-
- nRet = ml_single_close (hSingleHandle);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
- }
+ nRet = ml_single_close (hSingleHandle);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
return 0;
}
int nRet = -1;
ml_single_h hSingleHandle = NULL;
- int nEnumCounter = 0;
- int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
- for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
- {
- nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
- CHECK_HANDLE(hSingleHandle, "ml_single_open");
+ nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+ CHECK_HANDLE(hSingleHandle, "ml_single_open");
- nRet = ml_single_set_timeout(hSingleHandle, 1);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_timeout", NnStreamerGetError(nRet),ml_single_close (hSingleHandle));
+ nRet = ml_single_set_timeout(hSingleHandle, 1);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_timeout", NnStreamerGetError(nRet),ml_single_close (hSingleHandle));
- nRet = ml_single_close (hSingleHandle);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
- }
+ nRet = ml_single_close (hSingleHandle);
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
return 0;
}
ml_tensors_data_h hInputDataHandle = NULL;
ml_tensors_data_h hOutputDataHandle = NULL;
- int nEnumCounter = 0;
- int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-
-
nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
- {
- nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ nRet = ml_tensors_data_create (hInputTensorsInfoHandle, &hInputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- nRet = ml_tensors_data_create (hInputTensorsInfoHandle, &hInputDataHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ /* Increase the invoke timeout to avoid spurious failures on low-performance targets */
+ ml_single_set_timeout (hSingleHandle, 20000);
- /* to prevent timeout with low performance */
- ml_single_set_timeout (hSingleHandle, 20000);
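+ /* Run a synchronous single-shot inference on the prepared input data */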
+ nRet = ml_single_invoke (hSingleHandle, hInputDataHandle, &hOutputDataHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
+ CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
- nRet = ml_single_invoke (hSingleHandle, hInputDataHandle, &hOutputDataHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
- CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
+ nRet = ml_single_close (hSingleHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle);ml_tensors_data_destroy (hOutputDataHandle));
- nRet = ml_single_close (hSingleHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle);ml_tensors_data_destroy (hOutputDataHandle));
+ nRet = ml_tensors_data_destroy (hInputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
- nRet = ml_tensors_data_destroy (hInputDataHandle);
- PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
-
- nRet = ml_tensors_data_destroy (hOutputDataHandle);
- PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
- }
+ nRet = ml_tensors_data_destroy (hOutputDataHandle);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
-
nRet = ml_tensors_info_destroy (hOutputTensorsInfoHandle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
outputTensorDimension[2] = 1;
outputTensorDimension[3] = 1;
- int nEnumCounter = 0;
- int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-
nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
- {
- nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, g_eMlNnHardwareType[nEnumCounter]);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
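+ /* Output info is passed as NULL so it is taken from the model; the test then queries the input info */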
+ nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- nRet = ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ nRet = ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- nRet = ml_single_close (hSingleHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- }
+ nRet = ml_single_close (hSingleHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
outputTensorDimension[2] = 1;
outputTensorDimension[3] = 1;
- int nEnumCounter = 0;
- int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-
nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
- {
- nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, g_eMlNnHardwareType[nEnumCounter]);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
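+ /* Open with explicit input/output info, then query the output info reported by the model */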
+ nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- nRet = ml_single_get_output_info (hSingleHandle, &hGetOutputTensorsInfoHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_output_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- CHECK_HANDLE_CLEANUP(hGetOutputTensorsInfoHandle, "ml_single_get_output_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ nRet = ml_single_get_output_info (hSingleHandle, &hGetOutputTensorsInfoHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_output_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+ CHECK_HANDLE_CLEANUP(hGetOutputTensorsInfoHandle, "ml_single_get_output_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- nRet = ml_single_close (hSingleHandle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
- }
+ nRet = ml_single_close (hSingleHandle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
static ml_tensors_info_h g_hTensorsInfoHandle;
-ml_tensor_type_e g_eMlNnTensorType[] = { ML_TENSOR_TYPE_INT32,
+ml_tensor_type_e g_eMlNnTensorType[] = {
+ ML_TENSOR_TYPE_INT32,
ML_TENSOR_TYPE_UINT32,
ML_TENSOR_TYPE_INT16,
ML_TENSOR_TYPE_UINT16,
ML_TENSOR_TYPE_FLOAT32,
ML_TENSOR_TYPE_INT64,
ML_TENSOR_TYPE_UINT64,
-
};
/** @addtogroup itc-nnstreamer
PRINT_RESULT_CLEANUP(g_eMlNnTensorType[nEnumCounter], eGetTensorType, "set and get type not same", NnStreamerGetError(nRet),ml_tensors_info_destroy (hTensorinfoHandle));
}
+
nRet = ml_tensors_info_destroy (hTensorinfoHandle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
-
return 0;
}
//& type: auto
int ITc_nnstreamer_tensors_ml_tensors_info_get_tensor_size_p(void)
{
-
START_TEST;
- int nRet = -1;
- size_t nTensorSize;
- ml_tensor_dimension inputTensorDimension;
- inputTensorDimension[0] = 3;
- inputTensorDimension[1] = 300;
- inputTensorDimension[2] = 300;
- inputTensorDimension[3] = 1;
-
+ int nRet = -1;
+ size_t nTensorSize;
+ ml_tensor_dimension inputTensorDimension;
+ inputTensorDimension[0] = 3;
+ inputTensorDimension[1] = 300;
+ inputTensorDimension[2] = 300;
+ inputTensorDimension[3] = 1;
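+ /* Describe a single 3x300x300x1 tensor, then query its size in bytes */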
nRet = ml_tensors_info_set_count (g_hTensorsInfoHandle, 1);
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet));
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet));
nRet = ml_tensors_info_set_tensor_dimension (g_hTensorsInfoHandle, 0, inputTensorDimension);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet));
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet));
nRet = ml_tensors_info_get_tensor_size(g_hTensorsInfoHandle, 0, &nTensorSize);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRet));
-
+ PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRet));
+
return 0;
}
int nRet = -1;
ml_tensors_data_h hTensorsDataHandle = NULL;
uint8_t uintarray[4] = {1,1,1,1};
- int nCount = 0;
+ unsigned int i, nCount = 0;
void *dataPtr = NULL;
size_t dataSize = 0;
ml_tensor_dimension inputTensorDimension;
- inputTensorDimension[0] = 2;
- inputTensorDimension[1] = 2;
- inputTensorDimension[2] = 2;
- inputTensorDimension[3] = 2;
+ inputTensorDimension[0] = 2;
+ inputTensorDimension[1] = 2;
+ inputTensorDimension[2] = 2;
+ inputTensorDimension[3] = 2;
nRet = ml_tensors_info_set_count (g_hTensorsInfoHandle, 1);
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet));
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));
nRet = ml_tensors_info_get_count (g_hTensorsInfoHandle, &nCount);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));
-
- for(int i =0;i <nCount;i++)
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));
+
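+ /* Fetch the raw buffer pointer and byte size of every tensor in the data handle */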
+ for (i = 0; i < nCount; i++)
{
nRet = ml_tensors_data_get_tensor_data (hTensorsDataHandle, i, &dataPtr, &dataSize);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));