[ITC][Non-ACR][Machine Learning][Remove hardware type enum test] 88/226788/1
author: Jaeyun <jy1210.jung@samsung.com>
Thu, 5 Mar 2020 09:09:35 +0000 (18:09 +0900)
committer: Jaeyun <jy1210.jung@samsung.com>
Thu, 5 Mar 2020 09:09:35 +0000 (18:09 +0900)
Changes:
1. Remove hardware type enum in single-shot test.
We cannot guarantee that every hardware type is available on the test device, so iterating over the enum may break the test.

2. Fix indentation and build warning.

This patch also needs to be merged in tizen-5.5.

Change-Id: Ia53aa99743ae7548592e8c2cd1c24c97b79f08f2
Signed-off-by: Jaeyun <jy1210.jung@samsung.com>
src/itc/nnstreamer/ITs-nnstreamer-common.c
src/itc/nnstreamer/ITs-nnstreamer-common.h
src/itc/nnstreamer/ITs-nnstreamer-single.c
src/itc/nnstreamer/ITs-nnstreamer-tensors.c
src/itc/nnstreamer/ITs-nnstreamer.c

index bb64d3872d7d34a997f89da9da1b39fc8ac2bda5..059d60a51c0fcebbcbcea127963acc2eba6f4e34 100755 (executable)
 
 //Add Common enum
 
-ml_nnfw_hw_e g_eMlNnHardwareType[] = {  ML_NNFW_HW_ANY,
-                               ML_NNFW_HW_AUTO,
-                               ML_NNFW_HW_CPU,
-                               ML_NNFW_HW_GPU,
-                               ML_NNFW_HW_NPU};
-
-
 
 //Add helper function definitions here
 
@@ -70,6 +63,7 @@ char* NnStreamerGetError(int nRet)
        case ML_ERROR_TIMED_OUT:        szErrorVal = "ML_ERROR_TIMED_OUT";      break;
        case ML_ERROR_NOT_SUPPORTED:            szErrorVal = "ML_ERROR_NOT_SUPPORTED";          break;
        case ML_ERROR_PERMISSION_DENIED:        szErrorVal = "ML_ERROR_PERMISSION_DENIED";      break;
+       default:        szErrorVal = "Unknown error code";      break;
        }
 
        return szErrorVal;
index 4f94bf5930abd374ff04b9b647b00ac69c54d0bb..21b87b7e01b3f84a1cbfd1f73ef16381c9d65c1c 100755 (executable)
@@ -45,15 +45,14 @@ typedef struct
   gboolean playing;
 } TestPipeState;
 
-static gchar * pszNnpipeline;
-static ml_pipeline_h g_hPipelinehandle;
-static ml_pipeline_state_e state;
-static guint * g_unsinkCount;
-static TestPipeState * g_pipePipeLineState;
+gchar * pszNnpipeline;
+ml_pipeline_h g_hPipelinehandle;
+ml_pipeline_state_e state;
+guint * g_unsinkCount;
+TestPipeState * g_pipePipeLineState;
 
 char* NnStreamerGetError(int nRet);
 gboolean Timeout(gpointer data);
-ml_nnfw_hw_e g_eMlNnHardwareType[5];
 
 
 #define API_NAMESPACE                  "NNSTREAMER_ITC"
@@ -66,23 +65,23 @@ ml_nnfw_hw_e g_eMlNnHardwareType[5];
 #define FEATURE_ML_INTERFACE           "http://tizen.org/feature/machine_learning.inference"
 
 #define START_TEST {\
-        FPRINTF("[Line:%d][%s] Starting test check initialize : %s\\n", __LINE__, API_NAMESPACE, __FUNCTION__);\
-        dlog_print(DLOG_INFO, "NativeTCT", "[Line:%d][%s] Starting test check initialize : %s", __LINE__, API_NAMESPACE, __FUNCTION__);\
-        if ( g_bIsFeatureMismatched ) {\
-                FPRINTF("[Line:%d][%s] Feature Mismatch Error\\n", __LINE__, API_NAMESPACE);\
-                dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Mismatch Error", __LINE__, API_NAMESPACE);\
-                return 1;\
-        }\
-        if ( !g_bFeatureIsSupported) {\
-                FPRINTF("[Line : %d][%s] Feature Unsupported\\n", __LINE__, API_NAMESPACE);\
-                dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Unsupported", __LINE__, API_NAMESPACE);\
-                return 0;\
-        }\
-        if ( !g_bNnstreamerCreation ) {\
-                FPRINTF("[Line:%d][%s] Precondition Failed\\n", __LINE__, API_NAMESPACE);\
-                dlog_print(DLOG_ERROR,"NativeTCT", "[Line:%d][%s] Precondition Failed", __LINE__, API_NAMESPACE);\
-                return 1;\
-        }\
+       FPRINTF("[Line:%d][%s] Starting test check initialize : %s\\n", __LINE__, API_NAMESPACE, __FUNCTION__);\
+       dlog_print(DLOG_INFO, "NativeTCT", "[Line:%d][%s] Starting test check initialize : %s", __LINE__, API_NAMESPACE, __FUNCTION__);\
+       if ( g_bIsFeatureMismatched ) {\
+               FPRINTF("[Line:%d][%s] Feature Mismatch Error\\n", __LINE__, API_NAMESPACE);\
+               dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Mismatch Error", __LINE__, API_NAMESPACE);\
+               return 1;\
+       }\
+       if ( !g_bFeatureIsSupported) {\
+               FPRINTF("[Line : %d][%s] Feature Unsupported\\n", __LINE__, API_NAMESPACE);\
+               dlog_print(DLOG_ERROR, "NativeTCT", "[Line:%d][%s] Feature Unsupported", __LINE__, API_NAMESPACE);\
+               return 0;\
+       }\
+       if ( !g_bNnstreamerCreation ) {\
+               FPRINTF("[Line:%d][%s] Precondition Failed\\n", __LINE__, API_NAMESPACE);\
+               dlog_print(DLOG_ERROR,"NativeTCT", "[Line:%d][%s] Precondition Failed", __LINE__, API_NAMESPACE);\
+               return 1;\
+       }\
 }
 
 #define CHECK_HANDLE_CLEANUP(Handle, API, FreeResource) {\
index 3ecd9b2ed1a8b469227538ae27710229de6caf34..31aacb60d908eaae63c72960977542ea79a8c848 100755 (executable)
@@ -139,21 +139,13 @@ int ITc_nnstreamer_single_ml_single_open_close_p(void)
        START_TEST;
        int nRet = -1;
        ml_single_h hSingleHandle = NULL;
-       ml_tensors_info_h hInputTensorsInfoHandle = NULL;
-       ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
 
-       int nEnumCounter = 0;
-       int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
+       nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+       CHECK_HANDLE(hSingleHandle, "ml_single_open");
 
-       for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
-       {
-               nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, g_eMlNnHardwareType[nEnumCounter]);
-               PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
-               CHECK_HANDLE(hSingleHandle, "ml_single_open");
-
-               nRet = ml_single_close (hSingleHandle);
-               PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
-       }
+       nRet = ml_single_close (hSingleHandle);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
 
        return 0;
 }
@@ -180,20 +172,15 @@ int ITc_nnstreamer_single_ml_set_timeout_p(void)
        int nRet = -1;
        ml_single_h hSingleHandle = NULL;
 
-       int nEnumCounter = 0;
-       int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-       for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
-       {
-               nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
-               PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
-               CHECK_HANDLE(hSingleHandle, "ml_single_open");
+       nRet = ml_single_open (&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+       CHECK_HANDLE(hSingleHandle, "ml_single_open");
 
-               nRet = ml_single_set_timeout(hSingleHandle, 1);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_timeout", NnStreamerGetError(nRet),ml_single_close (hSingleHandle));
+       nRet = ml_single_set_timeout(hSingleHandle, 1);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_timeout", NnStreamerGetError(nRet),ml_single_close (hSingleHandle));
 
-               nRet = ml_single_close (hSingleHandle);
-               PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
-       }
+       nRet = ml_single_close (hSingleHandle);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
 
        return 0;
 }
@@ -235,10 +222,6 @@ int ITc_nnstreamer_single_ml_single_invoke_p(void)
        ml_tensors_data_h hInputDataHandle = NULL;
        ml_tensors_data_h hOutputDataHandle = NULL;
 
-       int nEnumCounter = 0;
-       int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-
-
        nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
        CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
@@ -265,38 +248,33 @@ int ITc_nnstreamer_single_ml_single_invoke_p(void)
        nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
+       nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-       for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
-       {
-               nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-               CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       nRet = ml_tensors_data_create (hInputTensorsInfoHandle, &hInputDataHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-               nRet = ml_tensors_data_create (hInputTensorsInfoHandle, &hInputDataHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-               CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       /* to prevent timeout with low performance */
+       ml_single_set_timeout (hSingleHandle, 20000);
 
-               /* to prevent timeout with low performance */
-               ml_single_set_timeout (hSingleHandle, 20000);
+       nRet = ml_single_invoke (hSingleHandle, hInputDataHandle, &hOutputDataHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
+       CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
 
-               nRet = ml_single_invoke (hSingleHandle, hInputDataHandle, &hOutputDataHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
-               CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle));
+       nRet = ml_single_close (hSingleHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle);ml_tensors_data_destroy (hOutputDataHandle));
 
-               nRet = ml_single_close (hSingleHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle);ml_tensors_data_destroy (hInputDataHandle);ml_tensors_data_destroy (hOutputDataHandle));
+       nRet = ml_tensors_data_destroy (hInputDataHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
 
-               nRet = ml_tensors_data_destroy (hInputDataHandle);
-               PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
-
-               nRet = ml_tensors_data_destroy (hOutputDataHandle);
-               PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
-       }
+       nRet = ml_tensors_data_destroy (hOutputDataHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
 
        nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
 
-
        nRet = ml_tensors_info_destroy (hOutputTensorsInfoHandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
 
@@ -338,9 +316,6 @@ int ITc_nnstreamer_single_ml_get_input_info_p(void)
        outputTensorDimension[2] = 1;
        outputTensorDimension[3] = 1;
 
-       int nEnumCounter = 0;
-       int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-
        nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
        CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
@@ -367,19 +342,16 @@ int ITc_nnstreamer_single_ml_get_input_info_p(void)
        nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-       for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
-       {
-               nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, g_eMlNnHardwareType[nEnumCounter]);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-               CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-               nRet =  ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-               CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       nRet = ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-               nRet = ml_single_close (hSingleHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-       }
+       nRet = ml_single_close (hSingleHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
        nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
@@ -425,9 +397,6 @@ int ITc_nnstreamer_single_ml_get_output_info_p(void)
        outputTensorDimension[2] = 1;
        outputTensorDimension[3] = 1;
 
-       int nEnumCounter = 0;
-       int nEnumSize = sizeof(g_eMlNnHardwareType) / sizeof(g_eMlNnHardwareType[0]);
-
        nRet = ml_tensors_info_create (&hInputTensorsInfoHandle);
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_create", NnStreamerGetError(nRet));
        CHECK_HANDLE(hInputTensorsInfoHandle, "ml_tensors_info_create");
@@ -454,19 +423,16 @@ int ITc_nnstreamer_single_ml_get_output_info_p(void)
        nRet = ml_tensors_info_set_tensor_dimension (hOutputTensorsInfoHandle, 0, outputTensorDimension);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-       for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
-       {
-               nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, g_eMlNnHardwareType[nEnumCounter]);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-               CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       nRet = ml_single_open (&hSingleHandle, MlTestModel, hInputTensorsInfoHandle, hOutputTensorsInfoHandle, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-               nRet = ml_single_get_output_info (hSingleHandle, &hGetOutputTensorsInfoHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_output_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-               CHECK_HANDLE_CLEANUP(hGetOutputTensorsInfoHandle, "ml_single_get_output_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       nRet = ml_single_get_output_info (hSingleHandle, &hGetOutputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_output_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
+       CHECK_HANDLE_CLEANUP(hGetOutputTensorsInfoHandle, "ml_single_get_output_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-               nRet = ml_single_close (hSingleHandle);
-               PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
-       }
+       nRet = ml_single_close (hSingleHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
        nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
index ec5f05f5bd6cee3cf232a40e17a589c1237f119b..218d11f7b0403ff5cc8b02a0725ed3b3c409a429 100755 (executable)
@@ -18,7 +18,8 @@
 
 static ml_tensors_info_h g_hTensorsInfoHandle;
 
-ml_tensor_type_e g_eMlNnTensorType[] = {  ML_TENSOR_TYPE_INT32,
+ml_tensor_type_e g_eMlNnTensorType[] = {
+                               ML_TENSOR_TYPE_INT32,
                                ML_TENSOR_TYPE_UINT32,
                                ML_TENSOR_TYPE_INT16,
                                ML_TENSOR_TYPE_UINT16,
@@ -28,7 +29,6 @@ ml_tensor_type_e g_eMlNnTensorType[] = {  ML_TENSOR_TYPE_INT32,
                                ML_TENSOR_TYPE_FLOAT32,
                                ML_TENSOR_TYPE_INT64,
                                ML_TENSOR_TYPE_UINT64,
-
 };
 
 /** @addtogroup itc-nnstreamer
@@ -361,10 +361,10 @@ int ITc_nnstreamer_tensors_ml_tensors_info_set_get_tensor_type_p(void)
 
                PRINT_RESULT_CLEANUP(g_eMlNnTensorType[nEnumCounter], eGetTensorType, "set and get type not same", NnStreamerGetError(nRet),ml_tensors_info_destroy (hTensorinfoHandle));
        }
+
        nRet = ml_tensors_info_destroy (hTensorinfoHandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
 
-
        return 0;
 }
 
@@ -436,16 +436,14 @@ int ITc_nnstreamer_tensors_ml_tensors_info_set_get_tensor_dimension_p(void)
 //& type: auto
 int ITc_nnstreamer_tensors_ml_tensors_info_get_tensor_size_p(void)
 {
-
        START_TEST;
-        int nRet = -1;
-        size_t nTensorSize;
-        ml_tensor_dimension inputTensorDimension;
-        inputTensorDimension[0] = 3;
-        inputTensorDimension[1] = 300;
-        inputTensorDimension[2] = 300;
-        inputTensorDimension[3] = 1;
-
+       int nRet = -1;
+       size_t nTensorSize;
+       ml_tensor_dimension inputTensorDimension;
+       inputTensorDimension[0] = 3;
+       inputTensorDimension[1] = 300;
+       inputTensorDimension[2] = 300;
+       inputTensorDimension[3] = 1;
 
        nRet = ml_tensors_info_set_count (g_hTensorsInfoHandle, 1);
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet));
@@ -454,11 +452,11 @@ int ITc_nnstreamer_tensors_ml_tensors_info_get_tensor_size_p(void)
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet));
 
        nRet = ml_tensors_info_set_tensor_dimension (g_hTensorsInfoHandle, 0, inputTensorDimension);
-        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet));
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet));
 
        nRet = ml_tensors_info_get_tensor_size(g_hTensorsInfoHandle, 0, &nTensorSize);
-        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRet));
-       
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRet));
+
        return 0;
 }
 
@@ -515,14 +513,14 @@ int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p(void)
        int nRet = -1;
        ml_tensors_data_h hTensorsDataHandle = NULL;
        uint8_t uintarray[4] = {1,1,1,1};
-       int nCount = 0;
+       unsigned int i, nCount = 0;
        void *dataPtr = NULL;
        size_t dataSize = 0;
        ml_tensor_dimension inputTensorDimension;
-        inputTensorDimension[0] = 2;
-        inputTensorDimension[1] = 2;
-        inputTensorDimension[2] = 2;
-        inputTensorDimension[3] = 2;
+       inputTensorDimension[0] = 2;
+       inputTensorDimension[1] = 2;
+       inputTensorDimension[2] = 2;
+       inputTensorDimension[3] = 2;
 
        nRet = ml_tensors_info_set_count (g_hTensorsInfoHandle, 1);
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet));
@@ -541,9 +539,9 @@ int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p(void)
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));
 
        nRet = ml_tensors_info_get_count (g_hTensorsInfoHandle, &nCount);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle)); 
-       
-       for(int i =0;i <nCount;i++)
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));
+
+       for (i = 0; i < nCount; i++)
        {
                nRet = ml_tensors_data_get_tensor_data (hTensorsDataHandle, i, &dataPtr, &dataSize);
                PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorsDataHandle));
index b9147b7267245c1285cece265fb13366fdb944f0..c8676bd113b7cfff73593f7a73ddb0b258fd2e09 100755 (executable)
 //
 #include "ITs-nnstreamer-common.h"
 
+ml_nnfw_hw_e g_eMlNnHardwareType[] = {
+       ML_NNFW_HW_ANY,
+       ML_NNFW_HW_AUTO,
+       ML_NNFW_HW_CPU,
+       ML_NNFW_HW_GPU,
+       ML_NNFW_HW_NPU
+};
 
 
 /** @addtogroup itc-nnstreamer
@@ -130,8 +137,8 @@ void ITs_nnstreamer_pipeline_cleanup(void)
                int nRet = ml_pipeline_destroy (g_hPipelinehandle);
                if(nRet !=0)
                {
-                               FPRINTF("ml_pipeline_destroy failed\\n");
-                               dlog_print(DLOG_ERROR, "NativeTCT", "ml_pipeline_destroy failed");
+                       FPRINTF("ml_pipeline_destroy failed\\n");
+                       dlog_print(DLOG_ERROR, "NativeTCT", "ml_pipeline_destroy failed");
                }
                g_hPipelinehandle = NULL;
        }
@@ -276,7 +283,7 @@ int ITc_nnstreamer_pipeline_ml_check_nnfw_availability_p(void)
 
        for (nEnumCounter = 0; nEnumCounter < nEnumSize; nEnumCounter++)
        {
-               nRet = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, g_eMlNnHardwareType[nEnumCounter], &isNnAvailable);
+               nRet = ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, g_eMlNnHardwareType[nEnumCounter], &isNnAvailable);
                PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_check_nnfw_availability", NnStreamerGetError(nRet));
        }
 
@@ -328,9 +335,6 @@ int ITc_nnstreamer_pipeline_ml_pipeline_sink_register_unregister_p(void)
        return 0;
 }
 
-
-
-
 /**
 * @testcase                    ITc_nnstreamer_pipeline_ml_pipeline_valve_get_release_handle_p
 * @since_tizen                 5.5