[ITC][nnstreamer][ACR-1503][Add new API and enum for neural network framework] 47/230847/3
author      ABHISHEK JAIN <j.abhishek@samsung.com>      Tue, 14 Apr 2020 15:29:48 +0000 (20:59 +0530)
committer   shobhit verma <shobhit.v@samsung.com>       Wed, 15 Apr 2020 10:45:22 +0000 (10:45 +0000)
Change-Id: I89380e10a3bfe0fd5109ac23d247de66c2cd13e0
Signed-off-by: ABHISHEK JAIN <j.abhishek@samsung.com>
src/itc/nnstreamer/ITs-nnstreamer-common.c
src/itc/nnstreamer/ITs-nnstreamer-single.c
src/itc/nnstreamer/ITs-nnstreamer.c
src/itc/nnstreamer/tct-nnstreamer-native_mobile.h
src/itc/nnstreamer/tct-nnstreamer-native_tizeniot.h
src/itc/nnstreamer/tct-nnstreamer-native_wearable.h
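
For reviewers new to the single-shot ML API, a minimal sketch of how the newly covered
ml_single_set_property / ml_single_get_property calls are typically used is shown below.
It is illustrative only: the model path is a placeholder (not the TCT resource used by
these tests), the header name follows the usual Tizen packaging, and error handling is
reduced to early returns.

    #include <nnstreamer-single.h>   /* Tizen ML Inference single-shot API */
    #include <stdio.h>
    #include <stdlib.h>

    static int sketch_single_property(const char *model_path)
    {
            ml_single_h single = NULL;
            char *shape = NULL;
            int ret;

            /* Open the model with the TensorFlow Lite framework on any available hardware */
            ret = ml_single_open(&single, model_path, NULL, NULL,
                                 ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
            if (ret != ML_ERROR_NONE)
                    return ret;

            /* Since Tizen 6.0: update and read back the "input" dimension property */
            ret = ml_single_set_property(single, "input", "3:224:224:1");
            if (ret == ML_ERROR_NONE)
                    ret = ml_single_get_property(single, "input", &shape);

            if (ret == ML_ERROR_NONE) {
                    printf("input property: %s\n", shape);
                    free(shape);  /* string returned by ml_single_get_property is caller-owned */
            }

            ml_single_close(single);
            return ret;
    }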

index 059d60a51c0fcebbcbcea127963acc2eba6f4e34..2056bd75cf379c262de136543b2eb6324125a0aa 100755 (executable)
@@ -52,18 +52,18 @@ gboolean Timeout(gpointer data)
 
 char* NnStreamerGetError(int nRet)
 {
-       char *szErrorVal = NULL;
+       char *szErrorVal = "Unknown error code";
        switch ( nRet )
        {
-       case ML_ERROR_NONE:                             szErrorVal = "ML_ERROR_NONE";                                   break;
-       case ML_ERROR_INVALID_PARAMETER:        szErrorVal = "ML_ERROR_INVALID_PARAMETER";      break;
-       case ML_ERROR_STREAMS_PIPE:     szErrorVal = "ML_ERROR_STREAMS_PIPE";   break;
-       case ML_ERROR_TRY_AGAIN:        szErrorVal = "ML_ERROR_TRY_AGAIN";      break;
-       case ML_ERROR_UNKNOWN:  szErrorVal = "ML_ERROR_UNKNOWN";        break;
-       case ML_ERROR_TIMED_OUT:        szErrorVal = "ML_ERROR_TIMED_OUT";      break;
-       case ML_ERROR_NOT_SUPPORTED:            szErrorVal = "ML_ERROR_NOT_SUPPORTED";          break;
-       case ML_ERROR_PERMISSION_DENIED:        szErrorVal = "ML_ERROR_PERMISSION_DENIED";      break;
-       default:        szErrorVal = "Unknown error code";      break;
+               case ML_ERROR_NONE:                             szErrorVal = "ML_ERROR_NONE";                                   break;
+               case ML_ERROR_INVALID_PARAMETER:        szErrorVal = "ML_ERROR_INVALID_PARAMETER";      break;
+               case ML_ERROR_STREAMS_PIPE:     szErrorVal = "ML_ERROR_STREAMS_PIPE";   break;
+               case ML_ERROR_TRY_AGAIN:        szErrorVal = "ML_ERROR_TRY_AGAIN";      break;
+               case ML_ERROR_UNKNOWN:  szErrorVal = "ML_ERROR_UNKNOWN";        break;
+               case ML_ERROR_TIMED_OUT:        szErrorVal = "ML_ERROR_TIMED_OUT";      break;
+               case ML_ERROR_NOT_SUPPORTED:            szErrorVal = "ML_ERROR_NOT_SUPPORTED";          break;
+               case ML_ERROR_PERMISSION_DENIED:        szErrorVal = "ML_ERROR_PERMISSION_DENIED";      break;
+               case ML_ERROR_OUT_OF_MEMORY:    szErrorVal = "ML_ERROR_OUT_OF_MEMORY";  break;
        }
 
        return szErrorVal;
index 31aacb60d908eaae63c72960977542ea79a8c848..4eaf21ab2434ef1552e8da8bccf78b24c64e1daf 100755 (executable)
@@ -282,29 +282,31 @@ int ITc_nnstreamer_single_ml_single_invoke_p(void)
 }
 
 /*
-* @testcase                    ITc_nnstreamer_single_ml_get_input_info_p
+* @testcase                    ITc_nnstreamer_single_ml_get_set_input_info_p
 * @since_tizen                 5.5
 * @author                      SRID(manoj.g2)
 * @reviewer                    SRID(shobhit.v)
 * @type                        auto
-* @description                 Get tensor input info from ml single
-* @scenario                    Get tensor input info from ml single
-* @apicovered                  ml_single_get_input_info
-* @passcase                    When ml_single_get_input_info and precondition API is successful.
+* @description                 Get and set tensor input info for ml single
+* @scenario                    Get tensor input info from ml single, modify it, and set it back
+* @apicovered                  ml_single_set_input_info, ml_single_get_input_info
+* @passcase                    When ml_single_set_input_info, ml_single_get_input_info and precondition APIs are successful.
 * @failcase                    If target API fails or any precondition API fails
 * @precondition                        None
 * @postcondition               None
 */
-//& purpose: API To Get tensor input info from ml single
+//& purpose: API To Get and Set tensor input info from ml single
 //& type: auto
-int ITc_nnstreamer_single_ml_get_input_info_p(void)
+int ITc_nnstreamer_single_ml_get_set_input_info_p(void)
 {
        START_TEST;
+
        int nRet = -1;
        ml_single_h hSingleHandle = NULL;
        ml_tensors_info_h hInputTensorsInfoHandle = NULL;
        ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
        ml_tensors_info_h hGetInputTensorsInfoHandle = NULL;
+
        ml_tensor_dimension inputTensorDimension;
        ml_tensor_dimension outputTensorDimension;
        inputTensorDimension[0] = 3;
@@ -346,11 +348,24 @@ int ITc_nnstreamer_single_ml_get_input_info_p(void)
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
        CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
+       //Target API
        nRet = ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
        CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
-       nRet = ml_single_close (hSingleHandle);
+       nRet = ml_tensors_info_get_tensor_dimension (hGetInputTensorsInfoHandle, 0, outputTensorDimension);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimensions", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle));
+
+       /* Dimension change */
+       outputTensorDimension[3] += 1;
+       nRet = ml_tensors_info_set_tensor_dimension(hGetInputTensorsInfoHandle, 0, outputTensorDimension);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle));
+
+       //Target API
+       nRet = ml_single_set_input_info(hSingleHandle, hGetInputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle));
+
+       nRet = ml_single_close(hSingleHandle);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle));
 
        nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle);
@@ -359,6 +374,9 @@ int ITc_nnstreamer_single_ml_get_input_info_p(void)
        nRet = ml_tensors_info_destroy (hOutputTensorsInfoHandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
 
+       nRet = ml_tensors_info_destroy (hGetInputTensorsInfoHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
        return 0;
 }
 
@@ -442,3 +460,209 @@ int ITc_nnstreamer_single_ml_get_output_info_p(void)
 
        return 0;
 }
+
+/*
+* @testcase                    ITc_nnstreamer_single_ml_set_get_property_p
+* @since_tizen                 6.0
+* @author                      SRID(j.abhishek)
+* @reviewer                    SRID(shobhit.v)
+* @type                        auto
+* @description                 To Set and Get ML single property value
+* @scenario                    To Set and Get ML single property value
+* @apicovered                  ml_single_set_property, ml_single_get_property
+* @passcase                    When ml_single_set_property, ml_single_get_property and precondition APIs are successful.
+* @failcase                    If target API fails or any precondition API fails
+* @precondition                        None
+* @postcondition               None
+*/
+//& purpose: API To Set and Get ML single property value
+//& type: auto
+int ITc_nnstreamer_single_ml_set_get_property_p(void)
+{
+       START_TEST;
+
+       int nRet = -1;
+       ml_single_h hSingleHandle = NULL;
+       const char *pszPropertyName = "input";
+       const char *pszSetProperty = "3:224:224:1";
+       char *pszGetProperty = NULL;
+
+       nRet = ml_single_open(&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+       CHECK_HANDLE(hSingleHandle, "ml_single_open");
+
+       //Target API
+       nRet = ml_single_set_property(hSingleHandle, pszPropertyName, pszSetProperty);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+
+       //Target API
+       nRet = ml_single_get_property(hSingleHandle, pszPropertyName, &pszGetProperty);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(pszGetProperty, "ml_single_get_property", ml_single_close(hSingleHandle));
+
+       if ( 0 != strncmp(pszSetProperty, pszGetProperty, strlen(pszSetProperty)) )
+       {
+               FPRINTF("[%s:%d] TC Failed Reason; set value %s and get value %s of the tensor property are not the same\\n", __FILE__, __LINE__, pszSetProperty, pszGetProperty);
+               nRet = ml_single_close(hSingleHandle);
+               PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+               return 1;
+       }
+
+       nRet = ml_single_close(hSingleHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+
+       return 0;
+}
+
+/*
+* @testcase                    ITc_nnstreamer_single_ml_invoke_dynamic_p
+* @since_tizen                 6.0
+* @author                      SRID(j.abhishek)
+* @reviewer                    SRID(shobhit.v)
+* @type                        auto
+* @description                 Invokes the model with the given input data and tensors information
+* @scenario                    Invokes the model with the given input data and tensors information
+* @apicovered                  ml_single_invoke_dynamic
+* @passcase                    When ml_single_invoke_dynamic and precondition API is successful.
+* @failcase                    If target API fails or any precondition API fails
+* @precondition                        None
+* @postcondition               None
+*/
+//& purpose: Invokes the model with the given input data and tensors information
+//& type: auto
+int ITc_nnstreamer_single_ml_invoke_dynamic_p(void)
+{
+       START_TEST;
+
+       int nRet = -1;
+       ml_single_h hSingleHandle = NULL;
+       ml_tensors_info_h hInputTensorsInfoHandle = NULL;
+       ml_tensors_info_h hOutputTensorsInfoHandle = NULL;
+       ml_tensors_data_h hInputDataHandle = NULL;
+       ml_tensors_data_h hOutputDataHandle = NULL;
+
+       ml_tensor_type_e eTensorType = ML_TENSOR_TYPE_UNKNOWN;
+       ml_tensor_dimension nTmpTensorDimension;
+
+       const char *pszPropertyName = "input";
+       const char *pszSetProperty = "3:224:224:1";
+       float fTmpInputArr[] = {1.0};
+       unsigned int nTmpCnt = 0;
+       float *fOutBuf;
+       size_t nDataSize;
+
+       nRet = ml_single_open(&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet));
+       CHECK_HANDLE(hSingleHandle, "ml_single_open");
+
+       nRet = ml_single_get_input_info(hSingleHandle, &hInputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_get_input_info", ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_data_create(hInputTensorsInfoHandle, &hInputDataHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create", ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_data_set_tensor_data(hInputDataHandle, 0, fTmpInputArr, 1 * sizeof(float));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_count(hInputTensorsInfoHandle, &nTmpCnt);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_type(hInputTensorsInfoHandle, 0, &eTensorType);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_dimension(hInputTensorsInfoHandle, 0, nTmpTensorDimension);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       //Target API
+       nRet = ml_single_invoke_dynamic(hSingleHandle, hInputDataHandle, hInputTensorsInfoHandle, &hOutputDataHandle, &hOutputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_dynamic", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_data_get_tensor_data(hOutputDataHandle, 0, (void **)&fOutBuf, &nDataSize);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_count(hOutputTensorsInfoHandle, &nTmpCnt);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_type(hOutputTensorsInfoHandle, 0, &eTensorType);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_dimension(hOutputTensorsInfoHandle, 0, nTmpTensorDimension);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_data_destroy(hInputDataHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_data_destroy(hOutputDataHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_info_destroy(hInputTensorsInfoHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_info_destroy(hOutputTensorsInfoHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+       nRet = ml_single_set_property(hSingleHandle, pszPropertyName, pszSetProperty);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+
+       nRet = ml_single_get_input_info(hSingleHandle, &hInputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_get_input_info", ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_data_create(hInputTensorsInfoHandle, &hInputDataHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create", ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       float fTmpInput2[] = {1.0, 2.0, 3.0, 4.0, 5.0};
+       float *fOutBuf2;
+
+       nRet = ml_tensors_data_set_tensor_data(hInputDataHandle, 0, fTmpInput2, 5 * sizeof(float));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_count(hInputTensorsInfoHandle, &nTmpCnt);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_type(hInputTensorsInfoHandle, 0, &eTensorType);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_dimension(hInputTensorsInfoHandle, 0, nTmpTensorDimension);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       //Target API
+       nRet = ml_single_invoke_dynamic(hSingleHandle, hInputDataHandle, hInputTensorsInfoHandle, &hOutputDataHandle, &hOutputTensorsInfoHandle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_dynamic", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+       CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_data_get_tensor_data(hOutputDataHandle, 0, (void **) &fOutBuf2, &nDataSize);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_count(hOutputTensorsInfoHandle, &nTmpCnt);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_type(hOutputTensorsInfoHandle, 0, &eTensorType);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_tensors_info_get_tensor_dimension(hOutputTensorsInfoHandle, 0, nTmpTensorDimension);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle));
+
+       nRet = ml_single_close(hSingleHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_data_destroy(hInputDataHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_data_destroy(hOutputDataHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_info_destroy(hInputTensorsInfoHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroyv", NnStreamerGetError(nRet));
+
+       nRet = ml_tensors_info_destroy(hOutputTensorsInfoHandle);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+
+       return 0;
+}
index c8676bd113b7cfff73593f7a73ddb0b258fd2e09..07fa22495e5da3d9a2fac4651adc8bc80fac9a66 100755 (executable)
@@ -20,7 +20,12 @@ ml_nnfw_hw_e g_eMlNnHardwareType[] = {
        ML_NNFW_HW_AUTO,
        ML_NNFW_HW_CPU,
        ML_NNFW_HW_GPU,
-       ML_NNFW_HW_NPU
+       ML_NNFW_HW_NPU,
+       ML_NNFW_HW_CPU_NEON,
+       ML_NNFW_HW_NPU_MOVIDIUS,
+       ML_NNFW_HW_NPU_EDGE_TPU,
+       ML_NNFW_HW_NPU_VIVANTE,
+       ML_NNFW_HW_NPU_SR
 };
 
 
index 74f88bf045498ecf29dbf7def5cd3e75d7e6509d..874e2e2fa09ffc34c8268cdfdd3384c840533e5d 100755 (executable)
@@ -56,10 +56,10 @@ extern int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p (void);
 extern int ITc_nnstreamer_single_ml_single_open_close_p (void);
 extern int ITc_nnstreamer_single_ml_set_timeout_p (void);
 extern int ITc_nnstreamer_single_ml_single_invoke_p (void);
-extern int ITc_nnstreamer_single_ml_get_input_info_p (void);
+extern int ITc_nnstreamer_single_ml_get_set_input_info_p (void);
 extern int ITc_nnstreamer_single_ml_get_output_info_p (void);
-
-
+extern int ITc_nnstreamer_single_ml_set_get_property_p(void);
+extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void);
 
 testcase tc_array[] = {
 {"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
@@ -87,10 +87,11 @@ testcase tc_array[] = {
 {"ITc_nnstreamer_single_ml_single_open_close_p", ITc_nnstreamer_single_ml_single_open_close_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_set_timeout_p", ITc_nnstreamer_single_ml_set_timeout_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_single_invoke_p", ITc_nnstreamer_single_ml_single_invoke_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
-{"ITc_nnstreamer_single_ml_get_input_info_p", ITc_nnstreamer_single_ml_get_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_get_set_input_info_p", ITc_nnstreamer_single_ml_get_set_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_get_output_info_p", ITc_nnstreamer_single_ml_get_output_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
-
 {"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
+{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
   {NULL, NULL}
 };
 
index 1bb5acfe962191b1198d4425a5e4fc5da586137a..bf62c65f57fc8dcc0afb31b79d8dc9b33b4e6498 100755 (executable)
@@ -55,10 +55,11 @@ extern int ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p (void);
 extern int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p (void);
 extern int ITc_nnstreamer_single_ml_single_open_close_p (void);
 extern int ITc_nnstreamer_single_ml_single_invoke_p (void);
-extern int ITc_nnstreamer_single_ml_get_input_info_p (void);
+extern int ITc_nnstreamer_single_ml_get_set_input_info_p (void);
 extern int ITc_nnstreamer_single_ml_get_output_info_p (void);
 extern int ITc_nnstreamer_single_ml_set_timeout_p (void);
-
+extern int ITc_nnstreamer_single_ml_set_get_property_p(void);
+extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void);
 
 testcase tc_array[] = {
 {"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
@@ -85,10 +86,12 @@ testcase tc_array[] = {
 {"ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p", ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
 {"ITc_nnstreamer_single_ml_single_open_close_p", ITc_nnstreamer_single_ml_single_open_close_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_single_invoke_p", ITc_nnstreamer_single_ml_single_invoke_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
-{"ITc_nnstreamer_single_ml_get_input_info_p", ITc_nnstreamer_single_ml_get_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_get_set_input_info_p", ITc_nnstreamer_single_ml_get_set_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_get_output_info_p", ITc_nnstreamer_single_ml_get_output_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_set_timeout_p", ITc_nnstreamer_single_ml_set_timeout_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
+{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
   {NULL, NULL}
 };
 
index 1bb5acfe962191b1198d4425a5e4fc5da586137a..bf62c65f57fc8dcc0afb31b79d8dc9b33b4e6498 100755 (executable)
@@ -55,10 +55,11 @@ extern int ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p (void);
 extern int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p (void);
 extern int ITc_nnstreamer_single_ml_single_open_close_p (void);
 extern int ITc_nnstreamer_single_ml_single_invoke_p (void);
-extern int ITc_nnstreamer_single_ml_get_input_info_p (void);
+extern int ITc_nnstreamer_single_ml_get_set_input_info_p (void);
 extern int ITc_nnstreamer_single_ml_get_output_info_p (void);
 extern int ITc_nnstreamer_single_ml_set_timeout_p (void);
-
+extern int ITc_nnstreamer_single_ml_set_get_property_p(void);
+extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void);
 
 testcase tc_array[] = {
 {"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup},
@@ -85,10 +86,12 @@ testcase tc_array[] = {
 {"ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p", ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
 {"ITc_nnstreamer_single_ml_single_open_close_p", ITc_nnstreamer_single_ml_single_open_close_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_single_invoke_p", ITc_nnstreamer_single_ml_single_invoke_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
-{"ITc_nnstreamer_single_ml_get_input_info_p", ITc_nnstreamer_single_ml_get_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_get_set_input_info_p", ITc_nnstreamer_single_ml_get_set_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_get_output_info_p", ITc_nnstreamer_single_ml_get_output_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_single_ml_set_timeout_p", ITc_nnstreamer_single_ml_set_timeout_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
 {"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup},
+{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
+{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup},
   {NULL, NULL}
 };