From: ABHISHEK JAIN Date: Tue, 14 Apr 2020 15:29:48 +0000 (+0530) Subject: [ITC][nnstreamer][ACR-1503][Add new API and enum for neural network framework] X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ee1f0b23eb030bef775e7e3c1c302ff78d2b26ee;p=test%2Ftct%2Fnative%2Fapi.git [ITC][nnstreamer][ACR-1503][Add new API and enum for neural network framework] Change-Id: I89380e10a3bfe0fd5109ac23d247de66c2cd13e0 Signed-off-by: ABHISHEK JAIN --- diff --git a/src/itc/nnstreamer/ITs-nnstreamer-common.c b/src/itc/nnstreamer/ITs-nnstreamer-common.c index 059d60a51..2056bd75c 100755 --- a/src/itc/nnstreamer/ITs-nnstreamer-common.c +++ b/src/itc/nnstreamer/ITs-nnstreamer-common.c @@ -52,18 +52,18 @@ gboolean Timeout(gpointer data) char* NnStreamerGetError(int nRet) { - char *szErrorVal = NULL; + char *szErrorVal = "Unknown error code"; switch ( nRet ) { - case ML_ERROR_NONE: szErrorVal = "ML_ERROR_NONE"; break; - case ML_ERROR_INVALID_PARAMETER: szErrorVal = "ML_ERROR_INVALID_PARAMETER"; break; - case ML_ERROR_STREAMS_PIPE: szErrorVal = "ML_ERROR_STREAMS_PIPE"; break; - case ML_ERROR_TRY_AGAIN: szErrorVal = "ML_ERROR_TRY_AGAIN"; break; - case ML_ERROR_UNKNOWN: szErrorVal = "ML_ERROR_UNKNOWN"; break; - case ML_ERROR_TIMED_OUT: szErrorVal = "ML_ERROR_TIMED_OUT"; break; - case ML_ERROR_NOT_SUPPORTED: szErrorVal = "ML_ERROR_NOT_SUPPORTED"; break; - case ML_ERROR_PERMISSION_DENIED: szErrorVal = "ML_ERROR_PERMISSION_DENIED"; break; - default: szErrorVal = "Unknown error code"; break; + case ML_ERROR_NONE: szErrorVal = "ML_ERROR_NONE"; break; + case ML_ERROR_INVALID_PARAMETER: szErrorVal = "ML_ERROR_INVALID_PARAMETER"; break; + case ML_ERROR_STREAMS_PIPE: szErrorVal = "ML_ERROR_STREAMS_PIPE"; break; + case ML_ERROR_TRY_AGAIN: szErrorVal = "ML_ERROR_TRY_AGAIN"; break; + case ML_ERROR_UNKNOWN: szErrorVal = "ML_ERROR_UNKNOWN"; break; + case ML_ERROR_TIMED_OUT: szErrorVal = "ML_ERROR_TIMED_OUT"; break; + case ML_ERROR_NOT_SUPPORTED: szErrorVal = "ML_ERROR_NOT_SUPPORTED"; break; 
+ case ML_ERROR_PERMISSION_DENIED: szErrorVal = "ML_ERROR_PERMISSION_DENIED"; break; + case ML_ERROR_OUT_OF_MEMORY: szErrorVal = "ML_ERROR_OUT_OF_MEMORY"; break; } return szErrorVal; diff --git a/src/itc/nnstreamer/ITs-nnstreamer-single.c b/src/itc/nnstreamer/ITs-nnstreamer-single.c index 31aacb60d..4eaf21ab2 100755 --- a/src/itc/nnstreamer/ITs-nnstreamer-single.c +++ b/src/itc/nnstreamer/ITs-nnstreamer-single.c @@ -282,29 +282,31 @@ int ITc_nnstreamer_single_ml_single_invoke_p(void) } /* -* @testcase ITc_nnstreamer_single_ml_get_input_info_p +* @testcase ITc_nnstreamer_single_ml_get_set_input_info_p * @since_tizen 5.5 * @author SRID(manoj.g2) * @reviewer SRID(shobhit.v) * @type auto -* @description Get tensor input info from ml single -* @scenario Get tensor input info from ml single -* @apicovered ml_single_get_input_info -* @passcase When ml_single_get_input_info and precondition API is successful. +* @description Get and Set tensor input info from ml single +* @scenario Get and Set tensor input info from ml single and set it back +* @apicovered ml_single_set_input_info, ml_single_get_input_info +* @passcase When ml_single_set_input_info, ml_single_get_input_info and precondition APIs are successful. 
* @failcase If target API fails or any precondition API fails +* @precondition None +* @postcondition None */ -//& purpose: API To Get tensor input info from ml single +//& purpose: API To Get and Set tensor input info from ml single //& type: auto -int ITc_nnstreamer_single_ml_get_input_info_p(void) +int ITc_nnstreamer_single_ml_get_set_input_info_p(void) { START_TEST; + int nRet = -1; ml_single_h hSingleHandle = NULL; ml_tensors_info_h hInputTensorsInfoHandle = NULL; ml_tensors_info_h hOutputTensorsInfoHandle = NULL; ml_tensors_info_h hGetInputTensorsInfoHandle = NULL; + ml_tensor_dimension inputTensorDimension; ml_tensor_dimension outputTensorDimension; inputTensorDimension[0] = 3; @@ -346,11 +348,24 @@ int ITc_nnstreamer_single_ml_get_input_info_p(void) PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle)); CHECK_HANDLE_CLEANUP(hSingleHandle, "ml_single_open",ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle)); + //Target API nRet = ml_single_get_input_info (hSingleHandle, &hGetInputTensorsInfoHandle); PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet),ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle)); CHECK_HANDLE_CLEANUP(hGetInputTensorsInfoHandle, "ml_single_get_input_info",ml_single_close (hSingleHandle);ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle)); - nRet = ml_single_close (hSingleHandle); + nRet = ml_tensors_info_get_tensor_dimension (hGetInputTensorsInfoHandle, 0, outputTensorDimension); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); 
ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle)); + + /* Dimension change */ + outputTensorDimension[3] += 1; + nRet = ml_tensors_info_set_tensor_dimension(hGetInputTensorsInfoHandle, 0, outputTensorDimension); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle)); + + //Target API + nRet = ml_single_set_input_info(hSingleHandle, hGetInputTensorsInfoHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_info_destroy(hGetInputTensorsInfoHandle)); + + nRet = ml_single_close(hSingleHandle); PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet),ml_tensors_info_destroy (hInputTensorsInfoHandle);ml_tensors_info_destroy (hOutputTensorsInfoHandle)); nRet = ml_tensors_info_destroy (hInputTensorsInfoHandle); @@ -359,6 +374,9 @@ int ITc_nnstreamer_single_ml_get_input_info_p(void) nRet = ml_tensors_info_destroy (hOutputTensorsInfoHandle); PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet)); + nRet = ml_tensors_info_destroy (hGetInputTensorsInfoHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet)); + return 0; } @@ -442,3 +460,209 @@ int ITc_nnstreamer_single_ml_get_output_info_p(void) return 0; } + +/* +* @testcase ITc_nnstreamer_single_ml_set_get_property_p +* @since_tizen 6.0 +* @author SRID(j.abhishek) +* @reviewer SRID(shobhit.v) +* @type auto +* @description To Set and Get ML single property value +* @scenario To Set and Get ML single property value +* 
@apicovered ml_single_set_property, ml_single_get_property +* @passcase When ml_single_set_property, ml_single_get_property and precondition APIs are successful. +* @failcase If target API fails or any precondition API fails +* @precondition None +* @postcondition None +*/ +//& purpose: API To Set and Get ML single property value +//& type: auto +int ITc_nnstreamer_single_ml_set_get_property_p(void) +{ + START_TEST; + + int nRet = -1; + ml_single_h hSingleHandle = NULL; + const char *pszPropertyName = "input"; + const char *pszSetProperty = "3:224:224:1"; + char *pszGetProperty = NULL; + + nRet = ml_single_open(&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet)); + CHECK_HANDLE(hSingleHandle, "ml_single_open"); + + //Target API + nRet = ml_single_set_property(hSingleHandle, pszPropertyName, pszSetProperty); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle)); + + //Target API + nRet = ml_single_get_property(hSingleHandle, pszPropertyName, &pszGetProperty); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(pszGetProperty, "ml_single_get_property", ml_single_close(hSingleHandle)); + + if ( 0 != strncmp(pszSetProperty, pszGetProperty, strlen(pszSetProperty)) ) + { + FPRINTF("[%s:%d] TC Failed Reason; Set Value %s and Get value %s of tensor property name is not same\\n", __FILE__, __LINE__, pszSetProperty, pszGetProperty); + nRet = ml_single_close(hSingleHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet)); + return 1; + } + + nRet = ml_single_close(hSingleHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet)); + + return 0; +} + +/* +* @testcase ITc_nnstreamer_single_ml_invoke_dynamic_p +* 
@since_tizen 6.0 +* @author SRID(j.abhishek) +* @reviewer SRID(shobhit.v) +* @type auto +* @description To Invokes the model with the given input data with the given tensors information. +* @scenario To Invokes the model with the given input data with the given tensors information. +* @apicovered ml_single_invoke_dynamic +* @passcase When ml_single_invoke_dynamic and precondition API is successful. +* @failcase If target API fails or any precondition API fails +* @precondition None +* @postcondition None +*/ +//& purpose: Invokes the model with the given input data with the given tensors information. +//& type: auto +int ITc_nnstreamer_single_ml_invoke_dynamic_p(void) +{ + START_TEST; + + int nRet = -1; + ml_single_h hSingleHandle = NULL; + ml_tensors_info_h hInputTensorsInfoHandle = NULL; + ml_tensors_info_h hOutputTensorsInfoHandle = NULL; + ml_tensors_data_h hInputDataHandle = NULL; + ml_tensors_data_h hOutputDataHandle = NULL; + + ml_tensor_type_e eTensorType = ML_TENSOR_TYPE_UNKNOWN; + ml_tensor_dimension nTmpTensorDimension; + + const char *pszPropertyName = "input"; + const char *pszSetProperty = "3:224:224:1"; + float fTmpInputArr[] = {1.0}; + unsigned int nTmpCnt = 0; + float *fOutBuf; + size_t nDataSize; + + nRet = ml_single_open(&hSingleHandle, MlTestModel, NULL, NULL, ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY); + PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_single_open", NnStreamerGetError(nRet)); + CHECK_HANDLE(hSingleHandle, "ml_single_open"); + + nRet = ml_single_get_input_info(hSingleHandle, &hInputTensorsInfoHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_get_input_info", ml_single_close(hSingleHandle)); + + nRet = ml_tensors_data_create(hInputTensorsInfoHandle, &hInputDataHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), 
ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create", ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_data_set_tensor_data(hInputDataHandle, 0, fTmpInputArr, 1 * sizeof(float)); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_count(hInputTensorsInfoHandle, &nTmpCnt); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_type(hInputTensorsInfoHandle, 0, &eTensorType); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_dimension(hInputTensorsInfoHandle, 0, nTmpTensorDimension); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + //Target API + nRet = ml_single_invoke_dynamic(hSingleHandle, hInputDataHandle, hInputTensorsInfoHandle, &hOutputDataHandle, &hOutputTensorsInfoHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_dynamic", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_dynamic", 
ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_data_get_tensor_data(hOutputDataHandle, 0, (void **)&fOutBuf, &nDataSize); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_count(hOutputTensorsInfoHandle, &nTmpCnt); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_type(hOutputTensorsInfoHandle, 0, &eTensorType); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_dimension(hOutputTensorsInfoHandle, 0, nTmpTensorDimension); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = 
ml_tensors_data_destroy(hInputDataHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet)); + + nRet = ml_tensors_data_destroy(hOutputDataHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet)); + + nRet = ml_tensors_info_destroy(hInputTensorsInfoHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet)); + + nRet = ml_tensors_info_destroy(hOutputTensorsInfoHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet)); + + nRet = ml_single_set_property(hSingleHandle, pszPropertyName, pszSetProperty); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_set_property", NnStreamerGetError(nRet), ml_single_close(hSingleHandle)); + + nRet = ml_single_get_input_info(hSingleHandle, &hInputTensorsInfoHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_get_input_info", NnStreamerGetError(nRet), ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hInputTensorsInfoHandle, "ml_single_get_input_info", ml_single_close(hSingleHandle)); + + nRet = ml_tensors_data_create(hInputTensorsInfoHandle, &hInputDataHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hInputDataHandle, "ml_tensors_data_create", ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + float fTmpInput2[] = {1.0, 2.0, 3.0, 4.0, 5.0}; + float *fOutBuf2; + + nRet = ml_tensors_data_set_tensor_data(hInputDataHandle, 0, fTmpInput2, 5 * sizeof(float)); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = 
ml_tensors_info_get_count(hInputTensorsInfoHandle, &nTmpCnt); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_type(hInputTensorsInfoHandle, 0, &eTensorType); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_dimension(hInputTensorsInfoHandle, 0, nTmpTensorDimension); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + //Target API + nRet = ml_single_invoke_dynamic(hSingleHandle, hInputDataHandle, hInputTensorsInfoHandle, &hOutputDataHandle, &hOutputTensorsInfoHandle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_single_invoke_dynamic", NnStreamerGetError(nRet), ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hOutputDataHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + CHECK_HANDLE_CLEANUP(hOutputTensorsInfoHandle, "ml_single_invoke_dynamic", ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_data_get_tensor_data(hOutputDataHandle, 0, (void **) &fOutBuf2, &nDataSize); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); 
ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_count(hOutputTensorsInfoHandle, &nTmpCnt); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_count", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_type(hOutputTensorsInfoHandle, 0, &eTensorType); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_type", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_tensors_info_get_tensor_dimension(hOutputTensorsInfoHandle, 0, nTmpTensorDimension); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_dimension", NnStreamerGetError(nRet), ml_tensors_data_destroy(hOutputDataHandle); ml_tensors_info_destroy(hOutputTensorsInfoHandle); ml_tensors_data_destroy(hInputDataHandle); ml_tensors_info_destroy(hInputTensorsInfoHandle); ml_single_close(hSingleHandle)); + + nRet = ml_single_close(hSingleHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_single_close", NnStreamerGetError(nRet)); + + nRet = ml_tensors_data_destroy(hInputDataHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet)); + + nRet = ml_tensors_data_destroy(hOutputDataHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet)); + + nRet = ml_tensors_info_destroy(hInputTensorsInfoHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", 
NnStreamerGetError(nRet)); + + nRet = ml_tensors_info_destroy(hOutputTensorsInfoHandle); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet)); + + return 0; +} diff --git a/src/itc/nnstreamer/ITs-nnstreamer.c b/src/itc/nnstreamer/ITs-nnstreamer.c index c8676bd11..07fa22495 100755 --- a/src/itc/nnstreamer/ITs-nnstreamer.c +++ b/src/itc/nnstreamer/ITs-nnstreamer.c @@ -20,7 +20,12 @@ ml_nnfw_hw_e g_eMlNnHardwareType[] = { ML_NNFW_HW_AUTO, ML_NNFW_HW_CPU, ML_NNFW_HW_GPU, - ML_NNFW_HW_NPU + ML_NNFW_HW_NPU, + ML_NNFW_HW_CPU_NEON, + ML_NNFW_HW_NPU_MOVIDIUS, + ML_NNFW_HW_NPU_EDGE_TPU, + ML_NNFW_HW_NPU_VIVANTE, + ML_NNFW_HW_NPU_SR }; diff --git a/src/itc/nnstreamer/tct-nnstreamer-native_mobile.h b/src/itc/nnstreamer/tct-nnstreamer-native_mobile.h index 74f88bf04..874e2e2fa 100755 --- a/src/itc/nnstreamer/tct-nnstreamer-native_mobile.h +++ b/src/itc/nnstreamer/tct-nnstreamer-native_mobile.h @@ -56,10 +56,10 @@ extern int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p (void); extern int ITc_nnstreamer_single_ml_single_open_close_p (void); extern int ITc_nnstreamer_single_ml_set_timeout_p (void); extern int ITc_nnstreamer_single_ml_single_invoke_p (void); -extern int ITc_nnstreamer_single_ml_get_input_info_p (void); +extern int ITc_nnstreamer_single_ml_get_set_input_info_p (void); extern int ITc_nnstreamer_single_ml_get_output_info_p (void); - - +extern int ITc_nnstreamer_single_ml_set_get_property_p(void); +extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void); testcase tc_array[] = { {"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup}, @@ -87,10 +87,11 @@ testcase tc_array[] = { {"ITc_nnstreamer_single_ml_single_open_close_p", ITc_nnstreamer_single_ml_single_open_close_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, 
{"ITc_nnstreamer_single_ml_set_timeout_p", ITc_nnstreamer_single_ml_set_timeout_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_single_invoke_p", ITc_nnstreamer_single_ml_single_invoke_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, -{"ITc_nnstreamer_single_ml_get_input_info_p", ITc_nnstreamer_single_ml_get_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, +{"ITc_nnstreamer_single_ml_get_set_input_info_p", ITc_nnstreamer_single_ml_get_set_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_get_output_info_p", ITc_nnstreamer_single_ml_get_output_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, - {"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup}, +{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, +{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {NULL, NULL} }; diff --git a/src/itc/nnstreamer/tct-nnstreamer-native_tizeniot.h b/src/itc/nnstreamer/tct-nnstreamer-native_tizeniot.h index 1bb5acfe9..bf62c65f5 100755 --- a/src/itc/nnstreamer/tct-nnstreamer-native_tizeniot.h +++ b/src/itc/nnstreamer/tct-nnstreamer-native_tizeniot.h @@ -55,10 +55,11 @@ extern int ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p (void); extern int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p (void); extern int ITc_nnstreamer_single_ml_single_open_close_p (void); extern int ITc_nnstreamer_single_ml_single_invoke_p (void); -extern int 
ITc_nnstreamer_single_ml_get_input_info_p (void); +extern int ITc_nnstreamer_single_ml_get_set_input_info_p (void); extern int ITc_nnstreamer_single_ml_get_output_info_p (void); extern int ITc_nnstreamer_single_ml_set_timeout_p (void); - +extern int ITc_nnstreamer_single_ml_set_get_property_p(void); +extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void); testcase tc_array[] = { {"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup}, @@ -85,10 +86,12 @@ testcase tc_array[] = { {"ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p", ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup}, {"ITc_nnstreamer_single_ml_single_open_close_p", ITc_nnstreamer_single_ml_single_open_close_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_single_invoke_p", ITc_nnstreamer_single_ml_single_invoke_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, -{"ITc_nnstreamer_single_ml_get_input_info_p", ITc_nnstreamer_single_ml_get_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, +{"ITc_nnstreamer_single_ml_get_set_input_info_p", ITc_nnstreamer_single_ml_get_set_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_get_output_info_p", ITc_nnstreamer_single_ml_get_output_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_set_timeout_p", ITc_nnstreamer_single_ml_set_timeout_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, 
ITs_nnstreamer_tensors_cleanup}, +{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, +{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {NULL, NULL} }; diff --git a/src/itc/nnstreamer/tct-nnstreamer-native_wearable.h b/src/itc/nnstreamer/tct-nnstreamer-native_wearable.h index 1bb5acfe9..bf62c65f5 100755 --- a/src/itc/nnstreamer/tct-nnstreamer-native_wearable.h +++ b/src/itc/nnstreamer/tct-nnstreamer-native_wearable.h @@ -55,10 +55,11 @@ extern int ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p (void); extern int ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p (void); extern int ITc_nnstreamer_single_ml_single_open_close_p (void); extern int ITc_nnstreamer_single_ml_single_invoke_p (void); -extern int ITc_nnstreamer_single_ml_get_input_info_p (void); +extern int ITc_nnstreamer_single_ml_get_set_input_info_p (void); extern int ITc_nnstreamer_single_ml_get_output_info_p (void); extern int ITc_nnstreamer_single_ml_set_timeout_p (void); - +extern int ITc_nnstreamer_single_ml_set_get_property_p(void); +extern int ITc_nnstreamer_single_ml_invoke_dynamic_p(void); testcase tc_array[] = { {"ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p", ITc_nnstreamer_pipeline_ml_pipeline_construct_destroy_p, ITs_nnstreamer_pipeline_startup, ITs_nnstreamer_pipeline_cleanup}, @@ -85,10 +86,12 @@ testcase tc_array[] = { {"ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p", ITc_nnstreamer_tensors_ml_tensors_data_set_get_tensor_data_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup}, {"ITc_nnstreamer_single_ml_single_open_close_p", ITc_nnstreamer_single_ml_single_open_close_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_single_invoke_p", 
ITc_nnstreamer_single_ml_single_invoke_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, -{"ITc_nnstreamer_single_ml_get_input_info_p", ITc_nnstreamer_single_ml_get_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, +{"ITc_nnstreamer_single_ml_get_set_input_info_p", ITc_nnstreamer_single_ml_get_set_input_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_get_output_info_p", ITc_nnstreamer_single_ml_get_output_info_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_single_ml_set_timeout_p", ITc_nnstreamer_single_ml_set_timeout_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {"ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p", ITc_nnstreamer_tensors_ml_tensors_data_create_destroy_p, ITs_nnstreamer_tensors_startup, ITs_nnstreamer_tensors_cleanup}, +{"ITc_nnstreamer_single_ml_set_get_property_p", ITc_nnstreamer_single_ml_set_get_property_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, +{"ITc_nnstreamer_single_ml_invoke_dynamic_p", ITc_nnstreamer_single_ml_invoke_dynamic_p, ITs_nnstreamer_tensors_single_startup, ITs_nnstreamer_tensors_single_cleanup}, {NULL, NULL} };