From: Sangjung Woo Date: Tue, 17 Jan 2023 08:49:12 +0000 (+0900) Subject: [ITC][nnstreamer][Non-ACR] Fix the ML Service Feature issue X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f83d07fbe35e009992e06def566f0ea95304bd1d;p=test%2Ftct%2Fnative%2Fapi.git [ITC][nnstreamer][Non-ACR] Fix the ML Service Feature issue Before calling ML Service API, 'machine_learning.service' feature should be enabled on the target device. If not, ML_ERROR_NOT_SUPPORTED will be returned and the test case will fail. To fix this issue, this patch adds the ML_SERVICE_FEATURE_CHECK macro for each test case. * Related issue: https://code.sec.samsung.net/jira/browse/TSEVEN-4675 Change-Id: Icfb92ffbbdaf7de27cb8b1caa666b2857b25b4ab Signed-off-by: Sangjung Woo --- diff --git a/src/itc/nnstreamer/ITs-nnstreamer-ml-service.c b/src/itc/nnstreamer/ITs-nnstreamer-ml-service.c index b2408eb3f..cb472309f 100644 --- a/src/itc/nnstreamer/ITs-nnstreamer-ml-service.c +++ b/src/itc/nnstreamer/ITs-nnstreamer-ml-service.c @@ -209,6 +209,7 @@ int ITc_nnstreamer_ml_service_delete_pipeline_p(void) int nRetVal = -1; nRetVal = ml_service_set_pipeline (key, g_Pipeline); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal)); nRetVal = ml_service_get_pipeline (key, &get_pipeline); @@ -225,6 +226,7 @@ int ITc_nnstreamer_ml_service_delete_pipeline_p(void) PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal)); nRetVal = ml_service_get_pipeline (key, &get_pipeline); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT(ML_ERROR_INVALID_PARAMETER, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal)); g_free (get_pipeline); @@ -258,6 +260,7 @@ int ITc_nnstreamer_ml_service_launch_destroy_pipeline_p(void) /* set simple pipeline */ int nRetVal = ml_service_set_pipeline(key, test_pipeline); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", 
NnStreamerGetError(nRetVal)); /* launch the pipeline and check the state */ @@ -283,6 +286,7 @@ int ITc_nnstreamer_ml_service_launch_destroy_pipeline_p(void) /* delete finished service */ nRetVal = ml_service_delete_pipeline(key); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal)); return 0; } @@ -313,10 +317,12 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void) /* set simple pipeline */ int nRetVal = ml_service_set_pipeline(key, test_pipeline); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal)); /* launch the pipeline and check the state */ nRetVal = ml_service_launch_pipeline(key, &service_handle); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_launch_pipeline", NnStreamerGetError(nRetVal)); if (service_handle == NULL) { @@ -334,6 +340,7 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void) g_usleep (1000 * 1000 * 1); nRetVal = ml_service_get_pipeline_state(service_handle, &state); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key)); if (state != ML_PIPELINE_STATE_PLAYING) { @@ -352,6 +359,7 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void) g_usleep (1000 * 1000 * 1); nRetVal = ml_service_get_pipeline_state(service_handle, &state); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key)); if (state != ML_PIPELINE_STATE_PAUSED) { @@ -363,12 +371,14 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void) /*destroy the pipeline*/ nRetVal = ml_service_destroy(service_handle); + ML_SERVICE_FEATURE_CHECK; 
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal)); g_usleep (1000 * 1000 * 1); /* delete finished service */ nRetVal = ml_service_delete_pipeline(key); + ML_SERVICE_FEATURE_CHECK; PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal)); return 0; } @@ -399,12 +409,14 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void) gchar *szServerPipelineDesc = g_strdup_printf("tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false", server_port, nBufferCount); /* set simple pipeline */ - int nRet = ml_service_set_pipeline(service_name, szServerPipelineDesc); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_set_pipeline", NnStreamerGetError(nRet), g_free(szServerPipelineDesc)); + int nRetVal = ml_service_set_pipeline(service_name, szServerPipelineDesc); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal), g_free(szServerPipelineDesc)); gchar *szRetPipelineVal = NULL; - nRet = ml_service_get_pipeline(service_name, &szRetPipelineVal); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline", NnStreamerGetError(nRet), g_free(szServerPipelineDesc); g_free(szRetPipelineVal)); + nRetVal = ml_service_get_pipeline(service_name, &szRetPipelineVal); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal), g_free(szServerPipelineDesc); g_free(szRetPipelineVal)); if (0 != strcmp(szRetPipelineVal, szServerPipelineDesc)) { g_free(szServerPipelineDesc); @@ -420,20 +432,23 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void) ml_pipeline_state_e state; /* launch the pipeline and check the state */ - nRet = ml_service_launch_pipeline(service_name, &service_handle); - 
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_service_launch_pipeline", NnStreamerGetError(nRet)); + nRetVal = ml_service_launch_pipeline(service_name, &service_handle); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_launch_pipeline", NnStreamerGetError(nRetVal)); CHECK_HANDLE(service_handle, "ml_service_launch_pipeline"); g_usleep (1000 * 1000 * 1); /* start the pipeline and check the state */ - nRet = ml_service_start_pipeline(service_handle); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_start_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_service_start_pipeline(service_handle); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_start_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); g_usleep (1000 * 1000 * 1); - nRet = ml_service_get_pipeline_state(service_handle, &state); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_service_get_pipeline_state(service_handle, &state); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); if (state != ML_PIPELINE_STATE_PLAYING) { FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE); @@ -446,37 +461,37 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void) ml_service_h client_handle = NULL; ml_option_h query_client_option = NULL; - nRet = ml_option_create(&query_client_option); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_create", NnStreamerGetError(nRet), 
ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_create(&query_client_option); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); CHECK_HANDLE_CLEANUP(query_client_option, "ml_option_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); gchar *host = g_strdup("localhost"); - nRet = ml_option_set(query_client_option, "host", host, g_free); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_set(query_client_option, "host", host, g_free); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); guint client_port = _get_available_port(); - nRet = ml_option_set(query_client_option, "port", &client_port, NULL); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_set(query_client_option, "port", &client_port, NULL); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); gchar *dest_host = g_strdup("localhost"); - nRet = ml_option_set(query_client_option, "dest-host", dest_host, g_free); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), 
ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_set(query_client_option, "dest-host", dest_host, g_free); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); guint dest_port = server_port; - nRet = ml_option_set(query_client_option, "dest-port", &dest_port, NULL); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_set(query_client_option, "dest-port", &dest_port, NULL); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); gchar *connect_type = g_strdup("TCP"); - nRet = ml_option_set(query_client_option, "connect-type", connect_type, g_free); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_set(query_client_option, "connect-type", connect_type, g_free); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); guint timeout = 10000U; - nRet = ml_option_set(query_client_option, "timeout", &timeout, NULL); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = 
ml_option_set(query_client_option, "timeout", &timeout, NULL); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); gchar *caps_str = g_strdup("other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:4:4:1,framerate=0/1"); - nRet = ml_option_set(query_client_option, "caps", caps_str, g_free); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); + nRetVal = ml_option_set(query_client_option, "caps", caps_str, g_free); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name)); /* set input tensor */ ml_tensors_info_h in_info = NULL; @@ -489,24 +504,25 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void) in_dim[2] = 4; in_dim[3] = 1; - nRet = ml_tensors_info_set_count(in_info, 1); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); + nRetVal = ml_tensors_info_set_count(in_info, 1); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_set_count", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); - nRet = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); 
ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); + nRetVal = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); - nRet = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); + nRetVal = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); //Target API - nRet = ml_service_query_create(query_client_option, &client_handle); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); + nRetVal = ml_service_query_create(query_client_option, &client_handle); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_query_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); CHECK_HANDLE_CLEANUP(client_handle, "ml_service_query_create", ml_service_stop_pipeline(service_handle); 
ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); g_usleep (1000 * 1000 * 1); - nRet = ml_tensors_data_create(in_info, &input); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); + nRetVal = ml_tensors_data_create(in_info, &input); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_data_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); CHECK_HANDLE_CLEANUP(input, "ml_tensors_data_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option)); /* request output tensor with input tensor */ @@ -522,15 +538,16 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void) g_usleep (1000 * 1000 * 1); //Target API - nRet = ml_service_query_request(client_handle, input, &output); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_request", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_service_query_request(client_handle, input, &output); + ML_SERVICE_FEATURE_CHECK; + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_query_request", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); 
CHECK_HANDLE_CLEANUP(output, "ml_service_query_request", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); - nRet = ml_tensors_info_get_tensor_size(in_info, 0, &input_data_size); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRet), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_tensors_info_get_tensor_size(in_info, 0, &input_data_size); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRetVal), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); - nRet = ml_tensors_data_get_tensor_data(output, 0, (void **)&received, &output_data_size); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_tensors_data_get_tensor_data(output, 0, (void **)&received, &output_data_size); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRetVal), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); 
ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); if (input_data_size != output_data_size) { @@ -558,40 +575,40 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void) return 1; } - nRet = ml_tensors_data_destroy(output); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_tensors_data_destroy(output); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_data_destroy", NnStreamerGetError(nRetVal), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); } /** destroy client ml_service_h */ - nRet = ml_service_destroy(client_handle); + nRetVal = ml_service_destroy(client_handle); g_usleep (1000 * 1000 * 1); /* 1 sec */ - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); /** destroy server pipeline */ - nRet = ml_service_stop_pipeline(service_handle); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, 
"ml_service_stop_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_service_stop_pipeline(service_handle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_stop_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); g_usleep (1000 * 1000 * 1); /* 1 sec */ - nRet = ml_service_destroy(service_handle); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_service_destroy(service_handle); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal), ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); g_usleep (1000 * 1000 * 1); /* 1 sec */ /** delete finished service */ - nRet = ml_service_delete_pipeline(service_name); - PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_delete_pipeline", NnStreamerGetError(nRet), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); + nRetVal = ml_service_delete_pipeline(service_name); + PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info)); - nRet = ml_option_destroy(query_client_option); - PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_option_destroy", NnStreamerGetError(nRet)); + nRetVal = ml_option_destroy(query_client_option); + 
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_option_destroy", NnStreamerGetError(nRetVal)); - nRet = ml_tensors_data_destroy(input); - PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet)); + nRetVal = ml_tensors_data_destroy(input); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_tensors_data_destroy", NnStreamerGetError(nRetVal)); - nRet = ml_tensors_info_destroy(in_info); - PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet)); + nRetVal = ml_tensors_info_destroy(in_info); + PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_tensors_info_destroy", NnStreamerGetError(nRetVal)); return 0; }