int nRetVal = -1;
nRetVal = ml_service_set_pipeline (key, g_Pipeline);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal));
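/* delete the stored pipeline, then verify it can no longer be fetched */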
nRetVal = ml_service_delete_pipeline (key);
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
nRetVal = ml_service_get_pipeline (key, &get_pipeline);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT(ML_ERROR_INVALID_PARAMETER, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal));
g_free (get_pipeline);
/* set simple pipeline */
int nRetVal = ml_service_set_pipeline(key, test_pipeline);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal));
/* delete finished service */
nRetVal = ml_service_delete_pipeline(key);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
return 0;
}
/* set simple pipeline */
int nRetVal = ml_service_set_pipeline(key, test_pipeline);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal));
/* launch the pipeline and check the state */
nRetVal = ml_service_launch_pipeline(key, &service_handle);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_launch_pipeline", NnStreamerGetError(nRetVal));
CHECK_HANDLE_CLEANUP(service_handle, "ml_service_launch_pipeline", ml_service_delete_pipeline(key));
g_usleep (1000 * 1000 * 1);
/* start the pipeline and check the state */
nRetVal = ml_service_start_pipeline(service_handle);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_start_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
g_usleep (1000 * 1000 * 1);
nRetVal = ml_service_get_pipeline_state(service_handle, &state);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
if (state != ML_PIPELINE_STATE_PLAYING)
{
FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
ml_service_stop_pipeline(service_handle);
ml_service_destroy(service_handle);
ml_service_delete_pipeline(key);
return 1;
}
/* stop the pipeline and check the state again */
nRetVal = ml_service_stop_pipeline(service_handle);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_stop_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
g_usleep (1000 * 1000 * 1);
nRetVal = ml_service_get_pipeline_state(service_handle, &state);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
if (state != ML_PIPELINE_STATE_PAUSED)
{
FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
ml_service_destroy(service_handle);
ml_service_delete_pipeline(key);
return 1;
}
/*destroy the pipeline*/
nRetVal = ml_service_destroy(service_handle);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal));
g_usleep (1000 * 1000 * 1);
/* delete finished service */
nRetVal = ml_service_delete_pipeline(key);
+ ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
return 0;
}
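/* server pipeline for tensor_query: receives uint8 3:4:4:1 tensors on the reserved port and returns them through tensor_query_serversink */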
gchar *szServerPipelineDesc = g_strdup_printf("tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false", server_port, nBufferCount);
/* set simple pipeline */
- int nRet = ml_service_set_pipeline(service_name, szServerPipelineDesc);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_set_pipeline", NnStreamerGetError(nRet), g_free(szServerPipelineDesc));
+ int nRetVal = ml_service_set_pipeline(service_name, szServerPipelineDesc);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_set_pipeline", NnStreamerGetError(nRetVal), g_free(szServerPipelineDesc));
gchar *szRetPipelineVal = NULL;
- nRet = ml_service_get_pipeline(service_name, &szRetPipelineVal);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline", NnStreamerGetError(nRet), g_free(szServerPipelineDesc); g_free(szRetPipelineVal));
+ nRetVal = ml_service_get_pipeline(service_name, &szRetPipelineVal);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal), g_free(szServerPipelineDesc); g_free(szRetPipelineVal));
if (0 != strcmp(szRetPipelineVal, szServerPipelineDesc))
{
FPRINTF("[Line : %d][%s] pipeline description mismatch for service name\\n", __LINE__, API_NAMESPACE);
g_free(szServerPipelineDesc);
g_free(szRetPipelineVal);
return 1;
}
g_free(szRetPipelineVal);
g_free(szServerPipelineDesc);
ml_pipeline_state_e state;
/* launch the pipeline and check the state */
- nRet = ml_service_launch_pipeline(service_name, &service_handle);
- PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_service_launch_pipeline", NnStreamerGetError(nRet));
+ nRetVal = ml_service_launch_pipeline(service_name, &service_handle);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_launch_pipeline", NnStreamerGetError(nRetVal));
CHECK_HANDLE(service_handle, "ml_service_launch_pipeline");
g_usleep (1000 * 1000 * 1);
/* start the pipeline and check the state */
- nRet = ml_service_start_pipeline(service_handle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_start_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_service_start_pipeline(service_handle);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_start_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
g_usleep (1000 * 1000 * 1);
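/* verify the pipeline reached the PLAYING state */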
- nRet = ml_service_get_pipeline_state(service_handle, &state);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_service_get_pipeline_state(service_handle, &state);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
if (state != ML_PIPELINE_STATE_PLAYING)
{
FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
ml_service_stop_pipeline(service_handle);
ml_service_destroy(service_handle);
ml_service_delete_pipeline(service_name);
return 1;
}
ml_service_h client_handle = NULL;
ml_option_h query_client_option = NULL;
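/* configure the query client options: host, port, destination, connection type, timeout and caps */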
- nRet = ml_option_create(&query_client_option);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_create(&query_client_option);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
CHECK_HANDLE_CLEANUP(query_client_option, "ml_option_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
gchar *host = g_strdup("localhost");
- nRet = ml_option_set(query_client_option, "host", host, g_free);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "host", host, g_free);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
guint client_port = _get_available_port();
- nRet = ml_option_set(query_client_option, "port", &client_port, NULL);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "port", &client_port, NULL);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
gchar *dest_host = g_strdup("localhost");
- nRet = ml_option_set(query_client_option, "dest-host", dest_host, g_free);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "dest-host", dest_host, g_free);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
guint dest_port = server_port;
- nRet = ml_option_set(query_client_option, "dest-port", &dest_port, NULL);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "dest-port", &dest_port, NULL);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
gchar *connect_type = g_strdup("TCP");
- nRet = ml_option_set(query_client_option, "connect-type", connect_type, g_free);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "connect-type", connect_type, g_free);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
guint timeout = 10000U;
- nRet = ml_option_set(query_client_option, "timeout", &timeout, NULL);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "timeout", &timeout, NULL);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
gchar *caps_str = g_strdup("other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:4:4:1,framerate=0/1");
- nRet = ml_option_set(query_client_option, "caps", caps_str, g_free);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ nRetVal = ml_option_set(query_client_option, "caps", caps_str, g_free);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_option_set", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
/* set input tensor */
ml_tensors_info_h in_info = NULL;
ml_tensor_dimension in_dim;
in_dim[0] = 3;
in_dim[1] = 4;
in_dim[2] = 4;
in_dim[3] = 1;
nRetVal = ml_tensors_info_create(&in_info);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
- nRet = ml_tensors_info_set_count(in_info, 1);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ nRetVal = ml_tensors_info_set_count(in_info, 1);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_set_count", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
- nRet = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ nRetVal = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
- nRet = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ nRetVal = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
//Target API
- nRet = ml_service_query_create(query_client_option, &client_handle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ nRetVal = ml_service_query_create(query_client_option, &client_handle);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_query_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
CHECK_HANDLE_CLEANUP(client_handle, "ml_service_query_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
g_usleep (1000 * 1000 * 1);
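/* allocate an input tensor buffer described by in_info */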
- nRet = ml_tensors_data_create(in_info, &input);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ nRetVal = ml_tensors_data_create(in_info, &input);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_data_create", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
CHECK_HANDLE_CLEANUP(input, "ml_tensors_data_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
/* request output tensor with input tensor */
g_usleep (1000 * 1000 * 1);
//Target API
- nRet = ml_service_query_request(client_handle, input, &output);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_request", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_service_query_request(client_handle, input, &output);
+ ML_SERVICE_FEATURE_CHECK;
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_query_request", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
CHECK_HANDLE_CLEANUP(output, "ml_service_query_request", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
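/* the server pipeline returns the tensor unchanged, so the output size should match the input size */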
- nRet = ml_tensors_info_get_tensor_size(in_info, 0, &input_data_size);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRet), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_tensors_info_get_tensor_size(in_info, 0, &input_data_size);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_info_get_tensor_size", NnStreamerGetError(nRetVal), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
- nRet = ml_tensors_data_get_tensor_data(output, 0, (void **)&received, &output_data_size);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRet), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_tensors_data_get_tensor_data(output, 0, (void **)&received, &output_data_size);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_data_get_tensor_data", NnStreamerGetError(nRetVal), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
if (input_data_size != output_data_size)
{
FPRINTF("[Line : %d][%s] input and output tensor size mismatch\\n", __LINE__, API_NAMESPACE);
return 1;
}
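/* release the output buffer returned for this request */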
- nRet = ml_tensors_data_destroy(output);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_tensors_data_destroy(output);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_tensors_data_destroy", NnStreamerGetError(nRetVal), ml_service_destroy(client_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
}
/** destroy client ml_service_h */
- nRet = ml_service_destroy(client_handle);
+ nRetVal = ml_service_destroy(client_handle);
g_usleep (1000 * 1000 * 1); /* 1 sec */
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
/** destroy server pipeline */
- nRet = ml_service_stop_pipeline(service_handle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_stop_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_service_stop_pipeline(service_handle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_stop_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
g_usleep (1000 * 1000 * 1); /* 1 sec */
- nRet = ml_service_destroy(service_handle);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_service_destroy(service_handle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal), ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
g_usleep (1000 * 1000 * 1); /* 1 sec */
/** delete finished service */
- nRet = ml_service_delete_pipeline(service_name);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_delete_pipeline", NnStreamerGetError(nRet), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ nRetVal = ml_service_delete_pipeline(service_name);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
- nRet = ml_option_destroy(query_client_option);
- PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_option_destroy", NnStreamerGetError(nRet));
+ nRetVal = ml_option_destroy(query_client_option);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_option_destroy", NnStreamerGetError(nRetVal));
- nRet = ml_tensors_data_destroy(input);
- PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
+ nRetVal = ml_tensors_data_destroy(input);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_tensors_data_destroy", NnStreamerGetError(nRetVal));
- nRet = ml_tensors_info_destroy(in_info);
- PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
+ nRetVal = ml_tensors_info_destroy(in_info);
+ PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_tensors_info_destroy", NnStreamerGetError(nRetVal));
return 0;
}