PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal));
if (0 != strcmp (g_Pipeline, get_pipeline))
{
- FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch \\n", __LINE__, API_NAMESPACE);
- g_free (get_pipeline);
- return 1;
+ FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch \\n", __LINE__, API_NAMESPACE);
+ g_free (get_pipeline);
+ return 1;
}
g_free (get_pipeline);
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal));
if (0 != strcmp (pipeline2, get_pipeline))
{
- FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch for second key\\n", __LINE__, API_NAMESPACE);
- g_free (pipeline2);
- g_free (get_pipeline);
- return 1;
+ FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch for second key\\n", __LINE__, API_NAMESPACE);
+ g_free (pipeline2);
+ g_free (get_pipeline);
+ return 1;
}
g_free (pipeline2);
g_free (get_pipeline);
START_TEST_ML_SERVICE;
const gchar *key = "ServiceName";
- gchar *test_pipeline = "videotestsrc ! fakesink";
+ gchar *test_pipeline = "videotestsrc ! fakesink async=false";
ml_service_h service_handle = NULL;
/* set simple pipeline */
return 1;
}
- nRetVal = ml_service_get_pipeline_state(service_handle, &state);
- PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal));
- if (state != ML_PIPELINE_STATE_PAUSED)
- {
- FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
- ml_service_destroy(service_handle);
- ml_service_delete_pipeline(key);
- return 1;
- }
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
/*destroy the pipeline*/
nRetVal = ml_service_destroy(service_handle);
ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal));
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
/* delete finished service */
nRetVal = ml_service_delete_pipeline(key);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
START_TEST_ML_SERVICE;
const gchar *key = "ServiceName";
- gchar *test_pipeline = "videotestsrc ! fakesink";
+ gchar *test_pipeline = "videotestsrc ! fakesink async=false";
ml_service_h service_handle = NULL;
/* set simple pipeline */
return 1;
}
- nRetVal = ml_service_get_pipeline_state(service_handle, &state);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_start_pipeline(service_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
- if (state != ML_PIPELINE_STATE_PAUSED)
- {
- FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
- return 1;
- }
+ g_usleep (1000 * 1000 * 1);
/* start the pipeline and check the state */
nRetVal = ml_service_start_pipeline(service_handle);
ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_start_pipeline", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
+ g_usleep (1000 * 1000 * 1);
+
nRetVal = ml_service_get_pipeline_state(service_handle, &state);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
if (state != ML_PIPELINE_STATE_PLAYING)
ML_SERVICE_FEATURE_CHECK;
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_stop_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
+ g_usleep (1000 * 1000 * 1);
+
nRetVal = ml_service_get_pipeline_state(service_handle, &state);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
if (state != ML_PIPELINE_STATE_PAUSED)
nRetVal = ml_service_destroy(service_handle);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal));
+ g_usleep (1000 * 1000 * 1);
+
/* delete finished service */
nRetVal = ml_service_delete_pipeline(key);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
START_TEST_ML_SERVICE;
/** Set server pipeline and launch it */
- const gchar *service_name = "simple_query_server_for_test";\r
- int nBufferCount = 5;\r
- guint server_port = _get_available_port ();\r
+ const gchar *service_name = "simple_query_server_for_test";
+ int nBufferCount = 3;
+ guint server_port = _get_available_port ();
gchar *szServerPipelineDesc = g_strdup_printf("tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false", server_port, nBufferCount);
/* set simple pipeline */
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline", NnStreamerGetError(nRet), g_free(szServerPipelineDesc); g_free(szRetPipelineVal));
if (0 != strcmp(szRetPipelineVal, szServerPipelineDesc))
{
- g_free(szServerPipelineDesc);\r
+ g_free(szServerPipelineDesc);
g_free(szRetPipelineVal);
FPRINTF("[Line : %d][%s] pipeline value mismatches\\n", __LINE__, API_NAMESPACE);
return 1;
}
-\r
- g_free(szServerPipelineDesc);\r
+
+ g_free(szServerPipelineDesc);
g_free(szRetPipelineVal);
ml_service_h service_handle = NULL;
PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_service_launch_pipeline", NnStreamerGetError(nRet));
CHECK_HANDLE(service_handle, "ml_service_launch_pipeline");
- nRet = ml_service_get_pipeline_state(service_handle, &state);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
- if (state != ML_PIPELINE_STATE_PAUSED)
- {
- ml_service_destroy(service_handle);
- ml_service_delete_pipeline(service_name);
- FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
- return 1;
- }
+ g_usleep (1000 * 1000 * 1);
/* start the pipeline and check the state */
nRet = ml_service_start_pipeline(service_handle);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_start_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
+ g_usleep (1000 * 1000 * 1);
+
nRet = ml_service_get_pipeline_state(service_handle, &state);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
if (state != ML_PIPELINE_STATE_PLAYING)
return 1;
}
- ml_service_h client_handle = NULL;\r
+ ml_service_h client_handle = NULL;
ml_option_h query_client_option = NULL;
nRet = ml_option_create(&query_client_option);
nRet = ml_option_set(query_client_option, "connect-type", connect_type, g_free);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
- guint timeout = 1000U;\r
+ guint timeout = 10000U;
nRet = ml_option_set(query_client_option, "timeout", &timeout, NULL);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));\r
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
gchar *caps_str = g_strdup("other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:4:4:1,framerate=0/1");
nRet = ml_option_set(query_client_option, "caps", caps_str, g_free);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
- /* set input tensor */\r
- ml_tensors_info_h in_info = NULL;\r
- ml_tensor_dimension in_dim;\r
- ml_tensors_data_h input = NULL;\r
-\r
- ml_tensors_info_create(&in_info);\r
- in_dim[0] = 3;\r
- in_dim[1] = 4;\r
- in_dim[2] = 4;\r
- in_dim[3] = 1;\r
-\r
- nRet = ml_tensors_info_set_count(in_info, 1);\r
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
- nRet = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8);\r
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
- nRet = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim);\r
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
- //Target API\r
- nRet = ml_service_query_create(query_client_option, &client_handle);\r
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
- CHECK_HANDLE_CLEANUP(client_handle, "ml_service_query_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
- nRet = ml_tensors_data_create(in_info, &input);\r
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
- CHECK_HANDLE_CLEANUP(input, "ml_tensors_data_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-
- /* request output tensor with input tensor */\r
+ /* set input tensor */
+ ml_tensors_info_h in_info = NULL;
+ ml_tensor_dimension in_dim;
+ ml_tensors_data_h input = NULL;
+
+ ml_tensors_info_create(&in_info);
+ in_dim[0] = 3;
+ in_dim[1] = 4;
+ in_dim[2] = 4;
+ in_dim[3] = 1;
+
+ nRet = ml_tensors_info_set_count(in_info, 1);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+ nRet = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+ nRet = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+ //Target API
+ nRet = ml_service_query_create(query_client_option, &client_handle);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ CHECK_HANDLE_CLEANUP(client_handle, "ml_service_query_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+ g_usleep (1000 * 1000 * 1);
+
+ nRet = ml_tensors_data_create(in_info, &input);
+ PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+ CHECK_HANDLE_CLEANUP(input, "ml_tensors_data_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+ /* request output tensor with input tensor */
for (int i = 0; i < nBufferCount; i++)
{
- ml_tensors_data_h output = NULL;\r
- uint8_t *received;\r
- size_t input_data_size, output_data_size;\r
- uint8_t test_data = (uint8_t)i;\r
-\r
+ ml_tensors_data_h output = NULL;
+ uint8_t *received;
+ size_t input_data_size, output_data_size;
+ uint8_t test_data = (uint8_t)i;
+
ml_tensors_data_set_tensor_data(input, 0, &test_data, sizeof(uint8_t));
+ g_usleep (1000 * 1000 * 1);
+
//Target API
nRet = ml_service_query_request(client_handle, input, &output);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_request", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
/** destroy client ml_service_h */
nRet = ml_service_destroy(client_handle);
+
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
/** destroy server pipeline */
nRet = ml_service_stop_pipeline(service_handle);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_stop_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
- nRet = ml_service_get_pipeline_state(service_handle, &state);
- PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
-
- if (state != ML_PIPELINE_STATE_PAUSED)
- {
- FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
- ml_service_destroy(service_handle);
- ml_service_delete_pipeline(service_name);
- ml_option_destroy(query_client_option);
- ml_tensors_data_destroy(input);
- ml_tensors_info_destroy(in_info);
- return 1;
- }
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
nRet = ml_service_destroy(service_handle);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
/** delete finished service */
nRet = ml_service_delete_pipeline(service_name);
PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_delete_pipeline", NnStreamerGetError(nRet), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
- /** it would fail if get the removed service */
- nRet = ml_service_get_pipeline(service_name, &szRetPipelineVal);
- PRINT_RESULT_CLEANUP(ML_ERROR_INVALID_PARAMETER, nRet, "ml_service_get_pipeline", NnStreamerGetError(nRet), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
-
- nRet = ml_option_destroy(query_client_option);\r
+ nRet = ml_option_destroy(query_client_option);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_option_destroy", NnStreamerGetError(nRet));
-\r
- nRet = ml_tensors_data_destroy(input);\r
+
+ nRet = ml_tensors_data_destroy(input);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
-\r
- nRet = ml_tensors_info_destroy(in_info);\r
+
+ nRet = ml_tensors_info_destroy(in_info);
PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
-\r
+
return 0;
-}
\ No newline at end of file
+}
{
IS_SUPPORT_ML_SERVICE_FEATURE;
const gchar *key = "ServiceName";
- const gchar *test_pipeline = "videotestsrc ! fakesink";
+ const gchar *test_pipeline = "videotestsrc ! fakesink async=false";
ml_service_h service_handle;
ml_pipeline_state_e state;
/* launch pipeline and check the state */
status = ml_service_launch_pipeline (key, &service_handle);
assert_eq (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service_handle, &state);
- assert_eq (ML_ERROR_NONE, status);
- assert_eq (ML_PIPELINE_STATE_PAUSED, state);
+
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
/* start the pipeline and check the state */
status = ml_service_start_pipeline (service_handle);
assert_eq (ML_ERROR_NONE, status);
+
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
status = ml_service_get_pipeline_state (service_handle, &state);
assert_eq (ML_ERROR_NONE, status);
assert_eq (ML_PIPELINE_STATE_PLAYING, state);
/* stop the pipeline and check the state */
status = ml_service_stop_pipeline (service_handle);
assert_eq (ML_ERROR_NONE, status);
+
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
status = ml_service_get_pipeline_state (service_handle, &state);
assert_eq (ML_ERROR_NONE, status);
assert_eq (ML_PIPELINE_STATE_PAUSED, state);
/** Set server pipeline and launch it */
const gchar *service_name = "simple_query_server_for_test";
- int num_buffers = 5;
+ int num_buffers = 3;
guint server_port = _get_available_port ();
gchar *server_pipeline_desc = g_strdup_printf ("tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false", server_port, num_buffers);
status = ml_service_launch_pipeline (service_name, &service);
assert_eq (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
- assert_eq (ML_ERROR_NONE, status);
- assert_eq (ML_PIPELINE_STATE_PAUSED, state);
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
status = ml_service_start_pipeline (service);
assert_eq (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
- assert_eq (ML_ERROR_NONE, status);
- assert_eq (ML_PIPELINE_STATE_PLAYING, state);
+
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
ml_service_h client;
ml_option_h query_client_option = NULL;
status = ml_option_set (query_client_option, "connect-type", connect_type, g_free);
assert_eq (ML_ERROR_NONE, status);
- guint timeout = 1000U;
+ guint timeout = 10000U;
status = ml_option_set (query_client_option, "timeout", &timeout, NULL);
assert_eq (ML_ERROR_NONE, status);
status = ml_service_query_create (query_client_option, &client);
assert_eq (ML_ERROR_NONE, status);
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
status = ml_tensors_data_create (in_info, &input);
assert_eq (ML_ERROR_NONE, status);
assert (NULL != input);
size_t input_data_size, output_data_size;
uint8_t test_data = (uint8_t) i;
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
ml_tensors_data_set_tensor_data (input, 0, &test_data, sizeof (uint8_t));
status = ml_service_query_request (client, input, &output);
status = ml_service_destroy (client);
assert_eq (ML_ERROR_NONE, status);
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
/** destroy server pipeline */
status = ml_service_stop_pipeline (service);
assert_eq (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
- assert_eq (ML_ERROR_NONE, status);
- assert_eq (ML_PIPELINE_STATE_PAUSED, state);
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
status = ml_service_destroy (service);
assert_eq (ML_ERROR_NONE, status);
+ g_usleep (1000 * 1000 * 1); /* 1 sec */
+
/** delete finished service */
status = ml_service_delete_pipeline (service_name);
assert_eq (ML_ERROR_NONE, status);
- /** it would fail if get the removed service */
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
- assert_eq (ML_ERROR_INVALID_PARAMETER, status);
+ status = ml_option_destroy (query_client_option);
+ assert_eq (ML_ERROR_NONE, status);
+ status = ml_tensors_data_destroy (input);
+ assert_eq (ML_ERROR_NONE, status);
+ status = ml_tensors_info_destroy (in_info);
+ assert_eq (ML_ERROR_NONE, status);
- ml_option_destroy (query_client_option);
- ml_tensors_data_destroy (input);
- ml_tensors_info_destroy (in_info);
return 0;
}