[UTC][ITC][nnstreamer][Non-ACR] Fix occasional failures when using ml-service APIs 07/282207/1
author     Yongjoo Ahn <yongjoo1.ahn@samsung.com>
           Wed, 28 Sep 2022 07:59:22 +0000 (16:59 +0900)
committer  Yongjoo Ahn <yongjoo1.ahn@samsung.com>
           Wed, 28 Sep 2022 08:03:24 +0000 (17:03 +0900)
- There are occasional failures when using the ml-service APIs because of
  latency in D-Bus and network communication.
- To mitigate this, add sleeps between API calls and increase the query
  timeout value.
- Also remove the pipeline state check right after launching a new pipeline,
  since the state may not have settled yet. A sketch of the resulting call
  pattern follows.
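
For reference, a minimal sketch of the hardened pattern (the header name is
an assumption; the calls, constants, and 1-second sleeps mirror the test
changes below):

    #include <glib.h>
    #include <ml-api-service.h> /* assumed header for the ml-service APIs */

    static int launch_and_destroy (void)
    {
      ml_service_h handle = NULL;
      int ret;

      ret = ml_service_set_pipeline ("ServiceName",
          "videotestsrc ! fakesink async=false");
      if (ret != ML_ERROR_NONE)
        return ret;

      ret = ml_service_launch_pipeline ("ServiceName", &handle);
      if (ret != ML_ERROR_NONE)
        return ret;

      /* Do not query the pipeline state right after launch; the D-Bus
       * round-trip may not have completed yet. Give it time instead. */
      g_usleep (1000 * 1000 * 1); /* 1 sec */

      ret = ml_service_destroy (handle);
      if (ret != ML_ERROR_NONE)
        return ret;

      g_usleep (1000 * 1000 * 1); /* 1 sec before deleting the service */
      return ml_service_delete_pipeline ("ServiceName");
    }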

Change-Id: Ia6a9b1bc07084cf20223e43645e4bb5128f87c33
Signed-off-by: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
src/itc/nnstreamer/ITs-nnstreamer-ml-service.c
src/utc/nnstreamer/utc-machine-learning-service.c

index 98b22e69652aae33c9ce5f9ed2c1abd253af9d0f..b2408eb3fab2eaddd4515092a3e2819d97eaec93 100644 (file)
@@ -155,9 +155,9 @@ int ITc_nnstreamer_ml_service_set_get_pipeline_p(void)
     PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal));
     if (0 != strcmp (g_Pipeline, get_pipeline))
     {
-               FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch \\n", __LINE__, API_NAMESPACE);
-               g_free (get_pipeline);
-               return 1;
+      FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch \\n", __LINE__, API_NAMESPACE);
+      g_free (get_pipeline);
+      return 1;
     }
 
     g_free (get_pipeline);
@@ -173,10 +173,10 @@ int ITc_nnstreamer_ml_service_set_get_pipeline_p(void)
     PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline", NnStreamerGetError(nRetVal));
     if (0 != strcmp (pipeline2, get_pipeline))
     {
-       FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch for second key\\n", __LINE__, API_NAMESPACE);
-       g_free (pipeline2);
-       g_free (get_pipeline);
-       return 1;
+      FPRINTF("[Line : %d][%s] Set and Get pipeline value mismatch for second key\\n", __LINE__, API_NAMESPACE);
+      g_free (pipeline2);
+      g_free (get_pipeline);
+      return 1;
     }
     g_free (pipeline2);
     g_free (get_pipeline);
@@ -253,7 +253,7 @@ int ITc_nnstreamer_ml_service_launch_destroy_pipeline_p(void)
        START_TEST_ML_SERVICE;
 
        const gchar *key = "ServiceName";
-       gchar *test_pipeline = "videotestsrc ! fakesink";
+       gchar *test_pipeline = "videotestsrc ! fakesink async=false";
        ml_service_h service_handle = NULL;
 
        /* set simple pipeline */
@@ -272,21 +272,15 @@ int ITc_nnstreamer_ml_service_launch_destroy_pipeline_p(void)
                return 1;
        }
 
-       nRetVal = ml_service_get_pipeline_state(service_handle, &state);
-       PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal));
-       if (state != ML_PIPELINE_STATE_PAUSED)
-       {
-               FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
-               ml_service_destroy(service_handle);
-               ml_service_delete_pipeline(key);
-               return 1;
-       }
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
 
        /*destroy the pipeline*/
        nRetVal = ml_service_destroy(service_handle);
        ML_SERVICE_FEATURE_CHECK;
        PRINT_RESULT(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal));
 
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        /* delete finished service */
        nRetVal = ml_service_delete_pipeline(key);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
@@ -314,7 +308,7 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void)
        START_TEST_ML_SERVICE;
 
        const gchar *key = "ServiceName";
-       gchar *test_pipeline = "videotestsrc ! fakesink";
+       gchar *test_pipeline = "videotestsrc ! fakesink async=false";
        ml_service_h service_handle = NULL;
 
        /* set simple pipeline */
@@ -330,19 +324,15 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void)
                return 1;
        }
 
-       nRetVal = ml_service_get_pipeline_state(service_handle, &state);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_start_pipeline(service_handle); ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
-       if (state != ML_PIPELINE_STATE_PAUSED)
-       {
-               FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
-               return 1;
-       }
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
 
        /* start the pipeline and check the state */
        nRetVal = ml_service_start_pipeline(service_handle);
        ML_SERVICE_FEATURE_CHECK;
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_start_pipeline", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
 
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        nRetVal = ml_service_get_pipeline_state(service_handle, &state);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
        if (state != ML_PIPELINE_STATE_PLAYING)
@@ -359,6 +349,8 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void)
        ML_SERVICE_FEATURE_CHECK;
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_stop_pipeline", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
 
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        nRetVal = ml_service_get_pipeline_state(service_handle, &state);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRetVal, "ml_service_get_pipeline_state", NnStreamerGetError(nRetVal), ml_service_destroy(service_handle); ml_service_delete_pipeline(key));
        if (state != ML_PIPELINE_STATE_PAUSED)
@@ -373,6 +365,8 @@ int ITc_nnstreamer_ml_service_start_stop_pipeline_p(void)
        nRetVal = ml_service_destroy(service_handle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_destroy", NnStreamerGetError(nRetVal));
 
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        /* delete finished service */
        nRetVal = ml_service_delete_pipeline(key);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRetVal, "ml_service_delete_pipeline", NnStreamerGetError(nRetVal));
@@ -399,9 +393,9 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void)
        START_TEST_ML_SERVICE;
 
        /** Set server pipeline and launch it */
-       const gchar *service_name = "simple_query_server_for_test";\r
-       int nBufferCount = 5;\r
-       guint server_port = _get_available_port ();\r
+       const gchar *service_name = "simple_query_server_for_test";
+       int nBufferCount = 3;
+       guint server_port = _get_available_port ();
        gchar *szServerPipelineDesc = g_strdup_printf("tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false", server_port, nBufferCount);
 
        /* set simple pipeline */
@@ -413,13 +407,13 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void)
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline", NnStreamerGetError(nRet), g_free(szServerPipelineDesc); g_free(szRetPipelineVal));
        if (0 != strcmp(szRetPipelineVal, szServerPipelineDesc))
        {
-               g_free(szServerPipelineDesc);\r
+               g_free(szServerPipelineDesc);
                g_free(szRetPipelineVal);
                FPRINTF("[Line : %d][%s] pipeline value mismatches\\n", __LINE__, API_NAMESPACE);
                return 1;
        }
-\r
-       g_free(szServerPipelineDesc);\r
+
+       g_free(szServerPipelineDesc);
        g_free(szRetPipelineVal);
 
        ml_service_h service_handle = NULL;
@@ -430,20 +424,14 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void)
        PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_service_launch_pipeline", NnStreamerGetError(nRet));
        CHECK_HANDLE(service_handle, "ml_service_launch_pipeline");
 
-       nRet = ml_service_get_pipeline_state(service_handle, &state);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
-       if (state != ML_PIPELINE_STATE_PAUSED)
-       {
-               ml_service_destroy(service_handle);
-               ml_service_delete_pipeline(service_name);
-               FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
-               return 1;
-       }
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
 
        /* start the pipeline and check the state */
        nRet = ml_service_start_pipeline(service_handle);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_start_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
 
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        nRet = ml_service_get_pipeline_state(service_handle, &state);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
        if (state != ML_PIPELINE_STATE_PLAYING)
@@ -455,7 +443,7 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void)
                return 1;
        }
 
-       ml_service_h client_handle = NULL;\r
+       ml_service_h client_handle = NULL;
        ml_option_h query_client_option = NULL;
 
        nRet = ml_option_create(&query_client_option);
@@ -482,53 +470,57 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void)
        nRet = ml_option_set(query_client_option, "connect-type", connect_type, g_free);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
 
-       guint timeout = 1000U;\r
+       guint timeout = 10000U;
        nRet = ml_option_set(query_client_option, "timeout", &timeout, NULL);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));\r
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
 
        gchar *caps_str = g_strdup("other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:4:4:1,framerate=0/1");
        nRet = ml_option_set(query_client_option, "caps", caps_str, g_free);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_option_set", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name));
 
-       /* set input tensor */\r
-       ml_tensors_info_h in_info = NULL;\r
-       ml_tensor_dimension in_dim;\r
-       ml_tensors_data_h input = NULL;\r
-\r
-       ml_tensors_info_create(&in_info);\r
-       in_dim[0] = 3;\r
-       in_dim[1] = 4;\r
-       in_dim[2] = 4;\r
-       in_dim[3] = 1;\r
-\r
-       nRet = ml_tensors_info_set_count(in_info, 1);\r
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
-       nRet = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8);\r
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
-       nRet = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim);\r
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
-       //Target API\r
-       nRet = ml_service_query_create(query_client_option, &client_handle);\r
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-       CHECK_HANDLE_CLEANUP(client_handle, "ml_service_query_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-\r
-       nRet = ml_tensors_data_create(in_info, &input);\r
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-       CHECK_HANDLE_CLEANUP(input, "ml_tensors_data_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));\r
-
-       /* request output tensor with input tensor */\r
+       /* set input tensor */
+       ml_tensors_info_h in_info = NULL;
+       ml_tensor_dimension in_dim;
+       ml_tensors_data_h input = NULL;
+
+       ml_tensors_info_create(&in_info);
+       in_dim[0] = 3;
+       in_dim[1] = 4;
+       in_dim[2] = 4;
+       in_dim[3] = 1;
+
+       nRet = ml_tensors_info_set_count(in_info, 1);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_count", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+       nRet = ml_tensors_info_set_tensor_type(in_info, 0, ML_TENSOR_TYPE_UINT8);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_type", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+       nRet = ml_tensors_info_set_tensor_dimension(in_info, 0, in_dim);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_info_set_tensor_dimension", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+       //Target API
+       nRet = ml_service_query_create(query_client_option, &client_handle);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+       CHECK_HANDLE_CLEANUP(client_handle, "ml_service_query_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
+       nRet = ml_tensors_data_create(in_info, &input);
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+       CHECK_HANDLE_CLEANUP(input, "ml_tensors_data_create", ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option));
+
+       /* request output tensor with input tensor */
        for (int i = 0; i < nBufferCount; i++)
        {
-               ml_tensors_data_h output = NULL;\r
-               uint8_t *received;\r
-               size_t input_data_size, output_data_size;\r
-               uint8_t test_data = (uint8_t)i;\r
-\r
+               ml_tensors_data_h output = NULL;
+               uint8_t *received;
+               size_t input_data_size, output_data_size;
+               uint8_t test_data = (uint8_t)i;
+
                ml_tensors_data_set_tensor_data(input, 0, &test_data, sizeof(uint8_t));
 
+               g_usleep (1000 * 1000 * 1); /* 1 sec */
+
                //Target API
                nRet = ml_service_query_request(client_handle, input, &output);
                PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_query_request", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
@@ -572,45 +564,34 @@ int ITc_nnstreamer_ml_service_query_create_request_p(void)
 
        /** destroy client ml_service_h */
        nRet = ml_service_destroy(client_handle);
+
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_stop_pipeline(service_handle); ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
 
        /** destroy server pipeline */
        nRet = ml_service_stop_pipeline(service_handle);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_stop_pipeline", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
 
-       nRet = ml_service_get_pipeline_state(service_handle, &state);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_get_pipeline_state", NnStreamerGetError(nRet), ml_service_destroy(service_handle); ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
-
-       if (state != ML_PIPELINE_STATE_PAUSED)
-       {
-               FPRINTF("[Line : %d][%s] state value mismatch for service handle\\n", __LINE__, API_NAMESPACE);
-               ml_service_destroy(service_handle);
-               ml_service_delete_pipeline(service_name);
-               ml_option_destroy(query_client_option);
-               ml_tensors_data_destroy(input);
-               ml_tensors_info_destroy(in_info);
-               return 1;
-       }
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
 
        nRet = ml_service_destroy(service_handle);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_destroy", NnStreamerGetError(nRet), ml_service_delete_pipeline(service_name); ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
 
+       g_usleep (1000 * 1000 * 1); /* 1 sec */
+
        /** delete finished service */
        nRet = ml_service_delete_pipeline(service_name);
        PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_service_delete_pipeline", NnStreamerGetError(nRet), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
 
-       /** it would fail if get the removed service */
-       nRet = ml_service_get_pipeline(service_name, &szRetPipelineVal);
-       PRINT_RESULT_CLEANUP(ML_ERROR_INVALID_PARAMETER, nRet, "ml_service_get_pipeline", NnStreamerGetError(nRet), ml_option_destroy(query_client_option); ml_tensors_data_destroy(input); ml_tensors_info_destroy(in_info));
-
-       nRet = ml_option_destroy(query_client_option);\r
+       nRet = ml_option_destroy(query_client_option);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_option_destroy", NnStreamerGetError(nRet));
-\r
-       nRet = ml_tensors_data_destroy(input);\r
+
+       nRet = ml_tensors_data_destroy(input);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
-\r
-       nRet = ml_tensors_info_destroy(in_info);\r
+
+       nRet = ml_tensors_info_destroy(in_info);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
-\r
+
        return 0;
-}
\ No newline at end of file
+}
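
Both files apply the same hardening to the query client: the request timeout
is raised from 1000 to 10000 and a pause follows client creation. A minimal
sketch of that setup (the header name, the "TCP" connect-type value, and the
omitted host/port options are assumptions based on the surrounding test code):

    #include <glib.h>
    #include <ml-api-service.h> /* assumed header */

    static int create_query_client (ml_service_h *client)
    {
      ml_option_h opt = NULL;
      gchar *connect_type = g_strdup ("TCP"); /* assumed value */
      guint timeout = 10000U; /* raised from 1000U to absorb network latency */
      gchar *caps = g_strdup ("other/tensors,num_tensors=1,format=static,"
          "types=uint8,dimensions=3:4:4:1,framerate=0/1");
      int ret;

      ml_option_create (&opt);
      /* host, port, dest-host, and dest-port options omitted for brevity */
      ml_option_set (opt, "connect-type", connect_type, g_free);
      ml_option_set (opt, "timeout", &timeout, NULL);
      ml_option_set (opt, "caps", caps, g_free);

      ret = ml_service_query_create (opt, client);
      g_usleep (1000 * 1000 * 1); /* 1 sec: let the connection settle */

      ml_option_destroy (opt);
      return ret;
    }
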
index f75718fcd871b85327266b54118b061337dde870..ee9644c1ea7ff5ad6114ce56d7777d49b37eeae3 100644 (file)
@@ -365,7 +365,7 @@ int utc_ml_service_pipeline_p1 (void)
 {
   IS_SUPPORT_ML_SERVICE_FEATURE;
   const gchar *key = "ServiceName";
-  const gchar *test_pipeline = "videotestsrc ! fakesink";
+  const gchar *test_pipeline = "videotestsrc ! fakesink async=false";
   ml_service_h service_handle;
   ml_pipeline_state_e state;
 
@@ -376,13 +376,15 @@ int utc_ml_service_pipeline_p1 (void)
   /* launch pipeline and check the state */
   status = ml_service_launch_pipeline (key, &service_handle);
   assert_eq (ML_ERROR_NONE, status);
-  status = ml_service_get_pipeline_state (service_handle, &state);
-  assert_eq (ML_ERROR_NONE, status);
-  assert_eq (ML_PIPELINE_STATE_PAUSED, state);
+
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
 
   /* start the pipeline and check the state */
   status = ml_service_start_pipeline (service_handle);
   assert_eq (ML_ERROR_NONE, status);
+
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
+
   status = ml_service_get_pipeline_state (service_handle, &state);
   assert_eq (ML_ERROR_NONE, status);
   assert_eq (ML_PIPELINE_STATE_PLAYING, state);
@@ -390,6 +392,9 @@ int utc_ml_service_pipeline_p1 (void)
   /* stop the pipeline and check the state */
   status = ml_service_stop_pipeline (service_handle);
   assert_eq (ML_ERROR_NONE, status);
+
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
+
   status = ml_service_get_pipeline_state (service_handle, &state);
   assert_eq (ML_ERROR_NONE, status);
   assert_eq (ML_PIPELINE_STATE_PAUSED, state);
@@ -509,7 +514,7 @@ int utc_ml_service_p1 (void)
 
   /** Set server pipeline and launch it */
   const gchar *service_name = "simple_query_server_for_test";
-  int num_buffers = 5;
+  int num_buffers = 3;
   guint server_port = _get_available_port ();
   gchar *server_pipeline_desc = g_strdup_printf ("tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false", server_port, num_buffers);
 
@@ -528,15 +533,12 @@ int utc_ml_service_p1 (void)
   status = ml_service_launch_pipeline (service_name, &service);
   assert_eq (ML_ERROR_NONE, status);
 
-  status = ml_service_get_pipeline_state (service, &state);
-  assert_eq (ML_ERROR_NONE, status);
-  assert_eq (ML_PIPELINE_STATE_PAUSED, state);
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
 
   status = ml_service_start_pipeline (service);
   assert_eq (ML_ERROR_NONE, status);
-  status = ml_service_get_pipeline_state (service, &state);
-  assert_eq (ML_ERROR_NONE, status);
-  assert_eq (ML_PIPELINE_STATE_PLAYING, state);
+
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
 
   ml_service_h client;
   ml_option_h query_client_option = NULL;
@@ -564,7 +566,7 @@ int utc_ml_service_p1 (void)
   status = ml_option_set (query_client_option, "connect-type", connect_type, g_free);
   assert_eq (ML_ERROR_NONE, status);
 
-  guint timeout = 1000U;
+  guint timeout = 10000U;
   status = ml_option_set (query_client_option, "timeout", &timeout, NULL);
   assert_eq (ML_ERROR_NONE, status);
 
@@ -590,6 +592,8 @@ int utc_ml_service_p1 (void)
   status = ml_service_query_create (query_client_option, &client);
   assert_eq (ML_ERROR_NONE, status);
 
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
+
   status = ml_tensors_data_create (in_info, &input);
   assert_eq (ML_ERROR_NONE, status);
   assert (NULL != input);
@@ -601,6 +605,8 @@ int utc_ml_service_p1 (void)
     size_t input_data_size, output_data_size;
     uint8_t test_data = (uint8_t) i;
 
+    g_usleep (1000 * 1000 * 1); /* 1 sec */
+
     ml_tensors_data_set_tensor_data (input, 0, &test_data, sizeof (uint8_t));
 
     status = ml_service_query_request (client, input, &output);
@@ -623,28 +629,30 @@ int utc_ml_service_p1 (void)
   status = ml_service_destroy (client);
   assert_eq (ML_ERROR_NONE, status);
 
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
+
   /** destroy server pipeline */
   status = ml_service_stop_pipeline (service);
   assert_eq (ML_ERROR_NONE, status);
 
-  status = ml_service_get_pipeline_state (service, &state);
-  assert_eq (ML_ERROR_NONE, status);
-  assert_eq (ML_PIPELINE_STATE_PAUSED, state);
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
 
   status = ml_service_destroy (service);
   assert_eq (ML_ERROR_NONE, status);
 
+  g_usleep (1000 * 1000 * 1); /* 1 sec */
+
   /** delete finished service */
   status = ml_service_delete_pipeline (service_name);
   assert_eq (ML_ERROR_NONE, status);
 
-  /** it would fail if get the removed service */
-  status = ml_service_get_pipeline (service_name, &ret_pipeline);
-  assert_eq (ML_ERROR_INVALID_PARAMETER, status);
+  status = ml_option_destroy (query_client_option);
+  assert_eq (ML_ERROR_NONE, status);
+  status = ml_tensors_data_destroy (input);
+  assert_eq (ML_ERROR_NONE, status);
+  status = ml_tensors_info_destroy (in_info);
+  assert_eq (ML_ERROR_NONE, status);
 
-  ml_option_destroy (query_client_option);
-  ml_tensors_data_destroy (input);
-  ml_tensors_info_destroy (in_info);
   return 0;
 }