[UTC/ITC][nnstreamer][Non-ACR] Cleanup appsrc testcases 22/274222/11
author gichan <gichan2.jang@samsung.com>
Mon, 25 Apr 2022 04:39:08 +0000 (13:39 +0900)
committer gichan <gichan2.jang@samsung.com>
Tue, 26 Apr 2022 03:12:10 +0000 (12:12 +0900)
1. Previous test(ITc_nnstreamer_pipeline_ml_pipeline_src_set_event_cb_p) tried to release the tensor sink using the wrong function.
  - Wrong function : ml_pipeline_src_release_handle
  - Correct function : ml_pipeline_sink_unregister
2. `ml_pipeline_destroy` is called from `ITs_nnstreamer_pipeline_src_cleanup` when the test is finished. `ml_pipeline_destroy` internally releases all handles, so releasing the src and sink handles separately is unnecessary. Thus, remove the release calls.
3. Wait for the pipeline to process the buffers, and flush the data after
stopping the pipeline.

Change-Id: I1176b404a9326f445c641f7088f9f68890d1d7ae
Signed-off-by: gichan <gichan2.jang@samsung.com>
src/itc/nnstreamer/ITs-nnstreamer-src.c
src/utc/nnstreamer/utc-nnstreamer-pipeline-src.c

index edd06f8ee2b26a3714b96b081a21011ccf5e9d71..3de602ba7dbd8fcb460ca9dbf6e9072845bc1a16 100755 (executable)
@@ -205,11 +205,11 @@ int ITc_nnstreamer_pipeline_ml_pipeline_src_get_tensors_info_p(void)
        CHECK_HANDLE(hPipeSrcHandle, "ml_pipeline_src_get_handle");
 
        nRet = ml_pipeline_src_get_tensors_info (hPipeSrcHandle, &hTensorinfo);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_tensors_info", NnStreamerGetError(nRet), ml_pipeline_src_release_handle (hPipeSrcHandle));
-       CHECK_HANDLE_CLEANUP(hTensorinfo, "ml_pipeline_src_get_tensors_info",  ml_pipeline_src_release_handle (hPipeSrcHandle));
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_tensors_info", NnStreamerGetError(nRet));
+       CHECK_HANDLE(hTensorinfo, "ml_pipeline_src_get_tensors_info");
 
-       nRet = ml_pipeline_src_release_handle (hPipeSrcHandle);
-       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_src_release_handle", NnStreamerGetError(nRet));
+       nRet = ml_tensors_info_destroy (hTensorinfo);
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
 
        return 0;
 }
@@ -244,25 +244,24 @@ int ITc_nnstreamer_pipeline_ml_pipeline_src_input_data_p(void)
        CHECK_HANDLE(hPipeSrcHandle, "ml_pipeline_src_get_handle");
 
        nRet = ml_pipeline_src_get_tensors_info (hPipeSrcHandle, &hTensorinfo);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_tensors_info", NnStreamerGetError(nRet), ml_pipeline_src_release_handle (hPipeSrcHandle));
-       CHECK_HANDLE_CLEANUP(hTensorinfo, "ml_pipeline_src_get_tensors_info",  ml_pipeline_src_release_handle (hPipeSrcHandle));
+       PRINT_RESULT(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_tensors_info", NnStreamerGetError(nRet));
+       CHECK_HANDLE(hTensorinfo, "ml_pipeline_src_get_tensors_info");
 
        nRet = ml_tensors_data_create (hTensorinfo, &hTensorData);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_pipeline_src_release_handle (hPipeSrcHandle));
-       CHECK_HANDLE_CLEANUP(hTensorData, "ml_tensors_data_create",  ml_pipeline_src_release_handle (hPipeSrcHandle));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_create", NnStreamerGetError(nRet), ml_tensors_info_destroy (hTensorinfo));
+       CHECK_HANDLE_CLEANUP(hTensorData, "ml_tensors_data_create", ml_tensors_info_destroy (hTensorinfo));
 
        nRet = ml_tensors_data_set_tensor_data (hTensorData, 0, uintarray, 4);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorData);ml_pipeline_src_release_handle (hPipeSrcHandle));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_tensors_data_set_tensor_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorData); ml_tensors_info_destroy (hTensorinfo));
 
        nRet = ml_pipeline_src_input_data (hPipeSrcHandle, hTensorData, ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_input_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorData);ml_pipeline_src_release_handle (hPipeSrcHandle));
-
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_input_data", NnStreamerGetError(nRet), ml_tensors_data_destroy (hTensorData); ml_tensors_info_destroy (hTensorinfo));
 
        nRet = ml_tensors_data_destroy (hTensorData);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_data_destroy", NnStreamerGetError(nRet));
 
-       nRet = ml_pipeline_src_release_handle (hPipeSrcHandle);
-       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_src_release_handle", NnStreamerGetError(nRet));
+       nRet = ml_tensors_info_destroy (hTensorinfo);
+       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_tensors_info_destroy", NnStreamerGetError(nRet));
 
        return 0;
 }
@@ -308,14 +307,14 @@ int ITc_nnstreamer_pipeline_ml_pipeline_src_set_event_cb_p (void)
        CHECK_HANDLE_CLEANUP(hPipeSinkHandle, "ml_pipeline_sink_register",FREE_MEMORY(pszCountSink));
 
        nRet = ml_pipeline_src_get_handle(g_hPipelinehandle, "srcx", &hPipeSrcHandle);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_handle", NnStreamerGetError(nRet),ml_pipeline_src_release_handle (hPipeSinkHandle);FREE_MEMORY(pszCountSink));
-       CHECK_HANDLE_CLEANUP(hPipeSrcHandle, "ml_pipeline_src_get_handle",ml_pipeline_src_release_handle (hPipeSinkHandle);FREE_MEMORY(pszCountSink));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_get_handle", NnStreamerGetError(nRet), FREE_MEMORY(pszCountSink));
+       CHECK_HANDLE_CLEANUP(hPipeSrcHandle, "ml_pipeline_src_get_handle", FREE_MEMORY(pszCountSink));
 
        nRet = ml_pipeline_src_set_event_cb (hPipeSrcHandle, &callback, NULL);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_set_event_cb", NnStreamerGetError(nRet),ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_src_release_handle (hPipeSinkHandle);FREE_MEMORY(pszCountSink));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_src_set_event_cb", NnStreamerGetError(nRet), FREE_MEMORY(pszCountSink));
 
        nRet = ml_pipeline_start (g_hPipelinehandle);
-       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_start", NnStreamerGetError(nRet) ,ml_pipeline_src_release_handle (hPipeSrcHandle);ml_pipeline_src_release_handle (hPipeSinkHandle);FREE_MEMORY(pszCountSink));
+       PRINT_RESULT_CLEANUP(ML_ERROR_NONE, nRet, "ml_pipeline_start", NnStreamerGetError(nRet), FREE_MEMORY(pszCountSink));
 
        PushDummySrcApp(hPipeSrcHandle);
        g_usleep(50000);
@@ -324,12 +323,12 @@ int ITc_nnstreamer_pipeline_ml_pipeline_src_set_event_cb_p (void)
        nRet = ml_pipeline_stop (g_hPipelinehandle);
        PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_stop", NnStreamerGetError(nRet));
 
-       nRet = ml_pipeline_src_release_handle (hPipeSrcHandle);
-       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_src_release_handle", NnStreamerGetError(nRet));
+       /** Wait while the pipeline processes the buffers and flush the data */
+       g_usleep (200000);
+       ml_pipeline_flush (g_hPipelinehandle, false);
+       g_usleep (200000);
 
-       nRet = ml_pipeline_src_release_handle (hPipeSinkHandle);
-       PRINT_RESULT_NORETURN(ML_ERROR_NONE, nRet, "ml_pipeline_src_release_handle", NnStreamerGetError(nRet));
        FREE_MEMORY(pszCountSink);
 
        return 0;
-}
\ No newline at end of file
+}
index d10935929c887952e0d3ac133d2e199aedceff1b..05849d135c87ec3ba05990ee62937a20ce62556e 100644 (file)
@@ -284,9 +284,6 @@ int utc_ml_pipeline_src_set_event_cb_p (void)
   count_sink = (guint *) g_malloc0 (sizeof (guint));
   assert_neq (count_sink, NULL);
 
-  status = ml_pipeline_construct (pipeline, NULL, NULL, &handle);
-  assert_eq (status, ML_ERROR_NONE);
-
   status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle);
   assert_eq (status, ML_ERROR_NONE);
 
@@ -297,21 +294,19 @@ int utc_ml_pipeline_src_set_event_cb_p (void)
     handle, "sinkx", test_sink_callback_count, count_sink, &sinkhandle);
   assert_eq (status, ML_ERROR_NONE);
 
-  status = ml_pipeline_start (handle);
-  assert_eq (status, ML_ERROR_NONE);
-
   test_src_cb_push_dummy (srchandle);
   g_usleep (100000);
 
   status = ml_pipeline_stop (handle);
   assert_eq (status, ML_ERROR_NONE);
+  g_usleep (200000);
 
   assert_gt (*count_sink, 1U);
 
   status = ml_pipeline_src_release_handle (srchandle);
   assert_eq (status, ML_ERROR_NONE);
 
-  status = ml_pipeline_src_release_handle (sinkhandle);
+  status = ml_pipeline_sink_unregister (sinkhandle);
   assert_eq (status, ML_ERROR_NONE);
 
   g_free (count_sink);