"tensor_query_serversrc port=%u num-buffers=10 ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false",
port);
- status = ml_service_set_pipeline (service_name, pipeline_desc);
+ status = ml_service_pipeline_set (service_name, pipeline_desc);
EXPECT_EQ (ML_ERROR_NONE, status);
gchar *ret_pipeline;
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
+ status = ml_service_pipeline_get (service_name, &ret_pipeline);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_STREQ (pipeline_desc, ret_pipeline);
g_free (ret_pipeline);
ml_service_h service;
ml_pipeline_state_e state;
- status = ml_service_launch_pipeline (service_name, &service);
+ status = ml_service_pipeline_launch (service_name, &service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PAUSED, state);
- status = ml_service_start_pipeline (service);
+ status = ml_service_start (service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PLAYING, state);
"videotestsrc num-buffers=10 ! videoconvert ! videoscale ! video/x-raw,width=4,height=4,format=RGB,framerate=10/1 ! tensor_converter ! other/tensors,num_tensors=1,format=static ! tensor_query_client dest-port=%u port=%u ! fakesink sync=true",
port, sink_port);
- status = ml_service_set_pipeline ("client", client_pipeline_desc);
+ status = ml_service_pipeline_set ("client", client_pipeline_desc);
EXPECT_EQ (ML_ERROR_NONE, status);
ml_service_h client;
- status = ml_service_launch_pipeline ("client", &client);
+ status = ml_service_pipeline_launch ("client", &client);
EXPECT_EQ (ML_ERROR_NONE, status);
g_usleep (1 * 1000 * 1000);
- status = ml_service_start_pipeline (client);
+ status = ml_service_start (client);
EXPECT_EQ (ML_ERROR_NONE, status);
g_usleep (1 * 1000 * 1000);
- status = ml_service_stop_pipeline (client);
+ status = ml_service_stop (client);
EXPECT_EQ (ML_ERROR_NONE, status);
g_usleep (1 * 1000 * 1000);
g_usleep (1 * 1000 * 1000);
- status = ml_service_stop_pipeline (service);
+ status = ml_service_stop (service);
EXPECT_EQ (ML_ERROR_NONE, status);
g_usleep (1 * 1000 * 1000);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PAUSED, state);
EXPECT_EQ (ML_ERROR_NONE, status);
/* delete finished service */
- status = ml_service_delete_pipeline (service_name);
+ status = ml_service_pipeline_delete (service_name);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("client");
+ status = ml_service_pipeline_delete ("client");
EXPECT_EQ (ML_ERROR_NONE, status);
/* it would fail if get the removed service */
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
+ status = ml_service_pipeline_get (service_name, &ret_pipeline);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
g_free (pipeline_desc);
"tensor_query_serversrc port=%u num-buffers=10 ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false",
port);
- status = ml_service_set_pipeline (service_name, pipeline_desc);
+ status = ml_service_pipeline_set (service_name, pipeline_desc);
EXPECT_EQ (ML_ERROR_NONE, status);
gchar *ret_pipeline;
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
+ status = ml_service_pipeline_get (service_name, &ret_pipeline);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_STREQ (pipeline_desc, ret_pipeline);
g_free (ret_pipeline);
ml_service_h service;
ml_pipeline_state_e state;
- status = ml_service_launch_pipeline (service_name, &service);
+ status = ml_service_pipeline_launch (service_name, &service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PAUSED, state);
- status = ml_service_start_pipeline (service);
+ status = ml_service_start (service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PLAYING, state);
g_usleep (1 * 1000 * 1000);
- status = ml_service_stop_pipeline (service);
+ status = ml_service_stop (service);
EXPECT_EQ (ML_ERROR_NONE, status);
g_usleep (1 * 1000 * 1000);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PAUSED, state);
EXPECT_EQ (ML_ERROR_NONE, status);
/* delete finished service */
- status = ml_service_delete_pipeline (service_name);
+ status = ml_service_pipeline_delete (service_name);
EXPECT_EQ (ML_ERROR_NONE, status);
/* it would fail if get the removed service */
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
+ status = ml_service_pipeline_get (service_name, &ret_pipeline);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
g_free (pipeline_desc);
}
/**
- * @brief Test ml_service_set_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_set with invalid param.
*/
TEST_F (MLServiceAgentTest, set_pipeline_00_n)
{
int status;
- status = ml_service_set_pipeline (NULL, "some pipeline");
+ status = ml_service_pipeline_set (NULL, "some pipeline");
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_set_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_set with invalid param.
*/
TEST_F (MLServiceAgentTest, set_pipeline_01_n)
{
int status;
- status = ml_service_set_pipeline ("some key", NULL);
+ status = ml_service_pipeline_set ("some key", NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_get_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_get with invalid param.
*/
TEST_F (MLServiceAgentTest, get_pipeline_00_n)
{
int status;
gchar *ret_pipeline = NULL;
- status = ml_service_get_pipeline (NULL, &ret_pipeline);
+ status = ml_service_pipeline_get (NULL, &ret_pipeline);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_get_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_get with invalid param.
*/
TEST_F (MLServiceAgentTest, get_pipeline_01_n)
{
int status;
- status = ml_service_get_pipeline ("some key", NULL);
+ status = ml_service_pipeline_get ("some key", NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_delete_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_delete with invalid param.
*/
TEST_F (MLServiceAgentTest, delete_pipeline_00_n)
{
int status;
- status = ml_service_delete_pipeline (NULL);
+ status = ml_service_pipeline_delete (NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_delete_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_delete with invalid param.
*/
TEST_F (MLServiceAgentTest, delete_pipeline_01_n)
{
int status;
- status = ml_service_set_pipeline ("some key", "videotestsrc ! fakesink");
+ status = ml_service_pipeline_set ("some key", "videotestsrc ! fakesink");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("invalid key");
+ status = ml_service_pipeline_delete ("invalid key");
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_delete_pipeline ("some key");
+ status = ml_service_pipeline_delete ("some key");
EXPECT_EQ (ML_ERROR_NONE, status);
}
/**
- * @brief Test ml_service_delete_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_delete with invalid param.
*/
TEST_F (MLServiceAgentTest, delete_pipeline_02_n)
{
int status;
- status = ml_service_set_pipeline ("some key", "videotestsrc ! fakesink");
+ status = ml_service_pipeline_set ("some key", "videotestsrc ! fakesink");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("some key");
+ status = ml_service_pipeline_delete ("some key");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("some key");
+ status = ml_service_pipeline_delete ("some key");
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_launch_pipeline with invalid param.
+ * @brief Test ml_service_pipeline_launch with invalid param.
*/
TEST_F (MLServiceAgentTest, launch_pipeline_00_n)
{
int status;
- status = ml_service_launch_pipeline (NULL, NULL);
+ status = ml_service_pipeline_launch (NULL, NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_launch_pipeline with invalid key.
+ * @brief Test ml_service_pipeline_launch with invalid key.
*/
TEST_F (MLServiceAgentTest, launch_pipeline_01_n)
{
int status;
ml_service_h service_h = NULL;
- status = ml_service_launch_pipeline (NULL, &service_h);
+ status = ml_service_pipeline_launch (NULL, &service_h);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_launch_pipeline ("invalid key", &service_h);
+ status = ml_service_pipeline_launch ("invalid key", &service_h);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
/* service_h is still NULL */
}
/**
- * @brief Test ml_service_launch_pipeline with invalid pipeline.
+ * @brief Test ml_service_pipeline_launch with invalid pipeline.
*/
TEST_F (MLServiceAgentTest, launch_pipeline_02_n)
{
int status;
ml_service_h h;
- status = ml_service_set_pipeline ("key", "invalid_element ! invalid_element");
+ status = ml_service_pipeline_set ("key", "invalid_element ! invalid_element");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_launch_pipeline ("key", &h);
+ status = ml_service_pipeline_launch ("key", &h);
EXPECT_EQ (ML_ERROR_STREAMS_PIPE, status);
- status = ml_service_delete_pipeline ("key");
+ status = ml_service_pipeline_delete ("key");
EXPECT_EQ (ML_ERROR_NONE, status);
}
/**
- * @brief Test ml_service_launch_pipeline with invalid tensor_filer.
+ * @brief Test ml_service_pipeline_launch with invalid tensor_filter.
*/
TEST_F (MLServiceAgentTest, launch_pipeline_03_n)
{
int status;
ml_service_h h;
- status = ml_service_set_pipeline ("key",
+ status = ml_service_pipeline_set ("key",
"appsrc name=appsrc ! "
"other/tensors,dimensions=(string)1:1:1:1:1:1:1:1,types=(string)float32,framerate=(fraction)0/1 ! "
"tensor_filter framework=tensorflow-lite model=invalid_path.tflite ! "
"tensor_sink");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_launch_pipeline ("key", &h);
+ status = ml_service_pipeline_launch ("key", &h);
EXPECT_EQ (ML_ERROR_STREAMS_PIPE, status);
- status = ml_service_delete_pipeline ("key");
+ status = ml_service_pipeline_delete ("key");
EXPECT_EQ (ML_ERROR_NONE, status);
}
/**
- * @brief Test ml_service_start_pipeline with invalid param.
+ * @brief Test ml_service_start with invalid param.
*/
TEST_F (MLServiceAgentTest, start_pipeline_00_n)
{
int status;
- status = ml_service_start_pipeline (NULL);
+ status = ml_service_start (NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_stop_pipeline with invalid param.
+ * @brief Test ml_service_stop with invalid param.
*/
TEST_F (MLServiceAgentTest, stop_pipeline_00_n)
{
int status;
- status = ml_service_stop_pipeline (NULL);
+ status = ml_service_stop (NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
}
/**
- * @brief Test ml_service_get_pipeline_state with invalid param.
+ * @brief Test ml_service_pipeline_get_state with invalid param.
*/
TEST_F (MLServiceAgentTest, get_pipeline_state_00_n)
{
ml_service_h h;
ml_pipeline_state_e state;
- status = ml_service_get_pipeline_state (NULL, &state);
+ status = ml_service_pipeline_get_state (NULL, &state);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_set_pipeline ("key", "videotestsrc ! fakesink");
+ status = ml_service_pipeline_set ("key", "videotestsrc ! fakesink");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_launch_pipeline ("key", &h);
+ status = ml_service_pipeline_launch ("key", &h);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (h, NULL);
+ status = ml_service_pipeline_get_state (h, NULL);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
status = ml_service_destroy (h);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("key");
+ status = ml_service_pipeline_delete ("key");
EXPECT_EQ (ML_ERROR_NONE, status);
}
int status;
ml_service_h h;
- status = ml_service_set_pipeline ("key", "videotestsrc ! fakesink");
+ status = ml_service_pipeline_set ("key", "videotestsrc ! fakesink");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_launch_pipeline ("key", &h);
+ status = ml_service_pipeline_launch ("key", &h);
EXPECT_EQ (ML_ERROR_NONE, status);
ml_service_s *mls = (ml_service_s *) h;
status = ml_service_destroy (h);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("key");
+ status = ml_service_pipeline_delete ("key");
EXPECT_EQ (ML_ERROR_NONE, status);
}
int status;
ml_service_h h;
- status = ml_service_set_pipeline ("key", "videotestsrc ! fakesink");
+ status = ml_service_pipeline_set ("key", "videotestsrc ! fakesink");
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_launch_pipeline ("key", &h);
+ status = ml_service_pipeline_launch ("key", &h);
EXPECT_EQ (ML_ERROR_NONE, status);
ml_service_s *mls = (ml_service_s *) h;
gint64 _id = server->id;
server->id = 1; /* explicitly set id as invalid number */
- status = ml_service_start_pipeline (h);
+ status = ml_service_start (h);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
- status = ml_service_stop_pipeline (h);
+ status = ml_service_stop (h);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
ml_pipeline_state_e state;
- status = ml_service_get_pipeline_state (h, &state);
+ status = ml_service_pipeline_get_state (h, &state);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
status = ml_service_destroy (h);
status = ml_service_destroy (h);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_delete_pipeline ("key");
+ status = ml_service_pipeline_delete ("key");
EXPECT_EQ (ML_ERROR_NONE, status);
}
"tensor_query_serversrc port=%u num-buffers=%d ! other/tensors,num_tensors=1,dimensions=3:4:4:1,types=uint8,format=static,framerate=0/1 ! tensor_query_serversink async=false sync=false",
server_port, num_buffers);
- status = ml_service_set_pipeline (service_name, server_pipeline_desc);
+ status = ml_service_pipeline_set (service_name, server_pipeline_desc);
EXPECT_EQ (ML_ERROR_NONE, status);
gchar *ret_pipeline;
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
+ status = ml_service_pipeline_get (service_name, &ret_pipeline);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_STREQ (server_pipeline_desc, ret_pipeline);
g_free (server_pipeline_desc);
ml_service_h service;
ml_pipeline_state_e state;
- status = ml_service_launch_pipeline (service_name, &service);
+ status = ml_service_pipeline_launch (service_name, &service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PAUSED, state);
- status = ml_service_start_pipeline (service);
+ status = ml_service_start (service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PLAYING, state);
EXPECT_EQ (ML_ERROR_NONE, status);
/* destroy server pipeline */
- status = ml_service_stop_pipeline (service);
+ status = ml_service_stop (service);
EXPECT_EQ (ML_ERROR_NONE, status);
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_NONE, status);
EXPECT_EQ (ML_PIPELINE_STATE_PAUSED, state);
EXPECT_EQ (ML_ERROR_NONE, status);
/* delete finished service */
- status = ml_service_delete_pipeline (service_name);
+ status = ml_service_pipeline_delete (service_name);
EXPECT_EQ (ML_ERROR_NONE, status);
/* it would fail if get the removed service */
- status = ml_service_get_pipeline (service_name, &ret_pipeline);
+ status = ml_service_pipeline_get (service_name, &ret_pipeline);
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
ml_option_destroy (query_client_option);
ml_tensors_data_create (in_info, &in_data);
/* temporal condition for ml-service handle */
- ml_service_set_pipeline ("key", "videotestsrc ! fakesink");
- status = ml_service_launch_pipeline ("key", &handle);
+ ml_service_pipeline_set ("key", "videotestsrc ! fakesink");
+ status = ml_service_pipeline_launch ("key", &handle);
EXPECT_EQ (ML_ERROR_NONE, status);
/* testcase for invalid param */
EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
ml_service_destroy (handle);
- ml_service_delete_pipeline ("key");
+ ml_service_pipeline_delete ("key");
ml_tensors_data_destroy (in_data);
ml_tensors_info_destroy (in_info);
}
{
int status;
- status = ml_service_set_pipeline ("test", "test");
+ status = ml_service_pipeline_set ("test", "test");
EXPECT_EQ (ML_ERROR_IO_ERROR, status);
gchar *ret_pipeline;
- status = ml_service_get_pipeline ("test", &ret_pipeline);
+ status = ml_service_pipeline_get ("test", &ret_pipeline);
EXPECT_EQ (ML_ERROR_IO_ERROR, status);
ml_service_h service;
- status = ml_service_launch_pipeline ("test", &service);
+ status = ml_service_pipeline_launch ("test", &service);
EXPECT_EQ (ML_ERROR_IO_ERROR, status);
ml_service_s *mls = _ml_service_create_internal (ML_SERVICE_TYPE_SERVER_PIPELINE);
server->id = 1; /* explicitly set id as invalid number */
service = (ml_service_h) mls;
- status = ml_service_start_pipeline (service);
+ status = ml_service_start (service);
EXPECT_EQ (ML_ERROR_IO_ERROR, status);
- status = ml_service_stop_pipeline (service);
+ status = ml_service_stop (service);
EXPECT_EQ (ML_ERROR_IO_ERROR, status);
ml_pipeline_state_e state;
- status = ml_service_get_pipeline_state (service, &state);
+ status = ml_service_pipeline_get_state (service, &state);
EXPECT_EQ (ML_ERROR_IO_ERROR, status);
status = ml_service_destroy (service);