nns_capi_single_srcs = files('ml-api-inference-single.c')
nns_capi_pipeline_srcs = files('ml-api-inference-pipeline.c')
nns_capi_service_srcs = files('ml-api-service-common.c','ml-api-service-agent-client.c', 'ml-api-service-query-client.c')
+if nnstreamer_edge_dep.found()
+ nns_capi_service_srcs += files('ml-api-remote-service.c')
+endif
# Build ML-API Common Lib First.
nns_capi_common_shared_lib = shared_library ('capi-ml-common',
include_directories: nns_capi_include
)
-
# Service API
if get_option('enable-ml-service')
+ ml_service_deps = [nns_capi_dep, ml_agentd_deps]
+ if nnstreamer_edge_dep.found()
+ ml_service_deps += nnstreamer_edge_dep
+ endif
+
nns_capi_service_shared_lib = shared_library ('capi-ml-service',
nns_capi_service_srcs,
- dependencies: [nns_capi_dep, ml_agentd_deps],
+ dependencies: ml_service_deps,
include_directories: [nns_capi_include, ml_agentd_incs],
install: true,
install_dir: api_install_libdir,
nns_capi_service_static_lib = static_library ('capi-ml-service',
nns_capi_service_srcs,
- dependencies: [nns_capi_dep, ml_agentd_deps],
+ dependencies: ml_service_deps,
include_directories: [nns_capi_include, ml_agentd_incs],
install: true,
link_with: ml_agentd_lib,
endif
nns_capi_service_dep = declare_dependency(link_with: nns_capi_service_lib,
- dependencies: [nns_capi_dep, ml_agentd_deps],
+ dependencies: ml_service_deps,
include_directories: nns_capi_include
)
endif
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved.
+ *
+ * @file ml-api-remote-service.c
+ * @date 26 Jun 2023
+ * @brief ml-remote-service of NNStreamer/Service C-API
+ * @see https://github.com/nnstreamer/nnstreamer
+ * @author Gichan Jang <gichan2.jang@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <glib.h>
+#include <gst/gst.h>
+#include <gst/gstbuffer.h>
+#include <gst/app/app.h>
+#include <string.h>
+
+#include "ml-api-internal.h"
+#include "ml-api-service.h"
+#include "ml-api-service-private.h"
+
+/**
+ * @brief Data struct for options.
+ */
+typedef struct
+{
+  gchar *host;        /**< Host address of this node (owned, freed by _mlrs_release_edge_info) */
+  guint port;         /**< Port of this node */
+  gchar *topic;       /**< Topic for pub/sub style connections (owned, may be NULL) */
+  gchar *dest_host;   /**< Remote peer host address (owned) */
+  guint dest_port;    /**< Remote peer port */
+  nns_edge_connect_type_e conn_type;  /**< Connection type (TCP/HYBRID/MQTT/AITT) */
+  nns_edge_node_type_e node_type;     /**< PUB (remote_sender) or SUB (remote_receiver) */
+} edge_info_s;
+
+/**
+ * @brief Get ml-service node type from ml_option.
+ */
+static nns_edge_node_type_e
+_mlrs_get_node_type (const gchar * value)
+{
+  /* NULL means the option was not set; report UNKNOWN silently. */
+  if (!value)
+    return NNS_EDGE_NODE_TYPE_UNKNOWN;
+
+  if (g_ascii_strcasecmp (value, "remote_sender") == 0)
+    return NNS_EDGE_NODE_TYPE_PUB;
+
+  if (g_ascii_strcasecmp (value, "remote_receiver") == 0)
+    return NNS_EDGE_NODE_TYPE_SUB;
+
+  /* Any other string is a user error worth reporting. */
+  _ml_error_report ("Invalid node type: %s, Please check ml_option.", value);
+  return NNS_EDGE_NODE_TYPE_UNKNOWN;
+}
+
+/**
+ * @brief Get nnstreamer-edge connection type
+ */
+static nns_edge_connect_type_e
+_mlrs_get_conn_type (const gchar * value)
+{
+  /* Case-insensitive name-to-enum table for supported connection types. */
+  static const struct
+  {
+    const gchar *name;
+    nns_edge_connect_type_e type;
+  } conn_table[] = {
+    {"TCP", NNS_EDGE_CONNECT_TYPE_TCP},
+    {"HYBRID", NNS_EDGE_CONNECT_TYPE_HYBRID},
+    {"MQTT", NNS_EDGE_CONNECT_TYPE_MQTT},
+    {"AITT", NNS_EDGE_CONNECT_TYPE_AITT},
+  };
+  guint i;
+
+  if (!value)
+    return NNS_EDGE_CONNECT_TYPE_UNKNOWN;
+
+  for (i = 0; i < G_N_ELEMENTS (conn_table); i++) {
+    if (g_ascii_strcasecmp (value, conn_table[i].name) == 0)
+      return conn_table[i].type;
+  }
+
+  return NNS_EDGE_CONNECT_TYPE_UNKNOWN;
+}
+
+/**
+ * @brief Get edge info from ml_option.
+ */
+static void
+_mlrs_get_edge_info (ml_option_h option, edge_info_s * edge_info)
+{
+  void *value;
+
+  /**
+   * Note: values returned by ml_option_get () are borrowed pointers owned by
+   * the option handle; string values are duplicated into edge_info so they
+   * stay valid after the option is destroyed. Every string member frees any
+   * previously stored value before being replaced.
+   */
+  if (ML_ERROR_NONE == ml_option_get (option, "host", &value)) {
+    g_free (edge_info->host);
+    edge_info->host = g_strdup (value);
+  }
+  if (ML_ERROR_NONE == ml_option_get (option, "port", &value))
+    edge_info->port = *((guint *) value);
+  if (ML_ERROR_NONE == ml_option_get (option, "dest-host", &value)) {
+    g_free (edge_info->dest_host);
+    edge_info->dest_host = g_strdup (value);
+  }
+  if (ML_ERROR_NONE == ml_option_get (option, "dest-port", &value))
+    edge_info->dest_port = *((guint *) value);
+  if (ML_ERROR_NONE == ml_option_get (option, "connect-type", &value))
+    edge_info->conn_type = _mlrs_get_conn_type (value);
+  if (ML_ERROR_NONE == ml_option_get (option, "topic", &value)) {
+    /* Free any previous topic too, consistent with host/dest-host above. */
+    g_free (edge_info->topic);
+    edge_info->topic = g_strdup (value);
+  }
+  if (ML_ERROR_NONE == ml_option_get (option, "node-type", &value))
+    edge_info->node_type = _mlrs_get_node_type (value);
+}
+
+/**
+ * @brief Set nns-edge info.
+ */
+static void
+_mlrs_set_edge_info (edge_info_s * edge_info, nns_edge_h edge_h)
+{
+  /**
+   * Buffer for the decimal representation of a guint: up to 10 digits
+   * ("4294967295") plus the NUL terminator. The previous 6-byte buffer
+   * overflowed for any port value >= 100000.
+   */
+  char port[16];
+
+  nns_edge_set_info (edge_h, "HOST", edge_info->host);
+  g_snprintf (port, sizeof (port), "%u", edge_info->port);
+  nns_edge_set_info (edge_h, "PORT", port);
+
+  /* TOPIC is optional; only forward it when the user configured one. */
+  if (edge_info->topic)
+    nns_edge_set_info (edge_h, "TOPIC", edge_info->topic);
+
+  nns_edge_set_info (edge_h, "DEST_HOST", edge_info->dest_host);
+  g_snprintf (port, sizeof (port), "%u", edge_info->dest_port);
+  nns_edge_set_info (edge_h, "DEST_PORT", port);
+}
+
+/**
+ * @brief Release edge info.
+ */
+static void
+_mlrs_release_edge_info (edge_info_s * edge_info)
+{
+  /* Free the heap-allocated members; the struct itself is owned by the caller. */
+  g_free (edge_info->host);
+  g_free (edge_info->topic);
+  g_free (edge_info->dest_host);
+}
+
+/**
+ * @brief Get ml remote service type from ml_option.
+ */
+static ml_remote_service_type_e
+_mlrs_get_service_type (gchar * service_str)
+{
+  /* Case-insensitive name-to-enum table of supported remote service types. */
+  static const struct
+  {
+    const gchar *name;
+    ml_remote_service_type_e type;
+  } service_table[] = {
+    {"model_raw", ML_REMOTE_SERVICE_TYPE_MODEL_RAW},
+    {"model_url", ML_REMOTE_SERVICE_TYPE_MODEL_URL},
+    {"pipeline_raw", ML_REMOTE_SERVICE_TYPE_PIPELINE_RAW},
+    {"pipeline_url", ML_REMOTE_SERVICE_TYPE_PIPELINE_URL},
+  };
+  guint i;
+
+  /* NULL means the info field was absent; no error report in that case. */
+  if (!service_str)
+    return ML_REMOTE_SERVICE_TYPE_UNKNOWN;
+
+  for (i = 0; i < G_N_ELEMENTS (service_table); i++) {
+    if (g_ascii_strcasecmp (service_str, service_table[i].name) == 0)
+      return service_table[i].type;
+  }
+
+  _ml_error_report ("Invalid service type: %s, Please check service type.",
+      service_str);
+  return ML_REMOTE_SERVICE_TYPE_UNKNOWN;
+}
+
+/**
+ * @brief Process ml remote service
+ */
+static int
+_mlrs_process_remote_service (nns_edge_data_h data_h)
+{
+  void *data;
+  nns_size_t data_len;
+  gchar *service_str = NULL;
+  gchar *service_key = NULL;
+  ml_remote_service_type_e service_type;
+  int ret = NNS_EDGE_ERROR_NONE;
+
+  /* 'data' is a borrowed pointer into data_h; it must not be freed here. */
+  ret = nns_edge_data_get (data_h, 0, &data, &data_len);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report_return (ret,
+        "Failed to get data while processing the ml-remote service.");
+  }
+
+  /**
+   * nns_edge_data_get_info () returns a newly allocated string that the
+   * caller must free (see nnstreamer-edge API); the previous code leaked
+   * both strings on every path.
+   */
+  ret = nns_edge_data_get_info (data_h, "service-type", &service_str);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report_return (ret,
+        "Failed to get service type while processing the ml-remote service.");
+  }
+  service_type = _mlrs_get_service_type (service_str);
+  g_free (service_str);
+
+  ret = nns_edge_data_get_info (data_h, "service-key", &service_key);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report_return (ret,
+        "Failed to get service key while processing the ml-remote service.");
+  }
+
+  switch (service_type) {
+    case ML_REMOTE_SERVICE_TYPE_MODEL_URL:
+      /** @todo Download the model file from given URL */
+      /* fallthrough */
+    case ML_REMOTE_SERVICE_TYPE_MODEL_RAW:
+      /** @todo Save model file to given path and register the model */
+      break;
+    case ML_REMOTE_SERVICE_TYPE_PIPELINE_URL:
+      /** @todo Download the pipeline description from given URL */
+      /* fallthrough */
+    case ML_REMOTE_SERVICE_TYPE_PIPELINE_RAW:
+      /* The payload is the pipeline description; register it under the key. */
+      ml_service_set_pipeline (service_key, (gchar *) data);
+      break;
+    default:
+      _ml_error_report
+          ("Unknown service type or not supported yet. Service num: %d",
+          service_type);
+      break;
+  }
+
+  g_free (service_key);
+  return ret;
+}
+
+/**
+ * @brief Edge event callback.
+ */
+static int
+_mlrs_edge_event_cb (nns_edge_event_h event_h, void *user_data)
+{
+  nns_edge_event_e event = NNS_EDGE_EVENT_UNKNOWN;
+  nns_edge_data_h data_h = NULL;
+  int ret = NNS_EDGE_ERROR_NONE;
+
+  ret = nns_edge_event_get_type (event_h, &event);
+  if (NNS_EDGE_ERROR_NONE != ret)
+    return ret;
+
+  switch (event) {
+    case NNS_EDGE_EVENT_NEW_DATA_RECEIVED:
+      /* Parse the received payload and dispatch the requested service. */
+      ret = nns_edge_event_parse_new_data (event_h, &data_h);
+      if (NNS_EDGE_ERROR_NONE == ret)
+        ret = _mlrs_process_remote_service (data_h);
+      break;
+    default:
+      break;
+  }
+
+  /**
+   * Destroy the parsed data handle on every path. The previous code
+   * returned early on errors and leaked data_h.
+   */
+  if (data_h)
+    nns_edge_data_destroy (data_h);
+
+  return ret;
+}
+
+/**
+ * @brief Create edge handle.
+ */
+static int
+_mlrs_create_edge_handle (nns_edge_h * edge_h, edge_info_s * edge_info)
+{
+  int ret;
+
+  ret = nns_edge_create_handle (edge_info->topic, edge_info->conn_type,
+      edge_info->node_type, edge_h);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report ("nns_edge_create_handle failed.");
+    return ret;
+  }
+
+  /* From here on, any failure must release the created handle. */
+  ret = nns_edge_set_event_callback (*edge_h, _mlrs_edge_event_cb, NULL);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report ("nns_edge_set_event_callback failed.");
+    goto error;
+  }
+
+  _mlrs_set_edge_info (edge_info, *edge_h);
+
+  ret = nns_edge_start (*edge_h);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report ("nns_edge_start failed.");
+    goto error;
+  }
+
+  /* A subscriber (remote_receiver) actively connects to the remote peer. */
+  if (edge_info->node_type == NNS_EDGE_NODE_TYPE_SUB) {
+    ret = nns_edge_connect (*edge_h, edge_info->dest_host,
+        edge_info->dest_port);
+    if (NNS_EDGE_ERROR_NONE != ret) {
+      _ml_error_report ("nns_edge_connect failed.");
+      goto error;
+    }
+  }
+
+  return NNS_EDGE_ERROR_NONE;
+
+error:
+  nns_edge_release_handle (*edge_h);
+  return ret;
+}
+
+/**
+ * @brief Creates ml-service handle with given ml-option handle.
+ */
+int
+ml_remote_service_create (ml_option_h option, ml_service_h * handle)
+{
+  ml_service_s *mls;
+  _ml_remote_service_s *remote_s;
+  nns_edge_h edge_h = NULL;
+  edge_info_s *edge_info = NULL;
+  int ret = ML_ERROR_NONE;
+
+  check_feature_state (ML_FEATURE_SERVICE);
+  check_feature_state (ML_FEATURE_INFERENCE);
+
+  if (!option) {
+    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+        "The parameter, 'option' is NULL. It should be a valid ml_option_h, which should be created by ml_option_create().");
+  }
+
+  if (!handle) {
+    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+        "The parameter, 'handle' (ml_service_h), is NULL. It should be a valid ml_service_h.");
+  }
+
+  /* Default edge configuration; individual fields are overridden by option. */
+  edge_info = g_new0 (edge_info_s, 1);
+  edge_info->topic = NULL;
+  edge_info->host = g_strdup ("localhost");
+  edge_info->port = 0;
+  edge_info->dest_host = g_strdup ("localhost");
+  edge_info->dest_port = 0;
+  edge_info->conn_type = NNS_EDGE_CONNECT_TYPE_UNKNOWN;
+
+  _mlrs_get_edge_info (option, edge_info);
+
+  ret = _mlrs_create_edge_handle (&edge_h, edge_info);
+  if (ML_ERROR_NONE != ret) {
+    /* Release the string members too; a bare g_free leaked host/dest_host. */
+    _mlrs_release_edge_info (edge_info);
+    g_free (edge_info);
+    return ret;
+  }
+
+  /* Ownership of edge_h moves into the private service struct. */
+  remote_s = g_new0 (_ml_remote_service_s, 1);
+  remote_s->edge_h = edge_h;
+  remote_s->node_type = edge_info->node_type;
+
+  mls = g_new0 (ml_service_s, 1);
+  mls->type = ML_SERVICE_TYPE_REMOTE;
+  mls->priv = remote_s;
+
+  *handle = mls;
+
+  _mlrs_release_edge_info (edge_info);
+  g_free (edge_info);
+
+  return ret;
+}
+
+/**
+ * @brief Register new information, such as neural network models or pipeline descriptions, on a remote server.
+*/
+int
+ml_remote_service_register (ml_service_h handle, ml_option_h option, void *data,
+    size_t data_len)
+{
+  ml_service_s *mls = (ml_service_s *) handle;
+  _ml_remote_service_s *remote_s = NULL;
+  gchar *service_key = NULL;
+  nns_edge_data_h data_h = NULL;
+  int ret = NNS_EDGE_ERROR_NONE;
+  gchar *service_str = NULL;
+
+  check_feature_state (ML_FEATURE_SERVICE);
+  check_feature_state (ML_FEATURE_INFERENCE);
+
+  if (!handle) {
+    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+        "The parameter, 'handle' is NULL. It should be a valid ml_service_h.");
+  }
+
+  if (!option) {
+    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+        "The parameter, 'option' is NULL. It should be a valid ml_option_h, which should be created by ml_option_create().");
+  }
+
+  if (!data) {
+    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+        "The parameter, 'data' is NULL. It should be a valid pointer.");
+  }
+
+  if (data_len <= 0) {
+    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+        "The parameter, 'data_len' should be greater than 0.");
+  }
+
+  /* Strings from ml_option_get () are owned by the option; do not free. */
+  ret = ml_option_get (option, "service-type", (void **) &service_str);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report
+        ("Failed to get ml-remote service type. It should be set by ml_option_set().");
+    return ret;
+  }
+  ret = ml_option_get (option, "service-key", (void **) &service_key);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report
+        ("Failed to get ml-remote service key. It should be set by ml_option_set().");
+    return ret;
+  }
+
+  remote_s = (_ml_remote_service_s *) mls->priv;
+
+  ret = nns_edge_data_create (&data_h);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report ("Failed to create an edge data.");
+    return ret;
+  }
+
+  nns_edge_data_set_info (data_h, "service-type", service_str);
+  nns_edge_data_set_info (data_h, "service-key", service_key);
+
+  ret = nns_edge_data_add (data_h, data, data_len, NULL);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    /* Fixed copy-pasted "camera data" message and added the missing return:
+     * the previous code destroyed data_h and then still passed it to
+     * nns_edge_send (). */
+    _ml_error_report ("Failed to add the data to the edge data.\n");
+    nns_edge_data_destroy (data_h);
+    return ret;
+  }
+
+  ret = nns_edge_send (remote_s->edge_h, data_h);
+  if (NNS_EDGE_ERROR_NONE != ret) {
+    _ml_error_report
+        ("Failed to publish the data to register the remote service.");
+  }
+
+  /* NOTE(review): nns_edge_send () keeps its own copy of the data, so the
+   * handle is destroyed on every path here (the previous code leaked it on
+   * success) — confirm against the nnstreamer-edge API contract. */
+  nns_edge_data_destroy (data_h);
+
+  return ret;
+}
g_async_queue_unref (query->out_data_queue);
g_free (query);
+ } else if (ML_SERVICE_TYPE_REMOTE == mls->type) {
+ _ml_remote_service_s *mlrs = (_ml_remote_service_s *) mls->priv;
+ nns_edge_release_handle (mlrs->edge_h);
+ g_free (mlrs);
} else {
_ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
"Invalid type of ml_service_h.");
#include "pipeline-dbus.h"
#include "model-dbus.h"
#include "resource-dbus.h"
+#include "nnstreamer-edge.h"
#ifdef __cplusplus
extern "C" {
ML_SERVICE_TYPE_UNKNOWN = 0,
ML_SERVICE_TYPE_SERVER_PIPELINE,
ML_SERVICE_TYPE_CLIENT_QUERY,
+ ML_SERVICE_TYPE_REMOTE,
ML_SERVICE_TYPE_MAX
} ml_service_type_e;
+/**
+ * @brief Service types handled by the ml-remote service (see "service-type" option).
+ */
+typedef enum {
+  ML_REMOTE_SERVICE_TYPE_UNKNOWN = 0,
+  ML_REMOTE_SERVICE_TYPE_MODEL_RAW,     /**< Raw model payload */
+  ML_REMOTE_SERVICE_TYPE_MODEL_URL,     /**< URL to download a model */
+  ML_REMOTE_SERVICE_TYPE_PIPELINE_RAW,  /**< Raw pipeline description */
+  ML_REMOTE_SERVICE_TYPE_PIPELINE_URL,  /**< URL to download a pipeline description */
+
+  ML_REMOTE_SERVICE_TYPE_MAX
+} ml_remote_service_type_e;
+
/**
- * @brief Structure for ml_service_h
+ * @brief Structure for ml_remote_service_h
*/
typedef struct
{
} _ml_service_query_s;
/**
+ * @brief Structure for ml_remote_service
+ */
+typedef struct
+{
+  nns_edge_h edge_h;              /**< nnstreamer-edge handle; released on ml_service_destroy() */
+  nns_edge_node_type_e node_type; /**< PUB (remote_sender) or SUB (remote_receiver) */
+} _ml_remote_service_s;
+
+/**
* @brief Creates ml remote service handle with given ml-option handle.
* @details The caller should set one of "remote_sender" and "remote_receiver" as a service type in @a ml_option.
* @remarks The @a handle should be destroyed using ml_service_destroy().
* gchar *activate = g_strdup ("true");
* ml_option_set (remote_service_option_h, "activate", activate, g_free);
*
- * gchar *description = g_strdup ("temp descriptio for remote model register test");
+ * gchar *description = g_strdup ("temp description for remote model register test");
* ml_option_set (remote_service_option_h, "description", description, g_free);
*
* gchar *name = g_strdup ("model_name.nnfw");
nnstreamer_internal_dep = dependency('nnstreamer-internal')
nnstreamer_single_dep = dependency('nnstreamer-single')
nnstreamer_dep = dependency('nnstreamer')
+nnstreamer_edge_dep = dependency('nnstreamer-edge', required: false)
if get_option('enable-ml-service')
libsystemd_dep = dependency('libsystemd')
%define tensorflow2_gpu_delegate_support 1
%define nnfw_support 1
%define armnn_support 0
+%define nnstreamer_edge_support 1
%define release_test 0
%define test_script $(pwd)/packaging/run_unittests.sh
BuildRequires: pkgconfig(capi-appfw-app-common)
%endif
+%if 0%{?nnstreamer_edge_support}
+BuildRequires: nnstreamer-edge-devel
+%endif
+
%description
Tizen ML(Machine Learning) native API for NNStreamer.
You can construct a data stream pipeline with neural networks easily.
# Run test
# If gcov package generation is enabled, pass the test from GBS.
%if 0%{?unit_test} && !0%{?gcov}
+bash %{test_script} ./tests/capi/unittest_capi_remote_service
bash %{test_script} ./tests/capi/unittest_capi_inference_single
bash %{test_script} ./tests/capi/unittest_capi_inference
bash %{test_script} ./tests/capi/unittest_capi_datatype_consistency
include_directories: nns_capi_include,
)
test('unittest_capi_service_agent_client', unittest_capi_service_agent_client, env: testenv, timeout: 100)
+
+ if nnstreamer_edge_dep.found()
+ unittest_capi_remote_service = executable('unittest_capi_remote_service',
+ 'unittest_capi_remote_service.cc',
+ link_with: nns_capi_service_lib,
+ dependencies: [unittest_common_dep, gdbus_gen_test_dep, lib_ml_agentd_test_dep],
+ install: get_option('install-test'),
+ install_dir: unittest_install_dir,
+ include_directories: nns_capi_include,
+ )
+ test('unittest_capi_remote_service', unittest_capi_remote_service, env: testenv, timeout: 100)
+ endif
endif
if nnfw_dep.found()
--- /dev/null
+/**
+ * @file unittest_capi_remote_service.cc
+ * @date 26 Jun 2023
+ * @brief Unit test for ML Service C-API remote service.
+ * @see https://github.com/nnstreamer/api
+ * @author Gichan Jang <gichan2.jang@samsung.com>
+ * @bug No known bugs
+ */
+
+#include <gtest/gtest.h>
+#include <gdbus-util.h>
+#include <gio/gio.h>
+#include <ml-api-inference-pipeline-internal.h>
+#include <ml-api-internal.h>
+#include <ml-api-service-private.h>
+#include <ml-api-service.h>
+
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+
+/**
+ * @brief Test base class for Database of ML Service API.
+ */
+class MLRemoteService : public ::testing::Test
+{
+  protected:
+  GTestDBus *dbus;  /**< Private D-Bus session for the ml-agent services */
+
+  public:
+  /**
+   * @brief Setup method for each test case.
+   * @details Spins up an isolated test D-Bus daemon serving the service
+   *          files under tests/services so the ml-service agent can be
+   *          activated without touching the system bus.
+   */
+  void SetUp () override
+  {
+    g_autofree gchar *current_dir = g_get_current_dir ();
+    g_autofree gchar *services_dir
+        = g_build_filename (current_dir, "tests/services", NULL);
+
+    dbus = g_test_dbus_new (G_TEST_DBUS_NONE);
+    ASSERT_NE (nullptr, dbus);
+
+    g_test_dbus_add_service_dir (dbus, services_dir);
+
+    g_test_dbus_up (dbus);
+  }
+
+  /**
+   * @brief Teardown method for each test case.
+   * @details Stops and releases the test D-Bus daemon.
+   */
+  void TearDown () override
+  {
+    if (dbus) {
+      g_test_dbus_down (dbus);
+      g_object_unref (dbus);
+    }
+  }
+
+  /**
+   * @brief Get available port number.
+   * @details Binds a TCP socket to port 0 so the kernel picks a free
+   *          ephemeral port, reads it back, and closes the socket.
+   *          Returns 0 on failure.
+   */
+  static guint _get_available_port (void)
+  {
+    struct sockaddr_in sin;
+    guint port = 0;
+    gint sock;
+    socklen_t len = sizeof (struct sockaddr);
+
+    sin.sin_family = AF_INET;
+    sin.sin_addr.s_addr = INADDR_ANY;
+    sin.sin_port = htons (0);
+
+    sock = socket (AF_INET, SOCK_STREAM, 0);
+    EXPECT_TRUE (sock > 0);
+    if (sock < 0)
+      return 0;
+
+    if (bind (sock, (struct sockaddr *) &sin, sizeof (struct sockaddr)) == 0) {
+      if (getsockname (sock, (struct sockaddr *) &sin, &len) == 0) {
+        port = ntohs (sin.sin_port);
+      }
+    }
+    close (sock);
+
+    /* NOTE(review): the freed port may be re-grabbed by another process
+     * before the caller binds it (inherent TOCTOU of this technique). */
+    EXPECT_TRUE (port > 0);
+    return port;
+  }
+};
+
+/**
+ * @brief use case of pipeline registration using ml remote service.
+ */
+TEST_F (MLRemoteService, registerPipeline)
+{
+  int status;
+
+  /**============= Prepare client ============= **/
+  ml_service_h client_h;
+  ml_option_h client_option_h = NULL;
+
+  status = ml_option_create (&client_option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Client acts as the sender (publisher) side. */
+  gchar *client_node_type = g_strdup ("remote_sender");
+  status = ml_option_set (client_option_h, "node-type", client_node_type, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  gchar *client_dest_host = g_strdup ("127.0.0.1");
+  status = ml_option_set (client_option_h, "host", client_dest_host, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Fixed port shared by client ("port") and server ("dest-port") below. */
+  guint dest_port = 3000;
+  status = ml_option_set (client_option_h, "port", &dest_port, NULL);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  gchar *client_connect_type = g_strdup ("TCP");
+  status = ml_option_set (client_option_h, "connect-type", client_connect_type, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Topic is reused for the server option below, which takes ownership
+   * via g_free; hence no destructor here. */
+  gchar *topic = g_strdup ("remote_service_test_topic");
+  status = ml_option_set (client_option_h, "topic", topic, NULL);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  status = ml_remote_service_create (client_option_h, &client_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /**============= Prepare server ============= **/
+  ml_service_h server_h;
+  ml_option_h server_option_h = NULL;
+  status = ml_option_create (&server_option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Server acts as the receiver (subscriber) side. */
+  gchar *server_node_type = g_strdup ("remote_receiver");
+  status = ml_option_set (server_option_h, "node-type", server_node_type, g_free);
+
+  gchar *dest_host = g_strdup ("127.0.0.1");
+  status = ml_option_set (server_option_h, "dest-host", dest_host, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  status = ml_option_set (server_option_h, "topic", topic, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  status = ml_option_set (server_option_h, "dest-port", &dest_port, NULL);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  gchar *server_connect_type = g_strdup ("TCP");
+  status = ml_option_set (server_option_h, "connect-type", server_connect_type, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  status = ml_remote_service_create (server_option_h, &server_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Options describing what the client registers on the server. */
+  ml_option_h remote_service_option_h = NULL;
+  status = ml_option_create (&remote_service_option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+
+  gchar *service_type = g_strdup ("pipeline_raw");
+  ml_option_set (remote_service_option_h, "service-type", service_type, g_free);
+
+  gchar *service_key = g_strdup ("pipeline_test_key");
+  ml_option_set (remote_service_option_h, "service-key", service_key, g_free);
+
+  g_autofree gchar *pipeline_desc = g_strdup ("fakesrc ! fakesink");
+
+  /* Send the raw pipeline description from client to server. */
+  status = ml_remote_service_register (client_h, remote_service_option_h,
+      pipeline_desc, strlen (pipeline_desc) + 1);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /** Wait for the server to register the pipeline. */
+  g_usleep (1000000);
+
+  /* The server side should have stored the pipeline under the service key. */
+  g_autofree gchar *ret_pipeline = NULL;
+  status = ml_service_get_pipeline (service_key, &ret_pipeline);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+  EXPECT_STREQ (pipeline_desc, ret_pipeline);
+
+  status = ml_service_destroy (server_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+  status = ml_service_destroy (client_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+  status = ml_option_destroy (server_option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+  status = ml_option_destroy (remote_service_option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+  status = ml_option_destroy (client_option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+}
+
+/**
+ * @brief Test ml_remote_service_create with invalid param.
+ */
+TEST_F (MLRemoteService, createInvalidParam_n)
+{
+  int status;
+  ml_option_h option_h = NULL;
+  ml_service_h service_h = NULL;
+
+  status = ml_option_create (&option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* NULL option handle must be rejected. */
+  status = ml_remote_service_create (NULL, &service_h);
+  EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
+
+  /* NULL output handle must be rejected. */
+  status = ml_remote_service_create (option_h, NULL);
+  EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
+
+  status = ml_option_destroy (option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+}
+
+/**
+ * @brief Test ml_remote_service_register with invalid param.
+ */
+TEST_F (MLRemoteService, registerInvalidParam_n)
+{
+  int status;
+  ml_service_h service_h = NULL;
+  ml_option_h option_h = NULL;
+  g_autofree gchar *str = g_strdup ("Temp_test_str");
+  size_t len = strlen (str) + 1;
+
+  status = ml_option_create (&option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Build a minimal valid sender configuration first. */
+  gchar *client_node_type = g_strdup ("remote_sender");
+  status = ml_option_set (option_h, "node-type", client_node_type, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  gchar *client_dest_host = g_strdup ("127.0.0.1");
+  status = ml_option_set (option_h, "dest-host", client_dest_host, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  guint dest_port = 1883;
+  status = ml_option_set (option_h, "dest-port", &dest_port, NULL);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  gchar *client_connect_type = g_strdup ("HYBRID");
+  status = ml_option_set (option_h, "connect-type", client_connect_type, g_free);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  gchar *topic = g_strdup ("temp_test_topic");
+  status = ml_option_set (option_h, "topic", topic, NULL);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  status = ml_remote_service_create (option_h, &service_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  /* Each parameter is invalidated in turn; all must be rejected. */
+  status = ml_remote_service_register (NULL, option_h, str, len);
+  EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
+
+  status = ml_remote_service_register (service_h, NULL, str, len);
+  EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
+
+  status = ml_remote_service_register (service_h, option_h, NULL, len);
+  EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
+
+  status = ml_remote_service_register (service_h, option_h, str, 0);
+  EXPECT_EQ (ML_ERROR_INVALID_PARAMETER, status);
+
+  status = ml_option_destroy (option_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+
+  status = ml_service_destroy (service_h);
+  EXPECT_EQ (ML_ERROR_NONE, status);
+}
+
+/**
+ * @brief Main gtest
+ */
+int
+main (int argc, char **argv)
+{
+  int result = -1;
+
+  try {
+    testing::InitGoogleTest (&argc, argv);
+  } catch (...) {
+    g_warning ("catch 'testing::internal::<unnamed>::ClassUniqueToAlwaysTrue'");
+  }
+
+  _ml_initialize_gstreamer ();
+
+  /* ignore tizen feature status while running the testcases */
+  set_feature_state (ML_FEATURE, SUPPORTED);
+  set_feature_state (ML_FEATURE_INFERENCE, SUPPORTED);
+  set_feature_state (ML_FEATURE_SERVICE, SUPPORTED);
+
+  try {
+    result = RUN_ALL_TESTS ();
+  } catch (...) {
+    g_warning ("catch `testing::internal::GoogleTestFailureException`");
+  }
+
+  /* Restore feature-check state so later runs re-evaluate it. */
+  set_feature_state (ML_FEATURE, NOT_CHECKED_YET);
+  set_feature_state (ML_FEATURE_INFERENCE, NOT_CHECKED_YET);
+  set_feature_state (ML_FEATURE_SERVICE, NOT_CHECKED_YET);
+
+  return result;
+}