--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <cerrno>
+#include <beyond/beyond.h>
+#include <beyond/plugin/peer_nn_plugin.h>
+
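+// Device-side peer arguments: use the tensorflow-lite framework with CPU acceleration.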
+static char *peer_nn_argv[] = {
+ const_cast<char *>(BEYOND_PLUGIN_PEER_NN_NAME),
+ const_cast<char *>(BEYOND_INFERENCE_OPTION_FRAMEWORK),
+ const_cast<char *>("tensorflow-lite"),
+ const_cast<char *>(BEYOND_INFERENCE_OPTION_FRAMEWORK_ACCEL),
+ const_cast<char *>("cpu"),
+};
+
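+// Edge-side peer arguments: run as a server with /tmp as its storage path.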
+static char *peer_nn_edge_argv[] = {
+ const_cast<char *>(BEYOND_PLUGIN_PEER_NN_NAME),
+ const_cast<char *>(BEYOND_PLUGIN_PEER_NN_ARGUMENT_SERVER),
+ const_cast<char *>(BEYOND_PLUGIN_PEER_NN_ARGUMENT_STORAGE_PATH),
+ const_cast<char *>("/tmp"),
+};
+
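+// argc/argv style option blocks handed to beyond_peer_create().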
+static struct beyond_argument peer_option = {
+ .argc = sizeof(peer_nn_argv) / sizeof(char *),
+ .argv = peer_nn_argv,
+};
+
+static struct beyond_argument peer_edge_option = {
+ .argc = sizeof(peer_nn_edge_argv) / sizeof(char *),
+ .argv = peer_nn_edge_argv,
+};
+
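+// Edge peer descriptor: listen on all interfaces at port 50000.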
+static struct beyond_peer_info s_valid_edge_info = {
+ .name = const_cast<char *>("edge"),
+ .host = const_cast<char *>("0.0.0.0"),
+ .port = { 50000 },
+ .free_memory = 0llu,
+ .free_storage = 0llu,
+ .uuid = "ec0e0cec-d797-4ba5-b698-f2420c74b787",
+};
+
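+// Request inference in remote mode so execution is delegated to a connected peer.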
+static char *inference_argv[] = {
+ const_cast<char *>(BEYOND_INFERENCE_MODE_REMOTE),
+};
+
+static struct beyond_argument inference_option = {
+ .argc = sizeof(inference_argv) / sizeof(char *),
+ .argv = inference_argv,
+};
+
+TEST(inference, positive_beyond_inference_create_with_nullarg)
+{
+ auto handle = beyond_inference_create(nullptr); // A null argument falls back to the default arguments
+ ASSERT_NE(handle, nullptr);
+ beyond_inference_destroy(handle);
+}
+
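+// Creating an inference handle with explicit remote-mode arguments should also succeed.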
+TEST(inference, positive_beyond_inference_create)
+{
+ auto handle = beyond_inference_create(&inference_option);
+ ASSERT_NE(handle, nullptr);
+ beyond_inference_destroy(handle);
+}
+
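+// End-to-end scenario: activate an edge peer, connect a device peer to it,
+// load a model, and verify the input tensor information reported by the runtime.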
+TEST(inference, positive_beyond_inference_get_input_tensor_info_Runtime)
+{
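+ // Connection information of the edge peer as seen from the device side.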
+ beyond_peer_info info = {
+ .name = const_cast<char *>("device"),
+ .host = const_cast<char *>("127.0.0.1"),
+ .port = { 50000 },
+ .free_memory = 0llu,
+ .free_storage = 0llu,
+ .uuid = "01234567-0123-0123-0123-0123456789ab",
+ };
+
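+ // Bring up the edge-side peer first so the device peer has something to connect to.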
+ auto peerEdgeHandle = beyond_peer_create(&peer_edge_option);
+ ASSERT_NE(peerEdgeHandle, nullptr);
+
+ int ret = beyond_peer_set_info(peerEdgeHandle, &s_valid_edge_info);
+ ASSERT_EQ(ret, 0);
+
+ ret = beyond_peer_activate(peerEdgeHandle);
+ ASSERT_EQ(ret, 0);
+
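+ // Create the device-side peer and register the edge's connection information (127.0.0.1:50000).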
+ auto peerHandle = beyond_peer_create(&peer_option);
+ ASSERT_NE(peerHandle, nullptr);
+
+ ret = beyond_peer_set_info(peerHandle, &info);
+ ASSERT_EQ(ret, 0);
+
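+ // Input preprocessing configuration: 320x192 RGB frames, typecast to float32.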
+ beyond_input_image_config image_config = {
+ .format = "RGB",
+ .width = 320,
+ .height = 192,
+ .convert_format = "RGB",
+ .convert_width = 320,
+ .convert_height = 192,
+ .transform_mode = "typecast",
+ .transform_option = "float32"
+ };
+ beyond_input_config input_config;
+ input_config.input_type = BEYOND_INPUT_TYPE_IMAGE;
+ input_config.config.image = image_config;
+ beyond_config config;
+ config.type = BEYOND_CONFIG_TYPE_INPUT;
+ config.object = &input_config;
+
+ ret = beyond_peer_configure(peerHandle, &config);
+ ASSERT_EQ(ret, 0);
+
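+ // Create a remote-mode inference handle and attach the device peer to it.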
+ auto inferenceHandle = beyond_inference_create(&inference_option);
+ ASSERT_NE(inferenceHandle, nullptr);
+
+ ret = beyond_inference_add_peer(inferenceHandle, peerHandle);
+ ASSERT_EQ(ret, 0);
+
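+ // Load the TensorFlow Lite model and prepare the inference pipeline.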
+ const char *tflite_model = "body_detector.tflite";
+ ret = beyond_inference_load_model(inferenceHandle, &tflite_model, 1);
+ ASSERT_EQ(ret, 0);
+
+ ret = beyond_inference_prepare(inferenceHandle);
+ ASSERT_EQ(ret, 0);
+
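+ // Query the input tensor description reported for the loaded model.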
+ const beyond_tensor_info *input_info = nullptr;
+ int nb_inputs = 0;
+ ret = beyond_inference_get_input_tensor_info(inferenceHandle, &input_info, &nb_inputs);
+ ASSERT_EQ(ret, 0);
+
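+ // Expect a single uint8 input tensor matching the configured 320x192 RGB frame.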
+ ASSERT_EQ(nb_inputs, 1);
+ ASSERT_EQ(input_info->type, BEYOND_TENSOR_TYPE_UINT8);
+ ASSERT_EQ(input_info->size, 320 * 192 * 3);
+ ASSERT_NE(input_info->dims, nullptr);
+ ASSERT_EQ(input_info->dims->size, 2);
+ ASSERT_EQ(input_info->dims->data[0], 320);
+ ASSERT_EQ(input_info->dims->data[1], 192);
+
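+ // Shut down the peers and release all handles.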
+ (void)beyond_peer_deactivate(peerEdgeHandle);
+ (void)beyond_inference_remove_peer(inferenceHandle, peerHandle);
+ beyond_inference_destroy(inferenceHandle);
+ beyond_peer_destroy(peerHandle);
+ beyond_peer_destroy(peerEdgeHandle);
+}