Add base code for single shot model.
Update .spec and add simple test cases.
TODO: update name rules (function names and common util)
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
ninja -C build %{?_smp_mflags}
%if 0%{?unit_test}
+ export NNSTREAMER_BUILD_ROOT_PATH=$(pwd)
pushd build
export GST_PLUGIN_PATH=$(pwd)/gst/nnstreamer
export NNSTREAMER_CONF=$(pwd)/nnstreamer-test.ini
%files -n capi-nnstreamer-devel
%{_includedir}/nnstreamer/nnstreamer.h
+%{_includedir}/nnstreamer/nnstreamer-single.h
%{_libdir}/pkgconfig/capi-nnstreamer.pc
%{_libdir}/libcapi-nnstreamer.so
%{_libdir}/libcapi-nnstreamer.a
*/
#include <nnstreamer.h>
+#include <nnstreamer-single.h>
#include <gtest/gtest.h>
#include <glib.h>
#include <glib/gstdio.h> /* GStatBuf */
}
/**
+ * @brief Test NNStreamer single shot
+ */
+TEST (nnstreamer_capi_singleshot, invoke_01)
+{
+ ml_simpleshot_model_h model;
+ nns_tensors_info_s in_info, out_info;
+ nns_tensors_info_s in_res, out_res;
+ tensor_data *input, *output1, *output2;
+ int status;
+
+ const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+ gchar *test_model;
+
+ memset (&in_info, 0, sizeof (nns_tensors_info_s));
+ memset (&out_info, 0, sizeof (nns_tensors_info_s));
+ memset (&in_res, 0, sizeof (nns_tensors_info_s));
+ memset (&out_res, 0, sizeof (nns_tensors_info_s));
+
+ ASSERT_TRUE (root_path != NULL);
+ test_model = g_build_filename (root_path, "tests", "test_models", "models",
+ "mobilenet_v1_1.0_224_quant.tflite", NULL);
+
+ in_info.num_tensors = 1;
+ in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+ in_info.info[0].dimension[0] = 3;
+ in_info.info[0].dimension[1] = 224;
+ in_info.info[0].dimension[2] = 224;
+ in_info.info[0].dimension[3] = 1;
+
+ out_info.num_tensors = 1;
+ out_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+ out_info.info[0].dimension[0] = 1001;
+ out_info.info[0].dimension[1] = 1;
+ out_info.info[0].dimension[2] = 1;
+ out_info.info[0].dimension[3] = 1;
+
+ status = ml_model_open (test_model, &model, &in_info, &out_info,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_NONE);
+
+ /* input tensor in filter */
+ status = ml_model_get_input_type (model, &in_res);
+ EXPECT_EQ (status, NNS_ERROR_NONE);
+
+ EXPECT_TRUE (in_info.num_tensors == in_res.num_tensors);
+ for (guint idx = 0; idx < in_res.num_tensors; idx++) {
+ EXPECT_TRUE (in_info.info[idx].type == in_res.info[idx].type);
+ EXPECT_TRUE (in_info.info[idx].dimension[0] == in_res.info[idx].dimension[0]);
+ EXPECT_TRUE (in_info.info[idx].dimension[1] == in_res.info[idx].dimension[1]);
+ EXPECT_TRUE (in_info.info[idx].dimension[2] == in_res.info[idx].dimension[2]);
+ EXPECT_TRUE (in_info.info[idx].dimension[3] == in_res.info[idx].dimension[3]);
+ }
+
+ /* output tensor in filter */
+ status = ml_model_get_output_type (model, &out_res);
+ EXPECT_EQ (status, NNS_ERROR_NONE);
+
+ EXPECT_TRUE (out_info.num_tensors == out_res.num_tensors);
+ for (guint idx = 0; idx < out_res.num_tensors; idx++) {
+ EXPECT_TRUE (out_info.info[idx].type == out_res.info[idx].type);
+ EXPECT_TRUE (out_info.info[idx].dimension[0] == out_res.info[idx].dimension[0]);
+ EXPECT_TRUE (out_info.info[idx].dimension[1] == out_res.info[idx].dimension[1]);
+ EXPECT_TRUE (out_info.info[idx].dimension[2] == out_res.info[idx].dimension[2]);
+ EXPECT_TRUE (out_info.info[idx].dimension[3] == out_res.info[idx].dimension[3]);
+ }
+
+ /* generate dummy data */
+ input = ml_model_allocate_tensor_data (&in_info);
+ EXPECT_TRUE (input != NULL);
+
+ output1 = ml_model_inference (model, input, NULL);
+ EXPECT_TRUE (output1 != NULL);
+ ml_model_free_tensor_data (output1);
+
+ output2 = ml_model_allocate_tensor_data (&out_info);
+ EXPECT_TRUE (output2 != NULL);
+
+ output1 = ml_model_inference (model, input, output2);
+ EXPECT_TRUE (output1 != NULL);
+ EXPECT_TRUE (output1 == output2);
+ ml_model_free_tensor_data (output2);
+
+ ml_model_free_tensor_data (input);
+
+ status = ml_model_close (model);
+ EXPECT_EQ (status, NNS_ERROR_NONE);
+
+ g_free (test_model);
+}
+
+/**
+ * @brief Test NNStreamer single shot
+ * @detail Start pipeline without tensor info
+ */
+TEST (nnstreamer_capi_singleshot, invoke_02)
+{
+ ml_simpleshot_model_h model;
+ nns_tensors_info_s in_info, out_info;
+ tensor_data *input, *output1, *output2;
+ int status;
+
+ const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+ gchar *test_model;
+
+ memset (&in_info, 0, sizeof (nns_tensors_info_s));
+ memset (&out_info, 0, sizeof (nns_tensors_info_s));
+
+ ASSERT_TRUE (root_path != NULL);
+ test_model = g_build_filename (root_path, "tests", "test_models", "models",
+ "mobilenet_v1_1.0_224_quant.tflite", NULL);
+
+ in_info.num_tensors = 1;
+ in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+ in_info.info[0].dimension[0] = 3;
+ in_info.info[0].dimension[1] = 224;
+ in_info.info[0].dimension[2] = 224;
+ in_info.info[0].dimension[3] = 1;
+
+ out_info.num_tensors = 1;
+ out_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+ out_info.info[0].dimension[0] = 1001;
+ out_info.info[0].dimension[1] = 1;
+ out_info.info[0].dimension[2] = 1;
+ out_info.info[0].dimension[3] = 1;
+
+ status = ml_model_open (test_model, &model, NULL, NULL,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_NONE);
+
+ /* generate dummy data */
+ input = ml_model_allocate_tensor_data (&in_info);
+ EXPECT_TRUE (input != NULL);
+
+ output1 = ml_model_inference (model, input, NULL);
+ EXPECT_TRUE (output1 != NULL);
+ ml_model_free_tensor_data (output1);
+
+ output2 = ml_model_allocate_tensor_data (&out_info);
+ EXPECT_TRUE (output2 != NULL);
+
+ output1 = ml_model_inference (model, input, output2);
+ EXPECT_TRUE (output1 != NULL);
+ EXPECT_TRUE (output1 == output2);
+ ml_model_free_tensor_data (output2);
+
+ ml_model_free_tensor_data (input);
+
+ status = ml_model_close (model);
+ EXPECT_EQ (status, NNS_ERROR_NONE);
+
+ g_free (test_model);
+}
+
+/**
+ * @brief Test NNStreamer single shot
+ * @detail Failure case with invalid param.
+ */
+TEST (nnstreamer_capi_singleshot, failure_01)
+{
+ ml_simpleshot_model_h model;
+ nns_tensors_info_s in_info, out_info;
+ int status;
+
+ const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
+ gchar *test_model;
+
+ memset (&in_info, 0, sizeof (nns_tensors_info_s));
+ memset (&out_info, 0, sizeof (nns_tensors_info_s));
+
+ ASSERT_TRUE (root_path != NULL);
+ test_model = g_build_filename (root_path, "tests", "test_models", "models",
+ "mobilenet_v1_1.0_224_quant.tflite", NULL);
+
+ /* invalid file path */
+ status = ml_model_open ("wrong_file_name", &model, &in_info, &out_info,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+ /* null file path */
+ status = ml_model_open (NULL, &model, &in_info, &out_info,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+ /* invalid handle */
+ status = ml_model_open (test_model, NULL, &in_info, &out_info,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+ /* invalid input tensor info */
+ status = ml_model_open (test_model, &model, &in_info, &out_info,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+ in_info.num_tensors = 1;
+ in_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+ in_info.info[0].dimension[0] = 3;
+ in_info.info[0].dimension[1] = 224;
+ in_info.info[0].dimension[2] = 224;
+ in_info.info[0].dimension[3] = 1;
+
+ /* invalid output tensor info */
+ status = ml_model_open (test_model, &model, &in_info, &out_info,
+ ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+ out_info.num_tensors = 1;
+ out_info.info[0].type = NNS_TENSOR_TYPE_UINT8;
+ out_info.info[0].dimension[0] = 1001;
+ out_info.info[0].dimension[1] = 1;
+ out_info.info[0].dimension[2] = 1;
+ out_info.info[0].dimension[3] = 1;
+
+ /* unknown fw type */
+ status = ml_model_open (test_model, &model, &in_info, &out_info,
+ ML_NNFW_UNKNOWN, ML_NNFW_HW_DO_NOT_CARE);
+ EXPECT_EQ (status, NNS_ERROR_NOT_SUPPORTED);
+
+ /* invalid handle */
+ status = ml_model_close (model);
+ EXPECT_EQ (status, NNS_ERROR_INVALID_PARAMETER);
+
+ g_free (test_model);
+}
+
+/**
* @brief Main gtest
*/
int
* @param[in] model_path This is the path to the neural network model file.
* @param[out] model This is the model opened. Users are required to close
* the given model with ml_model_close().
- * @param[in] inputtype This is required if the given model has flexible input
+ * @param[in] input_type This is required if the given model has flexible input
* dimension, where the input dimension MUST be given
* before executing the model.
* However, once it's given, the input dimension cannot
* be changed for the given model handle.
- * Is is required by some custom filters of nnstreamer.
+ * It is required by some custom filters of nnstreamer.
* You may set NULL if it's not required.
+ * @param[in] output_type This is required if the given model has flexible output dimension.
 * @param[in] nnfw The neural network framework used to open the given
* @model_path. Set ML_NNFW_UNKNOWN to let it auto-detect.
* @param[in] hw Tell the corresponding @nnfw to use a specific hardware.
* same dimension.
*/
int ml_model_open (const char *model_path, ml_simpleshot_model_h *model,
- const nns_tensors_info_s *inputtype, ml_model_nnfw nnfw,
- ml_model_hw hw);
+ const nns_tensors_info_s *input_type, const nns_tensors_info_s *output_type,
+ ml_model_nnfw nnfw, ml_model_hw hw);
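+/**
+ * A minimal usage sketch (illustrative only; assumes a .tflite model whose
+ * dimensions are fixed, so both type parameters may be NULL):
+ * @code
+ * ml_simpleshot_model_h model;
+ * int status = ml_model_open ("model.tflite", &model, NULL, NULL,
+ *     ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ * if (status == NNS_ERROR_NONE)
+ *   ml_model_close (model);
+ * @endcode
+ */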
/**
* @brief Close the opened model handle.
* types are available.
* @since_tizen 5.5
* @param[in] model The model to be investigated
- * @param[out] inputtype The type of input tensor.
+ * @param[out] input_type The type of input tensor.
 * @return @c 0 on success. Otherwise a negative error value
* @retval #NNS_ERROR_NONE Successful
*/
-int ml_model_get_inputtype (ml_simpleshot_model_h model,
- nns_tensors_info_s *inputtype);
+int ml_model_get_input_type (ml_simpleshot_model_h model,
+ nns_tensors_info_s *input_type);
/**
* @brief Get type (tensor dimension, type, name and so on) of output
* types are available.
* @since_tizen 5.5
* @param[in] model The model to be investigated
- * @param[out] outputtype The type of output tensor.
+ * @param[out] output_type The type of output tensor.
 * @return @c 0 on success. Otherwise a negative error value
* @retval #NNS_ERROR_NONE Successful
*/
-int ml_model_get_outputtype (ml_simpleshot_model_h model,
- nns_tensors_info_s *outputtype);
+int ml_model_get_output_type (ml_simpleshot_model_h model,
+ nns_tensors_info_s *output_type);
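+/**
+ * A short query sketch (illustrative; assumes a model handle opened with
+ * ml_model_open()):
+ * @code
+ * nns_tensors_info_s in_info, out_info;
+ * ml_model_get_input_type (model, &in_info);
+ * ml_model_get_output_type (model, &out_info);
+ * @endcode
+ */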
/**
* @brief Get the byte size of the given tensor type.
* @since_tizen 5.5
- * @param[in] tensor_type The tensor type to be investigated.
+ * @param[in] info The tensor information to be investigated.
- * @return @c >= 0 on success with byte size. otherwise a negative error value
+ * @return @c Byte size of the tensor data. @c 0 if the given info is invalid.
*/
-int ml_model_get_tensor_size (const nns_tensor_info_s *tensor_type);
+size_t ml_util_get_tensor_size (const nns_tensor_info_s *info);
/**
* @brief Get the byte size of the given tensors type.
* @since_tizen 5.5
- * @param[in] tensors_type The tensors type to be investigated.
+ * @param[in] info The tensors information to be investigated.
- * @return @c >= 0 on success with byte size. otherwise a negative error value
+ * @return @c Total byte size of the tensors data. @c 0 if the given info is invalid.
*/
-int ml_model_get_tensors_size (const nns_tensors_info_s *tensors_type);
+size_t ml_util_get_tensors_size (const nns_tensors_info_s *info);
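+/**
+ * A worked example (illustrative): a UINT8 tensor of dimension 3:224:224:1
+ * occupies 1 * 3 * 224 * 224 * 1 = 150528 bytes, and ml_util_get_tensors_size()
+ * sums this value over every tensor described in the given info.
+ */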
/**
- * @brief Free the tensors type pointer
+ * @brief Free the tensors type pointer.
* @since_tizen 5.5
* @param[in] type the tensors type pointer to be freed.
*/
-void ml_model_free_tensorsinfo (nns_tensors_info_s *type);
+void ml_model_free_tensors_info (nns_tensors_info_s *type);
/**
- * @brief Free the tensors data pointer
+ * @brief Free the tensors data pointer.
* @since_tizen 5.5
* @param[in] tensor the tensors data pointer to be freed.
*/
-void ml_model_free_tensordata (tensor_data *tensor);
+void ml_model_free_tensor_data (tensor_data *tensor);
/**
* @brief Allocate a tensor data frame with the given tensors type.
* @since_tizen 5.5
- * @param[in] type the tensors type pointer for the allocation
+ * @param[in] info The tensors information for the allocation
 * @return @c Allocated tensors data pointer. NULL if error.
 * @retval NULL There is an error. Call get_last_result() to get the specific
 * error number.
*/
-tensor_data *ml_model_allocate_tensors (const nns_tensors_info_s *type);
+tensor_data *ml_model_allocate_tensor_data (const nns_tensors_info_s *info);
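+/**
+ * Allocation and release pair up as below (a sketch; out_info is assumed to
+ * be filled beforehand, e.g., via ml_model_get_output_type()):
+ * @code
+ * tensor_data *data = ml_model_allocate_tensor_data (&out_info);
+ * if (data != NULL) {
+ *   memset (data->tensor[0], 0, data->size[0]);
+ *   ml_model_free_tensor_data (data);
+ * }
+ * @endcode
+ */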
/**
* @brief Check the availability of the given execution environments.
NNS_TENSOR_TYPE_FLOAT32, /**< Float 32bit */
NNS_TENSOR_TYPE_INT64, /**< Integer 64bit */
NNS_TENSOR_TYPE_UINT64, /**< Unsigned integer 64bit */
+ ML_TENSOR_TYPE_UNKNOWN /**< Unknown type */
} nns_tensor_type_e;
/**
capi_main = []
capi_main += join_paths(meson.current_source_dir(), 'src', 'tizen-api-pipeline.c')
+capi_main += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-single.c')
capi_devel_main = []
capi_devel_main += join_paths(meson.current_source_dir(), 'include', 'nnstreamer.h')
+capi_devel_main += join_paths(meson.current_source_dir(), 'include', 'nnstreamer-single.h')
inc = include_directories('include')
nninc = include_directories('../gst')
* @bug No known bugs except for NYI items
*/
+#include <gst/app/app.h>
+
#include <nnstreamer.h> /* Uses NNStreamer/Pipeline C-API */
+#include <nnstreamer-single.h>
+#include <tizen-api-private.h>
+#include <nnstreamer/nnstreamer_plugin_api.h>
typedef struct
{
nns_pipeline_h pipe;
+
+ GstElement *src;
+ GstElement *sink;
+ GstElement *filter;
} ml_simpleshot_model;
+
+/**
+ * @brief Check the given tensor info is valid.
+ * @todo move this function to common
+ */
+static int
+ml_util_validate_tensor_info (const nns_tensor_info_s * info)
+{
+ unsigned int i;
+
+ if (!info)
+ return FALSE;
+
+ if (info->type < 0 || info->type >= ML_TENSOR_TYPE_UNKNOWN)
+ return FALSE;
+
+ for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
+ if (info->dimension[i] == 0)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
/**
- * ml_simpleshot_model *model = g_new0 (ml_simpleshot_model, 1);
- * ml_simpleshot_model_h *model_h;
- * *model_h = model;
+ * @brief Check the given tensors info is valid.
+ * @todo move this function to common
*/
+static int
+ml_util_validate_tensors_info (const nns_tensors_info_s * info)
+{
+ unsigned int i;
+
+ if (!info || info->num_tensors < 1)
+ return FALSE;
+
+ for (i = 0; i < info->num_tensors; i++) {
+ if (!ml_util_validate_tensor_info (&info->info[i]))
+ return FALSE;
+ }
+
+ return TRUE;
+}
/**
- * @brief Refer to nnstreamer-single.h
+ * @brief Get caps from tensors info.
+ */
+static GstCaps *
+ml_model_get_caps_from_tensors_info (const nns_tensors_info_s * info)
+{
+ GstCaps *caps;
+ GstTensorsConfig config;
+
+ if (!info)
+ return NULL;
+
+ /** @todo Make common structure for tensor config */
+ memcpy (&config.info, info, sizeof (GstTensorsInfo));
+
+ /* set framerate 0/1 */
+ config.rate_n = 0;
+ config.rate_d = 1;
+
+ /* If the number of tensors is 1, use the caps for a single (non-array) tensor. */
+ if (config.info.num_tensors == 1) {
+ GstTensorConfig c;
+
+ gst_tensor_info_copy (&c.info, &config.info.info[0]);
+ c.rate_n = 0;
+ c.rate_d = 1;
+
+ caps = gst_tensor_caps_from_config (&c);
+ gst_tensor_info_free (&c.info);
+ } else {
+ caps = gst_tensors_caps_from_config (&config);
+ }
+
+ return caps;
+}
+
+/**
+ * @brief Open an ML model and return the model as a handle. (more info in nnstreamer-single.h)
*/
int
ml_model_open (const char *model_path, ml_simpleshot_model_h * model,
- const nns_tensors_info_s * inputtype, ml_model_nnfw nnfw, ml_model_hw hw)
+ const nns_tensors_info_s * input_type, const nns_tensors_info_s * output_type,
+ ml_model_nnfw nnfw, ml_model_hw hw)
{
- ml_simpleshot_model_h *_model;
+ ml_simpleshot_model *model_h;
+ nns_pipeline_h pipe;
+ nns_pipeline *pipe_h;
+ GstElement *appsrc, *appsink, *filter;
+ GstCaps *caps;
int ret = NNS_ERROR_NONE;
- char *pipedesc; /* pipeline description */
+ gchar *pipeline_desc = NULL;
+
+ /* Validate the params */
+ if (!model) {
+ dloge ("The given param, model is invalid.");
+ return NNS_ERROR_INVALID_PARAMETER;
+ }
+
+ /* init null */
+ *model = NULL;
+
+ if (!model_path || !g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
+ dloge ("The given param, model path [%s] is invalid.",
+ GST_STR_NULL (model_path));
+ return NNS_ERROR_INVALID_PARAMETER;
+ }
+
+ if (input_type && !ml_util_validate_tensors_info (input_type)) {
+ dloge ("The given param, input tensor info is invalid.");
+ return NNS_ERROR_INVALID_PARAMETER;
+ }
+
+ if (output_type && !ml_util_validate_tensors_info (output_type)) {
+ dloge ("The given param, output tensor info is invalid.");
+ return NNS_ERROR_INVALID_PARAMETER;
+ }
/* 1. Determine nnfw */
+ /** @todo Check nnfw with file extension. */
+ switch (nnfw) {
+ case ML_NNFW_CUSTOM_FILTER:
+ pipeline_desc =
+ g_strdup_printf
+ ("appsrc name=srcx ! tensor_filter name=filterx framework=custom model=%s ! appsink name=sinkx async=false sync=false",
+ model_path);
+ break;
+ case ML_NNFW_TENSORFLOW_LITE:
+ if (!g_str_has_suffix (model_path, ".tflite")) {
+ dloge ("The given model file [%s] has invalid extension.", model_path);
+ return NNS_ERROR_INVALID_PARAMETER;
+ }
+
+ pipeline_desc =
+ g_strdup_printf
+ ("appsrc name=srcx ! tensor_filter name=filterx framework=tensorflow-lite model=%s ! appsink name=sinkx async=false sync=false",
+ model_path);
+ break;
+ default:
+ /** @todo Add other fw later. */
+ dloge ("The given nnfw is not supported.");
+ return NNS_ERROR_NOT_SUPPORTED;
+ }
/* 2. Determine hw */
+ /** @todo The param hw is ignored for now (CPU only is supposed). Support other hardware later. */
+
+ /* 3. Construct a pipeline */
+ ret = nns_pipeline_construct (pipeline_desc, &pipe);
+ g_free (pipeline_desc);
+ if (ret != NNS_ERROR_NONE) {
+ /* Failed to construct pipeline. */
+ return ret;
+ }
+
+ /* 4. Allocate */
+ pipe_h = (nns_pipeline *) pipe;
+ appsrc = gst_bin_get_by_name (GST_BIN (pipe_h->element), "srcx");
+ appsink = gst_bin_get_by_name (GST_BIN (pipe_h->element), "sinkx");
+ filter = gst_bin_get_by_name (GST_BIN (pipe_h->element), "filterx");
+
+ model_h = g_new0 (ml_simpleshot_model, 1);
+ *model = model_h;
+
+ model_h->pipe = pipe;
+ model_h->src = appsrc;
+ model_h->sink = appsink;
+ model_h->filter = filter;
+
+ /* 5. Set in/out caps */
+ if (input_type) {
+ caps = ml_model_get_caps_from_tensors_info (input_type);
+ } else {
+ nns_tensors_info_s in_info;
+
+ ml_model_get_input_type (model_h, &in_info);
+ if (!ml_util_validate_tensors_info (&in_info)) {
+ dloge ("Failed to get the input tensor info.");
+ ret = NNS_ERROR_INVALID_PARAMETER;
+ goto error;
+ }
+
+ caps = ml_model_get_caps_from_tensors_info (&in_info);
+ }
+
+ gst_app_src_set_caps (GST_APP_SRC (appsrc), caps);
+ gst_caps_unref (caps);
+
+ if (output_type) {
+ caps = ml_model_get_caps_from_tensors_info (output_type);
+ } else {
+ nns_tensors_info_s out_info;
+
+ ml_model_get_output_type (model_h, &out_info);
+ if (!ml_util_validate_tensors_info (&out_info)) {
+ dloge ("Failed to get the output tensor info.");
+ ret = NNS_ERROR_INVALID_PARAMETER;
+ goto error;
+ }
+
+ caps = ml_model_get_caps_from_tensors_info (&out_info);
+ }
+
+ gst_app_sink_set_caps (GST_APP_SINK (appsink), caps);
+ gst_caps_unref (caps);
+
+ /* 6. Start pipeline */
+ ret = nns_pipeline_start (pipe);
+ if (ret != NNS_ERROR_NONE) {
+ /* Failed to start the pipeline. */
+ goto error;
+ }
+
+ return NNS_ERROR_NONE;
+
+error:
+ ml_model_close (model_h);
+ *model = NULL;
+ return ret;
+}
+
+/**
+ * @brief Close the opened model handle. (more info in nnstreamer-single.h)
+ */
+int
+ml_model_close (ml_simpleshot_model_h model)
+{
+ ml_simpleshot_model *model_h;
+ int ret;
+
+ if (!model) {
+ dloge ("The given param, model is invalid.");
+ return NNS_ERROR_INVALID_PARAMETER;
+ }
+
+ model_h = (ml_simpleshot_model *) model;
+
+ if (model_h->src) {
+ gst_object_unref (model_h->src);
+ model_h->src = NULL;
+ }
+
+ if (model_h->sink) {
+ gst_object_unref (model_h->sink);
+ model_h->sink = NULL;
+ }
+
+ if (model_h->filter) {
+ gst_object_unref (model_h->filter);
+ model_h->filter = NULL;
+ }
+
+ ret = nns_pipeline_destroy (model_h->pipe);
+ g_free (model_h);
+ return ret;
+}
+
+/**
+ * @brief Invoke the model with the given input data. (more info in nnstreamer-single.h)
+ */
+tensor_data *
+ml_model_inference (ml_simpleshot_model_h model,
+ const tensor_data * input, tensor_data * output)
+{
+ ml_simpleshot_model *model_h;
+ nns_tensors_info_s out_info;
+ tensor_data *result;
+ GstSample *sample;
+ GstBuffer *buffer;
+ GstMemory *mem;
+ GstMapInfo mem_info;
+ GstFlowReturn ret;
+ int i, status;
+
+ if (!model || !input) {
+ dloge ("The given param is invalid.");
+ return NULL;
+ }
+
+ model_h = (ml_simpleshot_model *) model;
+
+ status = ml_model_get_output_type (model, &out_info);
+ if (status != NNS_ERROR_NONE)
+ return NULL;
+
+ /* Validate output memory and size */
+ if (output) {
+ if (output->num_tensors != out_info.num_tensors) {
+ dloge ("Invalid output data, the number of output tensors is different.");
+ return NULL;
+ }
+
+ for (i = 0; i < output->num_tensors; i++) {
+ if (!output->tensor[i] ||
+ output->size[i] !=
+ ml_util_get_tensor_size (&out_info.info[i])) {
+ dloge ("Invalid output data, the size of the output tensor is different.");
+ return NULL;
+ }
+ }
+ }
+
+ buffer = gst_buffer_new ();
+
+ for (i = 0; i < input->num_tensors; i++) {
+ mem = gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY,
+ input->tensor[i], input->size[i], 0, input->size[i], NULL, NULL);
+ gst_buffer_append_memory (buffer, mem);
+ }
+
+ ret = gst_app_src_push_buffer (GST_APP_SRC (model_h->src), buffer);
+ if (ret != GST_FLOW_OK) {
+ dloge ("Cannot push a buffer into source element.");
+ return NULL;
+ }
+
+ /* Try to get the result */
+ sample =
+ gst_app_sink_try_pull_sample (GST_APP_SINK (model_h->sink), GST_SECOND);
+ if (!sample) {
+ dloge ("Failed to get the result from sink element.");
+ return NULL;
+ }
+
+ if (output) {
+ result = output;
+ } else {
+ result = ml_model_allocate_tensor_data (&out_info);
+ }
- /* 3. Determine input dimension ==> caps_filter string */
+ if (!result) {
+ dloge ("Failed to allocate the memory block.");
+ gst_sample_unref (sample);
+ return NULL;
+ }
- /* 4. Construct a pipeline */
- _model = g_new (ml_simpleshot_model, 1);
- ret = nns_pipeline_construct (pipedesc, &_model->pipe);
+ /* Copy the result */
+ buffer = gst_sample_get_buffer (sample);
+ for (i = 0; i < result->num_tensors; i++) {
+ mem = gst_buffer_peek_memory (buffer, i);
+ gst_memory_map (mem, &mem_info, GST_MAP_READ);
- /* 5. Allocate */
- *model = _model;
+ memcpy (result->tensor[i], mem_info.data, mem_info.size);
+
+ gst_memory_unmap (mem, &mem_info);
+ }
+
+ gst_sample_unref (sample);
+ return result;
+}
+
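+/*
+ * A minimal invocation sketch (illustrative, mirroring the unit tests):
+ *   tensor_data *in = ml_model_allocate_tensor_data (&in_info);
+ *   tensor_data *out = ml_model_inference (model, in, NULL);
+ *   if (out != NULL)
+ *     ml_model_free_tensor_data (out);
+ *   ml_model_free_tensor_data (in);
+ */
+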
+/**
+ * @brief Get type (tensor dimension, type, name and so on) of required input data for the given model. (more info in nnstreamer-single.h)
+ */
+int
+ml_model_get_input_type (ml_simpleshot_model_h model,
+ nns_tensors_info_s * input_type)
+{
+ ml_simpleshot_model *model_h;
+ GstTensorsInfo info;
+ gchar *val;
+ guint rank;
+
+ if (!model || !input_type)
+ return NNS_ERROR_INVALID_PARAMETER;
+
+ model_h = (ml_simpleshot_model *) model;
+
+ gst_tensors_info_init (&info);
+
+ g_object_get (model_h->filter, "input", &val, NULL);
+ rank = gst_tensors_info_parse_dimensions_string (&info, val);
+ g_free (val);
+
+ /* set the number of tensors */
+ info.num_tensors = rank;
+
+ g_object_get (model_h->filter, "inputtype", &val, NULL);
+ rank = gst_tensors_info_parse_types_string (&info, val);
+ g_free (val);
+
+ if (info.num_tensors != rank) {
+ dlogw ("Invalid state, input tensor type is mismatched in filter.");
+ }
+
+ g_object_get (model_h->filter, "inputname", &val, NULL);
+ rank = gst_tensors_info_parse_names_string (&info, val);
+ g_free (val);
+
+ if (info.num_tensors != rank) {
+ dlogw ("Invalid state, input tensor name is mismatched in filter.");
+ }
+ /** @todo Make common structure for tensor config */
+ memcpy (input_type, &info, sizeof (GstTensorsInfo));
+ return NNS_ERROR_NONE;
+}
+
+/**
+ * @brief Get type (tensor dimension, type, name and so on) of output data of the given model. (more info in nnstreamer-single.h)
+ */
+int
+ml_model_get_output_type (ml_simpleshot_model_h model,
+ nns_tensors_info_s * output_type)
+{
+ ml_simpleshot_model *model_h;
+ GstTensorsInfo info;
+ gchar *val;
+ guint rank;
+
+ if (!model || !output_type)
+ return NNS_ERROR_INVALID_PARAMETER;
+
+ model_h = (ml_simpleshot_model *) model;
+
+ gst_tensors_info_init (&info);
+
+ g_object_get (model_h->filter, "output", &val, NULL);
+ rank = gst_tensors_info_parse_dimensions_string (&info, val);
+ g_free (val);
+
+ /* set the number of tensors */
+ info.num_tensors = rank;
+
+ g_object_get (model_h->filter, "outputtype", &val, NULL);
+ rank = gst_tensors_info_parse_types_string (&info, val);
+ g_free (val);
+
+ if (info.num_tensors != rank) {
+ dlogw ("Invalid state, output tensor type is mismatched in filter.");
+ }
+
+ g_object_get (model_h->filter, "outputname", &val, NULL);
+ rank = gst_tensors_info_parse_names_string (&info, val);
+ g_free (val);
+
+ if (info.num_tensors != rank) {
+ dlogw ("Invalid state, output tensor name is mismatched in filter.");
+ }
+ /** @todo Make common structure for tensor config */
+ memcpy (output_type, &info, sizeof (GstTensorsInfo));
+ return NNS_ERROR_NONE;
+}
+
+/**
+ * @brief Get the byte size of the given tensor type. (more info in nnstreamer-single.h)
+ */
+size_t
+ml_util_get_tensor_size (const nns_tensor_info_s * info)
+{
+ size_t tensor_size;
+ gint i;
+
+ if (!info) {
+ dloge ("The given param tensor info is invalid.");
+ return 0;
+ }
+
+ switch (info->type) {
+ case NNS_TENSOR_TYPE_INT8:
+ case NNS_TENSOR_TYPE_UINT8:
+ tensor_size = 1;
+ break;
+ case NNS_TENSOR_TYPE_INT16:
+ case NNS_TENSOR_TYPE_UINT16:
+ tensor_size = 2;
+ break;
+ case NNS_TENSOR_TYPE_INT32:
+ case NNS_TENSOR_TYPE_UINT32:
+ case NNS_TENSOR_TYPE_FLOAT32:
+ tensor_size = 4;
+ break;
+ case NNS_TENSOR_TYPE_FLOAT64:
+ case NNS_TENSOR_TYPE_INT64:
+ case NNS_TENSOR_TYPE_UINT64:
+ tensor_size = 8;
+ break;
+ default:
+ dloge ("The given param info has an invalid tensor type.");
+ return 0;
+ }
+
+ for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
+ tensor_size *= info->dimension[i];
+ }
+
+ return tensor_size;
+}
+
+/**
+ * @brief Get the byte size of the given tensors info. (more info in nnstreamer-single.h)
+ */
+size_t
+ml_util_get_tensors_size (const nns_tensors_info_s * info)
+{
+ size_t tensor_size;
+ gint i;
+
+ if (!info) {
+ dloge ("The given param info is invalid.");
+ return 0;
+ }
+
+ tensor_size = 0;
+ for (i = 0; i < info->num_tensors; i++) {
+ tensor_size += ml_util_get_tensor_size (&info->info[i]);
+ }
+
+ return tensor_size;
+}
+
+/**
+ * @brief Free the tensors type pointer. (more info in nnstreamer-single.h)
+ */
+void
+ml_model_free_tensors_info (nns_tensors_info_s * type)
+{
+ /** @todo Make common structure for tensor config and use gst_tensors_info_free () */
+}
+
+/**
+ * @brief Free the tensors data pointer. (more info in nnstreamer-single.h)
+ */
+void
+ml_model_free_tensor_data (tensor_data * tensor)
+{
+ gint i;
+
+ if (!tensor) {
+ dloge ("The given param tensor is invalid.");
+ return;
+ }
+
+ for (i = 0; i < tensor->num_tensors; i++) {
+ if (tensor->tensor[i]) {
+ g_free (tensor->tensor[i]);
+ tensor->tensor[i] = NULL;
+ }
+
+ tensor->size[i] = 0;
+ }
+
+ tensor->num_tensors = 0;
+}
+
+/**
+ * @brief Allocate a tensor data frame with the given tensors type. (more info in nnstreamer-single.h)
+ */
+tensor_data *
+ml_model_allocate_tensor_data (const nns_tensors_info_s * info)
+{
+ tensor_data *data;
+ gint i;
+
+ if (!info) {
+ dloge ("The given param info is invalid.");
+ return NULL;
+ }
+
+ data = g_new0 (tensor_data, 1);
+ if (!data) {
+ dloge ("Failed to allocate the memory block.");
+ return NULL;
+ }
+
+ data->num_tensors = info->num_tensors;
+ for (i = 0; i < data->num_tensors; i++) {
+ data->size[i] = ml_util_get_tensor_size (&info->info[i]);
+ data->tensor[i] = g_malloc0 (data->size[i]);
+ }
+
+ return data;
+}
+
+/**
+ * @brief Check the availability of the given execution environments. (more info in nnstreamer-single.h)
+ */
+int
+ml_model_check_nnfw (ml_model_nnfw nnfw, ml_model_hw hw)
+{
+ /** @todo fill this function */
+ return 0;
}
g_mutex_unlock (&p->lock);
g_mutex_clear (&p->lock);
+
+ g_free (p);
return NNS_ERROR_NONE;
}