This patch adds input and output ranks to ml_single.
The stored ranks are used when the user explicitly sets the rank through the input/output dimension property, so that get_property returns the dimension string with the same rank.
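For example (a minimal sketch based on the new unit test; error handling is omitted and the model path is illustrative):

    ml_single_h single;
    gchar *dim_str = NULL;

    ml_single_open (&single, "add.tflite", NULL, NULL,
        ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);

    /* Explicitly set a rank-2 input dimension. */
    ml_single_set_property (single, "input", "5:1");

    /* The stored rank is used when reading the dimension back. */
    ml_single_get_property (single, "input", &dim_str);
    /* dim_str is "5:1" rather than a rank-padded "5:1:1:1". */
    g_free (dim_str);

    ml_single_close (single);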
Signed-off-by: Yelin Jeong <yelini.jeong@samsung.com>
}
/**
+ * @brief Initializes the rank information with default value.
+ */
+int
+_ml_tensors_rank_initialize (guint * rank)
+{
+ guint i;
+
+ if (!rank)
+ _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
+ "The parameter, rank, is NULL. Provide a valid pointer.");
+
+ for (i = 0; i < ML_TENSOR_SIZE_LIMIT; i++) {
+ rank[i] = 0;
+ }
+
+ return ML_ERROR_NONE;
+}
+
+/**
* @brief Compares the given tensor info.
*/
static gboolean
ml_tensors_data_s in_tensors; /**< input tensor wrapper for processing */
ml_tensors_data_s out_tensors; /**< output tensor wrapper for processing */
+ /** @todo Use only ml_tensor_info_s dimension instead of saving ranks value */
+ guint input_ranks[ML_TENSOR_SIZE_LIMIT]; /**< the rank of each input tensor, calculated from the dimension string */
+ guint output_ranks[ML_TENSOR_SIZE_LIMIT]; /**< the rank of each output tensor, calculated from the dimension string */
+
GList *destroy_data_list; /**< data to be freed by filter */
} ml_single;
_ml_tensors_info_initialize (&single_h->in_info);
_ml_tensors_info_initialize (&single_h->out_info);
+ _ml_tensors_rank_initialize (single_h->input_ranks);
+ _ml_tensors_rank_initialize (single_h->output_ranks);
g_mutex_init (&single_h->mutex);
g_cond_init (&single_h->cond);
num = gst_tensors_info_parse_types_string (&gst_info, value);
else if (g_str_has_suffix (name, "name"))
num = gst_tensors_info_parse_names_string (&gst_info, value);
- else
- num = gst_tensors_info_parse_dimensions_string (&gst_info, value);
+ else {
+ guint *rank;
+ gchar **str_dims;
+ guint i;
+
+ if (is_input) {
+ rank = single_h->input_ranks;
+ } else {
+ rank = single_h->output_ranks;
+ }
+
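+ /* Split the dimension string (e.g., "5:1,3:3") and record the rank of each tensor. */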
+ str_dims = g_strsplit_set (value, ",.", -1);
+ num = g_strv_length (str_dims);
+
+ if (num > ML_TENSOR_SIZE_LIMIT) {
+ _ml_error_report ("Invalid param, dimensions (%d) max (%d)\n",
+ num, ML_TENSOR_SIZE_LIMIT);
+
+ num = ML_TENSOR_SIZE_LIMIT;
+ }
+
+ for (i = 0; i < num; ++i) {
+ rank[i] = gst_tensor_parse_dimension (str_dims[i],
+ gst_info.info[i].dimension);
+ }
+ g_strfreev (str_dims);
+ }
if (num == gst_info.num_tensors) {
ml_tensors_info_h ml_info;
ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
- if (g_str_equal (name, "input") || g_str_equal (name, "inputtype") ||
- g_str_equal (name, "inputname") || g_str_equal (name, "inputlayout") ||
- g_str_equal (name, "output") || g_str_equal (name, "outputtype") ||
+ if (g_str_equal (name, "inputtype") || g_str_equal (name, "inputname") ||
+ g_str_equal (name, "inputlayout") || g_str_equal (name, "outputtype") ||
g_str_equal (name, "outputname") || g_str_equal (name, "outputlayout") ||
g_str_equal (name, "accelerator") || g_str_equal (name, "custom")) {
/* string */
/* boolean */
g_object_get (G_OBJECT (single_h->filter), name, &bool_value, NULL);
*value = (bool_value) ? g_strdup ("true") : g_strdup ("false");
+ } else if (g_str_equal (name, "input") || g_str_equal (name, "output")) {
+ gchar *dim_str = NULL;
+ const guint *rank;
+ gboolean is_input = g_str_has_prefix (name, "input");
+ GstTensorsInfo gst_info;
+
+ if (is_input) {
+ rank = single_h->input_ranks;
+ } else {
+ rank = single_h->output_ranks;
+ }
+
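+ /* Rebuild the dimension string using the rank saved at set_property time. */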
+ ml_single_get_gst_info (single_h, is_input, &gst_info);
+
+ if (gst_info.num_tensors > 0) {
+ guint i;
+ GString *dimensions = g_string_new (NULL);
+
+ for (i = 0; i < gst_info.num_tensors; ++i) {
+ dim_str = gst_tensor_get_rank_dimension_string (
+ gst_info.info[i].dimension, rank[i]);
+ g_string_append (dimensions, dim_str);
+
+ if (i < gst_info.num_tensors - 1) {
+ g_string_append (dimensions, ",");
+ }
+ g_free (dim_str);
+ }
+ dim_str = g_string_free (dimensions, FALSE);
+ } else {
+ dim_str = g_strdup ("");
+ }
+
+ gst_tensors_info_free (&gst_info);
+ *value = dim_str;
} else {
_ml_error_report
("The property key, '%s', is not available for get_property and not recognized by the API. It should be one of {input, inputtype, inputname, inputlayout, output, outputtype, outputname, outputlayout, accelerator, custom, is-updatable}.",
int _ml_tensors_info_initialize (ml_tensors_info_s *info);
/**
+ * @brief Initializes the rank information with default value.
+ * @since_tizen 7.5
+ * @param[in] rank The rank array to be initialized.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int _ml_tensors_rank_initialize (guint *rank);
+
+/**
* @brief Frees and initialize the data in tensors info.
* @since_tizen 5.5
* @param[in] info The tensors info pointer to be freed.
g_free (test_model);
}
+/**
+ * @brief Test the input/output dimension properties of ml_single with explicitly set ranks
+ * @details Set dimension strings of various ranks and check that get_property returns the same strings.
+ */
+TEST (nnstreamer_capi_singleshot, property_05_p)
+{
+ ml_single_h single;
+ char *prop_value;
+ int status;
+
+ const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
+ gchar *test_model;
+
+ /* supposed to run test in build directory */
+ if (root_path == NULL)
+ root_path = "..";
+
+ /** add.tflite adds value 2 to all the values in the input */
+ test_model = g_build_filename (
+ root_path, "tests", "test_models", "models", "add.tflite", NULL);
+ ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
+
+ status = ml_single_open (&single, test_model, NULL, NULL,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
+ if (is_enabled_tensorflow_lite) {
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ } else {
+ EXPECT_NE (status, ML_ERROR_NONE);
+ goto skip_test;
+ }
+
+ status = ml_single_set_property (single, "input", "5:1:1:1");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "input", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5:1:1:1");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "input", "5:1:1");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "input", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5:1:1");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "input", "5:1");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "input", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5:1");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "input", "5");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "input", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "output", "5:1:1:1");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "output", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5:1:1:1");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "output", "5:1:1");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "output", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5:1:1");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "output", "5:1");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "output", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5:1");
+ g_free (prop_value);
+
+ status = ml_single_set_property (single, "output", "5");
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_single_get_property (single, "output", &prop_value);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ EXPECT_STREQ (prop_value, "5");
+ g_free (prop_value);
+
+ status = ml_single_close (single);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+skip_test:
+ g_free (test_model);
+}
+
#ifdef ENABLE_NNFW_RUNTIME
/**
* @brief Test NNStreamer single shot (nnfw backend)