int ml_tizen_convert_element (ml_pipeline_h pipe, gchar ** result);
#endif
+/**
+ * @brief Creates a tensor data frame without buffer with the given tensors information.
+ * @param[in] info The handle of tensors information for the allocation.
+ * @param[out] data The handle of tensors data.
+ * @return @c 0 on success. Otherwise a negative error value.
+ */
+int ml_tensors_data_create_no_alloc (const ml_tensors_info_h info, ml_tensors_data_h *data);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
)
# New single-shot c-api
-
capi_single_new_main = []
+capi_single_new_main += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-pipeline.c')
capi_single_new_main += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-util.c')
capi_single_new_main += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-single-new.c')
capi_single_new_main += join_paths(meson.current_source_dir(), 'src', 'tensor_filter_single.c')
* @author MyungJoo Ham <myungjoo.ham@samsung.com>
* @author Parichay Kapoor <pk.kapoor@samsung.com>
* @bug No known bugs except for NYI items
+ * @todo Complete the support for timeout
*/
#include <string.h>
#include "tensor_filter_single.h"
+/**
+ * @brief Default time to wait for an output, in milliseconds (3 seconds).
+ */
+#define SINGLE_DEFAULT_TIMEOUT 3000
+
/* ML single api data structure for handle */
typedef struct
{
str_type = gst_tensors_info_get_types_string (&info);
str_name = gst_tensors_info_get_names_string (&info);
- str_type_name = g_strdup_printf("%s%s", prefix, "type");
- str_name_name = g_strdup_printf("%s%s", prefix, "name");
+ str_type_name = g_strdup_printf ("%s%s", prefix, "type");
+ str_name_name = g_strdup_printf ("%s%s", prefix, "name");
if (!str_dim || !str_type || !str_name || !str_type_name || !str_name_name) {
status = ML_ERROR_INVALID_PARAMETER;
* @brief Check the availability of the nnfw type and model
*/
static int
-ml_single_check_nnfw (const char *model, ml_nnfw_type_e *nnfw)
+ml_single_check_nnfw (const char *model, ml_nnfw_type_e * nnfw)
{
gchar *path_down;
int status = ML_ERROR_NONE;
*/
switch (nnfw) {
case ML_NNFW_TYPE_CUSTOM_FILTER:
- g_object_set (filter_obj, "framework", "custom",
- "model", model, NULL);
+ g_object_set (filter_obj, "framework", "custom", "model", model, NULL);
break;
case ML_NNFW_TYPE_TENSORFLOW_LITE:
/* We can get the tensor meta from tf-lite model. */
ml_tensors_info_h in_info;
status = ML_ERROR_INVALID_PARAMETER;
- if (!klass->input_configured(single_h->filter))
+ if (!klass->input_configured (single_h->filter))
goto error;
status = ml_single_get_input_info (single_h, &in_info);
ml_tensors_info_h out_info;
status = ML_ERROR_INVALID_PARAMETER;
- if (!klass->output_configured(single_h->filter))
+ if (!klass->output_configured (single_h->filter))
goto error;
status = ml_single_get_output_info (single_h, &out_info);
ml_single_invoke (ml_single_h single,
const ml_tensors_data_h input, ml_tensors_data_h * output)
{
- /**
- * @todo:
- * Setup input and output buffer
- * Output buffer
- * Do invoke
- * return result
- */
+ ml_single *single_h;
+ ml_tensors_data_s *in_data, *result;
+ GstTensorMemory in_tensors[NNS_TENSOR_SIZE_LIMIT];
+ GstTensorMemory out_tensors[NNS_TENSOR_SIZE_LIMIT];
+ GTensorFilterSingleClass *klass;
+ int i, status = ML_ERROR_NONE;
+
+ check_feature_state ();
+
+ if (!single || !input || !output) {
+ ml_loge ("The given param is invalid.");
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ single_h = (ml_single *) single;
+ in_data = (ml_tensors_data_s *) input;
+ *output = NULL;
+
+ if (!single_h->filter) {
+ ml_loge ("The given param is invalid, model is missing.");
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ /* Validate input data */
+ if (in_data->num_tensors != single_h->in_info.num_tensors) {
+ ml_loge ("The given param input is invalid, \
+ different number of memory blocks.");
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+
+ for (i = 0; i < in_data->num_tensors; i++) {
+ size_t raw_size = ml_tensor_info_get_size (&single_h->in_info.info[i]);
+
+ if (!in_data->tensors[i].tensor || in_data->tensors[i].size != raw_size) {
+ ml_loge ("The given param input is invalid, \
+ different size of memory block.");
+ return ML_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ /** Setup input buffer */
+ for (i = 0; i < in_data->num_tensors; i++) {
+ in_tensors[i].data = in_data->tensors[i].tensor;
+ in_tensors[i].size = in_data->tensors[i].size;
+ in_tensors[i].type = single_h->in_info.info[i].type;
+ }
+
+ /** Setup output buffer */
+ for (i = 0; i < single_h->out_info.num_tensors; i++) {
+ /** memory will be allocated by tensor_filter_single */
+ out_tensors[i].data = NULL;
+ out_tensors[i].size = ml_tensor_info_get_size (&single_h->out_info.info[i]);
+ out_tensors[i].type = single_h->out_info.info[i].type;
+ }
+
+ klass = g_type_class_peek (G_TYPE_TENSOR_FILTER_SINGLE);
+ if (!klass)
+ return ML_ERROR_PERMISSION_DENIED;
+
+ /** TODO: create a new thread, which will invoke and wait with a timeout */
+ if (klass->invoke (single_h->filter, in_tensors, out_tensors) == FALSE)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* Allocate output buffer */
+ status = ml_tensors_data_create_no_alloc (&single_h->out_info, output);
+ if (status != ML_ERROR_NONE) {
+ ml_loge ("Failed to allocate the memory block.");
+ *output = NULL;
+ return status;
+ }
+
+ result = (ml_tensors_data_s *) (*output);
+
+ /* set the result */
+ for (i = 0; i < single_h->out_info.num_tensors; i++) {
+ result->tensors[i].tensor = out_tensors[i].data;
+ }
return ML_ERROR_NONE;
}
gst_tensors_info_free (&gst_info);
return ML_ERROR_NONE;
}
+
+/**
+ * @brief Sets the maximum amount of time to wait for an output, in milliseconds.
+ * @param[in] single The model handle whose invoke timeout is to be set (currently unused).
+ * @param[in] timeout The time to wait for an output, in milliseconds.
+ * @return Always @c ML_ERROR_NOT_SUPPORTED for now; this is a stub and the
+ *         timeout is not yet applied to the invoke path.
+ */
+int
+ml_single_set_timeout (ml_single_h single, unsigned int timeout)
+{
+  return ML_ERROR_NOT_SUPPORTED;
+}
/**
* @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
+ * @note Memory for data buffer is not allocated.
*/
int
-ml_tensors_data_create (const ml_tensors_info_h info,
+ml_tensors_data_create_no_alloc (const ml_tensors_info_h info,
ml_tensors_data_h * data)
{
ml_tensors_data_s *_data;
_data->num_tensors = tensors_info->num_tensors;
for (i = 0; i < _data->num_tensors; i++) {
_data->tensors[i].size = ml_tensor_info_get_size (&tensors_info->info[i]);
+ _data->tensors[i].tensor = NULL;
+ }
+
+ *data = _data;
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
+ */
+int
+ml_tensors_data_create (const ml_tensors_info_h info,
+ ml_tensors_data_h * data)
+{
+ gint status;
+ ml_tensors_data_s *_data = NULL;
+ gint i;
+
+ status = ml_tensors_data_create_no_alloc (info, (ml_tensors_data_h *) &_data);
+
+ if (status != ML_ERROR_NONE)
+ return status;
+
+ for (i = 0; i < _data->num_tensors; i++) {
_data->tensors[i].tensor = g_malloc0 (_data->tensors[i].size);
if (_data->tensors[i].tensor == NULL)
goto failed;
g_tensor_filter_single_set_property (GObject * object, guint prop_id,
const GValue * value, GParamSpec * pspec)
{
- /** TODO: share this with tensor_filter*/
+ GTensorFilterSingle *self;
+ GstTensorFilterProperties *prop;
+
+ self = G_TENSOR_FILTER_SINGLE (object);
+ prop = &self->prop;
+
+ g_debug ("Setting property for prop %d.\n", prop_id);
+
+ switch (prop_id) {
+ case PROP_SILENT:
+ self->silent = g_value_get_boolean (value);
+ g_debug ("Debug mode = %d", self->silent);
+ break;
+ case PROP_FRAMEWORK:
+ {
+ const gchar *fw_name = g_value_get_string (value);
+ const GstTensorFilterFramework *fw;
+
+ if (self->fw != NULL) {
+ /* close old framework */
+ g_tensor_filter_single_stop (self);
+ }
+
+ g_debug ("Framework = %s\n", fw_name);
+
+ fw = nnstreamer_filter_find (fw_name);
+
+ /* See if mandatory methods are filled in */
+ if (nnstreamer_filter_validate (fw)) {
+ self->fw = fw;
+ prop->fwname = g_strdup (fw_name);
+ } else {
+ g_warning ("Cannot identify the given neural network framework, %s\n",
+ fw_name);
+ }
+ break;
+ }
+ case PROP_MODEL:
+ {
+ const gchar *model_files = g_value_get_string (value);
+ guint model_num;
+
+ if (prop->model_file) {
+ g_tensor_filter_single_stop (self);
+ g_free_const (prop->model_file);
+ prop->model_file = NULL;
+ }
+
+ if (prop->model_file_sub) {
+ g_tensor_filter_single_stop (self);
+ g_free_const (prop->model_file_sub);
+ prop->model_file_sub = NULL;
+ }
+
+ /* Once configures, it cannot be changed in runtime */
+ g_assert (model_files);
+ model_num = gst_tensor_filter_parse_modelpaths_string (prop, model_files);
+ if (model_num == 1) {
+ g_debug ("Model = %s\n", prop->model_file);
+ if (!g_file_test (prop->model_file, G_FILE_TEST_IS_REGULAR))
+ g_critical ("Cannot find the model file: %s\n", prop->model_file);
+ } else if (model_num == 2) {
+ g_debug ("Init Model = %s\n", prop->model_file_sub);
+ g_debug ("Pred Model = %s\n", prop->model_file);
+ if (!g_file_test (prop->model_file_sub, G_FILE_TEST_IS_REGULAR))
+ g_critical ("Cannot find the init model file: %s\n",
+ prop->model_file_sub);
+ if (!g_file_test (prop->model_file, G_FILE_TEST_IS_REGULAR))
+ g_critical ("Cannot find the pred model file: %s\n",
+ prop->model_file);
+ } else if (model_num > 2) {
+ /** @todo if the new NN framework requires more than 2 model files, this area will be implemented */
+ g_critical
+ ("There is no NN framework that requires model files more than 2. Current Input model files are :%d\n",
+ model_num);
+ } else {
+ g_critical ("Set model file path first\n");
+ }
+ break;
+ }
+ case PROP_INPUT:
+ g_assert (!prop->input_configured && value);
+ /* Once configures, it cannot be changed in runtime */
+ {
+ guint num_dims;
+
+ num_dims = gst_tensors_info_parse_dimensions_string (&prop->input_meta,
+ g_value_get_string (value));
+
+ if (prop->input_meta.num_tensors > 0 &&
+ prop->input_meta.num_tensors != num_dims) {
+ g_warning
+ ("Invalid input-dim, given param does not match with old value.");
+ }
+
+ prop->input_meta.num_tensors = num_dims;
+ }
+ break;
+ case PROP_OUTPUT:
+ g_assert (!prop->output_configured && value);
+ /* Once configures, it cannot be changed in runtime */
+ {
+ guint num_dims;
+
+ num_dims = gst_tensors_info_parse_dimensions_string (&prop->output_meta,
+ g_value_get_string (value));
+
+ if (prop->output_meta.num_tensors > 0 &&
+ prop->output_meta.num_tensors != num_dims) {
+ g_warning
+ ("Invalid output-dim, given param does not match with old value.");
+ }
+
+ prop->output_meta.num_tensors = num_dims;
+ }
+ break;
+ case PROP_INPUTTYPE:
+ g_assert (!prop->input_configured && value);
+ /* Once configures, it cannot be changed in runtime */
+ {
+ guint num_types;
+
+ num_types = gst_tensors_info_parse_types_string (&prop->input_meta,
+ g_value_get_string (value));
+
+ if (prop->input_meta.num_tensors > 0 &&
+ prop->input_meta.num_tensors != num_types) {
+ g_warning
+ ("Invalid input-type, given param does not match with old value.");
+ }
+
+ prop->input_meta.num_tensors = num_types;
+ }
+ break;
+ case PROP_OUTPUTTYPE:
+ g_assert (!prop->output_configured && value);
+ /* Once configures, it cannot be changed in runtime */
+ {
+ guint num_types;
+
+ num_types = gst_tensors_info_parse_types_string (&prop->output_meta,
+ g_value_get_string (value));
+
+ if (prop->output_meta.num_tensors > 0 &&
+ prop->output_meta.num_tensors != num_types) {
+ g_warning
+ ("Invalid output-type, given param does not match with old value.");
+ }
+
+ prop->output_meta.num_tensors = num_types;
+ }
+ break;
+ case PROP_INPUTNAME:
+ /* INPUTNAME is required by tensorflow to designate the order of tensors */
+ g_assert (!prop->input_configured && value);
+ /* Once configures, it cannot be changed in runtime */
+ {
+ guint num_names;
+
+ num_names = gst_tensors_info_parse_names_string (&prop->input_meta,
+ g_value_get_string (value));
+
+ if (prop->input_meta.num_tensors > 0 &&
+ prop->input_meta.num_tensors != num_names) {
+ g_warning
+ ("Invalid input-name, given param does not match with old value.");
+ }
+
+ prop->input_meta.num_tensors = num_names;
+ }
+ break;
+ case PROP_OUTPUTNAME:
+ /* OUTPUTNAME is required by tensorflow to designate the order of tensors */
+ g_assert (!prop->output_configured && value);
+ /* Once configures, it cannot be changed in runtime */
+ {
+ guint num_names;
+
+ num_names = gst_tensors_info_parse_names_string (&prop->output_meta,
+ g_value_get_string (value));
+
+ if (prop->output_meta.num_tensors > 0 &&
+ prop->output_meta.num_tensors != num_names) {
+ g_warning
+ ("Invalid output-name, given param does not match with old value.");
+ }
+
+ prop->output_meta.num_tensors = num_names;
+ }
+ break;
+ case PROP_CUSTOM:
+ /* In case updated custom properties in runtime! */
+ g_free_const (prop->custom_properties);
+ prop->custom_properties = g_value_dup_string (value);
+ g_debug ("Custom Option = %s\n", prop->custom_properties);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
}
/**
g_tensor_filter_single_invoke (GTensorFilterSingle * self,
GstTensorMemory * input, GstTensorMemory * output)
{
- gboolean status = TRUE;
- /** TODO: fill this */
+ gboolean status;
+ int i;
+
+ if (G_UNLIKELY (!self->fw) || G_UNLIKELY (!self->fw->invoke_NN))
+ return FALSE;
+ if (G_UNLIKELY (!self->fw->run_without_model) &&
+ G_UNLIKELY (!self->prop.model_file))
+ return FALSE;
/** start if not already started */
- if (self->started == FALSE)
+ if (self->started == FALSE) {
status = g_tensor_filter_single_start (self);
+ if (status == FALSE) {
+ return status;
+ }
+ self->started = TRUE;
+ }
+
+ /** Setup output buffer */
+ for (i = 0; i < self->prop.output_meta.num_tensors; i++) {
+ /* allocate memory if allocate_in_invoke is FALSE */
+ if (self->fw->allocate_in_invoke == FALSE) {
+ output[i].data = g_malloc (output[i].size);
+ if (!output[i].data)
+ goto error;
+ }
+ }
+
+ if (self->fw->invoke_NN (&self->prop, &self->privateData, input, output) == 0)
+ return TRUE;
- return status;
+error:
+ if (self->fw->allocate_in_invoke == FALSE)
+ for (i = 0; i < self->prop.output_meta.num_tensors; i++)
+ g_free (output[i].data);
+ return FALSE;
}
./tests/unittest_plugins --gst-plugin-path=. --gtest_output="xml:unittest_plugins.xml"
./tests/unittest_src_iio --gst-plugin-path=. --gtest_output="xml:unittest_src_iio.xml"
./tests/tizen_capi/unittest_tizen_capi --gst-plugin-path=. --gtest_output="xml:unittest_tizen_capi.xml"
+ ./tests/tizen_capi/unittest_tizen_capi_single_new --gst-plugin-path=. --gtest_output="xml:unittest_tizen_capi_single_new.xml"
popd
pushd tests
ssat -n
install: false
)
test('unittest_tizen_capi', unittest_tizen_capi, args: ['--gst-plugin-path=../..'])
+
+tizen_apptest_deps = [
+ nnstreamer_capi_single_new_dep,
+ gtest_dep,
+ glib_dep
+]
+
+unittest_tizen_capi_single_new = executable('unittest_tizen_capi_single_new',
+ 'unittest_tizen_capi.cpp',
+ dependencies: [tizen_apptest_deps],
+ install: false
+)
+test('unittest_tizen_capi_single_new', unittest_tizen_capi_single_new, args: ['--gst-plugin-path=../..'])