As we discussed, the tensors metadata in the C-API has been changed to use an info-handle instance.
1. alloc and destroy functions are added for tensors-info handle.
2. refactors all functions and testcases with metadata handle.
3. update api description.
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
__android_log_print (ANDROID_LOG_INFO, TAG_NAME, __VA_ARGS__)
#define ml_logw(...) \
- __android_log_print (ANDROID_LOG_WARNING, TAG_NAME, __VA_ARGS__)
+ __android_log_print (ANDROID_LOG_WARN, TAG_NAME, __VA_ARGS__)
#define ml_loge(...) \
__android_log_print (ANDROID_LOG_ERROR, TAG_NAME, __VA_ARGS__)
#endif /* __cplusplus */
/**
+ * @brief Data structure for tensor information.
+ * @since_tizen 5.5
+ */
+typedef struct {
+ char *name; /**< Name of each element in the tensor. */
+ ml_tensor_type_e type; /**< Type of each element in the tensor. */
+ ml_tensor_dimension dimension; /**< Dimension information. */
+} ml_tensor_info_s;
+
+/**
+ * @brief Data structure for tensors information, which contains multiple tensors.
+ * @since_tizen 5.5
+ */
+typedef struct {
+ unsigned int num_tensors; /**< The number of tensors. */
+ ml_tensor_info_s info[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor info. */
+} ml_tensors_info_s;
+
+/**
* @brief Possible controls on elements of a pipeline.
*/
typedef enum {
void ml_util_set_error (int error_code);
/**
+ * @brief Gets the byte size of the given tensor info.
+ */
+size_t ml_util_get_tensor_size (const ml_tensor_info_s *info);
+
+/**
+ * @brief Initializes the tensors information with default value.
+ * @since_tizen 5.5
+ * @param[in] info The tensors info pointer to be initialized.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_initialize_tensors_info (ml_tensors_info_s *info);
+
+/**
+ * @brief Frees the tensors info pointer.
+ * @since_tizen 5.5
+ * @param[in] info The tensors info pointer to be freed.
+ */
+void ml_util_free_tensors_info (ml_tensors_info_s *info);
+
+/**
* @brief Copies tensor metadata from gst tensors info.
*/
void ml_util_copy_tensors_info_from_gst (ml_tensors_info_s *ml_info, const GstTensorsInfo *gst_info);
*/
void ml_util_copy_tensors_info_from_ml (GstTensorsInfo *gst_info, const ml_tensors_info_s *ml_info);
+/**
+ * @brief Gets caps from tensors info.
+ */
+GstCaps *ml_util_get_caps_from_tensors_info (const ml_tensors_info_s *info);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
* @since_tizen 5.5
* @param[out] single This is the model handle opened. Users are required to close
* the given instance with ml_single_close().
- * @param[in] model_path This is the path to the neural network model file.
+ * @param[in] model This is the path to the neural network model file.
* @param[in] input_info This is required if the given model has flexible input
* dimension, where the input dimension MUST be given
* before executing the model.
* You may set NULL if it's not required.
* @param[in] output_info This is required if the given model has flexible output dimension.
* @param[in] nnfw The nerual network framework used to open the given
- * #model_path. Set ML_NNFW_UNKNOWN to let it auto-detect.
+ *                 #model. Set #ML_NNFW_TYPE_ANY to let it auto-detect.
* @param[in] hw Tell the corresponding @nnfw to use a specific hardware.
- * Set ML_NNFW_HW_DO_NOT_CARE if it does not matter.
+ * Set #ML_NNFW_HW_ANY if it does not matter.
* @return @c 0 on success. otherwise a negative error value
* @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
+ * @retval #ML_ERROR_STREAMS_PIPE Failed to start the pipeline.
*
* @detail Even if the model has flexible input data dimensions,
* input data frames of an instance of a model should share the
* same dimension.
*/
-int ml_single_open (ml_single_h *single, const char *model_path, const ml_tensors_info_s *input_info, const ml_tensors_info_s *output_info, ml_nnfw_e nnfw, ml_nnfw_hw_e hw);
+int ml_single_open (ml_single_h *single, const char *model, const ml_tensors_info_h input_info, const ml_tensors_info_h output_info, ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw);
/**
* @brief Closes the opened model handle.
* types are available.
* @since_tizen 5.5
* @param[in] single The model handle to be investigated.
- * @param[out] input_info The struct of input tensors info. Caller is responsible to free the information with ml_util_free_tensors_info().
+ * @param[out] info The handle of input tensors information. Caller is responsible to free the information with ml_util_destroy_tensors_info().
* @return @c 0 on success. otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
*/
-int ml_single_get_input_info (ml_single_h single, ml_tensors_info_s *input_info);
+int ml_single_get_input_info (ml_single_h single, ml_tensors_info_h *info);
/**
* @brief Gets the type (tensor dimension, type, name and so on) of output
* types are available.
* @since_tizen 5.5
* @param[in] single The model handle to be investigated.
- * @param[out] output_info The struct of output tensors info. Caller is responsible to free the returned with ml_util_free_tensors_info().
+ * @param[out] info The handle of output tensors information. Caller is responsible to free the returned with ml_util_destroy_tensors_info().
* @return @c 0 on success. otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
*/
-int ml_single_get_output_info (ml_single_h single, ml_tensors_info_s *output_info);
+int ml_single_get_output_info (ml_single_h single, ml_tensors_info_h *info);
/**
* @}
#define __TIZEN_MACHINELEARNING_NNSTREAMER_H__
#include <stddef.h>
+#include <stdbool.h>
+
/**
* Apply modify_nnstreamer_h_for_nontizen.sh if you want to use
* in non-Tizen Linux machines
* @brief Dimension information that NNStreamer support.
* @since_tizen 5.5
*/
-typedef unsigned int ml_tensor_dim[ML_TENSOR_RANK_LIMIT];
+typedef unsigned int ml_tensor_dimension[ML_TENSOR_RANK_LIMIT];
+
+/**
+ * @brief A handle of a tensors metadata instance.
+ * @since_tizen 5.5
+ */
+typedef void *ml_tensors_info_h;
/**
* @brief A handle of an NNStreamer pipeline.
* @since_tizen 5.5
*/
typedef enum {
- ML_NNFW_UNKNOWN = 0, /**< it is unknown or we do not care this value. */
- ML_NNFW_CUSTOM_FILTER, /**< custom filter (independent shared object). */
- ML_NNFW_TENSORFLOW_LITE, /**< tensorflow-lite (.tflite). */
- ML_NNFW_TENSORFLOW, /**< tensorflow (.pb). */
-} ml_nnfw_e;
+ ML_NNFW_TYPE_ANY = 0, /**< determines the nnfw with file extension. */
+ ML_NNFW_TYPE_CUSTOM_FILTER, /**< custom filter (independent shared object). */
+ ML_NNFW_TYPE_TENSORFLOW_LITE, /**< tensorflow-lite (.tflite). */
+ ML_NNFW_TYPE_TENSORFLOW, /**< tensorflow (.pb). */
+} ml_nnfw_type_e;
/**
- * @brief Types of NNFWs. Note that if the affinity (nnn) is not supported by the driver or hardware, it is ignored.
+ * @brief Types of hardware resources to be used for NNFWs. Note that if the affinity (nnn) is not supported by the driver or hardware, it is ignored.
* @since_tizen 5.5
*/
typedef enum {
- ML_NNFW_HW_DO_NOT_CARE = 0, /**< Hardware resource is not specified. */
- ML_NNFW_HW_AUTO = 1, /**< Try to schedule and optimize if possible. */
+ ML_NNFW_HW_ANY = 0, /**< Hardware resource is not specified. */
+ ML_NNFW_HW_AUTO = 1, /**< Try to schedule and optimize if possible. */
ML_NNFW_HW_CPU = 0x1000, /**< 0x1000: any CPU. 0x1nnn: CPU # nnn-1. */
ML_NNFW_HW_GPU = 0x2000, /**< 0x2000: any GPU. 0x2nnn: GPU # nnn-1. */
ML_NNFW_HW_NPU = 0x3000, /**< 0x3000: any NPU. 0x3nnn: NPU # nnn-1. */
ML_TENSOR_TYPE_FLOAT32, /**< Float 32bit */
ML_TENSOR_TYPE_INT64, /**< Integer 64bit */
ML_TENSOR_TYPE_UINT64, /**< Unsigned integer 64bit */
- ML_TENSOR_TYPE_UNKNOWN /**< Unknown type */
+ ML_TENSOR_TYPE_UNKNOWN /**< Unknown type */
} ml_tensor_type_e;
/**
} ml_pipeline_switch_e;
/**
- * @brief Data structure for tensor information.
- * @since_tizen 5.5
- */
-typedef struct {
- char * name; /**< Name of each element in the tensor. */
- ml_tensor_type_e type; /**< Type of each element in the tensor. */
- ml_tensor_dim dimension; /**< Dimension information. */
-} ml_tensor_info_s;
-
-/**
- * @brief Data structure for tensors information, which contains multiple tensors.
- * @since_tizen 5.5
- */
-typedef struct {
- unsigned int num_tensors; /**< The number of tensors. */
- ml_tensor_info_s info[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor info. */
-} ml_tensors_info_s;
-
-/**
* @brief An instance of a single input or output frame.
* @since_tizen 5.5
*/
} ml_tensor_data_s;
/**
- * @brief An instance of input or output frames. #ml_tensors_info_s is the metadata.
+ * @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata.
* @since_tizen 5.5
*/
typedef struct {
* @since_tizen 5.5
* @remarks The @a data can be used only in the callback. To use outside, make a copy.
* @remarks The @a info can be used only in the callback. To use outside, make a copy.
- * @param[in] data The contents of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by data->num_tensors. Note that max num_tensors is 16 (ML_TENSOR_SIZE_LIMIT).
- * @param[in] info The cardinality, dimension, and type of given tensor/tensors.
+ * @param[out] data The contents of the tensor output (a single frame. tensor/tensors). Number of tensors is determined by data->num_tensors. Note that max num_tensors is 16 (#ML_TENSOR_SIZE_LIMIT).
+ * @param[out] info The handle of tensors information (cardinality, dimension, and type of given tensor/tensors).
* @param[in,out] user_data User Application's Private Data.
*/
-typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_s *data, const ml_tensors_info_s *info, void *user_data);
+typedef void (*ml_pipeline_sink_cb) (const ml_tensors_data_s *data, const ml_tensors_info_h info, void *user_data);
/****************************************************
** NNStreamer Pipeline Construction (gst-parse) **
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. (pipe is NULL?)
- * @retval #ML_ERROR_STREAMS_PIPE Failed to start.
+ * @retval #ML_ERROR_STREAMS_PIPE Failed to start the pipeline.
*/
int ml_pipeline_start (ml_pipeline_h pipe);
/**
* @brief Registers a callback for sink (tensor_sink) of NNStreamer pipelines.
* @since_tizen 5.5
- * @remarks If the function succeeds, @a h handle must be unregistered using ml_pipeline_sink_unregister.
+ * @remarks If the function succeeds, @a h handle must be unregistered using ml_pipeline_sink_unregister().
* @param[in] pipe The pipeline to be attached with a sink node.
* @param[in] sink_name The name of sink node, described with ml_pipeline_construct().
* @param[in] cb The function to be called by the sink node.
* @param[out] h The sink handle.
- * @param[in] pdata Private data for the callback. This value is passed to the callback when it's invoked.
+ * @param[in] user_data Private data for the callback. This value is passed to the callback when it's invoked.
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. (pipe is NULL, sink_name is not found, or sink_name has an invalid type.)
* @retval #ML_ERROR_STREAMS_PIPE Failed to connect a signal to sink element.
*/
-int ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, ml_pipeline_sink_cb cb, ml_pipeline_sink_h *h, void *pdata);
+int ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, ml_pipeline_sink_cb cb, ml_pipeline_sink_h *h, void *user_data);
/**
* @brief Unregisters a callback for sink (tensor_sink) of NNStreamer pipelines.
* @remarks If the function succeeds, @a h handle must be released using ml_pipeline_src_put_handle().
* @param[in] pipe The pipeline to be attached with a src node.
* @param[in] src_name The name of src node, described with ml_pipeline_construct().
- * @param[out] tensors_info The cardinality, dimension, and type of given tensor/tensors.
* @param[out] h The src handle.
* @return 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_STREAMS_PIPE Fail to get SRC element.
* @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
*/
-int ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, ml_tensors_info_s *tensors_info, ml_pipeline_src_h *h);
+int ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, ml_pipeline_src_h *h);
/**
* @brief Closes the given handle of a src node of NNStreamer pipelines.
/**
* @brief Puts an input data frame.
* @param[in] h The source handle returned by ml_pipeline_src_get_handle().
- * @param[in] data The input tensors, in the format of tensors info given by ml_pipeline_src_get_handle().
+ * @param[in] data The input tensors, in the format of tensors info given by ml_pipeline_src_get_tensors_info().
* @param[in] policy The policy of buf deallocation.
* @return 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
*/
int ml_pipeline_src_input_data (ml_pipeline_src_h h, const ml_tensors_data_s *data, ml_pipeline_buf_policy_e policy);
+/**
+ * @brief Gets a handle for the tensors information of given src node.
+ * @since_tizen 5.5
+ * @remarks If the function succeeds, @a info handle must be released using ml_util_destroy_tensors_info().
+ * @param[in] h The source handle returned by ml_pipeline_src_get_handle().
+ * @param[out] info The handle of tensors information.
+ * @return 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ * @retval #ML_ERROR_STREAMS_PIPE The pipeline has inconsistent padcaps. Not negotiated?
+ * @retval #ML_ERROR_TRY_AGAIN The pipeline is not ready yet.
+ */
+int ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, ml_tensors_info_h *info);
+
/****************************************************
** NNStreamer Pipeline Switch/Valve Control **
****************************************************/
/**
* @brief Controls the switch with the given handle to select input/output nodes(pads).
* @param[in] h The switch handle returned by ml_pipeline_switch_get_handle()
- * @param[in] pad_name The name of the chosen pad to be activated. Use ml_pipeline_switch_nodelist to list the available pad names.
+ * @param[in] pad_name The name of the chosen pad to be activated. Use ml_pipeline_switch_get_pad_list() to list the available pad names.
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
* @retval #ML_ERROR_STREAMS_PIPE The element is not both input and output switch (Internal data inconsistency).
*/
-int ml_pipeline_switch_nodelist (ml_pipeline_switch_h h, char *** list);
+int ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list);
/**
* @brief Gets a handle to operate a "GstValve" node of NNStreamer pipelines.
/**
* @brief Controls the valve with the given handle.
* @param[in] h The valve handle returned by ml_pipeline_valve_get_handle()
- * @param[in] drop 1 to close (drop & stop the flow). 0 to open (let the flow pass)
+ * @param[in] open @c true to open (let the flow pass), @c false to close (drop & stop the flow).
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_pipeline_valve_control (ml_pipeline_valve_h h, int drop);
+int ml_pipeline_valve_set_open (ml_pipeline_valve_h h, bool open);
/****************************************************
** NNStreamer Utilities **
****************************************************/
/**
- * @brief Initializes the tensors info.
+ * @brief Allocates a tensors information handle with default value.
* @since_tizen 5.5
- * @param[in] info The tensors information to be initialized.
+ * @param[out] info The handle of tensors information.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-void ml_util_initialize_tensors_info (ml_tensors_info_s *info);
+int ml_util_allocate_tensors_info (ml_tensors_info_h *info);
/**
- * @brief Validates the given tensor info is valid.
+ * @brief Frees the given handle of a tensors information.
* @since_tizen 5.5
- * @param[in] info The tensor information to be validated.
- * @return @c 0 on success. Otherwise a negative error value.
+ * @param[in] info The handle of tensors information.
+ * @return 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_validate_tensor_info (const ml_tensor_info_s *info);
+int ml_util_destroy_tensors_info (ml_tensors_info_h info);
/**
- * @brief Validates the given tensors info is valid.
+ * @brief Validates the given tensors information is valid.
* @since_tizen 5.5
- * @param[in] info The tensors information to be validated.
+ * @param[in] info The handle of tensors information to be validated.
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_validate_tensors_info (const ml_tensors_info_s *info);
+int ml_util_validate_tensors_info (const ml_tensors_info_h info);
/**
- * @brief Copies tensor meta info.
+ * @brief Copies the tensors information.
* @since_tizen 5.5
- * @param[out] dest Newly allocated tensors information.
+ * @param[out] dest A destination handle of tensors information.
* @param[in] src The tensors information to be copied.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-void ml_util_copy_tensors_info (ml_tensors_info_s *dest, const ml_tensors_info_s *src);
+int ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src);
/**
- * @brief Gets the byte size of the given tensor type.
+ * @brief Sets the number of tensors with given handle of tensors information.
* @since_tizen 5.5
- * @param[in] info The tensor information to be investigated.
- * @return @c >= 0 on success with byte size.
+ * @param[in] info The handle of tensors information.
+ * @param[in] count The number of tensors.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-size_t ml_util_get_tensor_size (const ml_tensor_info_s *info);
+int ml_util_set_tensors_count (ml_tensors_info_h info, const unsigned int count);
/**
- * @brief Gets the byte size of the given tensors type.
+ * @brief Gets the number of tensors with given handle of tensors information.
* @since_tizen 5.5
- * @param[in] info The tensors information to be investigated.
- * @return @c >= 0 on success with byte size.
+ * @param[in] info The handle of tensors information.
+ * @param[out] count The number of tensors.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count);
+
+/**
+ * @brief Sets the tensor name with given handle of tensors information.
+ * @since_tizen 5.5
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of tensor meta to be updated.
+ * @param[in] name The tensor name to be set.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-size_t ml_util_get_tensors_size (const ml_tensors_info_s *info);
+int ml_util_set_tensor_name (ml_tensors_info_h info, const unsigned int index, const char *name);
/**
- * @brief Frees the tensors info pointer.
+ * @brief Gets the tensor name with given handle of tensors information.
* @since_tizen 5.5
- * @param[in] info The tensors info pointer to be freed.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of tensor meta.
+ * @param[out] name The tensor name.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_get_tensor_name (ml_tensors_info_h info, const unsigned int index, char **name);
+
+/**
+ * @brief Sets the tensor type with given handle of tensors information.
+ * @since_tizen 5.5
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of tensor meta to be updated.
+ * @param[in] type The tensor type to be set.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_set_tensor_type (ml_tensors_info_h info, const unsigned int index, const ml_tensor_type_e type);
+
+/**
+ * @brief Gets the tensor type with given handle of tensors information.
+ * @since_tizen 5.5
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of tensor meta.
+ * @param[out] type The tensor type.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-void ml_util_free_tensors_info (ml_tensors_info_s *info);
+int ml_util_get_tensor_type (ml_tensors_info_h info, const unsigned int index, ml_tensor_type_e *type);
+
+/**
+ * @brief Sets the tensor dimension with given handle of tensors information.
+ * @since_tizen 5.5
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of tensor meta to be updated.
+ * @param[in] dimension The tensor dimension to be set.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_set_tensor_dimension (ml_tensors_info_h info, const unsigned int index, const ml_tensor_dimension dimension);
+
+/**
+ * @brief Gets the tensor dimension with given handle of tensors information.
+ * @since_tizen 5.5
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of tensor meta.
+ * @param[out] dimension The tensor dimension.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_util_get_tensor_dimension (ml_tensors_info_h info, const unsigned int index, ml_tensor_dimension dimension);
+
+/**
+ * @brief Gets the byte size of the given tensors type.
+ * @since_tizen 5.5
+ * @param[in] info The handle of tensors information to be investigated.
+ * @return @c >= 0 on success with byte size.
+ */
+size_t ml_util_get_tensors_size (const ml_tensors_info_h info);
/**
* @brief Frees the tensors data pointer.
void ml_util_free_tensors_data (ml_tensors_data_s **data);
/**
- * @brief Allocates a tensor data frame with the given tensors type.
+ * @brief Allocates a tensor data frame with the given tensors information.
* @since_tizen 5.5
- * @param[in] info The tensors information for the allocation.
+ * @param[in] info The handle of tensors information for the allocation.
* @return @c Tensors data pointer allocated. Null if error. Caller is responsible to free the allocated data with ml_util_free_tensors_data().
* @retval NULL There is an error. Call ml_util_get_last_error() to get specific error code.
*/
-ml_tensors_data_s *ml_util_allocate_tensors_data (const ml_tensors_info_s *info);
+ml_tensors_data_s *ml_util_allocate_tensors_data (const ml_tensors_info_h info);
/**
* @brief Checks the availability of the given execution environments.
+ * @details If the function returns an error, @a available is not changed.
* @since_tizen 5.5
* @param[in] nnfw Check if the nnfw is available in the system.
- * Set #ML_NNFW_UNKNOWN to skip checking nnfw.
+ * Set #ML_NNFW_TYPE_ANY to skip checking nnfw.
* @param[in] hw Check if the hardware is available in the system.
- * Set #ML_NNFW_HW_DO_NOT_CARE to skip checking hardware.
- * @return @c 0 if it's available. 1 if it's not available.
- * negative value if there is an error.
+ * Set #ML_NNFW_HW_ANY to skip checking hardware.
+ * @param[out] available @c true if it's available, @c false if it's not available.
+ * @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful and the environments are available.
- * @retval #ML_ERROR_NOT_SUPPORTED The given option is not available.
- * @retval 1 Successful but the environments are not available.
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
*/
-int ml_util_check_nnfw (ml_nnfw_e nnfw, ml_nnfw_hw_e hw);
+int ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool *available);
/**
* @brief Gets the last error code.
ml_pipeline *p; \
ml_pipeline_element *elem; \
int ret = ML_ERROR_NONE; \
- if (h == NULL) { \
+ if ((h) == NULL) { \
ml_loge ("The given handle is invalid"); \
return ML_ERROR_INVALID_PARAMETER; \
} \
*/
int
ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name,
- ml_pipeline_sink_cb cb, ml_pipeline_sink_h * h, void *pdata)
+ ml_pipeline_sink_cb cb, ml_pipeline_sink_h * h, void *user_data)
{
ml_pipeline_element *elem;
ml_pipeline *p = pipe;
sink->pipe = p;
sink->element = elem;
sink->cb = cb;
- sink->pdata = pdata;
+ sink->pdata = user_data;
g_mutex_lock (&elem->lock);
}
elem->handles = g_list_remove (elem->handles, sink);
+ g_free (sink);
handle_exit (h);
}
};
/**
+ * @brief Parses the tensors information of the given src element.
+ */
+static int
+ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem)
+{
+ int ret = ML_ERROR_NONE;
+
+ if (elem->src == NULL) {
+ elem->src = gst_element_get_static_pad (elem->element, "src");
+ elem->size = 0;
+
+ if (elem->src == NULL) {
+ ret = ML_ERROR_STREAMS_PIPE;
+ } else {
+ GstCaps *caps = gst_pad_get_allowed_caps (elem->src);
+ guint i;
+ gboolean found = FALSE;
+ size_t sz;
+
+ if (caps) {
+ found = get_tensors_info_from_caps (caps, &elem->tensors_info);
+ gst_caps_unref (caps);
+ }
+
+ if (found) {
+ for (i = 0; i < elem->tensors_info.num_tensors; i++) {
+ sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
+ elem->size += sz;
+ }
+ } else {
+ ml_logw
+ ("Cannot find caps. The pipeline is not yet negotiated for src element [%s].",
+ elem->name);
+ gst_object_unref (elem->src);
+ elem->src = NULL;
+
+ ret = ML_ERROR_TRY_AGAIN;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
* @brief Get a handle to operate a src (more info in nnstreamer.h)
*/
int
ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name,
- ml_tensors_info_s * tensors_info, ml_pipeline_src_h * h)
+ ml_pipeline_src_h * h)
{
ml_pipeline *p = pipe;
ml_pipeline_element *elem;
ml_pipeline_src *src;
- int ret = ML_ERROR_NONE, i;
+ int ret = ML_ERROR_NONE;
if (h == NULL) {
ml_loge ("The argument source handle is not valid.");
return ML_ERROR_INVALID_PARAMETER;
}
- if (tensors_info == NULL) {
- ml_loge ("The 3rd argument, tensors info is not valid.");
- return ML_ERROR_INVALID_PARAMETER;
- }
-
g_mutex_lock (&p->lock);
elem = g_hash_table_lookup (p->namednodes, src_name);
goto unlock_return;
}
- if (elem->src == NULL) {
- elem->src = gst_element_get_static_pad (elem->element, "src");
-
- if (elem->src != NULL) {
- /** @todo : refactor this along with ml_pipeline_src_input_data */
- GstCaps *caps = gst_pad_get_allowed_caps (elem->src);
- gboolean found = FALSE;
-
- /** @todo caps may be NULL for prerolling */
- if (caps) {
- found = get_tensors_info_from_caps (caps, &elem->tensors_info);
- gst_caps_unref (caps);
- }
-
- if (found) {
- elem->size = 0;
-
- for (i = 0; i < elem->tensors_info.num_tensors; i++) {
- size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
- elem->size += sz;
- }
- } else {
- ml_logw
- ("Cannot find caps. The pipeline is not yet negotiated for tensor_src, [%s].",
- src_name);
- gst_object_unref (elem->src);
- elem->src = NULL;
-
- ret = ML_ERROR_TRY_AGAIN;
- goto unlock_return;
- }
- } else {
- ret = ML_ERROR_STREAMS_PIPE;
- goto unlock_return;
- }
- }
-
- ml_util_copy_tensors_info (tensors_info, &elem->tensors_info);
-
*h = g_new (ml_pipeline_src, 1);
src = *h;
src->id = elem->maxid;
elem->handles = g_list_append (elem->handles, src);
+ ml_pipeline_src_parse_tensors_info (elem);
g_mutex_unlock (&elem->lock);
unlock_return:
handle_init (src, src, h);
elem->handles = g_list_remove (elem->handles, src);
+ g_free (src);
handle_exit (h);
}
goto unlock_return;
}
- /** @todo This assumes that padcap is static */
- if (elem->src == NULL) {
- /* Get the src-pad-cap */
- elem->src = gst_element_get_static_pad (elem->element, "src");
- }
-
- if (elem->src != NULL && elem->size == 0) {
- /* srcpadcap available (negoticated) */
- GstCaps *caps = gst_pad_get_allowed_caps (elem->src);
+ ret = ml_pipeline_src_parse_tensors_info (elem);
- if (caps) {
- gboolean found;
-
- found = get_tensors_info_from_caps (caps, &elem->tensors_info);
- gst_caps_unref (caps);
-
- if (found) {
- elem->size = 0;
-
- if (elem->tensors_info.num_tensors != data->num_tensors) {
- ml_loge
- ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
- elem->name, elem->tensors_info.num_tensors, data->num_tensors);
+ if (ret != ML_ERROR_NONE) {
+ ml_logw ("The pipeline is not ready to accept inputs. The input is ignored.");
+ goto unlock_return;
+ }
- gst_object_unref (elem->src);
- elem->src = NULL;
- ret = ML_ERROR_STREAMS_PIPE;
- goto unlock_return;
- }
+ if (elem->tensors_info.num_tensors != data->num_tensors) {
+ ml_loge
+ ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. %u != %u",
+ elem->name, elem->tensors_info.num_tensors, data->num_tensors);
- for (i = 0; i < elem->tensors_info.num_tensors; i++) {
- size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
+ ret = ML_ERROR_INVALID_PARAMETER;
+ goto unlock_return;
+ }
- if (sz != data->tensors[i].size) {
- ml_loge
- ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
- i, data->tensors[i].size, sz);
+ for (i = 0; i < elem->tensors_info.num_tensors; i++) {
+ size_t sz = ml_util_get_tensor_size (&elem->tensors_info.info[i]);
- gst_object_unref (elem->src);
- elem->src = NULL;
- ret = ML_ERROR_INVALID_PARAMETER;
- goto unlock_return;
- }
+ if (sz != data->tensors[i].size) {
+ ml_loge
+ ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)",
+ i, data->tensors[i].size, sz);
- elem->size += sz;
- }
- } else {
- gst_object_unref (elem->src);
- elem->src = NULL; /* invalid! */
- ret = ML_ERROR_STREAMS_PIPE;
- goto unlock_return;
- /** @todo What if it keeps being "NULL"? */
- }
+ ret = ML_ERROR_INVALID_PARAMETER;
+ goto unlock_return;
}
}
- if (elem->size == 0) {
- ml_logw ("The pipeline is not ready to accept inputs. The input is ignored.");
- ret = ML_ERROR_TRY_AGAIN;
- goto unlock_return;
- }
-
/* Create buffer to be pushed from buf[] */
buffer = gst_buffer_new ();
for (i = 0; i < data->num_tensors; i++) {
handle_exit (h);
}
+/**
+ * @brief Gets a handle for the tensors metadata of given src node.
+ * @param[in] h The src node handle.
+ * @param[out] info Newly allocated tensors-info handle describing the src
+ *             node; the caller must release it with ml_util_destroy_tensors_info().
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h,
+ ml_tensors_info_h * info)
+{
+ handle_init (src, src, h);
+
+ if (info == NULL) {
+ ret = ML_ERROR_INVALID_PARAMETER;
+ goto unlock_return;
+ }
+
+ ret = ml_pipeline_src_parse_tensors_info (elem);
+
+ if (ret == ML_ERROR_NONE) {
+ /* Propagate allocation/copy failures instead of discarding them. */
+ ret = ml_util_allocate_tensors_info (info);
+ if (ret == ML_ERROR_NONE)
+ ret = ml_util_copy_tensors_info (*info, &elem->tensors_info);
+ }
+
+ handle_exit (h);
+}
+
/****************************************************
** NNStreamer Pipeline Switch/Valve Control **
****************************************************/
goto unlock_return;
}
- if (elem->type != ML_PIPELINE_ELEMENT_SWITCH_INPUT &&
- elem->type != ML_PIPELINE_ELEMENT_SWITCH_OUTPUT) {
+ if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_INPUT) {
+ if (type)
+ *type = ML_PIPELINE_SWITCH_INPUT_SELECTOR;
+ } else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT) {
+ if (type)
+ *type = ML_PIPELINE_SWITCH_OUTPUT_SELECTOR;
+ } else {
ml_loge
("There is an element named [%s] in the pipeline, but it is not an input/output switch",
switch_name);
+
ret = ML_ERROR_INVALID_PARAMETER;
goto unlock_return;
}
swtc->pipe = p;
swtc->element = elem;
- if (type) {
- if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_INPUT)
- *type = ML_PIPELINE_SWITCH_INPUT_SELECTOR;
- else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT)
- *type = ML_PIPELINE_SWITCH_OUTPUT_SELECTOR;
- else {
- ml_loge ("Internal data of switch-handle [%s] is broken. It is fatal.",
- elem->name);
- ret = ML_ERROR_INVALID_PARAMETER;
- goto unlock_return;
- }
- }
-
g_mutex_lock (&elem->lock);
elem->maxid++;
handle_init (switch, swtc, h);
elem->handles = g_list_remove (elem->handles, swtc);
+ g_free (swtc);
handle_exit (h);
}
}
/**
- * @brief List nodes of a switch (more info in nnstreamer.h)
+ * @brief Gets the pad names of a switch.
*/
int
-ml_pipeline_switch_nodelist (ml_pipeline_switch_h h, char ***list)
+ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list)
{
GstIterator *it;
GValue item = G_VALUE_INIT;
if (i > counter) {
g_list_free_full (dllist, g_free); /* This frees all strings as well */
- g_free (list);
+ g_free (*list);
+ *list = NULL;
ml_loge
("Internal data inconsistency. This could be a bug in nnstreamer. Switch [%s].",
handle_init (valve, valve, h);
elem->handles = g_list_remove (elem->handles, valve);
+ g_free (valve);
handle_exit (h);
}
* @brief Control the valve with the given handle (more info in nnstreamer.h)
*/
int
-ml_pipeline_valve_control (ml_pipeline_valve_h h, int drop)
+ml_pipeline_valve_set_open (ml_pipeline_valve_h h, bool open)
{
- gboolean current_val;
+ gboolean drop = FALSE;
handle_init (valve, valve, h);
- g_object_get (G_OBJECT (elem->element), "drop", &current_val, NULL);
+ g_object_get (G_OBJECT (elem->element), "drop", &drop, NULL);
- if ((drop != 0) == (current_val != FALSE)) {
+ if ((open != false) != (drop != FALSE)) {
/* Nothing to do */
- ml_logi ("Valve is called, but there is no effective changes: %d->%d",
- ! !current_val, ! !drop);
+ ml_logi ("Valve is called, but there is no effective changes");
goto unlock_return;
}
- g_object_set (G_OBJECT (elem->element), "drop", ! !drop, NULL);
- ml_logi ("Valve is changed: %d->%d", ! !current_val, ! !drop);
+ drop = (open) ? FALSE : TRUE;
+ g_object_set (G_OBJECT (elem->element), "drop", drop, NULL);
handle_exit (h);
}
} ml_single;
/**
- * @brief Gets caps from tensors info.
- */
-static GstCaps *
-ml_single_get_caps_from_tensors_info (const ml_tensors_info_s * info)
-{
- GstCaps *caps;
- GstTensorsConfig config;
-
- if (!info)
- return NULL;
-
- ml_util_copy_tensors_info_from_ml (&config.info, info);
-
- /* set framerate 0/1 */
- config.rate_n = 0;
- config.rate_d = 1;
-
- /* Supposed input type is single tensor if the number of tensors is 1. */
- if (config.info.num_tensors == 1) {
- GstTensorConfig c;
-
- gst_tensor_info_copy (&c.info, &config.info.info[0]);
- c.rate_n = 0;
- c.rate_d = 1;
-
- caps = gst_tensor_caps_from_config (&c);
- gst_tensor_info_free (&c.info);
- } else {
- caps = gst_tensors_caps_from_config (&config);
- }
-
- gst_tensors_info_free (&config.info);
- return caps;
-}
-
-/**
* @brief Opens an ML model and returns the instance as a handle.
*/
int
-ml_single_open (ml_single_h * single, const char *model_path,
- const ml_tensors_info_s * input_info, const ml_tensors_info_s * output_info,
- ml_nnfw_e nnfw, ml_nnfw_hw_e hw)
+ml_single_open (ml_single_h * single, const char *model,
+ const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
+ ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw)
{
ml_single *single_h;
ml_pipeline_h pipe;
int status = ML_ERROR_NONE;
gchar *pipeline_desc = NULL;
gchar *path_down;
+ ml_tensors_info_s *in_tensors_info, *out_tensors_info;
+ bool available = false;
/* Validate the params */
if (!single) {
/* init null */
*single = NULL;
- if (input_info &&
- ml_util_validate_tensors_info (input_info) != ML_ERROR_NONE) {
+ in_tensors_info = (ml_tensors_info_s *) input_info;
+ out_tensors_info = (ml_tensors_info_s *) output_info;
+
+ if (in_tensors_info &&
+ ml_util_validate_tensors_info (in_tensors_info) != ML_ERROR_NONE) {
ml_loge ("The given param, input tensor info is invalid.");
return ML_ERROR_INVALID_PARAMETER;
}
- if (output_info &&
- ml_util_validate_tensors_info (output_info) != ML_ERROR_NONE) {
+ if (out_tensors_info &&
+ ml_util_validate_tensors_info (out_tensors_info) != ML_ERROR_NONE) {
ml_loge ("The given param, output tensor info is invalid.");
return ML_ERROR_INVALID_PARAMETER;
}
/* 1. Determine nnfw */
/* Check file extention. */
- path_down = g_ascii_strdown (model_path, -1);
+ path_down = g_ascii_strdown (model, -1);
switch (nnfw) {
- case ML_NNFW_UNKNOWN:
+ case ML_NNFW_TYPE_ANY:
if (g_str_has_suffix (path_down, ".tflite")) {
- ml_logi ("The given model [%s] is supposed a tensorflow-lite model.", model_path);
- nnfw = ML_NNFW_TENSORFLOW_LITE;
+ ml_logi ("The given model [%s] is supposed a tensorflow-lite model.", model);
+ nnfw = ML_NNFW_TYPE_TENSORFLOW_LITE;
} else if (g_str_has_suffix (path_down, ".pb")) {
- ml_logi ("The given model [%s] is supposed a tensorflow model.", model_path);
- nnfw = ML_NNFW_TENSORFLOW;
+ ml_logi ("The given model [%s] is supposed a tensorflow model.", model);
+ nnfw = ML_NNFW_TYPE_TENSORFLOW;
} else {
- ml_loge ("The given model [%s] has unknown extension.", model_path);
+ ml_loge ("The given model [%s] has unknown extension.", model);
status = ML_ERROR_INVALID_PARAMETER;
}
break;
- case ML_NNFW_CUSTOM_FILTER:
+ case ML_NNFW_TYPE_CUSTOM_FILTER:
if (!g_str_has_suffix (path_down, ".so")) {
- ml_loge ("The given model [%s] has invalid extension.", model_path);
+ ml_loge ("The given model [%s] has invalid extension.", model);
status = ML_ERROR_INVALID_PARAMETER;
}
break;
- case ML_NNFW_TENSORFLOW_LITE:
+ case ML_NNFW_TYPE_TENSORFLOW_LITE:
if (!g_str_has_suffix (path_down, ".tflite")) {
- ml_loge ("The given model [%s] has invalid extension.", model_path);
+ ml_loge ("The given model [%s] has invalid extension.", model);
status = ML_ERROR_INVALID_PARAMETER;
}
break;
- case ML_NNFW_TENSORFLOW:
+ case ML_NNFW_TYPE_TENSORFLOW:
if (!g_str_has_suffix (path_down, ".pb")) {
- ml_loge ("The given model [%s] has invalid extension.", model_path);
+ ml_loge ("The given model [%s] has invalid extension.", model);
status = ML_ERROR_INVALID_PARAMETER;
}
break;
if (status != ML_ERROR_NONE)
return status;
- if (!g_file_test (model_path, G_FILE_TEST_IS_REGULAR)) {
+ if (!g_file_test (model, G_FILE_TEST_IS_REGULAR)) {
ml_loge ("The given param, model path [%s] is invalid.",
- GST_STR_NULL (model_path));
+ GST_STR_NULL (model));
return ML_ERROR_INVALID_PARAMETER;
}
/* 2. Determine hw */
/** @todo Now the param hw is ignored. (Supposed CPU only) Support others later. */
- status = ml_util_check_nnfw (nnfw, hw);
- if (status < 0) {
+ status = ml_util_check_nnfw_availability (nnfw, hw, &available);
+ if (status != ML_ERROR_NONE || !available) {
ml_loge ("The given nnfw is not available.");
return status;
}
/* 3. Construct a pipeline */
/* Set the pipeline desc with nnfw. */
switch (nnfw) {
- case ML_NNFW_CUSTOM_FILTER:
+ case ML_NNFW_TYPE_CUSTOM_FILTER:
pipeline_desc =
g_strdup_printf
("appsrc name=srcx ! tensor_filter name=filterx framework=custom model=%s ! appsink name=sinkx sync=false",
- model_path);
+ model);
break;
- case ML_NNFW_TENSORFLOW_LITE:
+ case ML_NNFW_TYPE_TENSORFLOW_LITE:
/* We can get the tensor meta from tf-lite model. */
pipeline_desc =
g_strdup_printf
("appsrc name=srcx ! tensor_filter name=filterx framework=tensorflow-lite model=%s ! appsink name=sinkx sync=false",
- model_path);
+ model);
break;
- case ML_NNFW_TENSORFLOW:
- if (input_info && output_info) {
+ case ML_NNFW_TYPE_TENSORFLOW:
+ if (in_tensors_info && out_tensors_info) {
GstTensorsInfo in_info, out_info;
gchar *str_dim, *str_type, *str_name;
gchar *in_option, *out_option;
- ml_util_copy_tensors_info_from_ml (&in_info, input_info);
- ml_util_copy_tensors_info_from_ml (&out_info, output_info);
+ ml_util_copy_tensors_info_from_ml (&in_info, in_tensors_info);
+ ml_util_copy_tensors_info_from_ml (&out_info, out_tensors_info);
/* Set input option */
str_dim = gst_tensors_info_get_dimensions_string (&in_info);
pipeline_desc =
g_strdup_printf
("appsrc name=srcx ! tensor_filter name=filterx framework=tensorflow model=%s %s %s ! appsink name=sinkx sync=false",
- model_path, in_option, out_option);
+ model, in_option, out_option);
g_free (in_option);
g_free (out_option);
ml_util_initialize_tensors_info (&single_h->out_info);
/* 5. Set in/out caps and metadata */
- if (input_info) {
- caps = ml_single_get_caps_from_tensors_info (input_info);
- ml_util_copy_tensors_info (&single_h->in_info, input_info);
+ if (in_tensors_info) {
+ caps = ml_util_get_caps_from_tensors_info (in_tensors_info);
+ ml_util_copy_tensors_info (&single_h->in_info, in_tensors_info);
} else {
- ml_single_get_input_info (single_h, &single_h->in_info);
+ ml_tensors_info_h in_info;
+
+ ml_single_get_input_info (single_h, &in_info);
+ ml_util_copy_tensors_info (&single_h->in_info, in_info);
+ ml_util_destroy_tensors_info (in_info);
status = ml_util_validate_tensors_info (&single_h->in_info);
if (status != ML_ERROR_NONE) {
goto error;
}
- caps = ml_single_get_caps_from_tensors_info (&single_h->in_info);
+ caps = ml_util_get_caps_from_tensors_info (&single_h->in_info);
}
gst_app_src_set_caps (GST_APP_SRC (appsrc), caps);
gst_caps_unref (caps);
- if (output_info) {
- caps = ml_single_get_caps_from_tensors_info (output_info);
- ml_util_copy_tensors_info (&single_h->out_info, output_info);
+ if (out_tensors_info) {
+ caps = ml_util_get_caps_from_tensors_info (out_tensors_info);
+ ml_util_copy_tensors_info (&single_h->out_info, out_tensors_info);
} else {
- ml_single_get_output_info (single_h, &single_h->out_info);
+ ml_tensors_info_h out_info;
+
+ ml_single_get_output_info (single_h, &out_info);
+ ml_util_copy_tensors_info (&single_h->out_info, out_info);
+ ml_util_destroy_tensors_info (out_info);
status = ml_util_validate_tensors_info (&single_h->out_info);
if (status != ML_ERROR_NONE) {
goto error;
}
- caps = ml_single_get_caps_from_tensors_info (&single_h->out_info);
+ caps = ml_util_get_caps_from_tensors_info (&single_h->out_info);
}
gst_app_sink_set_caps (GST_APP_SINK (appsink), caps);
ml_single_close (ml_single_h single)
{
ml_single *single_h;
- int ret;
+ int status;
if (!single) {
ml_loge ("The given param, single is invalid.");
ml_util_free_tensors_info (&single_h->in_info);
ml_util_free_tensors_info (&single_h->out_info);
- ret = ml_pipeline_destroy (single_h->pipe);
+ status = ml_pipeline_destroy (single_h->pipe);
g_free (single_h);
- return ret;
+ return status;
}
/**
* @brief Gets the type (tensor dimension, type, name and so on) of required input data for the given handle.
*/
int
-ml_single_get_input_info (ml_single_h single,
- ml_tensors_info_s * input_info)
+ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
{
ml_single *single_h;
- GstTensorsInfo info;
+ ml_tensors_info_s *input_info;
+ GstTensorsInfo gst_info;
gchar *val;
guint rank;
- if (!single || !input_info)
+ if (!single || !info)
return ML_ERROR_INVALID_PARAMETER;
single_h = (ml_single *) single;
- gst_tensors_info_init (&info);
+ /* allocate handle for tensors info */
+ ml_util_allocate_tensors_info (info);
+ input_info = (ml_tensors_info_s *) (*info);
+
+ gst_tensors_info_init (&gst_info);
g_object_get (single_h->filter, "input", &val, NULL);
- rank = gst_tensors_info_parse_dimensions_string (&info, val);
+ rank = gst_tensors_info_parse_dimensions_string (&gst_info, val);
g_free (val);
/* set the number of tensors */
- info.num_tensors = rank;
+ gst_info.num_tensors = rank;
g_object_get (single_h->filter, "inputtype", &val, NULL);
- rank = gst_tensors_info_parse_types_string (&info, val);
+ rank = gst_tensors_info_parse_types_string (&gst_info, val);
g_free (val);
- if (info.num_tensors != rank) {
+ if (gst_info.num_tensors != rank) {
ml_logw ("Invalid state, input tensor type is mismatched in filter.");
}
g_object_get (single_h->filter, "inputname", &val, NULL);
- rank = gst_tensors_info_parse_names_string (&info, val);
+ rank = gst_tensors_info_parse_names_string (&gst_info, val);
g_free (val);
- if (info.num_tensors != rank) {
+ if (gst_info.num_tensors != rank) {
ml_logw ("Invalid state, input tensor name is mismatched in filter.");
}
- ml_util_copy_tensors_info_from_gst (input_info, &info);
- gst_tensors_info_free (&info);
+ ml_util_copy_tensors_info_from_gst (input_info, &gst_info);
+ gst_tensors_info_free (&gst_info);
return ML_ERROR_NONE;
}
* @brief Gets the type (tensor dimension, type, name and so on) of output data for the given handle.
*/
int
-ml_single_get_output_info (ml_single_h single,
- ml_tensors_info_s * output_info)
+ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
{
ml_single *single_h;
- GstTensorsInfo info;
+ ml_tensors_info_s *output_info;
+ GstTensorsInfo gst_info;
gchar *val;
guint rank;
- if (!single || !output_info)
+ if (!single || !info)
return ML_ERROR_INVALID_PARAMETER;
single_h = (ml_single *) single;
- gst_tensors_info_init (&info);
+ /* allocate handle for tensors info */
+ ml_util_allocate_tensors_info (info);
+ output_info = (ml_tensors_info_s *) (*info);
+
+ gst_tensors_info_init (&gst_info);
g_object_get (single_h->filter, "output", &val, NULL);
- rank = gst_tensors_info_parse_dimensions_string (&info, val);
+ rank = gst_tensors_info_parse_dimensions_string (&gst_info, val);
g_free (val);
/* set the number of tensors */
- info.num_tensors = rank;
+ gst_info.num_tensors = rank;
g_object_get (single_h->filter, "outputtype", &val, NULL);
- rank = gst_tensors_info_parse_types_string (&info, val);
+ rank = gst_tensors_info_parse_types_string (&gst_info, val);
g_free (val);
- if (info.num_tensors != rank) {
+ if (gst_info.num_tensors != rank) {
ml_logw ("Invalid state, output tensor type is mismatched in filter.");
}
g_object_get (single_h->filter, "outputname", &val, NULL);
- gst_tensors_info_parse_names_string (&info, val);
+ gst_tensors_info_parse_names_string (&gst_info, val);
g_free (val);
- if (info.num_tensors != rank) {
+ if (gst_info.num_tensors != rank) {
ml_logw ("Invalid state, output tensor name is mismatched in filter.");
}
- ml_util_copy_tensors_info_from_gst (output_info, &info);
- gst_tensors_info_free (&info);
+ ml_util_copy_tensors_info_from_gst (output_info, &gst_info);
+ gst_tensors_info_free (&gst_info);
return ML_ERROR_NONE;
}
}
/**
- * @brief Initializes the tensors info.
+ * @brief Allocates a tensors information handle with default value.
+ * @param[out] info The newly allocated handle; the caller must release it
+ *             with ml_util_destroy_tensors_info().
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
-void
+int
+ml_util_allocate_tensors_info (ml_tensors_info_h * info)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* g_new0() aborts on allocation failure (GLib), so no NULL check is needed. */
+ *info = tensors_info = g_new0 (ml_tensors_info_s, 1);
+ ml_util_initialize_tensors_info (tensors_info);
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Frees the given handle of a tensors information.
+ * @param[in] info The handle to be destroyed; it must not be used after this call.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_destroy_tensors_info (ml_tensors_info_h info)
+{
+ ml_tensors_info_s *tensors_info;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (!tensors_info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* Release per-tensor contents first, then the handle struct itself. */
+ ml_util_free_tensors_info (tensors_info);
+ g_free (tensors_info);
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Initializes the tensors information with default value.
+ */
+int
ml_util_initialize_tensors_info (ml_tensors_info_s * info)
{
guint i, j;
if (!info)
- return;
+ return ML_ERROR_INVALID_PARAMETER;
info->num_tensors = 0;
info->info[i].dimension[j] = 0;
}
}
+
+ return ML_ERROR_NONE;
}
/**
* @brief Validates the given tensor info is valid.
*/
-int
+static int
ml_util_validate_tensor_info (const ml_tensor_info_s * info)
{
guint i;
* @brief Validates the given tensors info is valid.
*/
int
-ml_util_validate_tensors_info (const ml_tensors_info_s * info)
+ml_util_validate_tensors_info (const ml_tensors_info_h info)
{
+ ml_tensors_info_s *tensors_info;
guint i;
- if (!info || info->num_tensors < 1)
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (!tensors_info || tensors_info->num_tensors < 1)
return ML_ERROR_INVALID_PARAMETER;
- for (i = 0; i < info->num_tensors; i++) {
+ for (i = 0; i < tensors_info->num_tensors; i++) {
/* Failed if returned value is not 0 (ML_ERROR_NONE) */
- if (ml_util_validate_tensor_info (&info->info[i]) != ML_ERROR_NONE)
+ if (ml_util_validate_tensor_info (&tensors_info->info[i]) != ML_ERROR_NONE)
return ML_ERROR_INVALID_PARAMETER;
}
}
/**
+ * @brief Sets the number of tensors with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] count The number of tensors; must not exceed #ML_TENSOR_SIZE_LIMIT.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_set_tensors_count (ml_tensors_info_h info, const unsigned int count)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info || count > ML_TENSOR_SIZE_LIMIT)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+ tensors_info->num_tensors = count;
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Gets the number of tensors with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[out] count The number of tensors held by @a info.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_get_tensors_count (ml_tensors_info_h info, unsigned int *count)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info || !count)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+ *count = tensors_info->num_tensors;
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Sets the tensor name with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of the target tensor; must be less than the tensors count.
+ * @param[in] name The name to set; the string is duplicated internally.
+ *            Passing NULL clears the current name.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_set_tensor_name (ml_tensors_info_h info,
+ const unsigned int index, const char *name)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (tensors_info->num_tensors <= index)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* Release any previously set name before replacing it. */
+ if (tensors_info->info[index].name) {
+ g_free (tensors_info->info[index].name);
+ tensors_info->info[index].name = NULL;
+ }
+
+ if (name)
+ tensors_info->info[index].name = g_strdup (name);
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Gets the tensor name with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of the target tensor; must be less than the tensors count.
+ * @param[out] name The tensor name.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_get_tensor_name (ml_tensors_info_h info,
+ const unsigned int index, char **name)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info || !name)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (tensors_info->num_tensors <= index)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* NOTE(review): this hands back the internal string without duplication;
+  * the caller must not free it and it becomes dangling once the handle is
+  * modified or destroyed — confirm this matches the public API contract. */
+ *name = tensors_info->info[index].name;
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Sets the tensor type with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of the target tensor; must be less than the tensors count.
+ * @param[in] type The element type to set.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_set_tensor_type (ml_tensors_info_h info,
+ const unsigned int index, const ml_tensor_type_e type)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (tensors_info->num_tensors <= index)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info->info[index].type = type;
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Gets the tensor type with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of the target tensor; must be less than the tensors count.
+ * @param[out] type The element type of the tensor.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_get_tensor_type (ml_tensors_info_h info,
+ const unsigned int index, ml_tensor_type_e * type)
+{
+ ml_tensors_info_s *tensors_info;
+
+ if (!info || !type)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (tensors_info->num_tensors <= index)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ *type = tensors_info->info[index].type;
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Sets the tensor dimension with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of the target tensor; must be less than the tensors count.
+ * @param[in] dimension The dimension to set; all #ML_TENSOR_RANK_LIMIT entries are copied.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_set_tensor_dimension (ml_tensors_info_h info,
+ const unsigned int index, const ml_tensor_dimension dimension)
+{
+ ml_tensors_info_s *tensors_info;
+ guint i;
+
+ if (!info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (tensors_info->num_tensors <= index)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ for (i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ tensors_info->info[index].dimension[i] = dimension[i];
+ }
+
+ return ML_ERROR_NONE;
+}
+
+/**
+ * @brief Gets the tensor dimension with given handle of tensors information.
+ * @param[in] info The handle of tensors information.
+ * @param[in] index The index of the target tensor; must be less than the tensors count.
+ * @param[out] dimension Caller-provided array; must hold at least
+ *             #ML_TENSOR_RANK_LIMIT elements, all of which are written.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int
+ml_util_get_tensor_dimension (ml_tensors_info_h info,
+ const unsigned int index, ml_tensor_dimension dimension)
+{
+ ml_tensors_info_s *tensors_info;
+ guint i;
+
+ if (!info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (tensors_info->num_tensors <= index)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ for (i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ dimension[i] = tensors_info->info[index].dimension[i];
+ }
+
+ return ML_ERROR_NONE;
+}
+
+/**
* @brief Gets the byte size of the given tensor info.
*/
size_t
* @brief Gets the byte size of the given tensors info.
*/
size_t
-ml_util_get_tensors_size (const ml_tensors_info_s * info)
+ml_util_get_tensors_size (const ml_tensors_info_h info)
{
+ ml_tensors_info_s *tensors_info;
size_t tensor_size;
gint i;
- if (!info)
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (!tensors_info)
return 0;
tensor_size = 0;
- for (i = 0; i < info->num_tensors; i++) {
- tensor_size += ml_util_get_tensor_size (&info->info[i]);
+ for (i = 0; i < tensors_info->num_tensors; i++) {
+ tensor_size += ml_util_get_tensor_size (&tensors_info->info[i]);
}
return tensor_size;
* @brief Allocates a tensor data frame with the given tensors info. (more info in nnstreamer.h)
*/
ml_tensors_data_s *
-ml_util_allocate_tensors_data (const ml_tensors_info_s * info)
+ml_util_allocate_tensors_data (const ml_tensors_info_h info)
{
ml_tensors_data_s *data;
+ ml_tensors_info_s *tensors_info;
gint i;
- if (!info) {
+ tensors_info = (ml_tensors_info_s *) info;
+
+ if (!tensors_info) {
ml_util_set_error (ML_ERROR_INVALID_PARAMETER);
return NULL;
}
return NULL;
}
- data->num_tensors = info->num_tensors;
+ data->num_tensors = tensors_info->num_tensors;
for (i = 0; i < data->num_tensors; i++) {
- data->tensors[i].size = ml_util_get_tensor_size (&info->info[i]);
+ data->tensors[i].size = ml_util_get_tensor_size (&tensors_info->info[i]);
data->tensors[i].tensor = g_malloc0 (data->tensors[i].size);
}
/**
* @brief Copies tensor meta info.
*/
-void
-ml_util_copy_tensors_info (ml_tensors_info_s * dest,
- const ml_tensors_info_s * src)
+int
+ml_util_copy_tensors_info (ml_tensors_info_h dest, const ml_tensors_info_h src)
{
+ ml_tensors_info_s *dest_info, *src_info;
guint i, j;
- if (!dest || !src)
- return;
+ dest_info = (ml_tensors_info_s *) dest;
+ src_info = (ml_tensors_info_s *) src;
- ml_util_initialize_tensors_info (dest);
+ if (!dest_info || !src_info)
+ return ML_ERROR_INVALID_PARAMETER;
- dest->num_tensors = src->num_tensors;
+ ml_util_initialize_tensors_info (dest_info);
- for (i = 0; i < dest->num_tensors; i++) {
- dest->info[i].name =
- (src->info[i].name) ? g_strdup (src->info[i].name) : NULL;
- dest->info[i].type = src->info[i].type;
+ dest_info->num_tensors = src_info->num_tensors;
+
+ for (i = 0; i < dest_info->num_tensors; i++) {
+ dest_info->info[i].name =
+ (src_info->info[i].name) ? g_strdup (src_info->info[i].name) : NULL;
+ dest_info->info[i].type = src_info->info[i].type;
for (j = 0; j < ML_TENSOR_RANK_LIMIT; j++)
- dest->info[i].dimension[j] = src->info[i].dimension[j];
+ dest_info->info[i].dimension[j] = src_info->info[i].dimension[j];
}
+
+ return ML_ERROR_NONE;
}
/**
ml_info->info[i].dimension[j] = gst_info->info[i].dimension[j];
}
- for ( ; j < ML_TENSOR_RANK_LIMIT; j++) {
+ for (; j < ML_TENSOR_RANK_LIMIT; j++) {
ml_info->info[i].dimension[j] = 1;
}
}
*/
void
ml_util_copy_tensors_info_from_ml (GstTensorsInfo * gst_info,
- const ml_tensors_info_s * ml_info)
+ const ml_tensors_info_s * ml_info)
{
guint i, j;
guint max_dim;
gst_info->info[i].dimension[j] = ml_info->info[i].dimension[j];
}
- for ( ; j < NNS_TENSOR_RANK_LIMIT; j++) {
+ for (; j < NNS_TENSOR_RANK_LIMIT; j++) {
gst_info->info[i].dimension[j] = 1;
}
}
}
/**
+ * @brief Gets caps from tensors info.
+ * @param[in] info The tensors information to convert.
+ * @return Newly created #GstCaps describing the tensors (framerate fixed to
+ *         0/1), or NULL if @a info is NULL. The caller takes ownership and
+ *         must release the caps with gst_caps_unref().
+ */
+GstCaps *
+ml_util_get_caps_from_tensors_info (const ml_tensors_info_s * info)
+{
+ GstCaps *caps;
+ GstTensorsConfig config;
+
+ if (!info)
+ return NULL;
+
+ ml_util_copy_tensors_info_from_ml (&config.info, info);
+
+ /* set framerate 0/1 */
+ config.rate_n = 0;
+ config.rate_d = 1;
+
+ /* Supposed input type is single tensor if the number of tensors is 1. */
+ if (config.info.num_tensors == 1) {
+ GstTensorConfig c;
+
+ gst_tensor_info_copy (&c.info, &config.info.info[0]);
+ c.rate_n = 0;
+ c.rate_d = 1;
+
+ caps = gst_tensor_caps_from_config (&c);
+ gst_tensor_info_free (&c.info);
+ } else {
+ caps = gst_tensors_caps_from_config (&config);
+ }
+
+ gst_tensors_info_free (&config.info);
+ return caps;
+}
+
+/**
* @brief Checks the availability of the given execution environments.
*/
int
-ml_util_check_nnfw (ml_nnfw_e nnfw, ml_nnfw_hw_e hw)
+ml_util_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
+ bool *available)
{
- /** @todo fill this function */
+ if (!available)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* init false */
+ *available = false;
+
switch (nnfw) {
- case ML_NNFW_TENSORFLOW_LITE:
+ case ML_NNFW_TYPE_TENSORFLOW_LITE:
if (nnstreamer_filter_find ("tensorflow-lite") == NULL) {
ml_logw ("Tensorflow-lite is not supported.");
- return ML_ERROR_NOT_SUPPORTED;
+ goto done;
}
break;
- case ML_NNFW_TENSORFLOW:
+ case ML_NNFW_TYPE_TENSORFLOW:
if (nnstreamer_filter_find ("tensorflow") == NULL) {
ml_logw ("Tensorflow is not supported.");
- return ML_ERROR_NOT_SUPPORTED;
+ goto done;
}
break;
default:
break;
}
+ *available = true;
+
+done:
return ML_ERROR_NONE;
}
status = ml_pipeline_start (handle);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_pipeline_valve_control (valve1, 1); /* close */
+ status = ml_pipeline_valve_set_open (valve1, false); /* close */
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_pipeline_get_state (handle, &state);
status = ml_pipeline_start (handle);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_pipeline_valve_control (valve1, 0); /* open */
+ status = ml_pipeline_valve_set_open (valve1, true); /* open */
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_pipeline_valve_put_handle (valve1); /* release valve handle */
EXPECT_EQ (status, ML_ERROR_NONE);
*/
static void
test_sink_callback_dm01 (const ml_tensors_data_s * data,
- const ml_tensors_info_s * info, void *pdata)
+ const ml_tensors_info_h info, void *user_data)
{
- gchar *filepath = (gchar *) pdata;
+ gchar *filepath = (gchar *) user_data;
+ unsigned int i, num = 0;
FILE *fp = g_fopen (filepath, "a");
+
if (fp == NULL)
return;
- int i, num = info->num_tensors;
+ ml_util_get_tensors_count (info, &num);
for (i = 0; i < num; i++) {
fwrite (data->tensors[i].tensor, data->tensors[i].size, 1, fp);
*/
static void
test_sink_callback_count (const ml_tensors_data_s * data,
- const ml_tensors_info_s * info, void *pdata)
+ const ml_tensors_info_h info, void *user_data)
{
- guint *count = (guint *) pdata;
+ guint *count = (guint *) user_data;
*count = *count + 1;
}
ml_pipeline_state_e state;
ml_pipeline_src_h srchandle;
int status;
- ml_tensors_info_s tensorsinfo;
+ ml_tensors_info_h info;
ml_tensors_data_s data1, data2;
+ unsigned int count = 0;
+ ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
+ ml_tensor_dimension dim = { 0, };
int i;
char *uintarray2[10];
EXPECT_NE (state, ML_PIPELINE_STATE_UNKNOWN);
EXPECT_NE (state, ML_PIPELINE_STATE_NULL);
- status = ml_pipeline_src_get_handle (handle, "srcx", &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_pipeline_src_get_tensors_info (srchandle, &info);
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_EQ (tensorsinfo.num_tensors, 1U);
- EXPECT_EQ (tensorsinfo.info[0].type, ML_TENSOR_TYPE_UINT8);
- EXPECT_EQ (tensorsinfo.info[0].dimension[0], 4U);
- EXPECT_EQ (tensorsinfo.info[0].dimension[1], 1U);
- EXPECT_EQ (tensorsinfo.info[0].dimension[2], 1U);
- EXPECT_EQ (tensorsinfo.info[0].dimension[3], 1U);
+ ml_util_get_tensors_count (info, &count);
+ EXPECT_EQ (count, 1U);
- tensorsinfo.num_tensors = 1;
- tensorsinfo.info[0].type = ML_TENSOR_TYPE_UINT8;
- tensorsinfo.info[0].dimension[0] = 4;
- tensorsinfo.info[0].dimension[1] = 1;
- tensorsinfo.info[0].dimension[2] = 1;
- tensorsinfo.info[0].dimension[3] = 1;
+ ml_util_get_tensor_type (info, 0, &type);
+ EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
+
+ ml_util_get_tensor_dimension (info, 0, dim);
+ EXPECT_EQ (dim[0], 4U);
+ EXPECT_EQ (dim[1], 1U);
+ EXPECT_EQ (dim[2], 1U);
+ EXPECT_EQ (dim[3], 1U);
+
+ ml_util_destroy_tensors_info (info);
data1.num_tensors = 1;
data1.tensors[0].tensor = uia_index[0];
status = ml_pipeline_src_put_handle (srchandle);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_pipeline_src_get_handle (handle, "srcx", &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+
+ status = ml_pipeline_src_get_tensors_info (srchandle, &info);
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_EQ (tensorsinfo.num_tensors, 1U);
- EXPECT_EQ (tensorsinfo.info[0].type, ML_TENSOR_TYPE_UINT8);
- EXPECT_EQ (tensorsinfo.info[0].dimension[0], 4U);
- EXPECT_EQ (tensorsinfo.info[0].dimension[1], 1U);
- EXPECT_EQ (tensorsinfo.info[0].dimension[2], 1U);
- EXPECT_EQ (tensorsinfo.info[0].dimension[3], 1U);
+ ml_util_get_tensors_count (info, &count);
+ EXPECT_EQ (count, 1U);
+
+ ml_util_get_tensor_type (info, 0, &type);
+ EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
+
+ ml_util_get_tensor_dimension (info, 0, dim);
+ EXPECT_EQ (dim[0], 4U);
+ EXPECT_EQ (dim[1], 1U);
+ EXPECT_EQ (dim[2], 1U);
+ EXPECT_EQ (dim[3], 1U);
for (i = 0; i < 10; i++) {
data1.num_tensors = 1;
TEST (nnstreamer_capi_src, failure_01)
{
int status;
- ml_tensors_info_s tensorsinfo;
ml_pipeline_src_h srchandle;
- status = ml_pipeline_src_get_handle (NULL, "dummy", &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (NULL, "dummy", &srchandle);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
}
{
const char *pipeline = "appsrc is-live=true name=mysource ! valve name=valvex ! filesink";
ml_pipeline_h handle;
- ml_tensors_info_s tensorsinfo;
ml_pipeline_src_h srchandle;
int status = ml_pipeline_construct (pipeline, &handle);
EXPECT_EQ (status, ML_ERROR_NONE);
/* invalid param : pipe */
- status = ml_pipeline_src_get_handle (NULL, "mysource", &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (NULL, "mysource", &srchandle);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* invalid param : name */
- status = ml_pipeline_src_get_handle (handle, NULL, &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (handle, NULL, &srchandle);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* invalid param : wrong name */
- status = ml_pipeline_src_get_handle (handle, "wrongname", &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (handle, "wrongname", &srchandle);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* invalid param : invalid type */
- status = ml_pipeline_src_get_handle (handle, "valvex", &tensorsinfo, &srchandle);
- EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
-
- /* invalid param : info */
- status = ml_pipeline_src_get_handle (handle, "mysource", NULL, &srchandle);
+ status = ml_pipeline_src_get_handle (handle, "valvex", &srchandle);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* invalid param : handle */
- status = ml_pipeline_src_get_handle (handle, "mysource", &tensorsinfo, NULL);
+ status = ml_pipeline_src_get_handle (handle, "mysource", NULL);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
status = ml_pipeline_destroy (handle);
const char *pipeline = "appsrc name=srcx ! other/tensor,dimension=(string)4:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! tensor_sink";
ml_pipeline_h handle;
- ml_tensors_info_s tensorsinfo;
ml_pipeline_src_h srchandle;
ml_tensors_data_s data;
status = ml_pipeline_start (handle);
EXPECT_EQ (status, ML_ERROR_NONE);
- status = ml_pipeline_src_get_handle (handle, "srcx", &tensorsinfo, &srchandle);
+ status = ml_pipeline_src_get_handle (handle, "srcx", &srchandle);
EXPECT_EQ (status, ML_ERROR_NONE);
/* null data */
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (type, ML_PIPELINE_SWITCH_INPUT_SELECTOR);
- status = ml_pipeline_switch_nodelist (switchhandle, &node_list);
+ status = ml_pipeline_switch_get_pad_list (switchhandle, &node_list);
EXPECT_EQ (status, ML_ERROR_NONE);
if (node_list) {
EXPECT_EQ (status, ML_ERROR_NONE);
EXPECT_EQ (type, ML_PIPELINE_SWITCH_OUTPUT_SELECTOR);
- status = ml_pipeline_switch_nodelist (switchhandle, &node_list);
+ status = ml_pipeline_switch_get_pad_list (switchhandle, &node_list);
EXPECT_EQ (status, ML_ERROR_NONE);
if (node_list) {
TEST (nnstreamer_capi_singleshot, invoke_01)
{
ml_single_h single;
- ml_tensors_info_s in_info, out_info;
- ml_tensors_info_s in_res, out_res;
+ ml_tensors_info_h in_info, out_info;
+ ml_tensors_info_h in_res, out_res;
ml_tensors_data_s *input, *output1, *output2;
+ ml_tensor_dimension in_dim, out_dim, res_dim;
+ ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
+ unsigned int count = 0;
+ char *name = NULL;
int status;
const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
"mobilenet_v1_1.0_224_quant.tflite", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_initialize_tensors_info (&in_info);
- ml_util_initialize_tensors_info (&out_info);
- ml_util_initialize_tensors_info (&in_res);
- ml_util_initialize_tensors_info (&out_res);
+ ml_util_allocate_tensors_info (&in_info);
+ ml_util_allocate_tensors_info (&out_info);
+ ml_util_allocate_tensors_info (&in_res);
+ ml_util_allocate_tensors_info (&out_res);
- in_info.num_tensors = 1;
- in_info.info[0].type = ML_TENSOR_TYPE_UINT8;
- in_info.info[0].dimension[0] = 3;
- in_info.info[0].dimension[1] = 224;
- in_info.info[0].dimension[2] = 224;
- in_info.info[0].dimension[3] = 1;
+ in_dim[0] = 3;
+ in_dim[1] = 224;
+ in_dim[2] = 224;
+ in_dim[3] = 1;
+ ml_util_set_tensors_count (in_info, 1);
+ ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_util_set_tensor_dimension (in_info, 0, in_dim);
- out_info.num_tensors = 1;
- out_info.info[0].type = ML_TENSOR_TYPE_UINT8;
- out_info.info[0].dimension[0] = 1001;
- out_info.info[0].dimension[1] = 1;
- out_info.info[0].dimension[2] = 1;
- out_info.info[0].dimension[3] = 1;
+ out_dim[0] = 1001;
+ out_dim[1] = 1;
+ out_dim[2] = 1;
+ out_dim[3] = 1;
+ ml_util_set_tensors_count (out_info, 1);
+ ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_util_set_tensor_dimension (out_info, 0, out_dim);
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
/* input tensor in filter */
status = ml_single_get_input_info (single, &in_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_TRUE (in_info.num_tensors == in_res.num_tensors);
- for (guint idx = 0; idx < in_res.num_tensors; idx++) {
- EXPECT_TRUE (in_info.info[idx].type == in_res.info[idx].type);
- EXPECT_TRUE (in_info.info[idx].dimension[0] == in_res.info[idx].dimension[0]);
- EXPECT_TRUE (in_info.info[idx].dimension[1] == in_res.info[idx].dimension[1]);
- EXPECT_TRUE (in_info.info[idx].dimension[2] == in_res.info[idx].dimension[2]);
- EXPECT_TRUE (in_info.info[idx].dimension[3] == in_res.info[idx].dimension[3]);
- }
+ status = ml_util_get_tensors_count (in_res, &count);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (count, 1U);
+
+ status = ml_util_get_tensor_name (in_res, 0, &name);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (name == NULL);
+
+ status = ml_util_get_tensor_type (in_res, 0, &type);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
+
+ ml_util_get_tensor_dimension (in_res, 0, res_dim);
+ EXPECT_TRUE (in_dim[0] == res_dim[0]);
+ EXPECT_TRUE (in_dim[1] == res_dim[1]);
+ EXPECT_TRUE (in_dim[2] == res_dim[2]);
+ EXPECT_TRUE (in_dim[3] == res_dim[3]);
/* output tensor in filter */
status = ml_single_get_output_info (single, &out_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_TRUE (out_info.num_tensors == out_res.num_tensors);
- for (guint idx = 0; idx < out_res.num_tensors; idx++) {
- EXPECT_TRUE (out_info.info[idx].type == out_res.info[idx].type);
- EXPECT_TRUE (out_info.info[idx].dimension[0] == out_res.info[idx].dimension[0]);
- EXPECT_TRUE (out_info.info[idx].dimension[1] == out_res.info[idx].dimension[1]);
- EXPECT_TRUE (out_info.info[idx].dimension[2] == out_res.info[idx].dimension[2]);
- EXPECT_TRUE (out_info.info[idx].dimension[3] == out_res.info[idx].dimension[3]);
- }
+ status = ml_util_get_tensors_count (out_res, &count);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (count, 1U);
+
+ status = ml_util_get_tensor_name (out_res, 0, &name);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (name == NULL);
+
+ status = ml_util_get_tensor_type (out_res, 0, &type);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
+
+ ml_util_get_tensor_dimension (out_res, 0, res_dim);
+ EXPECT_TRUE (out_dim[0] == res_dim[0]);
+ EXPECT_TRUE (out_dim[1] == res_dim[1]);
+ EXPECT_TRUE (out_dim[2] == res_dim[2]);
+ EXPECT_TRUE (out_dim[3] == res_dim[3]);
/* generate dummy data */
- input = ml_util_allocate_tensors_data (&in_info);
+ input = ml_util_allocate_tensors_data (in_info);
EXPECT_TRUE (input != NULL);
status = ml_util_get_last_error ();
ml_util_free_tensors_data (&output1);
- output2 = ml_util_allocate_tensors_data (&out_info);
+ output2 = ml_util_allocate_tensors_data (out_info);
EXPECT_TRUE (output2 != NULL);
status = ml_util_get_last_error ();
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
- ml_util_free_tensors_info (&in_res);
- ml_util_free_tensors_info (&out_res);
+ ml_util_destroy_tensors_info (in_info);
+ ml_util_destroy_tensors_info (out_info);
+ ml_util_destroy_tensors_info (in_res);
+ ml_util_destroy_tensors_info (out_res);
}
/**
TEST (nnstreamer_capi_singleshot, invoke_02)
{
ml_single_h single;
- ml_tensors_info_s in_info, out_info;
+ ml_tensors_info_h in_info, out_info;
ml_tensors_data_s *input, *output1, *output2;
+ ml_tensor_dimension in_dim, out_dim;
int status;
const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
"mobilenet_v1_1.0_224_quant.tflite", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_initialize_tensors_info (&in_info);
- ml_util_initialize_tensors_info (&out_info);
-
- in_info.num_tensors = 1;
- in_info.info[0].type = ML_TENSOR_TYPE_UINT8;
- in_info.info[0].dimension[0] = 3;
- in_info.info[0].dimension[1] = 224;
- in_info.info[0].dimension[2] = 224;
- in_info.info[0].dimension[3] = 1;
-
- out_info.num_tensors = 1;
- out_info.info[0].type = ML_TENSOR_TYPE_UINT8;
- out_info.info[0].dimension[0] = 1001;
- out_info.info[0].dimension[1] = 1;
- out_info.info[0].dimension[2] = 1;
- out_info.info[0].dimension[3] = 1;
+ ml_util_allocate_tensors_info (&in_info);
+ ml_util_allocate_tensors_info (&out_info);
+
+ in_dim[0] = 3;
+ in_dim[1] = 224;
+ in_dim[2] = 224;
+ in_dim[3] = 1;
+ ml_util_set_tensors_count (in_info, 1);
+ ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_util_set_tensor_dimension (in_info, 0, in_dim);
+
+ out_dim[0] = 1001;
+ out_dim[1] = 1;
+ out_dim[2] = 1;
+ out_dim[3] = 1;
+ ml_util_set_tensors_count (out_info, 1);
+ ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_util_set_tensor_dimension (out_info, 0, out_dim);
status = ml_single_open (&single, test_model, NULL, NULL,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
/* generate dummy data */
- input = ml_util_allocate_tensors_data (&in_info);
+ input = ml_util_allocate_tensors_data (in_info);
EXPECT_TRUE (input != NULL);
status = ml_util_get_last_error ();
ml_util_free_tensors_data (&output1);
- output2 = ml_util_allocate_tensors_data (&out_info);
+ output2 = ml_util_allocate_tensors_data (out_info);
EXPECT_TRUE (output2 != NULL);
status = ml_util_get_last_error ();
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
+ ml_util_destroy_tensors_info (in_info);
+ ml_util_destroy_tensors_info (out_info);
}
#endif /* ENABLE_TENSORFLOW_LITE */
TEST (nnstreamer_capi_singleshot, invoke_03)
{
ml_single_h single;
- ml_tensors_info_s in_info, out_info;
+ ml_tensors_info_h in_info, out_info;
ml_tensors_data_s *input, *output1, *output2;
+ ml_tensor_dimension in_dim;
int i, status;
const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
"libnnstreamer_customfilter_passthrough_variable.so", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_initialize_tensors_info (&in_info);
- ml_util_initialize_tensors_info (&out_info);
+ ml_util_allocate_tensors_info (&in_info);
+ ml_util_allocate_tensors_info (&out_info);
+
+ ml_util_set_tensors_count (in_info, 2);
+
+ in_dim[0] = 10;
+ in_dim[1] = 1;
+ in_dim[2] = 1;
+ in_dim[3] = 1;
- in_info.num_tensors = 2;
- in_info.info[0].type = ML_TENSOR_TYPE_INT16;
- in_info.info[0].dimension[0] = 10;
- in_info.info[0].dimension[1] = 1;
- in_info.info[0].dimension[2] = 1;
- in_info.info[0].dimension[3] = 1;
- in_info.info[1].type = ML_TENSOR_TYPE_FLOAT32;
- in_info.info[1].dimension[0] = 10;
- in_info.info[1].dimension[1] = 1;
- in_info.info[1].dimension[2] = 1;
- in_info.info[1].dimension[3] = 1;
+ ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
+ ml_util_set_tensor_dimension (in_info, 0, in_dim);
- ml_util_copy_tensors_info (&out_info, &in_info);
+ ml_util_set_tensor_type (in_info, 1, ML_TENSOR_TYPE_FLOAT32);
+ ml_util_set_tensor_dimension (in_info, 1, in_dim);
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_CUSTOM_FILTER, ML_NNFW_HW_DO_NOT_CARE);
+ ml_util_copy_tensors_info (out_info, in_info);
+
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_CUSTOM_FILTER, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
/* generate input data */
- input = ml_util_allocate_tensors_data (&in_info);
+ input = ml_util_allocate_tensors_data (in_info);
ASSERT_TRUE (input != NULL);
EXPECT_TRUE (input->num_tensors == 2U);
status = ml_util_get_last_error ();
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_TRUE (output1->tensors[0].size == ml_util_get_tensor_size (&in_info.info[0]));
- EXPECT_TRUE (output1->tensors[1].size == ml_util_get_tensor_size (&in_info.info[1]));
-
for (i = 0; i < 10; i++) {
int16_t i16 = (int16_t) (i + 1);
float f32 = (float) (i + .1);
ml_util_free_tensors_data (&output1);
- output2 = ml_util_allocate_tensors_data (&out_info);
+ output2 = ml_util_allocate_tensors_data (out_info);
EXPECT_TRUE (output2 != NULL);
status = ml_util_get_last_error ();
status = ml_util_get_last_error ();
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_TRUE (output1->tensors[0].size == ml_util_get_tensor_size (&in_info.info[0]));
- EXPECT_TRUE (output1->tensors[1].size == ml_util_get_tensor_size (&in_info.info[1]));
-
for (i = 0; i < 10; i++) {
int16_t i16 = (int16_t) (i + 1);
float f32 = (float) (i + .1);
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
+ ml_util_destroy_tensors_info (in_info);
+ ml_util_destroy_tensors_info (out_info);
}
#ifdef ENABLE_TENSORFLOW
TEST (nnstreamer_capi_singleshot, invoke_04)
{
ml_single_h single;
- ml_tensors_info_s in_info, out_info;
- ml_tensors_info_s in_res, out_res;
+ ml_tensors_info_h in_info, out_info;
+ ml_tensors_info_h in_res, out_res;
ml_tensors_data_s *input, *output;
+ ml_tensor_dimension in_dim, out_dim, res_dim;
+ ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN;
+ unsigned int count = 0;
+ char *name = NULL;
int status, max_score_index;
float score, max_score;
"yes.wav", NULL);
ASSERT_TRUE (g_file_test (test_file, G_FILE_TEST_EXISTS));
- ml_util_initialize_tensors_info (&in_info);
- ml_util_initialize_tensors_info (&out_info);
- ml_util_initialize_tensors_info (&in_res);
- ml_util_initialize_tensors_info (&out_res);
-
- in_info.num_tensors = 1;
- in_info.info[0].name = g_strdup ("wav_data");
- in_info.info[0].type = ML_TENSOR_TYPE_INT16;
- in_info.info[0].dimension[0] = 1;
- in_info.info[0].dimension[1] = 16022;
- in_info.info[0].dimension[2] = 1;
- in_info.info[0].dimension[3] = 1;
-
- out_info.num_tensors = 1;
- out_info.info[0].name = g_strdup ("labels_softmax");
- out_info.info[0].type = ML_TENSOR_TYPE_FLOAT32;
- out_info.info[0].dimension[0] = 12;
- out_info.info[0].dimension[1] = 1;
- out_info.info[0].dimension[2] = 1;
- out_info.info[0].dimension[3] = 1;
+ ml_util_allocate_tensors_info (&in_info);
+ ml_util_allocate_tensors_info (&out_info);
+ ml_util_allocate_tensors_info (&in_res);
+ ml_util_allocate_tensors_info (&out_res);
+
+ in_dim[0] = 1;
+ in_dim[1] = 16022;
+ in_dim[2] = 1;
+ in_dim[3] = 1;
+ ml_util_set_tensors_count (in_info, 1);
+ ml_util_set_tensor_name (in_info, 0, "wav_data");
+ ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_INT16);
+ ml_util_set_tensor_dimension (in_info, 0, in_dim);
+
+ out_dim[0] = 12;
+ out_dim[1] = 1;
+ out_dim[2] = 1;
+ out_dim[3] = 1;
+ ml_util_set_tensors_count (out_info, 1);
+ ml_util_set_tensor_name (out_info, 0, "labels_softmax");
+ ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_FLOAT32);
+ ml_util_set_tensor_dimension (out_info, 0, out_dim);
ASSERT_TRUE (g_file_get_contents (test_file, &contents, &len, NULL));
- ASSERT_TRUE (len == ml_util_get_tensors_size (&in_info));
+ ASSERT_TRUE (len == ml_util_get_tensors_size (in_info));
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_TENSORFLOW, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
/* input tensor in filter */
status = ml_single_get_input_info (single, &in_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_TRUE (in_info.num_tensors == in_res.num_tensors);
- for (guint idx = 0; idx < in_res.num_tensors; idx++) {
- EXPECT_TRUE (in_info.info[idx].type == in_res.info[idx].type);
- EXPECT_TRUE (in_info.info[idx].dimension[0] == in_res.info[idx].dimension[0]);
- EXPECT_TRUE (in_info.info[idx].dimension[1] == in_res.info[idx].dimension[1]);
- EXPECT_TRUE (in_info.info[idx].dimension[2] == in_res.info[idx].dimension[2]);
- EXPECT_TRUE (in_info.info[idx].dimension[3] == in_res.info[idx].dimension[3]);
- }
+ status = ml_util_get_tensors_count (in_res, &count);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (count, 1U);
+
+ status = ml_util_get_tensor_name (in_res, 0, &name);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (g_str_equal (name, "wav_data"));
+
+ status = ml_util_get_tensor_type (in_res, 0, &type);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (type, ML_TENSOR_TYPE_INT16);
+
+ ml_util_get_tensor_dimension (in_res, 0, res_dim);
+ EXPECT_TRUE (in_dim[0] == res_dim[0]);
+ EXPECT_TRUE (in_dim[1] == res_dim[1]);
+ EXPECT_TRUE (in_dim[2] == res_dim[2]);
+ EXPECT_TRUE (in_dim[3] == res_dim[3]);
/* output tensor in filter */
status = ml_single_get_output_info (single, &out_res);
EXPECT_EQ (status, ML_ERROR_NONE);
- EXPECT_TRUE (out_info.num_tensors == out_res.num_tensors);
- for (guint idx = 0; idx < out_res.num_tensors; idx++) {
- EXPECT_TRUE (out_info.info[idx].type == out_res.info[idx].type);
- EXPECT_TRUE (out_info.info[idx].dimension[0] == out_res.info[idx].dimension[0]);
- EXPECT_TRUE (out_info.info[idx].dimension[1] == out_res.info[idx].dimension[1]);
- EXPECT_TRUE (out_info.info[idx].dimension[2] == out_res.info[idx].dimension[2]);
- EXPECT_TRUE (out_info.info[idx].dimension[3] == out_res.info[idx].dimension[3]);
- }
+ status = ml_util_get_tensors_count (out_res, &count);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (count, 1U);
+
+ status = ml_util_get_tensor_name (out_res, 0, &name);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_TRUE (g_str_equal (name, "labels_softmax"));
+
+ status = ml_util_get_tensor_type (out_res, 0, &type);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (type, ML_TENSOR_TYPE_FLOAT32);
+
+ ml_util_get_tensor_dimension (out_res, 0, res_dim);
+ EXPECT_TRUE (out_dim[0] == res_dim[0]);
+ EXPECT_TRUE (out_dim[1] == res_dim[1]);
+ EXPECT_TRUE (out_dim[2] == res_dim[2]);
+ EXPECT_TRUE (out_dim[3] == res_dim[3]);
/* generate input data */
- input = ml_util_allocate_tensors_data (&in_info);
+ input = ml_util_allocate_tensors_data (in_info);
EXPECT_TRUE (input != NULL);
status = ml_util_get_last_error ();
EXPECT_EQ (status, ML_ERROR_NONE);
/* check result (max score index is 2) */
- EXPECT_EQ (output->num_tensors, out_res.num_tensors);
+ EXPECT_EQ (output->num_tensors, 1U);
max_score = .0;
max_score_index = 0;
g_free (test_model);
g_free (test_file);
g_free (contents);
- ml_util_free_tensors_info (&in_info);
- ml_util_free_tensors_info (&out_info);
- ml_util_free_tensors_info (&in_res);
- ml_util_free_tensors_info (&out_res);
+ ml_util_destroy_tensors_info (in_info);
+ ml_util_destroy_tensors_info (out_info);
+ ml_util_destroy_tensors_info (in_res);
+ ml_util_destroy_tensors_info (out_res);
}
#endif /* ENABLE_TENSORFLOW */
TEST (nnstreamer_capi_singleshot, failure_01)
{
ml_single_h single;
- ml_tensors_info_s in_info, out_info;
+ ml_tensors_info_h in_info, out_info;
+ ml_tensor_dimension in_dim, out_dim;
int status;
const gchar *root_path = g_getenv ("NNSTREAMER_BUILD_ROOT_PATH");
"mobilenet_v1_1.0_224_quant.tflite", NULL);
ASSERT_TRUE (g_file_test (test_model, G_FILE_TEST_EXISTS));
- ml_util_initialize_tensors_info (&in_info);
- ml_util_initialize_tensors_info (&out_info);
+ ml_util_allocate_tensors_info (&in_info);
+ ml_util_allocate_tensors_info (&out_info);
/* invalid file path */
- status = ml_single_open (&single, "wrong_file_name", &in_info, &out_info,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, "wrong_file_name", in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* null file path */
- status = ml_single_open (&single, NULL, &in_info, &out_info,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, NULL, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* invalid handle */
- status = ml_single_open (NULL, test_model, &in_info, &out_info,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (NULL, test_model, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
- /* invalid input tensor info */
+ /* invalid input tensor info : in_info is allocated but not configured yet (count is 0) */
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
- in_info.num_tensors = 1;
- in_info.info[0].type = ML_TENSOR_TYPE_UINT8;
- in_info.info[0].dimension[0] = 3;
- in_info.info[0].dimension[1] = 224;
- in_info.info[0].dimension[2] = 224;
- in_info.info[0].dimension[3] = 1;
+ in_dim[0] = 3;
+ in_dim[1] = 224;
+ in_dim[2] = 224;
+ in_dim[3] = 1;
+ ml_util_set_tensors_count (in_info, 1);
+ ml_util_set_tensor_type (in_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_util_set_tensor_dimension (in_info, 0, in_dim);
- /* invalid output tensor info */
+ /* invalid output tensor info : out_info is allocated but not configured yet (count is 0) */
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_TENSORFLOW_LITE, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW_LITE, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
- out_info.num_tensors = 1;
- out_info.info[0].type = ML_TENSOR_TYPE_UINT8;
- out_info.info[0].dimension[0] = 1001;
- out_info.info[0].dimension[1] = 1;
- out_info.info[0].dimension[2] = 1;
- out_info.info[0].dimension[3] = 1;
+ out_dim[0] = 1001;
+ out_dim[1] = 1;
+ out_dim[2] = 1;
+ out_dim[3] = 1;
+ ml_util_set_tensors_count (out_info, 1);
+ ml_util_set_tensor_type (out_info, 0, ML_TENSOR_TYPE_UINT8);
+ ml_util_set_tensor_dimension (out_info, 0, out_dim);
/* invalid file extension */
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_TENSORFLOW, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
/* invalid handle */
EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
- /* Successfully opened unknown fw type (tf-lite) */
+ /* Successfully opened with ML_NNFW_TYPE_ANY; framework is auto-detected as tf-lite */
- status = ml_single_open (&single, test_model, &in_info, &out_info,
- ML_NNFW_UNKNOWN, ML_NNFW_HW_DO_NOT_CARE);
+ status = ml_single_open (&single, test_model, in_info, out_info,
+ ML_NNFW_TYPE_ANY, ML_NNFW_HW_ANY);
EXPECT_EQ (status, ML_ERROR_NONE);
status = ml_single_close (single);
EXPECT_EQ (status, ML_ERROR_NONE);
g_free (test_model);
+ ml_util_destroy_tensors_info (in_info);
+ ml_util_destroy_tensors_info (out_info);
}
#endif /* ENABLE_TENSORFLOW_LITE */