From 181305562ea838ae276c1031118e2afad5099b14 Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Tue, 19 Oct 2021 18:54:21 +0900 Subject: [PATCH] [C/Inference] Refactoring C-API module relations. ML C-API interfaces and implementations are being refactored for better relations. ml-common will be no longer depending on nnstreamer. Then, in the future, nntrainer will be no longer depending on nnstreamer except for its nnstreamer-nntrainer subplugin. 1. Renamed headers and sources (except for Tizen Studio's files, which we need to tend later) 2. Refactored inter-file dependency and relations. Signed-off-by: MyungJoo Ham --- c/README.md | 72 +++ c/capi-ml-common.pc.in | 2 +- c/include/nnstreamer-capi-private.h | 522 --------------- c/include/nnstreamer-tizen-internal.h | 19 + c/meson.build | 38 +- ...check.c => ml-api-common-tizen-feature-check.c} | 20 +- c/src/{nnstreamer-capi-util.c => ml-api-common.c} | 698 +++------------------ c/src/ml-api-inference-internal.c | 576 +++++++++++++++++ c/src/ml-api-inference-internal.h | 287 +++++++++ ...capi-pipeline.c => ml-api-inference-pipeline.c} | 206 +++--- ...mer-capi-single.c => ml-api-inference-single.c} | 136 ++-- ....c => ml-api-inference-tizen-privilege-check.c} | 47 +- c/src/ml-api-internal.h | 299 +++++++++ .../nnstreamer/src/main/jni/Android-nnstreamer.mk | 10 +- .../nnstreamer/src/main/jni/nnstreamer-native.h | 4 +- packaging/machine-learning-api.spec | 20 + tests/capi/unittest_capi_datatype_consistency.cc | 2 +- tests/capi/unittest_capi_inference.cc | 4 +- tests/capi/unittest_capi_inference_latency.cc | 3 +- tests/capi/unittest_capi_inference_nnfw_runtime.cc | 3 +- 20 files changed, 1648 insertions(+), 1320 deletions(-) delete mode 100644 c/include/nnstreamer-capi-private.h rename c/src/{nnstreamer-capi-tizen-feature-check.c => ml-api-common-tizen-feature-check.c} (83%) rename c/src/{nnstreamer-capi-util.c => ml-api-common.c} (52%) create mode 100644 c/src/ml-api-inference-internal.c create mode 100644 
c/src/ml-api-inference-internal.h rename c/src/{nnstreamer-capi-pipeline.c => ml-api-inference-pipeline.c} (92%) rename c/src/{nnstreamer-capi-single.c => ml-api-inference-single.c} (91%) rename c/src/{nnstreamer-capi-tizen-privilege-check.c => ml-api-inference-tizen-privilege-check.c} (93%) create mode 100644 c/src/ml-api-internal.h diff --git a/c/README.md b/c/README.md index 4cb84c1..70b1c89 100644 --- a/c/README.md +++ b/c/README.md @@ -20,3 +20,75 @@ - Ubuntu: +## Design + + +Target Design (there are a little work-to-do to follow this design) +```(impl)``` and ```'''``` or ```:``` = Implements +``` < V ``` and ```---``` or ```|``` = Depends +``` [ ] ``` = interface (header) +``` ( ) ``` = implementation (source) + +### Overview with other repos +``` ++----- API.git -----------------------------------------------------------+ +| | +| [........ ml-api-common ...........]<''''(impl)'''( ml-api-common-impl) | +| ^ ^ | +| | | | +| [ ml-api-training ] [ ml-api-inference ] <---- [ ml-api-service ] | +| ^ (impl) ^ (impl) ^ (impl) | +| : : : | +| ( ml-api-tr-impl ) ( ml-api-inf-impl (p/s) ) ( ml-api-serv-impl ) | ++----|---------------------------|----------------------------------------+ + V V ++----+- nntrainer.git --+ +------+-------- nnstreamer.git ----------------+ +| | | + GStreamer | ++-----------------------+ +-----------------------------------------------+ +``` +- As of 2021-10-20, ml-api-tr-impl is not yet migrated from nntrainer.git. +- As of 2021-10-20, ml-api-service and ml-api-serv-impl do not exist, yet. 
+ +### Internal overview (trainer not included) +``` + + [ ml-api-common (pub) ]<---+-----------------------------------+ + ^ | | + : [ nnstreamer.h == ml-api-inf-pipeline (pub) ]<-- | -------------------+ + : ^ | | + : : [ nnstreamer-single.h == ml-api-inf-single (pub) ]<----+ + : : ^ | + : : : [ nnstreamer-tizen-internal.h == ml-api-private (platform only) ] + : : : ^ + ( ml-api-common ) : +'''''''''''''+ + : +''' : ''''''''''''+ + : ( ml-api-common-tiz* ) : : + : : : : + : : : : + V V : : + [ ml-api-internal (int) ] : +''''''''''''+ + ^ : : + | ( ml-api-inference-pipeline ) : + | | : + | | ( ml-api-inference-single ) + | V | +[ ml-api-inference-internal (int) ]<-----------+ + ^ + : +( ml-api-inference-internal ) + +``` +- (pub): public header / interface, exposed to SDK and applications +- (platform only): private header / interface, exposed to platform components +- (int): internal header / interface, exposed to api.git components only. + +- ml-trainer APIs may depend on ml-api-internal. + +### Headers + +/c/include/*.h : Exported headers, accessed by other packages. + - Public (ml-api-common.h, nnstreamer-single.h, nnstreamer.h) + - Supposed to be accessed by application via SDK (e.g., Tizen Studio, Android Studio) + - Platform Only (nnstreamer-tizen-internal.h) + - Supposed to be accessed by platform package (e.g., Tizen middleware) +/c/src/*.h : Not exported headers. Cannot be accessed by other packages. 
diff --git a/c/capi-ml-common.pc.in b/c/capi-ml-common.pc.in index afe48ab..5eaadb2 100644 --- a/c/capi-ml-common.pc.in +++ b/c/capi-ml-common.pc.in @@ -9,5 +9,5 @@ Name: tizen-api-ml-common Description: ML common API for Tizen Version: @VERSION@ Requires: @ML_COMMON_REQUIRE@ -Libs: -L${libdir} +Libs: -L${libdir} -lcapi-ml-common Cflags: -I${includedir}/nnstreamer diff --git a/c/include/nnstreamer-capi-private.h b/c/include/nnstreamer-capi-private.h deleted file mode 100644 index 8b0509e..0000000 --- a/c/include/nnstreamer-capi-private.h +++ /dev/null @@ -1,522 +0,0 @@ -/* SPDX-License-Identifier: Apache-2.0 */ -/** - * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved. - * - * @file nnstreamer-capi-private.h - * @date 07 March 2019 - * @brief NNStreamer/Pipeline(main) C-API Private Header. - * This file should NOT be exported to SDK or devel package. - * @see https://github.com/nnstreamer/nnstreamer - * @author MyungJoo Ham - * @bug No known bugs except for NYI items - */ - -#ifndef __NNSTREAMER_CAPI_PRIVATE_H__ -#define __NNSTREAMER_CAPI_PRIVATE_H__ - -#include -#include - -#include "nnstreamer.h" -#include "nnstreamer-single.h" -#include -#include - -/* Tizen ML feature */ -#if defined (__TIZEN__) -#include "nnstreamer-tizen-internal.h" - -typedef enum -{ - NOT_CHECKED_YET = -1, - NOT_SUPPORTED = 0, - SUPPORTED = 1 -} feature_state_t; - -#if defined (__FEATURE_CHECK_SUPPORT__) -#define check_feature_state() \ - do { \ - int feature_ret = ml_tizen_get_feature_enabled (); \ - if (ML_ERROR_NONE != feature_ret) \ - return feature_ret; \ - } while (0); - -#define set_feature_state(...) ml_tizen_set_feature_state(__VA_ARGS__) -#else -#define check_feature_state() -#define set_feature_state(...) -#endif /* __FEATURE_CHECK_SUPPORT__ */ - -#if defined (__PRIVILEGE_CHECK_SUPPORT__) - -#define convert_tizen_element(...) ml_tizen_convert_element(__VA_ARGS__) - -#if (TIZENVERSION >= 5) && (TIZENVERSION < 9999) -#define get_tizen_resource(...) 
ml_tizen_get_resource(__VA_ARGS__) -#define release_tizen_resource(...) ml_tizen_release_resource(__VA_ARGS__) -#define TIZEN5PLUS 1 - -#if ((TIZENVERSION > 6) || (TIZENVERSION == 6 && TIZENVERSIONMINOR >= 5)) -#define TIZENMMCONF 1 -#endif - -#elif (TIZENVERSION < 5) -#define get_tizen_resource(...) (0) -#define release_tizen_resource(...) do { } while (0) -typedef void * mm_resource_manager_h; -typedef enum { MM_RESOURCE_MANAGER_RES_TYPE_MAX } mm_resource_manager_res_type_e; - -#else -#error Tizen version is not defined. -#endif - -#else - -#define convert_tizen_element(...) ML_ERROR_NONE -#define get_tizen_resource(...) ML_ERROR_NONE -#define release_tizen_resource(...) - -#endif /* __PRIVILEGE_CHECK_SUPPORT__ */ - -#else -#define check_feature_state() -#define set_feature_state(...) -#define convert_tizen_element(...) ML_ERROR_NONE -#define get_tizen_resource(...) ML_ERROR_NONE -#define release_tizen_resource(...) -#endif /* __TIZEN__ */ - -#ifndef TIZEN5PLUS -#define TIZEN5PLUS 0 -#endif /* TIZEN5PLUS */ -#ifndef TIZENMMCONF -#define TIZENMMCONF 0 -#endif /* TIZENMMCONF */ - -#define EOS_MESSAGE_TIME_LIMIT 100 -#define WAIT_PAUSED_TIME_LIMIT 100 - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * @brief Internal private representation of custom filter handle. - */ -typedef struct { - char *name; - unsigned int ref_count; - GMutex lock; - ml_tensors_info_h in_info; - ml_tensors_info_h out_info; - ml_custom_easy_invoke_cb cb; - void *pdata; -} ml_custom_filter_s; - -/** - * @brief Data structure for tensor information. - * @since_tizen 5.5 - */ -typedef struct { - char *name; /**< Name of each element in the tensor. */ - ml_tensor_type_e type; /**< Type of each element in the tensor. */ - ml_tensor_dimension dimension; /**< Dimension information. */ -} ml_tensor_info_s; - -/** - * @brief Data structure for tensors information, which contains multiple tensors. 
- * @since_tizen 5.5 - */ -typedef struct { - unsigned int num_tensors; /**< The number of tensors. */ - ml_tensor_info_s info[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor info. */ - GMutex lock; /**< Lock for thread safety */ - int nolock; /**< Set non-zero to avoid using m (giving up thread safety) */ -} ml_tensors_info_s; - -/** - * @brief Macro to control private lock with nolock condition (lock) - * @param sname The name of struct (ml_tensors_info_s or ml_tensors_data_s) - */ -#define G_LOCK_UNLESS_NOLOCK(sname) \ - do { \ - GMutex *l = (GMutex *) &(sname).lock; \ - if (!(sname).nolock) \ - g_mutex_lock (l); \ - } while (0) - -/** - * @brief Macro to control private lock with nolock condition (unlock) - * @param sname The name of struct (ml_tensors_info_s or ml_tensors_data_s) - */ -#define G_UNLOCK_UNLESS_NOLOCK(sname) \ - do { \ - GMutex *l = (GMutex *) &(sname).lock; \ - if (!(sname).nolock) \ - g_mutex_unlock (l); \ - } while (0) - -/** - * @brief Macro to verify private lock acquired with nolock condition (lock) - * @param sname The name of struct (ml_tensors_info_s or ml_tensors_data_s) - */ -#define G_VERIFYLOCK_UNLESS_NOLOCK(sname) \ - do { \ - GMutex *l = (GMutex *) &(sname).lock; \ - if (!(sname).nolock) { \ - if (g_mutex_trylock(l)) { \ - g_mutex_unlock(l); \ - return ML_ERROR_INVALID_PARAMETER; \ - } \ - } \ - } while (0) - - -/** - * @brief The function to be called when destroying the allocated handle. - * @since_tizen 6.5 - * @param[in] handle The handle created for ML API. - * @param[in,out] user_data The user data to pass to the callback function. - * @return @c 0 on success. Otherwise a negative error value. - */ -typedef int (*ml_handle_destroy_cb) (void *handle, void *user_data); - -/** - * @brief An instance of a single input or output frame. - * @since_tizen 5.5 - */ -typedef struct { - void *tensor; /**< The instance of tensor data. */ - size_t size; /**< The size of tensor. 
*/ -} ml_tensor_data_s; - -/** - * @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata. - * @since_tizen 5.5 - */ -typedef struct { - unsigned int num_tensors; /**< The number of tensors. */ - ml_tensor_data_s tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */ - - /* private */ - ml_tensors_info_h info; - void *user_data; /**< The user data to pass to the callback function */ - ml_handle_destroy_cb destroy; /**< The function to be called to release the allocated buffer */ - GMutex lock; /**< Lock for thread safety */ - int nolock; /**< Set non-zero to avoid using m (giving up thread safety) */ -} ml_tensors_data_s; - -/** - * @brief Internal private representation of tensor_if custom conditon. - * @since_tizen 6.5 - */ -typedef struct { - char *name; - unsigned int ref_count; - GMutex lock; - ml_pipeline_if_custom_cb cb; - void *pdata; -} ml_if_custom_s; - -/** - * @brief Possible controls on elements of a pipeline. - */ -typedef enum { - ML_PIPELINE_ELEMENT_UNKNOWN = 0x0, - ML_PIPELINE_ELEMENT_SINK = 0x1, - ML_PIPELINE_ELEMENT_APP_SRC = 0x2, - ML_PIPELINE_ELEMENT_APP_SINK = 0x3, - ML_PIPELINE_ELEMENT_VALVE = 0x4, - ML_PIPELINE_ELEMENT_SWITCH_INPUT = 0x8, - ML_PIPELINE_ELEMENT_SWITCH_OUTPUT = 0x9, - ML_PIPELINE_ELEMENT_COMMON = 0xB, -} ml_pipeline_element_e; - -/** - * @brief Internal private representation of pipeline handle. - */ -typedef struct _ml_pipeline ml_pipeline; - -/** - * @brief An element that may be controlled individually in a pipeline. 
- */ -typedef struct _ml_pipeline_element { - GstElement *element; /**< The Sink/Src/Valve/Switch element */ - ml_pipeline *pipe; /**< The main pipeline */ - char *name; - ml_pipeline_element_e type; - GstPad *src; - GstPad *sink; /**< Unref this at destroy */ - ml_tensors_info_s tensors_info; - size_t size; - - GList *handles; - int maxid; /**< to allocate id for each handle */ - gulong handle_id; - - GMutex lock; /**< Lock for internal values */ - gboolean is_media_stream; - gboolean is_flexible_tensor; - - ml_handle_destroy_cb custom_destroy; - gpointer custom_data; -} ml_pipeline_element; - -/** - * @brief Internal data structure for the pipeline state callback. - */ -typedef struct { - ml_pipeline_state_cb cb; /**< Callback to notify the change of pipeline state */ - void *user_data; /**< The user data passed when calling the state change callback */ -} pipeline_state_cb_s; - -/** - * @brief Internal data structure for the resource. - */ -typedef struct { - gchar *type; /**< resource type */ - gpointer handle; /**< pointer to resource handle */ -} pipeline_resource_s; - -/** - * @brief Internal private representation of pipeline handle. - * @details This should not be exposed to applications - */ -struct _ml_pipeline { - GstElement *element; /**< The pipeline itself (GstPipeline) */ - GstBus *bus; /**< The bus of the pipeline */ - gulong signal_msg; /**< The message signal (connected to bus) */ - GMutex lock; /**< Lock for pipeline operations */ - gboolean isEOS; /**< The pipeline is EOS state */ - ml_pipeline_state_e pipe_state; /**< The state of pipeline */ - GHashTable *namednodes; /**< hash table of "element"s. 
*/ - GHashTable *resources; /**< hash table of resources to construct the pipeline */ - pipeline_state_cb_s state_cb; /**< Callback to notify the change of pipeline state */ -}; - -/** - * @brief Internal private representation sink callback function for GstTensorSink and GstAppSink - * @details This represents a single instance of callback registration. This should not be exposed to applications. - */ -typedef struct { - ml_pipeline_sink_cb sink_cb; - ml_pipeline_src_callbacks_s src_cb; - void *pdata; -} callback_info_s; - -/** - * @brief Internal private representation of common element handle (All GstElement except AppSink and TensorSink) - * @details This represents a single instance of registration. This should not be exposed to applications. - */ -typedef struct _ml_pipeline_common_elem { - ml_pipeline *pipe; - ml_pipeline_element *element; - guint32 id; - callback_info_s *callback_info; /**< Callback function information. If element is not GstTensorSink or GstAppSink, then it should be NULL. */ -} ml_pipeline_common_elem; - -/** - * @brief An information to create single-shot instance. - */ -typedef struct { - ml_tensors_info_h input_info; /**< The input tensors information. */ - ml_tensors_info_h output_info; /**< The output tensors information. */ - ml_nnfw_type_e nnfw; /**< The neural network framework. */ - ml_nnfw_hw_e hw; /**< The type of hardware resource. */ - char *models; /**< Comma separated neural network model files. */ - char *custom_option; /**< Custom option string for neural network framework. */ -} ml_single_preset; - -/** - * @brief Opens an ML model with the custom options and returns the instance as a handle. - * This is internal function to handle various options in public APIs. - */ -int ml_single_open_custom (ml_single_h *single, ml_single_preset *info); - -/** - * @brief Macro to check the availability of given NNFW. 
- */ -#define ml_nnfw_is_available(f,h) ({bool a; (ml_check_nnfw_availability ((f), (h), &a) == ML_ERROR_NONE && a);}) - -/** - * @brief Macro to check the availability of given element. - */ -#define ml_element_is_available(e) ({bool a; (ml_check_element_availability ((e), &a) == ML_ERROR_NONE && a);}) - -/** - * @brief Macro to check the tensors info is valid. - */ -#define ml_tensors_info_is_valid(i) ({bool v; (ml_tensors_info_validate ((i), &v) == ML_ERROR_NONE && v);}) - -/** - * @brief Macro to compare the tensors info. - */ -#define ml_tensors_info_is_equal(i1,i2) ({bool e; (ml_tensors_info_compare ((i1), (i2), &e) == ML_ERROR_NONE && e);}) - -/** - * @brief Gets the byte size of the given tensor info. - * @note This is not thread safe. - */ -size_t ml_tensor_info_get_size (const ml_tensor_info_s *info); - -/** - * @brief Initializes the tensors information with default value. - * @since_tizen 5.5 - * @param[in] info The tensors info pointer to be initialized. - * @return @c 0 on success. Otherwise a negative error value. - * @retval #ML_ERROR_NONE Successful - * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. - */ -int ml_tensors_info_initialize (ml_tensors_info_s *info); - -/** - * @brief Compares the given tensors information. - * @details If the function returns an error, @a equal is not changed. - * @since_tizen 6.0 - * @param[in] info1 The handle of tensors information to be compared. - * @param[in] info2 The handle of tensors information to be compared. - * @param[out] equal @c true if given tensors information is equal, @c false if it's not equal. - * @return @c 0 on success. Otherwise a negative error value. - * @retval #ML_ERROR_NONE Successful - * @retval #ML_ERROR_NOT_SUPPORTED Not supported. - * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. 
- */ -int ml_tensors_info_compare (const ml_tensors_info_h info1, const ml_tensors_info_h info2, bool *equal); - -/** - * @brief Frees and initialize the data in tensors info. - * @since_tizen 5.5 - * @param[in] info The tensors info pointer to be freed. - */ -void ml_tensors_info_free (ml_tensors_info_s *info); - -/** - * @brief Allocates a tensors information handle from gst info. - */ -int ml_tensors_info_create_from_gst (ml_tensors_info_h *ml_info, GstTensorsInfo *gst_info); - -/** - * @brief Copies tensor metadata from gst tensors info. - */ -void ml_tensors_info_copy_from_gst (ml_tensors_info_s *ml_info, const GstTensorsInfo *gst_info); - -/** - * @brief Copies tensor metadata from ml tensors info. - */ -void ml_tensors_info_copy_from_ml (GstTensorsInfo *gst_info, const ml_tensors_info_s *ml_info); - -/** - * @brief Frees the tensors data handle and its data. - * @param[in] data The handle of tensors data. - * @param[in] free_data The flag to free the buffers in handle. - * @return @c 0 on success. Otherwise a negative error value. - */ -int ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data); - -/** - * @brief Creates a tensor data frame without buffer with the given tensors information. - * @details If @a info is null, this allocates data handle with empty tensor data. - * @param[in] info The handle of tensors information for the allocation. - * @param[out] data The handle of tensors data. - * @return @c 0 on success. Otherwise a negative error value. - */ -int ml_tensors_data_create_no_alloc (const ml_tensors_info_h info, ml_tensors_data_h *data); - -/** - * @brief Creates a tensor data frame without allocating new buffer cloning the given tensors data. - * @details If @a data_src is null, this returns error. - * @param[in] data_src The handle of tensors data to be cloned. - * @param[out] data The handle of tensors data. - * @return @c 0 on success. Otherwise a negative error value. 
- * @retval #ML_ERROR_NONE Successful - * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. - * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory. - */ -int ml_tensors_data_clone_no_alloc (const ml_tensors_data_s * data_src, ml_tensors_data_h * data); - -/** - * @brief Initializes the GStreamer library. This is internal function. - */ -int ml_initialize_gstreamer (void); - -/** - * @brief Validates the nnfw model file. - * @since_tizen 5.5 - * @param[in] model List of model file paths. - * @param[in] num_models The number of model files. There are a few frameworks that require multiple model files for a single model. - * @param[in/out] nnfw The type of NNFW. - * @return @c 0 on success. Otherwise a negative error value. - * @retval #ML_ERROR_NONE Successful - * @retval #ML_ERROR_NOT_SUPPORTED Not supported. - * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. - */ -int ml_validate_model_file (const char * const *model, const unsigned int num_models, ml_nnfw_type_e * nnfw); - -/** - * @brief Checks the availability of the plugin. - */ -int ml_check_plugin_availability (const char *plugin_name, const char *element_name); - -/** - * @brief Internal function to convert accelerator as tensor_filter property format. - * @note returned value must be freed by the caller - */ -char* ml_nnfw_to_str_prop (ml_nnfw_hw_e hw); - -/** - * @brief Internal function to get the sub-plugin name. - */ -const char* ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw); - -/** - * @brief Internal function to get the nnfw type. - */ -ml_nnfw_type_e ml_get_nnfw_type_by_subplugin_name (const char *name); - -/** - * @brief Gets the element of pipeline itself (GstElement). - * @details With the returned reference, you can use GStreamer functions to handle the element in pipeline. - * Note that caller should release the returned reference using gst_object_unref(). - * @return The reference of pipeline itself. 
Null if the pipeline is not constructed or closed. - */ -GstElement* ml_pipeline_get_gst_element (ml_pipeline_h pipe); - -/** - * @brief Validates the given tensors info is valid without acquiring lock - * @note This function assumes that lock on ml_tensors_info_s has already been acquired - */ -int -_ml_tensors_info_validate_nolock (const ml_tensors_info_s * info, bool *valid); - -#if defined (__TIZEN__) -/** - * @brief Checks whether machine_learning.inference feature is enabled or not. - */ -int ml_tizen_get_feature_enabled (void); - -/** - * @brief Set the feature status of machine_learning.inference. - * This is only used for Unit test. - */ -int ml_tizen_set_feature_state (int state); - -/** - * @brief Releases the resource handle of Tizen. - */ -void ml_tizen_release_resource (gpointer handle, const gchar * res_type); - -/** - * @brief Gets the resource handle of Tizen. - */ -int ml_tizen_get_resource (ml_pipeline_h pipe, const gchar * res_type); - -/** - * @brief Converts predefined element for Tizen. - */ -int ml_tizen_convert_element (ml_pipeline_h pipe, gchar ** result, gboolean is_internal); -#endif - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif /* __NNSTREAMER_CAPI_PRIVATE_H__ */ diff --git a/c/include/nnstreamer-tizen-internal.h b/c/include/nnstreamer-tizen-internal.h index bda0039..89381cc 100644 --- a/c/include/nnstreamer-tizen-internal.h +++ b/c/include/nnstreamer-tizen-internal.h @@ -13,6 +13,7 @@ #define __TIZEN_MACHINELEARNING_NNSTREAMER_INTERNAL_H__ #include +#include #ifdef __cplusplus extern "C" { @@ -25,6 +26,24 @@ extern "C" { */ int ml_pipeline_construct_internal (const char *pipeline_description, ml_pipeline_state_cb cb, void *user_data, ml_pipeline_h *pipe); +/** + * @brief An information to create single-shot instance. + */ +typedef struct { + ml_tensors_info_h input_info; /**< The input tensors information. */ + ml_tensors_info_h output_info; /**< The output tensors information. 
*/ + ml_nnfw_type_e nnfw; /**< The neural network framework. */ + ml_nnfw_hw_e hw; /**< The type of hardware resource. */ + char *models; /**< Comma separated neural network model files. */ + char *custom_option; /**< Custom option string for neural network framework. */ +} ml_single_preset; + +/** + * @brief Opens an ML model with the custom options and returns the instance as a handle. + * This is internal function to handle various options in public APIs. + */ +int ml_single_open_custom (ml_single_h *single, ml_single_preset *info); + #ifdef __cplusplus } #endif /* __cplusplus */ diff --git a/c/meson.build b/c/meson.build index 43399c6..d5fed19 100644 --- a/c/meson.build +++ b/c/meson.build @@ -2,22 +2,25 @@ nns_capi_include = [] nns_capi_include += include_directories('include') +nns_capi_include += include_directories('src') if not get_option('enable-tizen') nns_capi_include += include_directories('include/platform') endif nns_capi_srcs = [] -nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-pipeline.c') -nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-util.c') -nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-single.c') +nns_capi_common_srcs = [] +nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'ml-api-inference-pipeline.c') +nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'ml-api-inference-single.c') +nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'ml-api-inference-internal.c') +nns_capi_common_srcs += join_paths(meson.current_source_dir(), 'src', 'ml-api-common.c') if get_option('enable-tizen') if get_option('enable-tizen-feature-check') - nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-tizen-feature-check.c') + nns_capi_common_srcs += join_paths(meson.current_source_dir(), 'src', 'ml-api-common-tizen-feature-check.c') endif if get_option('enable-tizen-privilege-check') - nns_capi_srcs += 
join_paths(meson.current_source_dir(), 'src', 'nnstreamer-capi-tizen-privilege-check.c') + nns_capi_srcs += join_paths(meson.current_source_dir(), 'src', 'ml-api-inference-tizen-privilege-check.c') endif endif @@ -33,8 +36,10 @@ else nns_capi_headers += join_paths(meson.current_source_dir(), 'include', 'platform', 'tizen_error.h') endif + # Dependencies nns_capi_deps = [nnstreamer_dep, glib_dep, gmodule_dep, gst_dep, gst_app_dep] +nns_capi_common_deps = [glib_dep, gmodule_dep, gst_dep] if (get_option('enable-tizen')) message('C-API is in Tizen mode') @@ -55,7 +60,30 @@ if (get_option('enable-tizen')) endif nns_capi_deps += tizen_deps + nns_capi_common_deps += tizen_deps +endif + +# Build ML-API Common Lib First. +nns_capi_common_shared_lib = shared_library ('capi-ml-common', + nns_capi_common_srcs, + dependencies: nns_capi_common_deps, + include_directories: nns_capi_include, + install: true, + install_dir: api_install_libdir, +) +nns_capi_common_static_lib = static_library ('capi-ml-common', + nns_capi_common_srcs, + dependencies: nns_capi_common_deps, + include_directories: nns_capi_include, + install: true, + install_dir: api_install_libdir, +) +nns_capi_common_lib = nns_capi_common_shared_lib +if get_option('default_library') == 'static' + nns_capi_common_lib = nns_capi_common_static_lib endif +nns_capi_common_dep = declare_dependency(link_with: nns_capi_common_lib) +nns_capi_deps += nns_capi_common_dep nns_capi_shared_lib = shared_library ('capi-nnstreamer', nns_capi_srcs, diff --git a/c/src/nnstreamer-capi-tizen-feature-check.c b/c/src/ml-api-common-tizen-feature-check.c similarity index 83% rename from c/src/nnstreamer-capi-tizen-feature-check.c rename to c/src/ml-api-common-tizen-feature-check.c index 13cbddf..0d47de4 100644 --- a/c/src/nnstreamer-capi-tizen-feature-check.c +++ b/c/src/ml-api-common-tizen-feature-check.c @@ -2,9 +2,9 @@ /** * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved. 
 * - * @file nnstreamer-capi-tizen-feature-check.c + * @file ml-api-common-tizen-feature-check.c * @date 21 July 2020 - * @brief NNStreamer/C-API Tizen dependent functions. + * @brief NNStreamer/C-API Tizen dependent functions, common for ML APIs. * @see https://github.com/nnstreamer/nnstreamer * @author MyungJoo Ham * @bug No known bugs except for NYI items @@ -17,7 +17,7 @@ #include #include -#include "nnstreamer-capi-private.h" +#include "ml-api-internal.h" /** * @brief Tizen ML feature. @@ -86,7 +86,7 @@ ml_tizen_get_feature_enabled (void) g_mutex_unlock (&feature_info->mutex); if (NOT_SUPPORTED == feature_enabled) { - ml_loge ("machine_learning.inference NOT supported"); + mlapi_loge ("machine_learning.inference NOT supported"); return ML_ERROR_NOT_SUPPORTED; } else if (NOT_CHECKED_YET == feature_enabled) { bool ml_inf_supported = false; @@ -94,7 +94,7 @@ ml_tizen_get_feature_enabled (void) system_info_get_platform_bool (ML_INF_FEATURE_PATH, &ml_inf_supported); if (0 == ret) { if (false == ml_inf_supported) { - ml_loge ("machine_learning.inference NOT supported"); + mlapi_loge ("machine_learning.inference NOT supported"); ml_tizen_set_feature_state (NOT_SUPPORTED); return ML_ERROR_NOT_SUPPORTED; } @@ -103,23 +103,25 @@ } else { switch (ret) { case SYSTEM_INFO_ERROR_INVALID_PARAMETER: - ml_loge + mlapi_loge ("failed to get feature value because feature key is not vaild"); ret = ML_ERROR_NOT_SUPPORTED; break; case SYSTEM_INFO_ERROR_IO_ERROR: - ml_loge ("failed to get feature value because of input/output error"); + mlapi_loge + ("failed to get feature value because of input/output error"); ret = ML_ERROR_NOT_SUPPORTED; break; case SYSTEM_INFO_ERROR_PERMISSION_DENIED: - ml_loge ("failed to get feature value because of permission denied"); + mlapi_loge + ("failed to get feature value because of permission denied"); ret = ML_ERROR_PERMISSION_DENIED; break; default: - ml_loge ("failed to get feature value 
because of unknown error"); + mlapi_loge ("failed to get feature value because of unknown error"); ret = ML_ERROR_NOT_SUPPORTED; break; } diff --git a/c/src/nnstreamer-capi-util.c b/c/src/ml-api-common.c similarity index 52% rename from c/src/nnstreamer-capi-util.c rename to c/src/ml-api-common.c index 9901ad1..e676a99 100644 --- a/c/src/nnstreamer-capi-util.c +++ b/c/src/ml-api-common.c @@ -13,33 +13,7 @@ #include #include "nnstreamer.h" -#include "nnstreamer-capi-private.h" -#include "nnstreamer_plugin_api.h" -#include "nnstreamer_plugin_api_filter.h" -#include "nnstreamer_internal.h" - -/** - * @brief The name of sub-plugin for defined neural net frameworks. - * @note The sub-plugin for Android is not declared (e.g., snap) - */ -static const char *ml_nnfw_subplugin_name[] = { - [ML_NNFW_TYPE_ANY] = "any", /* DO NOT use this name ('any') to get the sub-plugin */ - [ML_NNFW_TYPE_CUSTOM_FILTER] = "custom", - [ML_NNFW_TYPE_TENSORFLOW_LITE] = "tensorflow-lite", - [ML_NNFW_TYPE_TENSORFLOW] = "tensorflow", - [ML_NNFW_TYPE_NNFW] = "nnfw", - [ML_NNFW_TYPE_MVNC] = "movidius-ncsdk2", - [ML_NNFW_TYPE_OPENVINO] = "openvino", - [ML_NNFW_TYPE_VIVANTE] = "vivante", - [ML_NNFW_TYPE_EDGE_TPU] = "edgetpu", - [ML_NNFW_TYPE_ARMNN] = "armnn", - [ML_NNFW_TYPE_SNPE] = "snpe", - [ML_NNFW_TYPE_PYTORCH] = "pytorch", - [ML_NNFW_TYPE_NNTR_INF] = "nntrainer", - [ML_NNFW_TYPE_VD_AIFW] = "vd_aifw", - [ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine", - NULL -}; +#include "ml-api-internal.h" /** * @brief Allocates a tensors information handle with default value. 
@@ -56,7 +30,7 @@ ml_tensors_info_create (ml_tensors_info_h * info) *info = tensors_info = g_new0 (ml_tensors_info_s, 1); if (tensors_info == NULL) { - ml_loge ("Failed to allocate the tensors info handle."); + mlapi_loge ("Failed to allocate the tensors info handle."); return ML_ERROR_OUT_OF_MEMORY; } g_mutex_init (&tensors_info->lock); @@ -66,26 +40,6 @@ ml_tensors_info_create (ml_tensors_info_h * info) } /** - * @brief Allocates a tensors information handle from gst info. - */ -int -ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info, - GstTensorsInfo * gst_info) -{ - int status; - - if (!ml_info || !gst_info) - return ML_ERROR_INVALID_PARAMETER; - - status = ml_tensors_info_create (ml_info); - if (status != ML_ERROR_NONE) - return status; - - ml_tensors_info_copy_from_gst (*ml_info, gst_info); - return ML_ERROR_NONE; -} - -/** * @brief Frees the given handle of a tensors information. */ int @@ -136,22 +90,22 @@ ml_tensors_info_initialize (ml_tensors_info_s * info) } /** - * @brief Validates the given tensor info is valid. - * @note info should be locked by caller if nolock == 0. + * @brief Compares the given tensor info. */ static gboolean -ml_tensor_info_validate (const ml_tensor_info_s * info) +ml_tensor_info_compare (const ml_tensor_info_s * i1, + const ml_tensor_info_s * i2) { guint i; - if (!info) + if (i1 == NULL || i2 == NULL) return FALSE; - if (info->type < 0 || info->type >= ML_TENSOR_TYPE_UNKNOWN) + if (i1->type != i2->type) return FALSE; for (i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { - if (info->dimension[i] == 0) + if (i1->dimension[i] != i2->dimension[i]) return FALSE; } @@ -159,22 +113,22 @@ ml_tensor_info_validate (const ml_tensor_info_s * info) } /** - * @brief Compares the given tensor info. + * @brief Validates the given tensor info is valid. + * @note info should be locked by caller if nolock == 0. 
*/ static gboolean -ml_tensor_info_compare (const ml_tensor_info_s * i1, - const ml_tensor_info_s * i2) +ml_tensor_info_validate (const ml_tensor_info_s * info) { guint i; - if (i1 == NULL || i2 == NULL) + if (!info) return FALSE; - if (i1->type != i2->type) + if (info->type < 0 || info->type >= ML_TENSOR_TYPE_UNKNOWN) return FALSE; for (i = 0; i < ML_TENSOR_RANK_LIMIT; i++) { - if (i1->dimension[i] != i2->dimension[i]) + if (info->dimension[i] == 0) return FALSE; } @@ -185,7 +139,7 @@ ml_tensor_info_compare (const ml_tensor_info_s * i1, * @brief Validates the given tensors info is valid without acquiring lock * @note This function assumes that lock on ml_tensors_info_h has already been acquired */ -int +static int _ml_tensors_info_validate_nolock (const ml_tensors_info_s * info, bool *valid) { guint i; @@ -533,7 +487,7 @@ ml_tensor_info_get_size (const ml_tensor_info_s * info) tensor_size = 8; break; default: - ml_loge ("In the given param, tensor type is invalid."); + mlapi_loge ("In the given param, tensor type is invalid."); return 0; } @@ -679,7 +633,7 @@ ml_tensors_data_create_no_alloc (const ml_tensors_info_h info, _data = g_new0 (ml_tensors_data_s, 1); if (!_data) { - ml_loge ("Failed to allocate the tensors data handle."); + mlapi_loge ("Failed to allocate the tensors data handle."); return ML_ERROR_OUT_OF_MEMORY; } g_mutex_init (&_data->lock); @@ -750,7 +704,7 @@ ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_data_h * data) return ML_ERROR_INVALID_PARAMETER; if (!ml_tensors_info_is_valid (info)) { - nns_loge ("Given tensors information is invalid."); + mlapi_loge ("Given tensors information is invalid."); return ML_ERROR_INVALID_PARAMETER; } @@ -778,7 +732,7 @@ failed: } g_free (_data); - ml_loge ("Failed to allocate the memory block."); + mlapi_loge ("Failed to allocate the memory block."); return status; } @@ -896,563 +850,81 @@ done: } /** - * @brief Copies tensor meta info from gst tensors info. - * @bug Thread safety required. 
Check its internal users first! - */ -void -ml_tensors_info_copy_from_gst (ml_tensors_info_s * ml_info, - const GstTensorsInfo * gst_info) -{ - guint i, j; - guint max_dim; - - if (!ml_info || !gst_info) - return; - - ml_tensors_info_initialize (ml_info); - max_dim = MIN (ML_TENSOR_RANK_LIMIT, NNS_TENSOR_RANK_LIMIT); - - ml_info->num_tensors = gst_info->num_tensors; - - for (i = 0; i < gst_info->num_tensors; i++) { - /* Copy name string */ - if (gst_info->info[i].name) { - ml_info->info[i].name = g_strdup (gst_info->info[i].name); - } - - /* Set tensor type */ - switch (gst_info->info[i].type) { - case _NNS_INT32: - ml_info->info[i].type = ML_TENSOR_TYPE_INT32; - break; - case _NNS_UINT32: - ml_info->info[i].type = ML_TENSOR_TYPE_UINT32; - break; - case _NNS_INT16: - ml_info->info[i].type = ML_TENSOR_TYPE_INT16; - break; - case _NNS_UINT16: - ml_info->info[i].type = ML_TENSOR_TYPE_UINT16; - break; - case _NNS_INT8: - ml_info->info[i].type = ML_TENSOR_TYPE_INT8; - break; - case _NNS_UINT8: - ml_info->info[i].type = ML_TENSOR_TYPE_UINT8; - break; - case _NNS_FLOAT64: - ml_info->info[i].type = ML_TENSOR_TYPE_FLOAT64; - break; - case _NNS_FLOAT32: - ml_info->info[i].type = ML_TENSOR_TYPE_FLOAT32; - break; - case _NNS_INT64: - ml_info->info[i].type = ML_TENSOR_TYPE_INT64; - break; - case _NNS_UINT64: - ml_info->info[i].type = ML_TENSOR_TYPE_UINT64; - break; - default: - ml_info->info[i].type = ML_TENSOR_TYPE_UNKNOWN; - break; - } - - /* Set dimension */ - for (j = 0; j < max_dim; j++) { - ml_info->info[i].dimension[j] = gst_info->info[i].dimension[j]; - } - - for (; j < ML_TENSOR_RANK_LIMIT; j++) { - ml_info->info[i].dimension[j] = 1; - } - } -} - -/** - * @brief Copies tensor meta info from gst tensors info. - * @bug Thread safety required. Check its internal users first! 
- */ -void -ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info, - const ml_tensors_info_s * ml_info) -{ - guint i, j; - guint max_dim; - - if (!gst_info || !ml_info) - return; - - G_LOCK_UNLESS_NOLOCK (*ml_info); - - gst_tensors_info_init (gst_info); - max_dim = MIN (ML_TENSOR_RANK_LIMIT, NNS_TENSOR_RANK_LIMIT); - - gst_info->num_tensors = ml_info->num_tensors; - - for (i = 0; i < ml_info->num_tensors; i++) { - /* Copy name string */ - if (ml_info->info[i].name) { - gst_info->info[i].name = g_strdup (ml_info->info[i].name); - } - - /* Set tensor type */ - switch (ml_info->info[i].type) { - case ML_TENSOR_TYPE_INT32: - gst_info->info[i].type = _NNS_INT32; - break; - case ML_TENSOR_TYPE_UINT32: - gst_info->info[i].type = _NNS_UINT32; - break; - case ML_TENSOR_TYPE_INT16: - gst_info->info[i].type = _NNS_INT16; - break; - case ML_TENSOR_TYPE_UINT16: - gst_info->info[i].type = _NNS_UINT16; - break; - case ML_TENSOR_TYPE_INT8: - gst_info->info[i].type = _NNS_INT8; - break; - case ML_TENSOR_TYPE_UINT8: - gst_info->info[i].type = _NNS_UINT8; - break; - case ML_TENSOR_TYPE_FLOAT64: - gst_info->info[i].type = _NNS_FLOAT64; - break; - case ML_TENSOR_TYPE_FLOAT32: - gst_info->info[i].type = _NNS_FLOAT32; - break; - case ML_TENSOR_TYPE_INT64: - gst_info->info[i].type = _NNS_INT64; - break; - case ML_TENSOR_TYPE_UINT64: - gst_info->info[i].type = _NNS_UINT64; - break; - default: - gst_info->info[i].type = _NNS_END; - break; - } - - /* Set dimension */ - for (j = 0; j < max_dim; j++) { - gst_info->info[i].dimension[j] = ml_info->info[i].dimension[j]; - } - - for (; j < NNS_TENSOR_RANK_LIMIT; j++) { - gst_info->info[i].dimension[j] = 1; - } - } - G_UNLOCK_UNLESS_NOLOCK (*ml_info); -} - -/** - * @brief Initializes the GStreamer library. This is internal function. 
- */ -int -ml_initialize_gstreamer (void) -{ - GError *err = NULL; - - if (!gst_init_check (NULL, NULL, &err)) { - if (err) { - ml_loge ("GStreamer has the following error: %s", err->message); - g_clear_error (&err); - } else { - ml_loge ("Cannot initialize GStreamer. Unknown reason."); - } - - return ML_ERROR_STREAMS_PIPE; - } - - return ML_ERROR_NONE; -} - -/** - * @brief Internal helper function to validate model files. - */ -static int -_ml_validate_model_file (const char *const *model, - const unsigned int num_models, gboolean * is_dir) -{ - guint i; - - if (!model || num_models < 1) { - ml_loge ("The required param, model is not provided (null)."); - return ML_ERROR_INVALID_PARAMETER; - } - - if (g_file_test (model[0], G_FILE_TEST_IS_DIR)) { - *is_dir = TRUE; - return ML_ERROR_NONE; - } - - for (i = 0; i < num_models; i++) { - if (!model[i] || !g_file_test (model[i], G_FILE_TEST_IS_REGULAR)) { - ml_loge ("The given param, model path [%s] is invalid or not given.", - GST_STR_NULL (model[i])); - return ML_ERROR_INVALID_PARAMETER; - } - } - - return ML_ERROR_NONE; -} - -/** - * @brief Validates the nnfw model file. - * @since_tizen 5.5 - * @param[in] model The path of model file. - * @param[in/out] nnfw The type of NNFW. - * @return @c 0 on success. Otherwise a negative error value. - * @retval #ML_ERROR_NONE Successful - * @retval #ML_ERROR_NOT_SUPPORTED Not supported, or framework to support this model file is unavailable in the environment. - * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. 
- */ -int -ml_validate_model_file (const char *const *model, - const unsigned int num_models, ml_nnfw_type_e * nnfw) -{ - int status = ML_ERROR_NONE; - ml_nnfw_type_e detected = ML_NNFW_TYPE_ANY; - gboolean is_dir = FALSE; - gchar *pos, *fw_name; - gchar **file_ext = NULL; - guint i; - - if (!nnfw) - return ML_ERROR_INVALID_PARAMETER; - - status = _ml_validate_model_file (model, num_models, &is_dir); - if (status != ML_ERROR_NONE) - return status; - - /** - * @note detect-fw checks the file ext and returns proper fw name for given models. - * If detected fw and given nnfw are same, we don't need to check the file extension. - * If any condition for auto detection is added later, below code also should be updated. - */ - fw_name = gst_tensor_filter_detect_framework (model, num_models, TRUE); - detected = ml_get_nnfw_type_by_subplugin_name (fw_name); - g_free (fw_name); - - if (*nnfw == ML_NNFW_TYPE_ANY) { - if (detected == ML_NNFW_TYPE_ANY) { - ml_loge ("The given model has unknown or not supported extension."); - status = ML_ERROR_INVALID_PARAMETER; - } else { - ml_logi ("The given model is supposed a %s model.", - ml_get_nnfw_subplugin_name (detected)); - *nnfw = detected; - } - - goto done; - } else if (is_dir && *nnfw != ML_NNFW_TYPE_NNFW) { - /* supposed it is ONE if given model is directory */ - ml_loge ("The given model is directory, check model and framework."); - status = ML_ERROR_INVALID_PARAMETER; - goto done; - } else if (detected == *nnfw) { - /* Expected framework, nothing to do. */ - goto done; - } - - /* Handle mismatched case, check file extension. 
*/ - file_ext = g_malloc0 (sizeof (char *) * (num_models + 1)); - for (i = 0; i < num_models; i++) { - if ((pos = strrchr (model[i], '.')) == NULL) { - ml_loge ("The given model [%s] has invalid extension.", model[i]); - status = ML_ERROR_INVALID_PARAMETER; - goto done; - } - - file_ext[i] = g_ascii_strdown (pos, -1); - } - - /** @todo Make sure num_models is correct for each nnfw type */ - switch (*nnfw) { - case ML_NNFW_TYPE_NNFW: - /** - * We cannot check the file ext with NNFW. - * NNFW itself will validate metadata and model file. - */ - break; - case ML_NNFW_TYPE_MVNC: - case ML_NNFW_TYPE_OPENVINO: - case ML_NNFW_TYPE_EDGE_TPU: - /** @todo Need to check method to validate model */ - ml_loge ("Given NNFW is not supported yet."); - status = ML_ERROR_NOT_SUPPORTED; - break; - case ML_NNFW_TYPE_VD_AIFW: - if (!g_str_equal (file_ext[0], ".nb") && - !g_str_equal (file_ext[0], ".ncp") && - !g_str_equal (file_ext[0], ".bin")) { - status = ML_ERROR_INVALID_PARAMETER; - } - break; - case ML_NNFW_TYPE_SNAP: -#if !defined (__ANDROID__) - ml_loge ("SNAP only can be included in Android (arm64-v8a only)."); - status = ML_ERROR_NOT_SUPPORTED; -#endif - /* SNAP requires multiple files, set supported if model file exists. */ - break; - case ML_NNFW_TYPE_ARMNN: - if (!g_str_equal (file_ext[0], ".caffemodel") && - !g_str_equal (file_ext[0], ".tflite") && - !g_str_equal (file_ext[0], ".pb") && - !g_str_equal (file_ext[0], ".prototxt")) { - status = ML_ERROR_INVALID_PARAMETER; + * @brief Replaces string. + * This function deallocates the input source string. + * This is copied from nnstreamer/tensor_common.c by the nnstreamer maintainer. + * @param[in] source The input string. This will be freed when returning the replaced string. + * @param[in] what The string to search for. + * @param[in] to The string to be replaced. + * @param[in] delimiters The characters which specify the place to split the string. Set NULL to replace all matched string. 
+ * @param[out] count The count of replaced. Set NULL if it is unnecessary. + * @return Newly allocated string. The returned string should be freed with g_free(). + */ +gchar * +ml_replace_string (gchar * source, const gchar * what, const gchar * to, + const gchar * delimiters, guint * count) +{ + GString *builder; + gchar *start, *pos, *result; + guint changed = 0; + gsize len; + + g_return_val_if_fail (source, NULL); + g_return_val_if_fail (what && to, source); + + len = strlen (what); + start = source; + + builder = g_string_new (NULL); + while ((pos = g_strstr_len (start, -1, what)) != NULL) { + gboolean skip = FALSE; + + if (delimiters) { + const gchar *s; + gchar *prev, *next; + gboolean prev_split, next_split; + + prev = next = NULL; + prev_split = next_split = FALSE; + + if (pos != source) + prev = pos - 1; + if (*(pos + len) != '\0') + next = pos + len; + + for (s = delimiters; *s != '\0'; ++s) { + if (!prev || *s == *prev) + prev_split = TRUE; + if (!next || *s == *next) + next_split = TRUE; + if (prev_split && next_split) + break; } - break; - default: - status = ML_ERROR_INVALID_PARAMETER; - break; - } -done: - if (status == ML_ERROR_NONE) { - if (!ml_nnfw_is_available (*nnfw, ML_NNFW_HW_ANY)) { - ml_loge ("%s is not available.", ml_get_nnfw_subplugin_name (*nnfw)); - status = ML_ERROR_NOT_SUPPORTED; + if (!prev_split || !next_split) + skip = TRUE; } - } else { - ml_loge ("The given model file is invalid."); - } - - g_strfreev (file_ext); - return status; -} -/** - * @brief Convert c-api based hw to internal representation - */ -static accl_hw -ml_nnfw_to_accl_hw (const ml_nnfw_hw_e hw) -{ - switch (hw) { - case ML_NNFW_HW_ANY: - return ACCL_DEFAULT; - case ML_NNFW_HW_AUTO: - return ACCL_AUTO; - case ML_NNFW_HW_CPU: - return ACCL_CPU; -#if defined (__aarch64__) || defined (__arm__) - case ML_NNFW_HW_CPU_NEON: - return ACCL_CPU_NEON; -#else - case ML_NNFW_HW_CPU_SIMD: - return ACCL_CPU_SIMD; -#endif - case ML_NNFW_HW_GPU: - return ACCL_GPU; - case 
ML_NNFW_HW_NPU: - return ACCL_NPU; - case ML_NNFW_HW_NPU_MOVIDIUS: - return ACCL_NPU_MOVIDIUS; - case ML_NNFW_HW_NPU_EDGE_TPU: - return ACCL_NPU_EDGE_TPU; - case ML_NNFW_HW_NPU_VIVANTE: - return ACCL_NPU_VIVANTE; - case ML_NNFW_HW_NPU_SLSI: - return ACCL_NPU_SLSI; - case ML_NNFW_HW_NPU_SR: - /** @todo how to get srcn npu */ - return ACCL_NPU_SR; - default: - return ACCL_AUTO; - } -} - -/** - * @brief Internal function to convert accelerator as tensor_filter property format. - * @note returned value must be freed by the caller - * @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c. - */ -char * -ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw) -{ - const gchar *hw_name; - const gchar *use_accl = "true:"; - gchar *str_prop = NULL; - - hw_name = get_accl_hw_str (ml_nnfw_to_accl_hw (hw)); - str_prop = g_strdup_printf ("%s%s", use_accl, hw_name); - - return str_prop; -} + builder = g_string_append_len (builder, start, pos - start); -/** - * @brief Internal function to get the sub-plugin name. - */ -const char * -ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw) -{ - /* check sub-plugin for android */ - if (nnfw == ML_NNFW_TYPE_SNAP) - return "snap"; - - return ml_nnfw_subplugin_name[nnfw]; -} - -/** - * @brief Internal function to get the nnfw type. 
- */ -ml_nnfw_type_e -ml_get_nnfw_type_by_subplugin_name (const char *name) -{ - ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY; - int idx = -1; - - if (name == NULL) - return ML_NNFW_TYPE_ANY; - - idx = find_key_strv (ml_nnfw_subplugin_name, name); - if (idx < 0) { - /* check sub-plugin for android */ - if (g_ascii_strcasecmp (name, "snap") == 0) - nnfw_type = ML_NNFW_TYPE_SNAP; + /* replace string if found */ + if (skip) + builder = g_string_append_len (builder, pos, len); else - ml_logw ("Cannot find nnfw, %s is invalid name.", GST_STR_NULL (name)); - } else { - nnfw_type = (ml_nnfw_type_e) idx; - } - - return nnfw_type; -} - -/** - * @brief Checks the availability of the given execution environments with custom option. - */ -int -ml_check_nnfw_availability_full (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, - const char *custom, bool *available) -{ - const char *fw_name = NULL; - - check_feature_state (); - - if (!available) - return ML_ERROR_INVALID_PARAMETER; - - /* init false */ - *available = false; - - if (nnfw == ML_NNFW_TYPE_ANY) - return ML_ERROR_INVALID_PARAMETER; - - fw_name = ml_get_nnfw_subplugin_name (nnfw); - - if (fw_name) { - if (nnstreamer_filter_find (fw_name) != NULL) { - accl_hw accl = ml_nnfw_to_accl_hw (hw); - - if (gst_tensor_filter_check_hw_availability (fw_name, accl, custom)) { - *available = true; - } else { - ml_logw ("%s is supported but not with the specified hardware.", - fw_name); - } - } else { - ml_logw ("%s is not supported.", fw_name); - } - } - - return ML_ERROR_NONE; -} - -/** - * @brief Checks the availability of the given execution environments. - */ -int -ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, - bool *available) -{ - return ml_check_nnfw_availability_full (nnfw, hw, NULL, available); -} - -/** - * @brief Checks the element is registered and available on the pipeline. 
- */ -int -ml_check_element_availability (const char *element_name, bool *available) -{ - GstElementFactory *factory; - int status; - - check_feature_state (); - - if (!element_name || !available) - return ML_ERROR_INVALID_PARAMETER; - - status = ml_initialize_gstreamer (); - if (status != ML_ERROR_NONE) - return status; + builder = g_string_append (builder, to); - /* init false */ - *available = false; - - factory = gst_element_factory_find (element_name); - if (factory) { - GstPluginFeature *feature = GST_PLUGIN_FEATURE (factory); - const gchar *plugin_name = gst_plugin_feature_get_plugin_name (feature); - - /* check restricted element */ - status = ml_check_plugin_availability (plugin_name, element_name); - if (status == ML_ERROR_NONE) - *available = true; - - gst_object_unref (factory); + start = pos + len; + if (!skip) + changed++; } - return ML_ERROR_NONE; -} + /* append remains */ + builder = g_string_append (builder, start); + result = g_string_free (builder, FALSE); -/** - * @brief Checks the availability of the plugin. 
- */ -int -ml_check_plugin_availability (const char *plugin_name, const char *element_name) -{ - static gboolean list_loaded = FALSE; - static gchar **restricted_elements = NULL; - - if (!plugin_name || !element_name) { - ml_loge ("The name is invalid, failed to check the availability."); - return ML_ERROR_INVALID_PARAMETER; - } - - if (!list_loaded) { - gboolean restricted; - - restricted = - nnsconf_get_custom_value_bool ("element-restriction", - "enable_element_restriction", FALSE); - if (restricted) { - gchar *elements; - - /* check white-list of available plugins */ - elements = - nnsconf_get_custom_value_string ("element-restriction", - "restricted_elements"); - if (elements) { - restricted_elements = g_strsplit_set (elements, " ,;", -1); - g_free (elements); - } - } + if (count) + *count = changed; - list_loaded = TRUE; - } - - /* nnstreamer elements */ - if (g_str_has_prefix (plugin_name, "nnstreamer") && - g_str_has_prefix (element_name, "tensor_")) { - return ML_ERROR_NONE; - } - - if (restricted_elements && - find_key_strv ((const gchar **) restricted_elements, element_name) < 0) { - ml_logw ("The element %s is restricted.", element_name); - return ML_ERROR_NOT_SUPPORTED; - } - - return ML_ERROR_NONE; + g_free (source); + return result; } diff --git a/c/src/ml-api-inference-internal.c b/c/src/ml-api-inference-internal.c new file mode 100644 index 0000000..d083dd3 --- /dev/null +++ b/c/src/ml-api-inference-internal.c @@ -0,0 +1,576 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved. 
+ * + * @file ml-api-inference-internal.c + * @date 19 October 2021 + * @brief ML-API Internal Utility Functions for inference implementations + * @see https://github.com/nnstreamer/api + * @author MyungJoo Ham + * @bug No known bugs except for NYI items + */ + +#include + +#include +#include +#include +#include +#include +#include "ml-api-inference-internal.h" +#include "ml-api-internal.h" + +/** + * @brief The name of sub-plugin for defined neural net frameworks. + * @note The sub-plugin for Android is not declared (e.g., snap) + */ +static const char *ml_nnfw_subplugin_name[] = { + [ML_NNFW_TYPE_ANY] = "any", /* DO NOT use this name ('any') to get the sub-plugin */ + [ML_NNFW_TYPE_CUSTOM_FILTER] = "custom", + [ML_NNFW_TYPE_TENSORFLOW_LITE] = "tensorflow-lite", + [ML_NNFW_TYPE_TENSORFLOW] = "tensorflow", + [ML_NNFW_TYPE_NNFW] = "nnfw", + [ML_NNFW_TYPE_MVNC] = "movidius-ncsdk2", + [ML_NNFW_TYPE_OPENVINO] = "openvino", + [ML_NNFW_TYPE_VIVANTE] = "vivante", + [ML_NNFW_TYPE_EDGE_TPU] = "edgetpu", + [ML_NNFW_TYPE_ARMNN] = "armnn", + [ML_NNFW_TYPE_SNPE] = "snpe", + [ML_NNFW_TYPE_PYTORCH] = "pytorch", + [ML_NNFW_TYPE_NNTR_INF] = "nntrainer", + [ML_NNFW_TYPE_VD_AIFW] = "vd_aifw", + [ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine", + NULL +}; + +/** + * @brief Internal function to get the sub-plugin name. + */ +const char * +ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw) +{ + /* check sub-plugin for android */ + if (nnfw == ML_NNFW_TYPE_SNAP) + return "snap"; + + return ml_nnfw_subplugin_name[nnfw]; +} + +/** + * @brief Allocates a tensors information handle from gst info. 
+ */ +int +ml_tensors_info_create_from_gst (ml_tensors_info_h * ml_info, + GstTensorsInfo * gst_info) +{ + int status; + + if (!ml_info || !gst_info) + return ML_ERROR_INVALID_PARAMETER; + + status = ml_tensors_info_create (ml_info); + if (status != ML_ERROR_NONE) + return status; + + ml_tensors_info_copy_from_gst (*ml_info, gst_info); + return ML_ERROR_NONE; +} + +/** + * @brief Copies tensor meta info from gst tensors info. + * @bug Thread safety required. Check its internal users first! + */ +void +ml_tensors_info_copy_from_gst (ml_tensors_info_s * ml_info, + const GstTensorsInfo * gst_info) +{ + guint i, j; + guint max_dim; + + if (!ml_info || !gst_info) + return; + + ml_tensors_info_initialize (ml_info); + max_dim = MIN (ML_TENSOR_RANK_LIMIT, NNS_TENSOR_RANK_LIMIT); + + ml_info->num_tensors = gst_info->num_tensors; + + for (i = 0; i < gst_info->num_tensors; i++) { + /* Copy name string */ + if (gst_info->info[i].name) { + ml_info->info[i].name = g_strdup (gst_info->info[i].name); + } + + /* Set tensor type */ + switch (gst_info->info[i].type) { + case _NNS_INT32: + ml_info->info[i].type = ML_TENSOR_TYPE_INT32; + break; + case _NNS_UINT32: + ml_info->info[i].type = ML_TENSOR_TYPE_UINT32; + break; + case _NNS_INT16: + ml_info->info[i].type = ML_TENSOR_TYPE_INT16; + break; + case _NNS_UINT16: + ml_info->info[i].type = ML_TENSOR_TYPE_UINT16; + break; + case _NNS_INT8: + ml_info->info[i].type = ML_TENSOR_TYPE_INT8; + break; + case _NNS_UINT8: + ml_info->info[i].type = ML_TENSOR_TYPE_UINT8; + break; + case _NNS_FLOAT64: + ml_info->info[i].type = ML_TENSOR_TYPE_FLOAT64; + break; + case _NNS_FLOAT32: + ml_info->info[i].type = ML_TENSOR_TYPE_FLOAT32; + break; + case _NNS_INT64: + ml_info->info[i].type = ML_TENSOR_TYPE_INT64; + break; + case _NNS_UINT64: + ml_info->info[i].type = ML_TENSOR_TYPE_UINT64; + break; + default: + ml_info->info[i].type = ML_TENSOR_TYPE_UNKNOWN; + break; + } + + /* Set dimension */ + for (j = 0; j < max_dim; j++) { + 
ml_info->info[i].dimension[j] = gst_info->info[i].dimension[j]; + } + + for (; j < ML_TENSOR_RANK_LIMIT; j++) { + ml_info->info[i].dimension[j] = 1; + } + } +} + +/** + * @brief Copies tensor meta info from gst tensors info. + * @bug Thread safety required. Check its internal users first! + */ +void +ml_tensors_info_copy_from_ml (GstTensorsInfo * gst_info, + const ml_tensors_info_s * ml_info) +{ + guint i, j; + guint max_dim; + + if (!gst_info || !ml_info) + return; + + G_LOCK_UNLESS_NOLOCK (*ml_info); + + gst_tensors_info_init (gst_info); + max_dim = MIN (ML_TENSOR_RANK_LIMIT, NNS_TENSOR_RANK_LIMIT); + + gst_info->num_tensors = ml_info->num_tensors; + + for (i = 0; i < ml_info->num_tensors; i++) { + /* Copy name string */ + if (ml_info->info[i].name) { + gst_info->info[i].name = g_strdup (ml_info->info[i].name); + } + + /* Set tensor type */ + switch (ml_info->info[i].type) { + case ML_TENSOR_TYPE_INT32: + gst_info->info[i].type = _NNS_INT32; + break; + case ML_TENSOR_TYPE_UINT32: + gst_info->info[i].type = _NNS_UINT32; + break; + case ML_TENSOR_TYPE_INT16: + gst_info->info[i].type = _NNS_INT16; + break; + case ML_TENSOR_TYPE_UINT16: + gst_info->info[i].type = _NNS_UINT16; + break; + case ML_TENSOR_TYPE_INT8: + gst_info->info[i].type = _NNS_INT8; + break; + case ML_TENSOR_TYPE_UINT8: + gst_info->info[i].type = _NNS_UINT8; + break; + case ML_TENSOR_TYPE_FLOAT64: + gst_info->info[i].type = _NNS_FLOAT64; + break; + case ML_TENSOR_TYPE_FLOAT32: + gst_info->info[i].type = _NNS_FLOAT32; + break; + case ML_TENSOR_TYPE_INT64: + gst_info->info[i].type = _NNS_INT64; + break; + case ML_TENSOR_TYPE_UINT64: + gst_info->info[i].type = _NNS_UINT64; + break; + default: + gst_info->info[i].type = _NNS_END; + break; + } + + /* Set dimension */ + for (j = 0; j < max_dim; j++) { + gst_info->info[i].dimension[j] = ml_info->info[i].dimension[j]; + } + + for (; j < NNS_TENSOR_RANK_LIMIT; j++) { + gst_info->info[i].dimension[j] = 1; + } + } + G_UNLOCK_UNLESS_NOLOCK (*ml_info); +} + +/** 
+ * @brief Initializes the GStreamer library. This is internal function. + */ +int +ml_initialize_gstreamer (void) +{ + GError *err = NULL; + + if (!gst_init_check (NULL, NULL, &err)) { + if (err) { + mlapi_loge ("GStreamer has the following error: %s", err->message); + g_clear_error (&err); + } else { + mlapi_loge ("Cannot initialize GStreamer. Unknown reason."); + } + + return ML_ERROR_STREAMS_PIPE; + } + + return ML_ERROR_NONE; +} + +/** + * @brief Internal helper function to validate model files. + */ +static int +_ml_validate_model_file (const char *const *model, + const unsigned int num_models, gboolean * is_dir) +{ + guint i; + + if (!model || num_models < 1) { + mlapi_loge ("The required param, model is not provided (null)."); + return ML_ERROR_INVALID_PARAMETER; + } + + if (g_file_test (model[0], G_FILE_TEST_IS_DIR)) { + *is_dir = TRUE; + return ML_ERROR_NONE; + } + + for (i = 0; i < num_models; i++) { + if (!model[i] || !g_file_test (model[i], G_FILE_TEST_IS_REGULAR)) { + mlapi_loge ("The given param, model path [%s] is invalid or not given.", + GST_STR_NULL (model[i])); + return ML_ERROR_INVALID_PARAMETER; + } + } + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to get the nnfw type. + */ +ml_nnfw_type_e +ml_get_nnfw_type_by_subplugin_name (const char *name) +{ + ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY; + int idx = -1; + + if (name == NULL) + return ML_NNFW_TYPE_ANY; + + idx = find_key_strv (ml_nnfw_subplugin_name, name); + if (idx < 0) { + /* check sub-plugin for android */ + if (g_ascii_strcasecmp (name, "snap") == 0) + nnfw_type = ML_NNFW_TYPE_SNAP; + else + mlapi_logw ("Cannot find nnfw, %s is invalid name.", GST_STR_NULL (name)); + } else { + nnfw_type = (ml_nnfw_type_e) idx; + } + + return nnfw_type; +} + +/** + * @brief Validates the nnfw model file. + * @since_tizen 5.5 + * @param[in] model The path of model file. + * @param[in/out] nnfw The type of NNFW. + * @return @c 0 on success. Otherwise a negative error value. 
+ * @retval #ML_ERROR_NONE Successful + * @retval #ML_ERROR_NOT_SUPPORTED Not supported, or framework to support this model file is unavailable in the environment. + * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. + */ +int +ml_validate_model_file (const char *const *model, + const unsigned int num_models, ml_nnfw_type_e * nnfw) +{ + int status = ML_ERROR_NONE; + ml_nnfw_type_e detected = ML_NNFW_TYPE_ANY; + gboolean is_dir = FALSE; + gchar *pos, *fw_name; + gchar **file_ext = NULL; + guint i; + + if (!nnfw) + return ML_ERROR_INVALID_PARAMETER; + + status = _ml_validate_model_file (model, num_models, &is_dir); + if (status != ML_ERROR_NONE) + return status; + + /** + * @note detect-fw checks the file ext and returns proper fw name for given models. + * If detected fw and given nnfw are same, we don't need to check the file extension. + * If any condition for auto detection is added later, below code also should be updated. + */ + fw_name = gst_tensor_filter_detect_framework (model, num_models, TRUE); + detected = ml_get_nnfw_type_by_subplugin_name (fw_name); + g_free (fw_name); + + if (*nnfw == ML_NNFW_TYPE_ANY) { + if (detected == ML_NNFW_TYPE_ANY) { + mlapi_loge ("The given model has unknown or not supported extension."); + status = ML_ERROR_INVALID_PARAMETER; + } else { + mlapi_logi ("The given model is supposed a %s model.", + ml_get_nnfw_subplugin_name (detected)); + *nnfw = detected; + } + + goto done; + } else if (is_dir && *nnfw != ML_NNFW_TYPE_NNFW) { + /* supposed it is ONE if given model is directory */ + mlapi_loge ("The given model is directory, check model and framework."); + status = ML_ERROR_INVALID_PARAMETER; + goto done; + } else if (detected == *nnfw) { + /* Expected framework, nothing to do. */ + goto done; + } + + /* Handle mismatched case, check file extension. 
*/ + file_ext = g_malloc0 (sizeof (char *) * (num_models + 1)); + for (i = 0; i < num_models; i++) { + if ((pos = strrchr (model[i], '.')) == NULL) { + mlapi_loge ("The given model [%s] has invalid extension.", model[i]); + status = ML_ERROR_INVALID_PARAMETER; + goto done; + } + + file_ext[i] = g_ascii_strdown (pos, -1); + } + + /** @todo Make sure num_models is correct for each nnfw type */ + switch (*nnfw) { + case ML_NNFW_TYPE_NNFW: + /** + * We cannot check the file ext with NNFW. + * NNFW itself will validate metadata and model file. + */ + break; + case ML_NNFW_TYPE_MVNC: + case ML_NNFW_TYPE_OPENVINO: + case ML_NNFW_TYPE_EDGE_TPU: + /** @todo Need to check method to validate model */ + mlapi_loge ("Given NNFW is not supported yet."); + status = ML_ERROR_NOT_SUPPORTED; + break; + case ML_NNFW_TYPE_VD_AIFW: + if (!g_str_equal (file_ext[0], ".nb") && + !g_str_equal (file_ext[0], ".ncp") && + !g_str_equal (file_ext[0], ".bin")) { + status = ML_ERROR_INVALID_PARAMETER; + } + break; + case ML_NNFW_TYPE_SNAP: +#if !defined (__ANDROID__) + mlapi_loge ("SNAP only can be included in Android (arm64-v8a only)."); + status = ML_ERROR_NOT_SUPPORTED; +#endif + /* SNAP requires multiple files, set supported if model file exists. 
*/ + break; + case ML_NNFW_TYPE_ARMNN: + if (!g_str_equal (file_ext[0], ".caffemodel") && + !g_str_equal (file_ext[0], ".tflite") && + !g_str_equal (file_ext[0], ".pb") && + !g_str_equal (file_ext[0], ".prototxt")) { + status = ML_ERROR_INVALID_PARAMETER; + } + break; + default: + status = ML_ERROR_INVALID_PARAMETER; + break; + } + +done: + if (status == ML_ERROR_NONE) { + if (!ml_nnfw_is_available (*nnfw, ML_NNFW_HW_ANY)) { + mlapi_loge ("%s is not available.", ml_get_nnfw_subplugin_name (*nnfw)); + status = ML_ERROR_NOT_SUPPORTED; + } + } else { + mlapi_loge ("The given model file is invalid."); + } + + g_strfreev (file_ext); + return status; +} + +/** + * @brief Convert c-api based hw to internal representation + */ +accl_hw +ml_nnfw_to_accl_hw (const ml_nnfw_hw_e hw) +{ + switch (hw) { + case ML_NNFW_HW_ANY: + return ACCL_DEFAULT; + case ML_NNFW_HW_AUTO: + return ACCL_AUTO; + case ML_NNFW_HW_CPU: + return ACCL_CPU; +#if defined (__aarch64__) || defined (__arm__) + case ML_NNFW_HW_CPU_NEON: + return ACCL_CPU_NEON; +#else + case ML_NNFW_HW_CPU_SIMD: + return ACCL_CPU_SIMD; +#endif + case ML_NNFW_HW_GPU: + return ACCL_GPU; + case ML_NNFW_HW_NPU: + return ACCL_NPU; + case ML_NNFW_HW_NPU_MOVIDIUS: + return ACCL_NPU_MOVIDIUS; + case ML_NNFW_HW_NPU_EDGE_TPU: + return ACCL_NPU_EDGE_TPU; + case ML_NNFW_HW_NPU_VIVANTE: + return ACCL_NPU_VIVANTE; + case ML_NNFW_HW_NPU_SLSI: + return ACCL_NPU_SLSI; + case ML_NNFW_HW_NPU_SR: + /** @todo how to get srcn npu */ + return ACCL_NPU_SR; + default: + return ACCL_AUTO; + } +} + +/** + * @brief Internal function to convert accelerator as tensor_filter property format. + * @note returned value must be freed by the caller + * @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c. 
+ */ +char * +ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw) +{ + const gchar *hw_name; + const gchar *use_accl = "true:"; + gchar *str_prop = NULL; + + hw_name = get_accl_hw_str (ml_nnfw_to_accl_hw (hw)); + str_prop = g_strdup_printf ("%s%s", use_accl, hw_name); + + return str_prop; +} + +/** + * @brief Checks the element is registered and available on the pipeline. + */ +int +ml_check_element_availability (const char *element_name, bool *available) +{ + GstElementFactory *factory; + int status; + + check_feature_state (); + + if (!element_name || !available) + return ML_ERROR_INVALID_PARAMETER; + + status = ml_initialize_gstreamer (); + if (status != ML_ERROR_NONE) + return status; + + /* init false */ + *available = false; + + factory = gst_element_factory_find (element_name); + if (factory) { + GstPluginFeature *feature = GST_PLUGIN_FEATURE (factory); + const gchar *plugin_name = gst_plugin_feature_get_plugin_name (feature); + + /* check restricted element */ + status = ml_check_plugin_availability (plugin_name, element_name); + if (status == ML_ERROR_NONE) + *available = true; + + gst_object_unref (factory); + } + + return ML_ERROR_NONE; +} + +/** + * @brief Checks the availability of the plugin. 
+ */ +int +ml_check_plugin_availability (const char *plugin_name, const char *element_name) +{ + static gboolean list_loaded = FALSE; + static gchar **restricted_elements = NULL; + + if (!plugin_name || !element_name) { + mlapi_loge ("The name is invalid, failed to check the availability."); + return ML_ERROR_INVALID_PARAMETER; + } + + if (!list_loaded) { + gboolean restricted; + + restricted = + nnsconf_get_custom_value_bool ("element-restriction", + "enable_element_restriction", FALSE); + if (restricted) { + gchar *elements; + + /* check white-list of available plugins */ + elements = + nnsconf_get_custom_value_string ("element-restriction", + "restricted_elements"); + if (elements) { + restricted_elements = g_strsplit_set (elements, " ,;", -1); + g_free (elements); + } + } + + list_loaded = TRUE; + } + + /* nnstreamer elements */ + if (g_str_has_prefix (plugin_name, "nnstreamer") && + g_str_has_prefix (element_name, "tensor_")) { + return ML_ERROR_NONE; + } + + if (restricted_elements && + find_key_strv ((const gchar **) restricted_elements, element_name) < 0) { + mlapi_logw ("The element %s is restricted.", element_name); + return ML_ERROR_NOT_SUPPORTED; + } + + return ML_ERROR_NONE; +} diff --git a/c/src/ml-api-inference-internal.h b/c/src/ml-api-inference-internal.h new file mode 100644 index 0000000..3eb74ac --- /dev/null +++ b/c/src/ml-api-inference-internal.h @@ -0,0 +1,287 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/** + * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved. + * + * @file ml-api-internal-nnstreamer.h + * @date 20 October 2021 + * @brief ML C-API internal header with NNStreamer deps. + * This file should NOT be exported to SDK or devel package. 
+ * @see https://github.com/nnstreamer/api + * @author MyungJoo Ham + * @bug No known bugs except for NYI items + */ + +#ifndef __ML_API_INTERNAL_NNSTREAMER_H__ +#define __ML_API_INTERNAL_NNSTREAMER_H__ + +#include +#include +#include + +#include +#include +#include + +#include "ml-api-internal.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/***** Wrappers of tizen-api-internal.h for pipelines *****/ +#if defined (__TIZEN__) +#if defined (__PRIVILEGE_CHECK_SUPPORT__) + +#define convert_tizen_element(...) ml_tizen_convert_element(__VA_ARGS__) + +#if (TIZENVERSION >= 5) && (TIZENVERSION < 9999) +#define get_tizen_resource(...) ml_tizen_get_resource(__VA_ARGS__) +#define release_tizen_resource(...) ml_tizen_release_resource(__VA_ARGS__) + +#elif (TIZENVERSION < 5) +#define get_tizen_resource(...) (0) +#define release_tizen_resource(...) do { } while (0) +typedef void * mm_resource_manager_h; +typedef enum { MM_RESOURCE_MANAGER_RES_TYPE_MAX } mm_resource_manager_res_type_e; + +#else /* TIZENVERSION */ +#error Tizen version is not defined. +#endif /* TIZENVERSION */ + +#else /* __PRIVILEGE_CHECK_SUPPORT__ */ + +#define convert_tizen_element(...) ML_ERROR_NONE +#define get_tizen_resource(...) ML_ERROR_NONE +#define release_tizen_resource(...) + +#endif /* __PRIVILEGE_CHECK_SUPPORT__ */ + +#else /* __TIZEN */ + +#define convert_tizen_element(...) ML_ERROR_NONE +#define get_tizen_resource(...) ML_ERROR_NONE +#define release_tizen_resource(...) + +#endif /* __TIZEN__ */ +/** + * @brief Internal private representation of custom filter handle. + */ +typedef struct { + char *name; + unsigned int ref_count; + GMutex lock; + ml_tensors_info_h in_info; + ml_tensors_info_h out_info; + ml_custom_easy_invoke_cb cb; + void *pdata; +} ml_custom_filter_s; + +/** + * @brief Internal private representation of tensor_if custom conditon. 
+ * @since_tizen 6.5 + */ +typedef struct { + char *name; + unsigned int ref_count; + GMutex lock; + ml_pipeline_if_custom_cb cb; + void *pdata; +} ml_if_custom_s; + +/** + * @brief Possible controls on elements of a pipeline. + */ +typedef enum { + ML_PIPELINE_ELEMENT_UNKNOWN = 0x0, + ML_PIPELINE_ELEMENT_SINK = 0x1, + ML_PIPELINE_ELEMENT_APP_SRC = 0x2, + ML_PIPELINE_ELEMENT_APP_SINK = 0x3, + ML_PIPELINE_ELEMENT_VALVE = 0x4, + ML_PIPELINE_ELEMENT_SWITCH_INPUT = 0x8, + ML_PIPELINE_ELEMENT_SWITCH_OUTPUT = 0x9, + ML_PIPELINE_ELEMENT_COMMON = 0xB, +} ml_pipeline_element_e; + +/** + * @brief Internal private representation of pipeline handle. + */ +typedef struct _ml_pipeline ml_pipeline; + +/** + * @brief An element that may be controlled individually in a pipeline. + */ +typedef struct _ml_pipeline_element { + GstElement *element; /**< The Sink/Src/Valve/Switch element */ + ml_pipeline *pipe; /**< The main pipeline */ + char *name; + ml_pipeline_element_e type; + GstPad *src; + GstPad *sink; /**< Unref this at destroy */ + ml_tensors_info_s tensors_info; + size_t size; + + GList *handles; + int maxid; /**< to allocate id for each handle */ + gulong handle_id; + + GMutex lock; /**< Lock for internal values */ + gboolean is_media_stream; + gboolean is_flexible_tensor; + + ml_handle_destroy_cb custom_destroy; + gpointer custom_data; +} ml_pipeline_element; + +/** + * @brief Internal data structure for the pipeline state callback. + */ +typedef struct { + ml_pipeline_state_cb cb; /**< Callback to notify the change of pipeline state */ + void *user_data; /**< The user data passed when calling the state change callback */ +} pipeline_state_cb_s; + +/** + * @brief Internal data structure for the resource. + */ +typedef struct { + gchar *type; /**< resource type */ + gpointer handle; /**< pointer to resource handle */ +} pipeline_resource_s; + +/** + * @brief Internal private representation of pipeline handle. 
+ * @details This should not be exposed to applications + */ +struct _ml_pipeline { + GstElement *element; /**< The pipeline itself (GstPipeline) */ + GstBus *bus; /**< The bus of the pipeline */ + gulong signal_msg; /**< The message signal (connected to bus) */ + GMutex lock; /**< Lock for pipeline operations */ + gboolean isEOS; /**< The pipeline is EOS state */ + ml_pipeline_state_e pipe_state; /**< The state of pipeline */ + GHashTable *namednodes; /**< hash table of "element"s. */ + GHashTable *resources; /**< hash table of resources to construct the pipeline */ + pipeline_state_cb_s state_cb; /**< Callback to notify the change of pipeline state */ +}; + +/** + * @brief Internal private representation sink callback function for GstTensorSink and GstAppSink + * @details This represents a single instance of callback registration. This should not be exposed to applications. + */ +typedef struct { + ml_pipeline_sink_cb sink_cb; + ml_pipeline_src_callbacks_s src_cb; + void *pdata; +} callback_info_s; + +/** + * @brief Internal private representation of common element handle (All GstElement except AppSink and TensorSink) + * @details This represents a single instance of registration. This should not be exposed to applications. + */ +typedef struct _ml_pipeline_common_elem { + ml_pipeline *pipe; + ml_pipeline_element *element; + guint32 id; + callback_info_s *callback_info; /**< Callback function information. If element is not GstTensorSink or GstAppSink, then it should be NULL. */ +} ml_pipeline_common_elem; + + +/** + * @brief Macro to check the availability of given NNFW. + */ +#define ml_nnfw_is_available(f,h) ({bool a; (ml_check_nnfw_availability ((f), (h), &a) == ML_ERROR_NONE && a);}) + +/** + * @brief Macro to check the availability of given element. + */ +#define ml_element_is_available(e) ({bool a; (ml_check_element_availability ((e), &a) == ML_ERROR_NONE && a);}) + +/** + * @brief Allocates a tensors information handle from gst info. 
+ */ +int ml_tensors_info_create_from_gst (ml_tensors_info_h *ml_info, GstTensorsInfo *gst_info); + +/** + * @brief Copies tensor metadata from gst tensors info. + */ +void ml_tensors_info_copy_from_gst (ml_tensors_info_s *ml_info, const GstTensorsInfo *gst_info); + +/** + * @brief Copies tensor metadata from ml tensors info. + */ +void ml_tensors_info_copy_from_ml (GstTensorsInfo *gst_info, const ml_tensors_info_s *ml_info); + +/** + * @brief Internal function to get the sub-plugin name. + */ +const char * ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw); + +/** + * @brief Convert c-api based hw to internal representation + */ +accl_hw ml_nnfw_to_accl_hw (const ml_nnfw_hw_e hw); + +/** + * @brief Internal function to get the nnfw type. + */ +ml_nnfw_type_e ml_get_nnfw_type_by_subplugin_name (const char *name); + +/** + * @brief Initializes the GStreamer library. This is internal function. + */ +int ml_initialize_gstreamer (void); + +/** + * @brief Validates the nnfw model file. (Internal only) + * @since_tizen 5.5 + * @param[in] model List of model file paths. + * @param[in] num_models The number of model files. There are a few frameworks that require multiple model files for a single model. + * @param[in/out] nnfw The type of NNFW. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful + * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. + */ +int ml_validate_model_file (const char * const *model, const unsigned int num_models, ml_nnfw_type_e * nnfw); + +/** + * @brief Checks the availability of the plugin. + */ +int ml_check_plugin_availability (const char *plugin_name, const char *element_name); + +/** + * @brief Internal function to convert accelerator as tensor_filter property format. 
+ * @note returned value must be freed by the caller + */ +char* ml_nnfw_to_str_prop (ml_nnfw_hw_e hw); + +/** + * @brief Gets the element of pipeline itself (GstElement). + * @details With the returned reference, you can use GStreamer functions to handle the element in pipeline. + * Note that caller should release the returned reference using gst_object_unref(). + * @return The reference of pipeline itself. Null if the pipeline is not constructed or closed. + */ +GstElement* ml_pipeline_get_gst_element (ml_pipeline_h pipe); + +#if defined (__TIZEN__) +/****** TIZEN PRIVILEGE CHECK BEGINS ******/ +/** + * @brief Releases the resource handle of Tizen. + */ +void ml_tizen_release_resource (gpointer handle, const gchar * res_type); + +/** + * @brief Gets the resource handle of Tizen. + */ +int ml_tizen_get_resource (ml_pipeline_h pipe, const gchar * res_type); + +/** + * @brief Converts predefined element for Tizen. + */ +int ml_tizen_convert_element (ml_pipeline_h pipe, gchar ** result, gboolean is_internal); +/****** TIZEN PRIVILEGE CHECK ENDS ******/ +#endif /* __TIZEN */ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __ML_API_INTERNAL_NNSTREAMER_H__ */ diff --git a/c/src/nnstreamer-capi-pipeline.c b/c/src/ml-api-inference-pipeline.c similarity index 92% rename from c/src/nnstreamer-capi-pipeline.c rename to c/src/ml-api-inference-pipeline.c index 6662641..1289836 100644 --- a/c/src/nnstreamer-capi-pipeline.c +++ b/c/src/ml-api-inference-pipeline.c @@ -2,7 +2,7 @@ /** * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved. * - * @file nnstreamer-capi-pipeline.c + * @file ml-api-inference-pipeline.c * @date 11 March 2019 * @brief NNStreamer/Pipeline(main) C-API Wrapper. * This allows to construct and control NNStreamer pipelines. 
@@ -15,12 +15,17 @@ #include #include #include /* To push data to pipeline */ +#include +#include +#include +#include +#include -#include "nnstreamer-capi-private.h" -#include "tensor_if.h" -#include "tensor_typedef.h" -#include "tensor_filter_custom_easy.h" -#include "nnstreamer_plugin_api.h" +#include +#include + +#include "ml-api-inference-internal.h" +#include "ml-api-internal.h" #define handle_init(name, h) \ @@ -30,14 +35,14 @@ int ret = ML_ERROR_NONE; \ check_feature_state (); \ if ((h) == NULL) { \ - ml_loge ("The given handle is invalid"); \ + mlapi_loge ("The given handle is invalid"); \ return ML_ERROR_INVALID_PARAMETER; \ } \ \ p = name->pipe; \ elem = name->element; \ if (p == NULL || elem == NULL || p != elem->pipe) { \ - ml_loge ("The handle appears to be broken."); \ + mlapi_loge ("The handle appears to be broken."); \ return ML_ERROR_INVALID_PARAMETER; \ } \ \ @@ -45,7 +50,7 @@ g_mutex_lock (&elem->lock); \ \ if (NULL == g_list_find (elem->handles, name)) { \ - ml_loge ("The handle does not exists."); \ + mlapi_loge ("The handle does not exists."); \ ret = ML_ERROR_INVALID_PARAMETER; \ goto unlock_return; \ } @@ -214,7 +219,7 @@ construct_element (GstElement * e, ml_pipeline * p, const char *name, ml_pipeline_element *ret = g_new0 (ml_pipeline_element, 1); if (ret == NULL) { - ml_loge ("Failed to allocate memory for the pipeline."); + mlapi_loge ("Failed to allocate memory for the pipeline."); return NULL; } @@ -288,7 +293,8 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) num_mems = gst_buffer_n_memory (b); if (num_mems > ML_TENSOR_SIZE_LIMIT) { - ml_loge ("Number of memory chunks in a GstBuffer exceed the limit: %u > %u", + mlapi_loge + ("Number of memory chunks in a GstBuffer exceed the limit: %u > %u", num_mems, ML_TENSOR_SIZE_LIMIT); return; } @@ -297,7 +303,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) status = ml_tensors_data_create_no_alloc (NULL, (ml_tensors_data_h *) & _data); if (status != 
ML_ERROR_NONE) { - ml_loge ("Failed to allocate memory for tensors data in sink callback."); + mlapi_loge ("Failed to allocate memory for tensors data in sink callback."); return; } @@ -343,7 +349,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) } if (_info->num_tensors != num_mems) { - ml_loge + mlapi_loge ("The sink event of [%s] cannot be handled because the number of tensors mismatches.", elem->name); @@ -357,10 +363,11 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) /* Not configured, yet. */ if (sz == 0) - ml_loge ("The caps for sink(%s) is not configured.", elem->name); + mlapi_loge ("The caps for sink(%s) is not configured.", + elem->name); if (sz != _data->tensors[i].size) { - ml_loge + mlapi_loge ("The sink event of [%s] cannot be handled because the tensor dimension mismatches.", elem->name); @@ -384,7 +391,7 @@ cb_sink_event (GstElement * e, GstBuffer * b, gpointer user_data) /* Get the data! */ if (gst_buffer_get_size (b) != total_size || (elem->size > 0 && total_size != elem->size)) { - ml_loge + mlapi_loge ("The buffersize mismatches. 
All the three values must be the same: %zu, %zu, %zu", total_size, elem->size, gst_buffer_get_size (b)); goto error; @@ -486,7 +493,7 @@ cb_bus_sync_message (GstBus * bus, GstMessage * message, gpointer user_data) gst_message_parse_state_changed (message, &old_state, &new_state, NULL); pipe_h->pipe_state = (ml_pipeline_state_e) new_state; - ml_logd ("The pipeline state changed from %s to %s.", + mlapi_logd ("The pipeline state changed from %s to %s.", gst_element_state_get_name (old_state), gst_element_state_get_name (new_state)); @@ -556,7 +563,7 @@ cleanup_node (gpointer data) gst_element_set_state (e->pipe->element, GST_STATE_PLAYING); if (gst_app_src_end_of_stream (GST_APP_SRC (e->element)) != GST_FLOW_OK) { - ml_logw ("Failed to set EOS in %s", e->name); + mlapi_logw ("Failed to set EOS in %s", e->name); } g_mutex_unlock (&e->lock); while (!e->pipe->isEOS) { @@ -564,7 +571,7 @@ cleanup_node (gpointer data) /** check EOS every 1ms */ g_usleep (1000); if (eos_check_cnt >= EOS_MESSAGE_TIME_LIMIT) { - ml_loge ("Failed to get EOS message"); + mlapi_loge ("Failed to get EOS message"); break; } } @@ -628,7 +635,7 @@ convert_element (ml_pipeline_h pipe, const gchar * description, gchar ** result, status = convert_tizen_element (pipe, &converted, is_internal); if (status == ML_ERROR_NONE) { - ml_logd ("Converted pipeline: %s", converted); + mlapi_logd ("Converted pipeline: %s", converted); *result = converted; } else { g_free (converted); @@ -764,7 +771,7 @@ iterate_element (ml_pipeline * pipe_h, GstElement * pipeline, g_object_get (G_OBJECT (elem), "sync", &sync, NULL); if (sync) { - ml_logw + mlapi_logw ("It is recommended to apply 'sync=false' property to a sink element in most AI applications. 
Otherwise, inference results of large neural networks will be frequently dropped by the synchronization mechanism at the sink element."); } } @@ -795,7 +802,7 @@ iterate_element (ml_pipeline * pipe_h, GstElement * pipeline, break; case GST_ITERATOR_RESYNC: case GST_ITERATOR_ERROR: - ml_logw + mlapi_logw ("There is an error or a resync-event while inspecting a pipeline. However, we can still execute the pipeline."); /* fallthrough */ case GST_ITERATOR_DONE: @@ -842,7 +849,7 @@ construct_pipeline_internal (const char *pipeline_description, /* prepare pipeline handle */ pipe_h = g_new0 (ml_pipeline, 1); if (pipe_h == NULL) { - ml_loge ("Failed to allocate handle for pipeline."); + mlapi_loge ("Failed to allocate handle for pipeline."); return ML_ERROR_OUT_OF_MEMORY; } @@ -867,9 +874,10 @@ construct_pipeline_internal (const char *pipeline_description, g_free (description); if (pipeline == NULL || err) { - ml_loge ("Cannot parse and launch the given pipeline = [%s]", + mlapi_loge ("Cannot parse and launch the given pipeline = [%s]", pipeline_description); - ml_loge (" - Error Message: %s", (err) ? err->message : "unknown reason"); + mlapi_loge (" - Error Message: %s", + (err) ? err->message : "unknown reason"); g_clear_error (&err); if (pipeline) @@ -981,7 +989,7 @@ ml_pipeline_destroy (ml_pipeline_h pipe) scret = gst_element_set_state (p->element, GST_STATE_PAUSED); if (scret == GST_STATE_CHANGE_FAILURE) { g_mutex_unlock (&p->lock); - ml_loge + mlapi_loge ("Failed to wait until state changed PLAYING to PAUSED. 
For the detail, please check the GStreamer log messages."); return ML_ERROR_STREAMS_PIPE; } @@ -993,7 +1001,7 @@ ml_pipeline_destroy (ml_pipeline_h pipe) /** check PAUSED every 1ms */ g_usleep (1000); if (check_paused_cnt >= WAIT_PAUSED_TIME_LIMIT) { - ml_loge ("Failed to wait until state changed to PAUSED"); + mlapi_loge ("Failed to wait until state changed to PAUSED"); break; } } @@ -1003,7 +1011,7 @@ ml_pipeline_destroy (ml_pipeline_h pipe) scret = gst_element_set_state (p->element, GST_STATE_NULL); if (scret != GST_STATE_CHANGE_SUCCESS) { g_mutex_unlock (&p->lock); - ml_loge + mlapi_loge ("Failed to wait until state changed to NULL(STOP). For the detail, please check the GStreamer log messages."); return ML_ERROR_STREAMS_PIPE; } @@ -1051,7 +1059,7 @@ ml_pipeline_get_state (ml_pipeline_h pipe, ml_pipeline_state_e * state) g_mutex_unlock (&p->lock); if (scret == GST_STATE_CHANGE_FAILURE) { - ml_loge + mlapi_loge ("Failed to get the state of the pipeline. For the detail, please check the GStreamer log messages."); return ML_ERROR_STREAMS_PIPE; } @@ -1098,7 +1106,7 @@ ml_pipeline_start (ml_pipeline_h pipe) scret = gst_element_set_state (p->element, GST_STATE_PLAYING); if (scret == GST_STATE_CHANGE_FAILURE) { - ml_loge + mlapi_loge ("Failed to set the state of the pipeline to PLAYING. For the detail, please check the GStreamer log messages."); status = ML_ERROR_STREAMS_PIPE; } @@ -1127,7 +1135,7 @@ ml_pipeline_stop (ml_pipeline_h pipe) g_mutex_unlock (&p->lock); if (scret == GST_STATE_CHANGE_FAILURE) { - ml_loge + mlapi_loge ("Failed to set the state of the pipeline to PAUSED. 
For the detail, please check the GStreamer log messages."); return ML_ERROR_STREAMS_PIPE; } @@ -1153,16 +1161,16 @@ ml_pipeline_flush (ml_pipeline_h pipe, bool start) if (status != ML_ERROR_NONE) return status; - ml_logi ("The pipeline is stopped, clear all data from the pipeline."); + mlapi_logi ("The pipeline is stopped, clear all data from the pipeline."); /* send flush event to pipeline */ g_mutex_lock (&p->lock); if (!gst_element_send_event (p->element, gst_event_new_flush_start ())) { - ml_logw ("Error occurs while sending flush_start event."); + mlapi_logw ("Error occurs while sending flush_start event."); } if (!gst_element_send_event (p->element, gst_event_new_flush_stop (TRUE))) { - ml_logw ("Error occurs while sending flush_stop event."); + mlapi_logw ("Error occurs while sending flush_stop event."); } g_mutex_unlock (&p->lock); @@ -1190,7 +1198,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, check_feature_state (); if (h == NULL) { - ml_loge ("The argument sink handle is not valid."); + mlapi_loge ("The argument sink handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1198,17 +1206,17 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, *h = NULL; if (pipe == NULL) { - ml_loge ("The first argument, pipeline handle is not valid."); + mlapi_loge ("The first argument, pipeline handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (sink_name == NULL) { - ml_loge ("The second argument, sink name is not valid."); + mlapi_loge ("The second argument, sink name is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (cb == NULL) { - ml_loge ("The callback argument, cb, is not valid."); + mlapi_loge ("The callback argument, cb, is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1216,14 +1224,14 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, elem = g_hash_table_lookup (p->namednodes, sink_name); if (elem == NULL) { - ml_loge ("There is no element named [%s] in 
the pipeline.", sink_name); + mlapi_loge ("There is no element named [%s] in the pipeline.", sink_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } if (elem->type != ML_PIPELINE_ELEMENT_SINK && elem->type != ML_PIPELINE_ELEMENT_APP_SINK) { - ml_loge ("The element [%s] in the pipeline is not a sink element.", + mlapi_loge ("The element [%s] in the pipeline is not a sink element.", sink_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -1231,7 +1239,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, if (elem->handle_id > 0) { /* no need to connect signal to sink element */ - ml_logw ("Sink callback is already registered."); + mlapi_logw ("Sink callback is already registered."); } else { /* set callback for new data */ if (elem->type == ML_PIPELINE_ELEMENT_SINK) { @@ -1251,7 +1259,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, } if (elem->handle_id == 0) { - ml_loge ("Failed to connect a signal to the element [%s].", sink_name); + mlapi_loge ("Failed to connect a signal to the element [%s].", sink_name); ret = ML_ERROR_STREAMS_PIPE; goto unlock_return; } @@ -1259,7 +1267,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, sink = g_new0 (ml_pipeline_common_elem, 1); if (sink == NULL) { - ml_loge ("Failed to allocate the sink handle for %s.", sink_name); + mlapi_loge ("Failed to allocate the sink handle for %s.", sink_name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -1267,7 +1275,7 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, sink->callback_info = g_new0 (callback_info_s, 1); if (sink->callback_info == NULL) { g_free (sink); - ml_loge ("Failed to allocate the sink handle for %s.", sink_name); + mlapi_loge ("Failed to allocate the sink handle for %s.", sink_name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -1324,7 +1332,7 @@ ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem) elem->size = 0; if (elem->src 
== NULL) { - ml_loge + mlapi_loge ("Failed to get the src pad of the element[%s]. For the detail, please check the GStreamer log messages.", elem->name); ret = ML_ERROR_STREAMS_PIPE; @@ -1358,7 +1366,7 @@ ml_pipeline_src_parse_tensors_info (ml_pipeline_element * elem) } } else { if (!elem->is_media_stream && !elem->is_flexible_tensor) { - ml_logw + mlapi_logw ("Cannot find caps. The pipeline is not yet negotiated for src element [%s].", elem->name); gst_object_unref (elem->src); @@ -1387,7 +1395,7 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, check_feature_state (); if (h == NULL) { - ml_loge ("The argument source handle is not valid."); + mlapi_loge ("The argument source handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1395,12 +1403,12 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, *h = NULL; if (pipe == NULL) { - ml_loge ("The first argument, pipeline handle is not valid."); + mlapi_loge ("The first argument, pipeline handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (src_name == NULL) { - ml_loge ("The second argument, source name is not valid."); + mlapi_loge ("The second argument, source name is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1409,13 +1417,13 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, elem = g_hash_table_lookup (p->namednodes, src_name); if (elem == NULL) { - ml_loge ("There is no element named [%s] in the pipeline.", src_name); + mlapi_loge ("There is no element named [%s] in the pipeline.", src_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } if (elem->type != ML_PIPELINE_ELEMENT_APP_SRC) { - ml_loge ("The element [%s] in the pipeline is not a source element.", + mlapi_loge ("The element [%s] in the pipeline is not a source element.", src_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -1423,7 +1431,7 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, src = *h = g_new0 
(ml_pipeline_common_elem, 1); if (src == NULL) { - ml_loge ("Failed to allocate the src handle for %s.", src_name); + mlapi_loge ("Failed to allocate the src handle for %s.", src_name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -1480,14 +1488,15 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, _data = (ml_tensors_data_s *) data; if (!_data) { - ml_loge ("The given param data is invalid."); + mlapi_loge ("The given param data is invalid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } G_LOCK_UNLESS_NOLOCK (*_data); if (_data->num_tensors < 1 || _data->num_tensors > ML_TENSOR_SIZE_LIMIT) { - ml_loge ("The tensor size is invalid. It should be 1 ~ %u; where it is %u", + mlapi_loge + ("The tensor size is invalid. It should be 1 ~ %u; where it is %u", ML_TENSOR_SIZE_LIMIT, _data->num_tensors); ret = ML_ERROR_INVALID_PARAMETER; goto dont_destroy_data; @@ -1496,14 +1505,14 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, ret = ml_pipeline_src_parse_tensors_info (elem); if (ret != ML_ERROR_NONE) { - ml_logw + mlapi_logw ("The pipeline is not ready to accept inputs. The input is ignored."); goto dont_destroy_data; } if (!elem->is_media_stream && !elem->is_flexible_tensor) { if (elem->tensors_info.num_tensors != _data->num_tensors) { - ml_loge + mlapi_loge ("The src push of [%s] cannot be handled because the number of tensors in a frame mismatches. 
%u != %u", elem->name, elem->tensors_info.num_tensors, _data->num_tensors); @@ -1515,7 +1524,7 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, size_t sz = ml_tensor_info_get_size (&elem->tensors_info.info[i]); if (sz != _data->tensors[i].size) { - ml_loge + mlapi_loge ("The given input tensor size (%d'th, %zu bytes) mismatches the source pad (%zu bytes)", i, _data->tensors[i].size, sz); @@ -1568,11 +1577,11 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, } if (gret == GST_FLOW_FLUSHING) { - ml_logw + mlapi_logw ("The pipeline is not in PAUSED/PLAYING. The input may be ignored."); ret = ML_ERROR_TRY_AGAIN; } else if (gret == GST_FLOW_EOS) { - ml_logw ("THe pipeline is in EOS state. The input is ignored."); + mlapi_logw ("THe pipeline is in EOS state. The input is ignored."); ret = ML_ERROR_STREAMS_PIPE; } @@ -1677,7 +1686,7 @@ ml_pipeline_src_set_event_cb (ml_pipeline_src_h src_handle, if (src->callback_info == NULL) src->callback_info = g_new0 (callback_info_s, 1); if (src->callback_info == NULL) { - ml_loge ("Failed to allocate the callback info for %s.", elem->name); + mlapi_loge ("Failed to allocate the callback info for %s.", elem->name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -1737,7 +1746,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name, check_feature_state (); if (h == NULL) { - ml_loge ("The argument switch handle is not valid."); + mlapi_loge ("The argument switch handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1745,12 +1754,12 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name, *h = NULL; if (pipe == NULL) { - ml_loge ("The first argument, pipeline handle, is not valid."); + mlapi_loge ("The first argument, pipeline handle, is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (switch_name == NULL) { - ml_loge ("The second argument, switch name, is not valid."); + mlapi_loge ("The second argument, switch 
name, is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1758,7 +1767,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name, elem = g_hash_table_lookup (p->namednodes, switch_name); if (elem == NULL) { - ml_loge ("There is no switch element named [%s] in the pipeline.", + mlapi_loge ("There is no switch element named [%s] in the pipeline.", switch_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -1771,7 +1780,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name, if (type) *type = ML_PIPELINE_SWITCH_OUTPUT_SELECTOR; } else { - ml_loge + mlapi_loge ("There is an element named [%s] in the pipeline, but it is not an input/output switch", switch_name); @@ -1781,7 +1790,7 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name, swtc = *h = g_new0 (ml_pipeline_common_elem, 1); if (swtc == NULL) { - ml_loge ("Failed to allocate the switch handle for %s.", switch_name); + mlapi_loge ("Failed to allocate the switch handle for %s.", switch_name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -1828,7 +1837,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name) handle_init (swtc, h); if (pad_name == NULL) { - ml_loge ("The second argument, pad name, is not valid."); + mlapi_loge ("The second argument, pad name, is not valid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } @@ -1837,7 +1846,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name) active_name = gst_pad_get_name (active_pad); if (g_strcmp0 (pad_name, active_name) == 0) { - ml_logi ("Switch is called, but there is no effective changes: %s->%s.", + mlapi_logi ("Switch is called, but there is no effective changes: %s->%s.", active_name, pad_name); g_free (active_name); gst_object_unref (active_pad); @@ -1851,7 +1860,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name) new_pad = gst_element_get_static_pad (elem->element, pad_name); if 
(new_pad == NULL) { /* Not Found! */ - ml_loge ("Cannot find the pad, [%s], from the switch, [%s].", + mlapi_loge ("Cannot find the pad, [%s], from the switch, [%s].", pad_name, elem->name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -1860,7 +1869,7 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name) g_object_set (G_OBJECT (elem->element), "active-pad", new_pad, NULL); gst_object_unref (new_pad); - ml_logi ("Switched to [%s] successfully at switch [%s].", pad_name, + mlapi_logi ("Switched to [%s] successfully at switch [%s].", pad_name, elem->name); handle_exit (h); @@ -1882,7 +1891,7 @@ ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list) handle_init (swtc, h); if (list == NULL) { - ml_loge ("The second argument, list, is not valid."); + mlapi_loge ("The second argument, list, is not valid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } @@ -1895,7 +1904,7 @@ ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list) else if (elem->type == ML_PIPELINE_ELEMENT_SWITCH_OUTPUT) it = gst_element_iterate_src_pads (elem->element); else { - ml_loge + mlapi_loge ("The element, [%s], is supposed to be input/output switch, but it is not. 
Internal data structure is broken.", elem->name); ret = ML_ERROR_STREAMS_PIPE; @@ -1917,7 +1926,7 @@ ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list) gst_iterator_resync (it); break; case GST_ITERATOR_ERROR: - ml_loge ("Cannot access the list of pad properly of a switch, [%s].", + mlapi_loge ("Cannot access the list of pad properly of a switch, [%s].", elem->name); ret = ML_ERROR_STREAMS_PIPE; break; @@ -1936,7 +1945,7 @@ ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list) *list = g_malloc0 (sizeof (char *) * (counter + 1)); if (*list == NULL) { - ml_loge ("Failed to allocate memory for pad list."); + mlapi_loge ("Failed to allocate memory for pad list."); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -1950,7 +1959,7 @@ ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list) g_free (*list); *list = NULL; - ml_loge + mlapi_loge ("Internal data inconsistency. This could be a bug in nnstreamer. Switch [%s].", elem->name); ret = ML_ERROR_STREAMS_PIPE; @@ -1978,7 +1987,7 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name, check_feature_state (); if (h == NULL) { - ml_loge ("The argument valve handle is not valid."); + mlapi_loge ("The argument valve handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1986,12 +1995,12 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name, *h = NULL; if (pipe == NULL) { - ml_loge ("The first argument, pipeline handle, is not valid."); + mlapi_loge ("The first argument, pipeline handle, is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (valve_name == NULL) { - ml_loge ("The second argument, valve name, is not valid."); + mlapi_loge ("The second argument, valve name, is not valid."); return ML_ERROR_INVALID_PARAMETER; } @@ -1999,14 +2008,14 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name, elem = g_hash_table_lookup (p->namednodes, valve_name); if (elem == NULL) { - ml_loge ("There is 
no valve element named [%s] in the pipeline.", + mlapi_loge ("There is no valve element named [%s] in the pipeline.", valve_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } if (elem->type != ML_PIPELINE_ELEMENT_VALVE) { - ml_loge + mlapi_loge ("There is an element named [%s] in the pipeline, but it is not a valve", valve_name); ret = ML_ERROR_INVALID_PARAMETER; @@ -2015,7 +2024,7 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name, valve = *h = g_new0 (ml_pipeline_common_elem, 1); if (valve == NULL) { - ml_loge ("Failed to allocate the valve handle for %s.", valve_name); + mlapi_loge ("Failed to allocate the valve handle for %s.", valve_name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -2063,7 +2072,7 @@ ml_pipeline_valve_set_open (ml_pipeline_valve_h h, bool open) if ((open != false) != (drop != FALSE)) { /* Nothing to do */ - ml_logi ("Valve is called, but there is no effective changes"); + mlapi_logi ("Valve is called, but there is no effective changes"); goto unlock_return; } @@ -2090,17 +2099,17 @@ ml_pipeline_element_get_handle (ml_pipeline_h pipe, const char *element_name, /* Check input parameter */ if (pipe == NULL) { - ml_loge ("The first argument, pipeline handle, is not valid."); + mlapi_loge ("The first argument, pipeline handle, is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (element_name == NULL) { - ml_loge ("The second argument, element name, is not valid."); + mlapi_loge ("The second argument, element name, is not valid."); return ML_ERROR_INVALID_PARAMETER; } if (elem_h == NULL) { - ml_loge ("The argument element handle is not valid."); + mlapi_loge ("The argument element handle is not valid."); return ML_ERROR_INVALID_PARAMETER; } *elem_h = NULL; @@ -2115,7 +2124,7 @@ ml_pipeline_element_get_handle (ml_pipeline_h pipe, const char *element_name, gst_elem = gst_bin_get_by_name (GST_BIN (p->element), element_name); if (gst_elem == NULL) { - ml_loge ("The element named [%s] is not found in 
the pipeline", + mlapi_loge ("The element named [%s] is not found in the pipeline", element_name); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -2125,7 +2134,7 @@ ml_pipeline_element_get_handle (ml_pipeline_h pipe, const char *element_name, elem = construct_element (gst_elem, pipe, element_name, ML_PIPELINE_ELEMENT_COMMON); if (elem == NULL) { - ml_loge ("Failed to allocate the internal memory"); + mlapi_loge ("Failed to allocate the internal memory"); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -2134,7 +2143,7 @@ ml_pipeline_element_get_handle (ml_pipeline_h pipe, const char *element_name, /* Type checking */ if (elem->type == ML_PIPELINE_ELEMENT_UNKNOWN) { - ml_loge + mlapi_loge ("There is an element named [%s] in the pipeline, but it is unknown type.", element_name); ret = ML_ERROR_INVALID_PARAMETER; @@ -2143,7 +2152,8 @@ ml_pipeline_element_get_handle (ml_pipeline_h pipe, const char *element_name, common_elem = *elem_h = g_new0 (ml_pipeline_common_elem, 1); if (common_elem == NULL) { - ml_loge ("Failed to allocate the internal handler for %s.", element_name); + mlapi_loge ("Failed to allocate the internal handler for %s.", + element_name); ret = ML_ERROR_OUT_OF_MEMORY; goto unlock_return; } @@ -2188,7 +2198,7 @@ ml_pipeline_element_check_property (GObjectClass * class, /* Check property existence */ pspec = g_object_class_find_property (class, property_name); if (pspec == NULL) { - ml_loge ("The property name [%s] does not exist.", property_name); + mlapi_loge ("The property name [%s] does not exist.", property_name); return FALSE; } @@ -2200,7 +2210,7 @@ ml_pipeline_element_check_property (GObjectClass * class, (type == G_TYPE_INT && G_TYPE_IS_ENUM (pspec->value_type)) || (type == G_TYPE_UINT && G_TYPE_IS_ENUM (pspec->value_type)) || (type == G_TYPE_DOUBLE && pspec->value_type == G_TYPE_FLOAT))) { - ml_loge ("The type of property name [%s] is '%s'", property_name, + mlapi_loge ("The type of property name [%s] is '%s'", property_name, 
g_type_name (pspec->value_type)); return FALSE; } @@ -2218,7 +2228,7 @@ ml_pipeline_element_set_property (ml_pipeline_element_h elem_h, /* Check the input parameter */ if (property_name == NULL) { - ml_loge ("The second argument, property name is not valid."); + mlapi_loge ("The second argument, property name is not valid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } @@ -2258,13 +2268,13 @@ ml_pipeline_element_get_property (ml_pipeline_element_h elem_h, /* Check the input parameter */ if (property_name == NULL) { - ml_loge ("The second argument, property_name is not valid."); + mlapi_loge ("The second argument, property_name is not valid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } if (pvalue == NULL) { - ml_loge ("The third argument, value is not valid."); + mlapi_loge ("The third argument, value is not valid."); ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; } @@ -2667,7 +2677,7 @@ ml_pipeline_custom_easy_filter_unregister (ml_custom_easy_filter_h custom) g_mutex_lock (&c->lock); if (c->ref_count > 0) { - ml_loge + mlapi_loge ("Failed to unregister custom filter %s, it is used in the pipeline.", c->name); status = ML_ERROR_INVALID_PARAMETER; @@ -2675,7 +2685,7 @@ ml_pipeline_custom_easy_filter_unregister (ml_custom_easy_filter_h custom) } if (NNS_custom_easy_unregister (c->name) != 0) { - ml_loge ("Failed to unregister custom filter %s.", c->name); + mlapi_loge ("Failed to unregister custom filter %s.", c->name); status = ML_ERROR_INVALID_PARAMETER; goto done; } @@ -2851,7 +2861,7 @@ ml_pipeline_tensor_if_custom_unregister (ml_pipeline_if_h if_custom) g_mutex_lock (&c->lock); if (c->ref_count > 0) { - ml_loge + mlapi_loge ("Failed to unregister custom condition %s, it is used in the pipeline.", c->name); status = ML_ERROR_INVALID_PARAMETER; @@ -2859,7 +2869,7 @@ ml_pipeline_tensor_if_custom_unregister (ml_pipeline_if_h if_custom) } if (nnstreamer_if_custom_unregister (c->name) != 0) { - ml_loge ("Failed to unregister tensor_if 
custom condition %s.", c->name); + mlapi_loge ("Failed to unregister tensor_if custom condition %s.", c->name); status = ML_ERROR_STREAMS_PIPE; goto done; } diff --git a/c/src/nnstreamer-capi-single.c b/c/src/ml-api-inference-single.c similarity index 91% rename from c/src/nnstreamer-capi-single.c rename to c/src/ml-api-inference-single.c index 7c83c4b..e719363 100644 --- a/c/src/nnstreamer-capi-single.c +++ b/c/src/ml-api-inference-single.c @@ -2,7 +2,7 @@ /** * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved. * - * @file nnstreamer-capi-single.c + * @file ml-api-inference-single.c * @date 29 Aug 2019 * @brief NNStreamer/Single C-API Wrapper. * This allows to invoke individual input frame with NNStreamer. @@ -13,11 +13,16 @@ */ #include -#include +#include /* Tizen platform header */ +#include +#include #include - +#include #include +#include "ml-api-inference-internal.h" +#include "ml-api-internal.h" + #define ML_SINGLE_MAGIC 0xfeedfeed /** @@ -46,7 +51,7 @@ G_LOCK_DEFINE_STATIC (magic); G_LOCK (magic); \ single_h = (ml_single *) single; \ if (G_UNLIKELY(single_h->magic != ML_SINGLE_MAGIC)) { \ - ml_loge ("The given param, single is invalid."); \ + mlapi_loge ("The given param, single is invalid."); \ G_UNLOCK (magic); \ return ML_ERROR_INVALID_PARAMETER; \ } \ @@ -106,6 +111,56 @@ typedef struct } ml_single; /** + * @brief Checks the availability of the given execution environments with custom option. 
+ */ +int +ml_check_nnfw_availability_full (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, + const char *custom, bool *available) +{ + const char *fw_name = NULL; + + check_feature_state (); + + if (!available) + return ML_ERROR_INVALID_PARAMETER; + + /* init false */ + *available = false; + + if (nnfw == ML_NNFW_TYPE_ANY) + return ML_ERROR_INVALID_PARAMETER; + + fw_name = ml_get_nnfw_subplugin_name (nnfw); + + if (fw_name) { + if (nnstreamer_filter_find (fw_name) != NULL) { + accl_hw accl = ml_nnfw_to_accl_hw (hw); + + if (gst_tensor_filter_check_hw_availability (fw_name, accl, custom)) { + *available = true; + } else { + mlapi_logw ("%s is supported but not with the specified hardware.", + fw_name); + } + } else { + mlapi_logw ("%s is not supported.", fw_name); + } + } + + return ML_ERROR_NONE; +} + +/** + * @brief Checks the availability of the given execution environments. + */ +int +ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, + bool *available) +{ + return ml_check_nnfw_availability_full (nnfw, hw, NULL, available); +} + +/** * @brief setup input and output tensor memory to pass to the tensor_filter. * @note this tensor memory wrapper will be reused for each invoke. 
*/ @@ -187,7 +242,7 @@ exit: ML_SINGLE_HANDLE_UNLOCK (single_h); if (G_UNLIKELY (status != ML_ERROR_NONE)) - ml_loge ("Failed to destroy the data."); + mlapi_loge ("Failed to destroy the data."); return status; } @@ -232,7 +287,7 @@ __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out) /** invoke the thread */ if (!single_h->klass->invoke (single_h->filter, in_tensors, out_tensors, single_h->free_output)) { - ml_loge ("Failed to invoke the tensors."); + mlapi_loge ("Failed to invoke the tensors."); status = ML_ERROR_STREAMS_PIPE; } @@ -412,7 +467,7 @@ ml_single_get_gst_info (ml_single * single_h, gboolean is_input, g_free (val); if (gst_info->num_tensors != num) { - ml_logw ("The number of tensor type is mismatched in filter."); + mlapi_logw ("The number of tensor type is mismatched in filter."); } /* get names */ @@ -421,7 +476,7 @@ ml_single_get_gst_info (ml_single * single_h, gboolean is_input, g_free (val); if (gst_info->num_tensors != num) { - ml_logw ("The number of tensor name is mismatched in filter."); + mlapi_logw ("The number of tensor name is mismatched in filter."); } } @@ -573,13 +628,13 @@ ml_single_create_handle (ml_nnfw_type_e nnfw) single_h = g_new0 (ml_single, 1); if (single_h == NULL) { - ml_loge ("Failed to allocate the single handle."); + mlapi_loge ("Failed to allocate the single handle."); return NULL; } single_h->filter = g_object_new (G_TYPE_TENSOR_FILTER_SINGLE, NULL); if (single_h->filter == NULL) { - ml_loge ("Failed to create a new instance for filter."); + mlapi_loge ("Failed to create a new instance for filter."); g_free (single_h); return NULL; } @@ -601,7 +656,7 @@ ml_single_create_handle (ml_nnfw_type_e nnfw) single_h->klass = g_type_class_ref (G_TYPE_TENSOR_FILTER_SINGLE); if (single_h->klass == NULL) { - ml_loge ("Failed to get class of the filter."); + mlapi_loge ("Failed to get class of the filter."); ml_single_close (single_h); return NULL; } @@ -609,7 +664,8 @@ ml_single_create_handle (ml_nnfw_type_e 
nnfw) single_h->thread = g_thread_try_new (NULL, invoke_thread, (gpointer) single_h, &error); if (single_h->thread == NULL) { - ml_loge ("Failed to create the invoke thread, error: %s.", error->message); + mlapi_loge ("Failed to create the invoke thread, error: %s.", + error->message); g_clear_error (&error); ml_single_close (single_h); return NULL; @@ -626,28 +682,28 @@ _ml_single_open_custom_validate_arguments (ml_single_h * single, ml_single_preset * info) { if (!single) { - ml_loge ("The given param is invalid: 'single' is NULL."); + mlapi_loge ("The given param is invalid: 'single' is NULL."); return ML_ERROR_INVALID_PARAMETER; } if (!info) { - ml_loge ("The given param is invalid: 'info' is NULL."); + mlapi_loge ("The given param is invalid: 'info' is NULL."); return ML_ERROR_INVALID_PARAMETER; } /* Validate input tensor info. */ if (info->input_info && !ml_tensors_info_is_valid (info->input_info)) { - ml_loge ("The given param, input tensor info is invalid."); + mlapi_loge ("The given param, input tensor info is invalid."); return ML_ERROR_INVALID_PARAMETER; } /* Validate output tensor info. */ if (info->output_info && !ml_tensors_info_is_valid (info->output_info)) { - ml_loge ("The given param, output tensor info is invalid."); + mlapi_loge ("The given param, output tensor info is invalid."); return ML_ERROR_INVALID_PARAMETER; } if (!info->models) { - ml_loge ("The given param, model is invalid: info->models is NULL."); + mlapi_loge ("The given param, model is invalid: info->models is NULL."); return ML_ERROR_INVALID_PARAMETER; } @@ -706,11 +762,11 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info) * (Supposed CPU only) Support others later. 
*/ if (!ml_nnfw_is_available (nnfw, hw)) { - ml_loge ("The given nnfw is not available."); + mlapi_loge ("The given nnfw is not available."); return ML_ERROR_NOT_SUPPORTED; } - /** Create ml_single object */ + /** Create ml_single object */ if ((single_h = ml_single_create_handle (nnfw)) == NULL) return ML_ERROR_OUT_OF_MEMORY; @@ -735,7 +791,7 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info) if (status != ML_ERROR_NONE) goto error; } else { - ml_loge + mlapi_loge ("To run the pipeline, input and output information should be initialized."); status = ML_ERROR_INVALID_PARAMETER; goto error; @@ -770,20 +826,20 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info) /* 4. Start the nnfw to get inout configurations if needed */ if (!single_h->klass->start (single_h->filter)) { - ml_loge ("Failed to start NNFW to get inout configurations."); + mlapi_loge ("Failed to start NNFW to get inout configurations."); status = ML_ERROR_STREAMS_PIPE; goto error; } /* 5. 
Set in/out configs and metadata */ if (!ml_single_set_info_in_handle (single_h, TRUE, in_tensors_info)) { - ml_loge ("The input tensor info is invalid."); + mlapi_loge ("The input tensor info is invalid."); status = ML_ERROR_INVALID_PARAMETER; goto error; } if (!ml_single_set_info_in_handle (single_h, FALSE, out_tensors_info)) { - ml_loge ("The output tensor info is invalid."); + mlapi_loge ("The output tensor info is invalid."); status = ML_ERROR_INVALID_PARAMETER; goto error; } @@ -849,7 +905,7 @@ ml_single_close (ml_single_h single) check_feature_state (); if (!single) { - ml_loge ("The given param, single is invalid."); + mlapi_loge ("The given param, single is invalid."); return ML_ERROR_INVALID_PARAMETER; } @@ -862,7 +918,7 @@ ml_single_close (ml_single_h single) /** Wait until invoke process is finished */ while (invoking) { - ml_logw ("Wait 1 ms until invoke is finished and close the handle."); + mlapi_logw ("Wait 1 ms until invoke is finished and close the handle."); g_usleep (1000); g_mutex_lock (&single_h->mutex); invoking = single_h->invoking; @@ -916,7 +972,7 @@ _ml_single_invoke_validate_data (ml_single_h single, _data = (ml_tensors_data_s *) data; if (G_UNLIKELY (!_data)) { - ml_loge ("The data handle to invoke the model is invalid."); + mlapi_loge ("The data handle to invoke the model is invalid."); return ML_ERROR_INVALID_PARAMETER; } @@ -926,7 +982,7 @@ _ml_single_invoke_validate_data (ml_single_h single, _model = &single_h->out_tensors; if (G_UNLIKELY (_data->num_tensors != _model->num_tensors)) { - ml_loge + mlapi_loge ("The number of %s tensors is not compatible with model. Given: %u, Expected: %u.", (is_input) ? 
"input" : "output", _data->num_tensors, _model->num_tensors); @@ -935,13 +991,13 @@ _ml_single_invoke_validate_data (ml_single_h single, for (i = 0; i < _data->num_tensors; i++) { if (G_UNLIKELY (!_data->tensors[i].tensor)) { - ml_loge ("The %d-th input tensor is not valid.", i); + mlapi_loge ("The %d-th input tensor is not valid.", i); return ML_ERROR_INVALID_PARAMETER; } raw_size = _model->tensors[i].size; if (G_UNLIKELY (_data->tensors[i].size != raw_size)) { - ml_loge + mlapi_loge ("The size of %d-th %s tensor is not compatible with model. Given: %zu, Expected: %zu (type: %d).", i, (is_input) ? "input" : "output", _data->tensors[i].size, raw_size, single_h->in_info.info[i].type); @@ -977,19 +1033,19 @@ _ml_single_invoke_internal (ml_single_h single, check_feature_state (); if (G_UNLIKELY (!single)) { - ml_loge + mlapi_loge ("The first argument of ml_single_invoke() is not valid. Please check the single handle."); return ML_ERROR_INVALID_PARAMETER; } if (G_UNLIKELY (!input)) { - ml_loge + mlapi_loge ("The second argument of ml_single_invoke() is not valid. Please check the input data handle."); return ML_ERROR_INVALID_PARAMETER; } if (G_UNLIKELY (!output)) { - ml_loge + mlapi_loge ("The third argument of ml_single_invoke() is not valid. Please check the output data handle."); return ML_ERROR_INVALID_PARAMETER; } @@ -997,7 +1053,7 @@ _ml_single_invoke_internal (ml_single_h single, ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0); if (G_UNLIKELY (!single_h->filter)) { - ml_loge + mlapi_loge ("The tensor_filter element is not valid. 
It is not correctly created or already freed."); status = ML_ERROR_INVALID_PARAMETER; goto exit; @@ -1016,11 +1072,11 @@ _ml_single_invoke_internal (ml_single_h single, if (single_h->state != IDLE) { if (G_UNLIKELY (single_h->state == JOIN_REQUESTED)) { - ml_loge ("The handle is closed or being closed."); + mlapi_loge ("The handle is closed or being closed."); status = ML_ERROR_STREAMS_PIPE; goto exit; } - ml_loge ("The single invoking thread is not idle."); + mlapi_loge ("The single invoking thread is not idle."); status = ML_ERROR_TRY_AGAIN; goto exit; } @@ -1052,7 +1108,7 @@ _ml_single_invoke_internal (ml_single_h single, if (g_cond_wait_until (&single_h->cond, &single_h->mutex, end_time)) { status = single_h->status; } else { - ml_logw ("Wait for invoke has timed out"); + mlapi_logw ("Wait for invoke has timed out"); status = ML_ERROR_TIMED_OUT; /** This is set to notify invoke_thread to not process if timed out */ if (need_alloc) @@ -1083,7 +1139,7 @@ _ml_single_invoke_internal (ml_single_h single, exit: if (G_UNLIKELY (status != ML_ERROR_NONE)) { - ml_loge ("Failed to invoke the model."); + mlapi_loge ("Failed to invoke the model."); } else { if (need_alloc) *output = single_h->output; @@ -1301,7 +1357,7 @@ ml_single_set_property (ml_single_h single, const char *name, const char *value) g_object_set (G_OBJECT (single_h->filter), name, (gboolean) FALSE, NULL); } else { - ml_loge ("The property value (%s) is not available.", value); + mlapi_loge ("The property value (%s) is not available.", value); status = ML_ERROR_INVALID_PARAMETER; } } else if (g_str_equal (name, "input") || g_str_equal (name, "inputtype") @@ -1330,7 +1386,7 @@ ml_single_set_property (ml_single_h single, const char *name, const char *value) ml_tensors_info_destroy (ml_info); } else { - ml_loge ("The property value (%s) is not available.", value); + mlapi_loge ("The property value (%s) is not available.", value); status = ML_ERROR_INVALID_PARAMETER; } @@ -1378,7 +1434,7 @@ 
ml_single_get_property (ml_single_h single, const char *name, char **value) g_object_get (G_OBJECT (single_h->filter), name, &bool_value, NULL); *value = (bool_value) ? g_strdup ("true") : g_strdup ("false"); } else { - ml_loge ("The property %s is not available.", name); + mlapi_loge ("The property %s is not available.", name); status = ML_ERROR_NOT_SUPPORTED; } diff --git a/c/src/nnstreamer-capi-tizen-privilege-check.c b/c/src/ml-api-inference-tizen-privilege-check.c similarity index 93% rename from c/src/nnstreamer-capi-tizen-privilege-check.c rename to c/src/ml-api-inference-tizen-privilege-check.c index 4f336e7..222b187 100644 --- a/c/src/nnstreamer-capi-tizen-privilege-check.c +++ b/c/src/ml-api-inference-tizen-privilege-check.c @@ -2,9 +2,9 @@ /** * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved. * - * @file nnstreamer-capi-tizen-privilege-check.c + * @file ml-api-inference-tizen-privilege-check.c * @date 22 July 2020 - * @brief NNStreamer/C-API Tizen dependent functions. + * @brief NNStreamer/C-API Tizen dependent functions for inference APIs. 
* @see https://github.com/nnstreamer/nnstreamer * @author MyungJoo Ham * @bug No known bugs except for NYI items @@ -19,14 +19,14 @@ #include #include /* device policy manager */ #include -#include "nnstreamer-capi-private.h" +#include +#include "ml-api-internal.h" +#include "ml-api-inference-internal.h" #if TIZEN5PLUS #include #endif #include -#include "nnstreamer.h" -#include "nnstreamer_plugin_api.h" #if TIZENMMCONF /* We can use "MMCAM_VIDEOSRC_ELEMENT_NAME and MMCAM_AUDIOSRC_ELEMENT_NAME */ @@ -150,7 +150,7 @@ ml_tizen_check_privilege (const gchar * privilege) priv_result == PRIVACY_PRIVILEGE_MANAGER_CHECK_RESULT_ALLOW) { /* privilege allowed */ } else { - ml_loge ("Failed to check the privilege %s.", privilege); + mlapi_loge ("Failed to check the privilege %s.", privilege); status = ML_ERROR_PERMISSION_DENIED; } @@ -179,7 +179,7 @@ ml_tizen_check_dpm_restriction (device_policy_manager_h dpm_handle, int type) } if (err != DPM_ERROR_NONE || dpm_is_allowed != 1) { - ml_loge ("Failed, device policy is not allowed."); + mlapi_loge ("Failed, device policy is not allowed."); return ML_ERROR_PERMISSION_DENIED; } @@ -237,7 +237,7 @@ ml_tizen_mm_res_get_key_string (mm_resource_manager_res_type_e type) res_key = g_strdup ("tizen_mm_res_radio"); break; default: - ml_logw ("The resource type %d is invalid.", type); + mlapi_logw ("The resource type %d is invalid.", type); break; } @@ -456,7 +456,7 @@ ml_tizen_mm_res_initialize (ml_pipeline_h pipe, gboolean has_video_src, if (!res) { res = g_new0 (pipeline_resource_s, 1); if (!res) { - ml_loge ("Failed to allocate pipeline resource handle."); + mlapi_loge ("Failed to allocate pipeline resource handle."); status = ML_ERROR_OUT_OF_MEMORY; goto rm_error; } @@ -469,7 +469,7 @@ ml_tizen_mm_res_initialize (ml_pipeline_h pipe, gboolean has_video_src, if (!mm_handle) { mm_handle = g_new0 (tizen_mm_handle_s, 1); if (!mm_handle) { - ml_loge ("Failed to allocate media resource handle."); + mlapi_loge ("Failed to allocate media 
resource handle."); status = ML_ERROR_OUT_OF_MEMORY; goto rm_error; } @@ -482,7 +482,7 @@ ml_tizen_mm_res_initialize (ml_pipeline_h pipe, gboolean has_video_src, if (dpm_add_policy_changed_cb (mm_handle->dpm_h, "camera", ml_tizen_dpm_policy_changed_cb, pipe, &mm_handle->dpm_cb_id) != DPM_ERROR_NONE) { - ml_loge ("Failed to add device policy callback."); + mlapi_loge ("Failed to add device policy callback."); status = ML_ERROR_PERMISSION_DENIED; goto rm_error; } @@ -604,7 +604,7 @@ ml_tizen_mm_res_acquire (ml_pipeline_h pipe, if (!mm_res) { mm_res = g_new0 (pipeline_resource_s, 1); if (mm_res == NULL) { - ml_loge ("Failed to allocate media resource data."); + mlapi_loge ("Failed to allocate media resource data."); g_free (res_key); status = ML_ERROR_OUT_OF_MEMORY; goto rm_error; @@ -650,14 +650,15 @@ ml_tizen_mm_replace_element (MMHandleType * handle, camera_conf * conf, _mmcamcorder_conf_get_value_element_name (element, &src_name); if (!src_name) { - ml_loge ("Failed to get the name of %s.", name); + mlapi_loge ("Failed to get the name of %s.", name); return ML_ERROR_STREAMS_PIPE; } - *description = replace_string (*description, what, src_name, " !", &changed); + *description = + ml_replace_string (*description, what, src_name, " !", &changed); if (changed > 1) { /* allow one src in the pipeline */ - ml_loge ("Cannot parse duplicated src node."); + mlapi_loge ("Cannot parse duplicated src node."); return ML_ERROR_STREAMS_PIPE; } @@ -713,7 +714,7 @@ ml_tizen_mm_convert_element (ml_pipeline_h pipe, gchar ** result, } if ((err = mm_camcorder_create (&hcam, &cam_info)) != MM_ERROR_NONE) { - ml_loge ("Fail to call mm_camcorder_create = %x\n", err); + mlapi_loge ("Fail to call mm_camcorder_create = %x\n", err); goto mm_error; } #if TIZENMMCONF /* 6.5 or higher */ @@ -725,15 +726,15 @@ ml_tizen_mm_convert_element (ml_pipeline_h pipe, gchar ** result, MMCAM_VIDEOSRC_ELEMENT_NAME, &src_name, &size, NULL); if (err != MM_ERROR_NONE || !src_name || size < 1) { - ml_loge 
("Failed to get attributes of MMCAM_VIDEOSRC_ELEMENT_NAME."); + mlapi_loge ("Failed to get attributes of MMCAM_VIDEOSRC_ELEMENT_NAME."); status = ML_ERROR_NOT_SUPPORTED; goto mm_error; } - *result = replace_string (*result, ML_TIZEN_CAM_VIDEO_SRC, src_name, + *result = ml_replace_string (*result, ML_TIZEN_CAM_VIDEO_SRC, src_name, " !", &changed); if (changed > 1) { /* Allow one src only in a pipeline */ - ml_loge ("Cannot parse duplicated Tizen video src nodes."); + mlapi_loge ("Cannot parse duplicated Tizen video src nodes."); status = ML_ERROR_INVALID_PARAMETER; goto mm_error; } @@ -746,15 +747,15 @@ ml_tizen_mm_convert_element (ml_pipeline_h pipe, gchar ** result, MMCAM_AUDIOSRC_ELEMENT_NAME, &src_name, &size, NULL); if (err != MM_ERROR_NONE || !src_name || size < 1) { - ml_loge ("Failed to get attributes of MMCAM_AUDIOSRC_ELEMENT_NAME."); + mlapi_loge ("Failed to get attributes of MMCAM_AUDIOSRC_ELEMENT_NAME."); status = ML_ERROR_NOT_SUPPORTED; goto mm_error; } - *result = replace_string (*result, ML_TIZEN_CAM_AUDIO_SRC, src_name, + *result = ml_replace_string (*result, ML_TIZEN_CAM_AUDIO_SRC, src_name, " !", &changed); if (changed > 1) { /* Allow one src only in a pipeline */ - ml_loge ("Cannot parse duplicated Tizen audio src nodes."); + mlapi_loge ("Cannot parse duplicated Tizen audio src nodes."); status = ML_ERROR_INVALID_PARAMETER; goto mm_error; } @@ -765,7 +766,7 @@ ml_tizen_mm_convert_element (ml_pipeline_h pipe, gchar ** result, err = _mmcamcorder_conf_get_info (hcam, 0, MMFW_CONFIG_MAIN_FILE, &cam_conf); if (err != MM_ERROR_NONE || !cam_conf) { - ml_loge ("Failed to load conf %s.", MMFW_CONFIG_MAIN_FILE); + mlapi_loge ("Failed to load conf %s.", MMFW_CONFIG_MAIN_FILE); status = ML_ERROR_NOT_SUPPORTED; goto mm_error; } diff --git a/c/src/ml-api-internal.h b/c/src/ml-api-internal.h new file mode 100644 index 0000000..7b88c84 --- /dev/null +++ b/c/src/ml-api-internal.h @@ -0,0 +1,299 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/** + * Copyright (c) 
2021 Samsung Electronics Co., Ltd. All Rights Reserved. + * + * @file ml-api-internal.h + * @date 20 October 2021 + * @brief ML C-API internal header without NNStreamer deps. + * This file should NOT be exported to SDK or devel package. + * @see https://github.com/nnstreamer/nnstreamer + * @author MyungJoo Ham + * @bug No known bugs except for NYI items + */ +#ifndef __ML_API_INTERNAL_H__ +#define __ML_API_INTERNAL_H__ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * DO NOT USE THE LOG INFRA of NNSTREAMER. + * This header is supposed to be independent from nnstreamer.git + */ +#define MLAPI_TAG_NAME "ml-api" + +#if defined(__TIZEN__) +#include +#define mlapi_loge(...) \ + dlog_print (DLOG_ERROR, MLAPI_TAG_NAME, __VA_ARGS__) +#define mlapi_logi(...) \ + dlog_print (DLOG_INFO, MLAPI_TAG_NAME, __VA_ARGS__) +#define mlapi_logw(...) \ + dlog_print (DLOG_WARN, MLAPI_TAG_NAME, __VA_ARGS__) +#define mlapi_logd(...) \ + dlog_print (DLOG_DEBUG, MLAPI_TAG_NAME, __VA_ARGS__) +#elif defined(__ANDROID__) +#include +#define mlapi_loge(...) \ + __android_log_print (ANDROID_LOG_ERROR, MLAPI_TAG_NAME, __VA_ARGS__) +#define mlapi_logi(...) \ + __android_log_print (ANDROID_LOG_INFO, MLAPI_TAG_NAME, __VA_ARGS__) +#define mlapi_logw(...) \ + __android_log_print (ANDROID_LOG_WARN, MLAPI_TAG_NAME, __VA_ARGS__) +#define mlapi_logd(...) \ + __android_log_print (ANDROID_LOG_DEBUG, MLAPI_TAG_NAME, __VA_ARGS__) +#else /* Linux distro */ +#include +#define mlapi_loge g_critical +#define mlapi_logi g_info +#define mlapi_logw g_warning +#define mlapi_logd g_debug +#endif + +#if defined (__TIZEN__) +typedef enum +{ + NOT_CHECKED_YET = -1, + NOT_SUPPORTED = 0, + SUPPORTED = 1 +} feature_state_t; + +#if defined (__FEATURE_CHECK_SUPPORT__) +#define check_feature_state() \ + do { \ + int feature_ret = ml_tizen_get_feature_enabled (); \ + if (ML_ERROR_NONE != feature_ret) \ + return feature_ret; \ + } while (0); + +#define set_feature_state(...) 
ml_tizen_set_feature_state(__VA_ARGS__) +#else /* __FEATURE_CHECK_SUPPORT__ */ +#define check_feature_state() +#define set_feature_state(...) +#endif /* __FEATURE_CHECK_SUPPORT__ */ + +#if (TIZENVERSION >= 5) && (TIZENVERSION < 9999) +#define TIZEN5PLUS 1 +#if ((TIZENVERSION > 6) || (TIZENVERSION == 6 && TIZENVERSIONMINOR >= 5)) +#define TIZENMMCONF 1 +#endif +#endif + +#else /* __TIZEN__ */ +#define check_feature_state() +#define set_feature_state(...) +#endif /* __TIZEN__ */ + +#ifndef TIZEN5PLUS +#define TIZEN5PLUS 0 +#endif /* TIZEN5PLUS */ +#ifndef TIZENMMCONF +#define TIZENMMCONF 0 +#endif /* TIZENMMCONF */ + +#define EOS_MESSAGE_TIME_LIMIT 100 +#define WAIT_PAUSED_TIME_LIMIT 100 + +/** + * @brief Data structure for tensor information. + * @since_tizen 5.5 + */ +typedef struct { + char *name; /**< Name of each element in the tensor. */ + ml_tensor_type_e type; /**< Type of each element in the tensor. */ + ml_tensor_dimension dimension; /**< Dimension information. */ +} ml_tensor_info_s; + +/** + * @brief Data structure for tensors information, which contains multiple tensors. + * @since_tizen 5.5 + */ +typedef struct { + unsigned int num_tensors; /**< The number of tensors. */ + ml_tensor_info_s info[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor info. 
*/ + GMutex lock; /**< Lock for thread safety */ + int nolock; /**< Set non-zero to avoid using m (giving up thread safety) */ +} ml_tensors_info_s; + +/** + * @brief Macro to control private lock with nolock condition (lock) + * @param sname The name of struct (ml_tensors_info_s or ml_tensors_data_s) + */ +#define G_LOCK_UNLESS_NOLOCK(sname) \ + do { \ + GMutex *l = (GMutex *) &(sname).lock; \ + if (!(sname).nolock) \ + g_mutex_lock (l); \ + } while (0) + +/** + * @brief Macro to control private lock with nolock condition (unlock) + * @param sname The name of struct (ml_tensors_info_s or ml_tensors_data_s) + */ +#define G_UNLOCK_UNLESS_NOLOCK(sname) \ + do { \ + GMutex *l = (GMutex *) &(sname).lock; \ + if (!(sname).nolock) \ + g_mutex_unlock (l); \ + } while (0) + +/** + * @brief Macro to verify private lock acquired with nolock condition (lock) + * @param sname The name of struct (ml_tensors_info_s or ml_tensors_data_s) + */ +#define G_VERIFYLOCK_UNLESS_NOLOCK(sname) \ + do { \ + GMutex *l = (GMutex *) &(sname).lock; \ + if (!(sname).nolock) { \ + if (g_mutex_trylock(l)) { \ + g_mutex_unlock(l); \ + return ML_ERROR_INVALID_PARAMETER; \ + } \ + } \ + } while (0) + +/** + * @brief Macro to check the tensors info is valid. + */ +#define ml_tensors_info_is_valid(i) ({bool v; (ml_tensors_info_validate ((i), &v) == ML_ERROR_NONE && v);}) + +/** + * @brief Macro to compare the tensors info. + */ +#define ml_tensors_info_is_equal(i1,i2) ({bool e; (ml_tensors_info_compare ((i1), (i2), &e) == ML_ERROR_NONE && e);}) + +/** + * @brief The function to be called when destroying the allocated handle. + * @since_tizen 6.5 + * @param[in] handle The handle created for ML API. + * @param[in,out] user_data The user data to pass to the callback function. + * @return @c 0 on success. Otherwise a negative error value. + */ +typedef int (*ml_handle_destroy_cb) (void *handle, void *user_data); + +/** + * @brief An instance of a single input or output frame. 
+ * @since_tizen 5.5 + */ +typedef struct { + void *tensor; /**< The instance of tensor data. */ + size_t size; /**< The size of tensor. */ +} ml_tensor_data_s; + +/** + * @brief An instance of input or output frames. #ml_tensors_info_h is the handle for tensors metadata. + * @since_tizen 5.5 + */ +typedef struct { + unsigned int num_tensors; /**< The number of tensors. */ + ml_tensor_data_s tensors[ML_TENSOR_SIZE_LIMIT]; /**< The list of tensor data. NULL for unused tensors. */ + + /* private */ + ml_tensors_info_h info; + void *user_data; /**< The user data to pass to the callback function */ + ml_handle_destroy_cb destroy; /**< The function to be called to release the allocated buffer */ + GMutex lock; /**< Lock for thread safety */ + int nolock; /**< Set non-zero to avoid using mutex (giving up thread safety) */ +} ml_tensors_data_s; + +/** + * @brief Gets the byte size of the given tensor info. + * @note This is not thread safe. + */ +size_t ml_tensor_info_get_size (const ml_tensor_info_s *info); + +/** + * @brief Initializes the tensors information with default value. + * @since_tizen 5.5 + * @param[in] info The tensors info pointer to be initialized. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful + * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. + */ +int ml_tensors_info_initialize (ml_tensors_info_s *info); + +/** + * @brief Frees and initializes the data in tensors info. + * @since_tizen 5.5 + * @param[in] info The tensors info pointer to be freed. + */ +void ml_tensors_info_free (ml_tensors_info_s *info); + +/** + * @brief Creates a tensor data frame without allocating new buffer cloning the given tensors data. + * @details If @a data_src is null, this returns error. + * @param[in] data_src The handle of tensors data to be cloned. + * @param[out] data The handle of tensors data. + * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful + * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. + * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory. + */ +int ml_tensors_data_clone_no_alloc (const ml_tensors_data_s * data_src, ml_tensors_data_h * data); + +/** + * @brief Replaces string. + * This function deallocates the input source string. + * This is copied from nnstreamer/tensor_common.c by the nnstreamer maintainer. + * @param[in] source The input string. This will be freed when returning the replaced string. + * @param[in] what The string to search for. + * @param[in] to The string to be replaced. + * @param[in] delimiters The characters which specify the place to split the string. Set NULL to replace all matched string. + * @param[out] count The count of replaced. Set NULL if it is unnecessary. + * @return Newly allocated string. The returned string should be freed with g_free(). + */ +gchar * ml_replace_string (gchar * source, const gchar * what, const gchar * to, const gchar * delimiters, guint * count); + +/** + * @brief Compares the given tensors information. + * @details If the function returns an error, @a equal is not changed. + * @since_tizen 6.0 + * @param[in] info1 The handle of tensors information to be compared. + * @param[in] info2 The handle of tensors information to be compared. + * @param[out] equal @c true if given tensors information is equal, @c false if it's not equal. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful + * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. + */ +int ml_tensors_info_compare (const ml_tensors_info_h info1, const ml_tensors_info_h info2, bool *equal); + +/** + * @brief Frees the tensors data handle and its data. + * @param[in] data The handle of tensors data. + * @param[in] free_data The flag to free the buffers in handle. + * @return @c 0 on success. 
Otherwise a negative error value. + */ +int ml_tensors_data_destroy_internal (ml_tensors_data_h data, gboolean free_data); + +/** + * @brief Creates a tensor data frame without buffer with the given tensors information. + * @details If @a info is null, this allocates data handle with empty tensor data. + * @param[in] info The handle of tensors information for the allocation. + * @param[out] data The handle of tensors data. + * @return @c 0 on success. Otherwise a negative error value. + */ +int ml_tensors_data_create_no_alloc (const ml_tensors_info_h info, ml_tensors_data_h *data); + +#if defined (__TIZEN__) +/****** TIZEN CHECK FEATURE BEGINS *****/ +/** + * @brief Checks whether machine_learning.inference feature is enabled or not. + */ +int ml_tizen_get_feature_enabled (void); + +/** + * @brief Set the feature status of machine_learning.inference. + * This is only used for Unit test. + */ +int ml_tizen_set_feature_state (int state); +/****** TIZEN CHECK FEATURE ENDS *****/ +#endif /* __TIZEN__ */ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __ML_API_INTERNAL_H__ */ diff --git a/java/android/nnstreamer/src/main/jni/Android-nnstreamer.mk b/java/android/nnstreamer/src/main/jni/Android-nnstreamer.mk index 5b5de0d..f0c4248 100644 --- a/java/android/nnstreamer/src/main/jni/Android-nnstreamer.mk +++ b/java/android/nnstreamer/src/main/jni/Android-nnstreamer.mk @@ -15,18 +15,20 @@ endif NNSTREAMER_CAPI_INCLUDES := \ $(NNSTREAMER_ROOT)/gst/nnstreamer/tensor_filter \ $(ML_API_ROOT)/c/include/platform \ - $(ML_API_ROOT)/c/include + $(ML_API_ROOT)/c/include \ + $(ML_API_ROOT)/c/src # nnstreamer and single-shot api NNSTREAMER_SRC_FILES := \ $(NNSTREAMER_COMMON_SRCS) \ - $(ML_API_ROOT)/c/src/nnstreamer-capi-util.c \ - $(ML_API_ROOT)/c/src/nnstreamer-capi-single.c + $(ML_API_ROOT)/c/src/ml-api-common.c \ + $(ML_API_ROOT)/c/src/ml-api-inference-internal.c \ + $(ML_API_ROOT)/c/src/ml-api-inference-single.c # pipeline api and nnstreamer plugins ifneq 
($(NNSTREAMER_API_OPTION),single) NNSTREAMER_SRC_FILES += \ - $(ML_API_ROOT)/c/src/nnstreamer-capi-pipeline.c \ + $(ML_API_ROOT)/c/src/ml-api-inference-pipeline.c \ $(NNSTREAMER_PLUGINS_SRCS) \ $(NNSTREAMER_SOURCE_AMC_SRCS) \ $(NNSTREAMER_DECODER_BB_SRCS) \ diff --git a/java/android/nnstreamer/src/main/jni/nnstreamer-native.h b/java/android/nnstreamer/src/main/jni/nnstreamer-native.h index 90995e3..4808510 100644 --- a/java/android/nnstreamer/src/main/jni/nnstreamer-native.h +++ b/java/android/nnstreamer/src/main/jni/nnstreamer-native.h @@ -21,10 +21,12 @@ #include "nnstreamer.h" #include "nnstreamer-single.h" -#include "nnstreamer-capi-private.h" +#include "nnstreamer-tizen-internal.h" #include "nnstreamer_log.h" #include "nnstreamer_plugin_api.h" #include "nnstreamer_plugin_api_filter.h" +#include +#include #if GLIB_SIZEOF_VOID_P == 8 #define CAST_TO_LONG(p) (jlong)(p) diff --git a/packaging/machine-learning-api.spec b/packaging/machine-learning-api.spec index 57b4ad0..be5cdf1 100644 --- a/packaging/machine-learning-api.spec +++ b/packaging/machine-learning-api.spec @@ -154,12 +154,26 @@ Requires: capi-machine-learning-inference-devel = %{version}-%{release} %description devel-static Static library of capi-machine-learning-inference-devel package. +%package -n capi-machine-learning-common +Summary: Common utility functions for Tizen Machine Learning API +Group: Machine Learning/ML Framework +%description -n capi-machine-learning-common +Tizen ML(Machine Learning) native API's common parts. + +%package -n capi-machine-learning-common-devel +Summary: Common headers for Tizen Machine Learning API +Group: Machine Learning/ML Framework +Requires: capi-machine-learning-common = %{version}-%{release} +%description -n capi-machine-learning-common-devel +Common headers for Tizen Machine Learning API.
+%package -n capi-machine-learning-common-devel-static +Summary: Static library of common utility functions for Tizen Machine Learning API +Group: Machine Learning/ML Framework +Requires: capi-machine-learning-common-devel = %{version}-%{release} +%description -n capi-machine-learning-common-devel-static +Static library of common headers for Tizen Machine Learning API. + %package -n capi-machine-learning-tizen-internal-devel Summary: Tizen internal headers for Tizen Machine Learning API Group: Machine Learning/ML Framework @@ -290,10 +304,16 @@ cp -r result %{buildroot}%{_datadir}/ml-api/unittest/ %files devel-static %{_libdir}/libcapi-nnstreamer.a +%files -n capi-machine-learning-common +%{_libdir}/libcapi-ml-common.so* + %files -n capi-machine-learning-common-devel %{_includedir}/nnstreamer/ml-api-common.h %{_libdir}/pkgconfig/capi-ml-common.pc +%files -n capi-machine-learning-common-devel-static +%{_libdir}/libcapi-ml-common.a + %files -n capi-machine-learning-tizen-internal-devel %{_includedir}/nnstreamer/nnstreamer-tizen-internal.h diff --git a/tests/capi/unittest_capi_datatype_consistency.cc b/tests/capi/unittest_capi_datatype_consistency.cc index f5db6b3..7c6febb 100644 --- a/tests/capi/unittest_capi_datatype_consistency.cc +++ b/tests/capi/unittest_capi_datatype_consistency.cc @@ -13,7 +13,7 @@ /* ML API Side */ #include -#include +#include /* GStreamer Side */ #include diff --git a/tests/capi/unittest_capi_inference.cc b/tests/capi/unittest_capi_inference.cc index 909be15..d7589c2 100644 --- a/tests/capi/unittest_capi_inference.cc +++ b/tests/capi/unittest_capi_inference.cc @@ -13,8 +13,10 @@ #include #include #include -#include #include +#include +#include +#include #if defined (__APPLE__) #define SO_FILE_EXTENSION ".dylib" diff --git a/tests/capi/unittest_capi_inference_latency.cc b/tests/capi/unittest_capi_inference_latency.cc index 6b3295c..b4134f3 100644 --- a/tests/capi/unittest_capi_inference_latency.cc +++
b/tests/capi/unittest_capi_inference_latency.cc @@ -16,9 +16,10 @@ #include #include -#include #include #include +#include +#include /** * @brief nnstreamer invoke latency testing base class diff --git a/tests/capi/unittest_capi_inference_nnfw_runtime.cc b/tests/capi/unittest_capi_inference_nnfw_runtime.cc index 855321b..25bc65c 100644 --- a/tests/capi/unittest_capi_inference_nnfw_runtime.cc +++ b/tests/capi/unittest_capi_inference_nnfw_runtime.cc @@ -11,7 +11,8 @@ #include #include #include -#include +#include +#include static GMutex g_test_mutex; -- 2.7.4