From ef106edfcb9c98b3b21c9378287c443784feb13e Mon Sep 17 00:00:00 2001 From: Sangjung Woo Date: Fri, 26 Jul 2019 14:00:54 +0900 Subject: [PATCH] [C-Api] Check feature before processing API This patch checks the machine_learning.inference feature before processing API by calling ml_get_feature_enabled() function. In case of Unit test, feature checking could be skipped by calling internal API, ml_set_feature_status(). Signed-off-by: Sangjung Woo --- api/capi/include/nnstreamer-capi-private.h | 17 ++++++ api/capi/meson.build | 1 + api/capi/src/nnstreamer-capi-pipeline.c | 36 ++++++++++++ api/capi/src/nnstreamer-capi-single.c | 10 ++++ api/capi/src/nnstreamer-capi-util.c | 88 ++++++++++++++++++++++++++++++ debian/rules | 2 +- packaging/nnstreamer.spec | 1 + tests/tizen_capi/unittest_tizen_capi.cpp | 3 + 8 files changed, 157 insertions(+), 1 deletion(-) diff --git a/api/capi/include/nnstreamer-capi-private.h b/api/capi/include/nnstreamer-capi-private.h index 8f55df9..7036371 100644 --- a/api/capi/include/nnstreamer-capi-private.h +++ b/api/capi/include/nnstreamer-capi-private.h @@ -33,6 +33,12 @@ #define TAG_NAME "nnstreamer-capi" +#define ML_INF_FEATURE_PATH "tizen.org/feature/machine_learning.inference" + +#define check_feature_state() \ + if (ML_ERROR_NONE != ml_get_feature_enabled()) \ + return ML_ERROR_NOT_SUPPORTED; + #if defined(__TIZEN__) #include @@ -242,6 +248,17 @@ void ml_tensors_info_copy_from_ml (GstTensorsInfo *gst_info, const ml_tensors_in */ GstCaps * ml_tensors_info_get_caps (const ml_tensors_info_s *info); +/** + * @brief Checks whether machine_learning.inference feature is enabled or not. + */ +int ml_get_feature_enabled (void); + +/** + * @brief Set the feature status of machine_learning.inference. + * This is only used for Unit test.
+ */ +int ml_set_feature_status (int status); + #ifdef __cplusplus } #endif /* __cplusplus */ diff --git a/api/capi/meson.build b/api/capi/meson.build index ff5a4ef..8e70c7e 100644 --- a/api/capi/meson.build +++ b/api/capi/meson.build @@ -36,6 +36,7 @@ if (get_option('enable-tizen')) message('CAPI is in Tizen mode') tizen_deps = [ dependency('capi-base-common'), + dependency('capi-system-info'), dependency('dlog') ] else diff --git a/api/capi/src/nnstreamer-capi-pipeline.c b/api/capi/src/nnstreamer-capi-pipeline.c index d7ff9d4..b4e1d8f 100644 --- a/api/capi/src/nnstreamer-capi-pipeline.c +++ b/api/capi/src/nnstreamer-capi-pipeline.c @@ -345,6 +345,8 @@ ml_pipeline_construct (const char *pipeline_description, ml_pipeline *pipe_h; + check_feature_state (); + if (pipe == NULL) return ML_ERROR_INVALID_PARAMETER; @@ -487,6 +489,8 @@ ml_pipeline_destroy (ml_pipeline_h pipe) GstStateChangeReturn scret; GstState state; + check_feature_state (); + if (p == NULL) return ML_ERROR_INVALID_PARAMETER; @@ -545,6 +549,8 @@ ml_pipeline_get_state (ml_pipeline_h pipe, ml_pipeline_state_e * state) GstState _state; GstStateChangeReturn scret; + check_feature_state (); + if (p == NULL || state == NULL) return ML_ERROR_INVALID_PARAMETER; @@ -573,6 +579,8 @@ ml_pipeline_start (ml_pipeline_h pipe) ml_pipeline *p = pipe; GstStateChangeReturn scret; + check_feature_state (); + if (p == NULL) return ML_ERROR_INVALID_PARAMETER; @@ -595,6 +603,8 @@ ml_pipeline_stop (ml_pipeline_h pipe) ml_pipeline *p = pipe; GstStateChangeReturn scret; + check_feature_state (); + if (p == NULL) return ML_ERROR_INVALID_PARAMETER; @@ -623,6 +633,8 @@ ml_pipeline_sink_register (ml_pipeline_h pipe, const char *sink_name, ml_pipeline_sink *sink; int ret = ML_ERROR_NONE; + check_feature_state (); + if (h == NULL) { ml_loge ("The argument sink handle is not valid."); return ML_ERROR_INVALID_PARAMETER; @@ -721,6 +733,8 @@ ml_pipeline_sink_unregister (ml_pipeline_sink_h h) { handle_init (sink, sink, h); + 
check_feature_state (); + if (elem->handle_id > 0) { g_signal_handler_disconnect (elem->element, elem->handle_id); elem->handle_id = 0; @@ -817,6 +831,8 @@ ml_pipeline_src_get_handle (ml_pipeline_h pipe, const char *src_name, ml_pipeline_src *src; int ret = ML_ERROR_NONE; + check_feature_state (); + if (h == NULL) { ml_loge ("The argument source handle is not valid."); return ML_ERROR_INVALID_PARAMETER; @@ -881,6 +897,8 @@ ml_pipeline_src_release_handle (ml_pipeline_src_h h) { handle_init (src, src, h); + check_feature_state (); + elem->handles = g_list_remove (elem->handles, src); g_free (src); @@ -903,6 +921,8 @@ ml_pipeline_src_input_data (ml_pipeline_src_h h, ml_tensors_data_h data, handle_init (src, src, h); + check_feature_state (); + _data = (ml_tensors_data_s *) data; if (!_data) { ml_loge ("The given param data is invalid."); @@ -992,6 +1012,8 @@ ml_pipeline_src_get_tensors_info (ml_pipeline_src_h h, { handle_init (src, src, h); + check_feature_state (); + if (info == NULL) { ret = ML_ERROR_INVALID_PARAMETER; goto unlock_return; @@ -1023,6 +1045,8 @@ ml_pipeline_switch_get_handle (ml_pipeline_h pipe, const char *switch_name, ml_pipeline_switch *swtc; int ret = ML_ERROR_NONE; + check_feature_state (); + if (h == NULL) { ml_loge ("The argument switch handle is not valid."); return ML_ERROR_INVALID_PARAMETER; @@ -1093,6 +1117,8 @@ ml_pipeline_switch_release_handle (ml_pipeline_switch_h h) { handle_init (switch, swtc, h); + check_feature_state (); + elem->handles = g_list_remove (elem->handles, swtc); g_free (swtc); @@ -1110,6 +1136,8 @@ ml_pipeline_switch_select (ml_pipeline_switch_h h, const char *pad_name) handle_init (switch, swtc, h); + check_feature_state (); + if (pad_name == NULL) { ml_loge ("The second argument, pad name, is not valid."); ret = ML_ERROR_INVALID_PARAMETER; @@ -1163,6 +1191,8 @@ ml_pipeline_switch_get_pad_list (ml_pipeline_switch_h h, char ***list) handle_init (switch, swtc, h); + check_feature_state (); + if (list == NULL) { ml_loge 
("The second argument, list, is not valid."); ret = ML_ERROR_INVALID_PARAMETER; @@ -1250,6 +1280,8 @@ ml_pipeline_valve_get_handle (ml_pipeline_h pipe, const char *valve_name, ml_pipeline_valve *valve; int ret = ML_ERROR_NONE; + check_feature_state (); + if (h == NULL) { ml_loge ("The argument valve handle is not valid."); return ML_ERROR_INVALID_PARAMETER; @@ -1312,6 +1344,8 @@ ml_pipeline_valve_release_handle (ml_pipeline_valve_h h) { handle_init (valve, valve, h); + check_feature_state (); + elem->handles = g_list_remove (elem->handles, valve); g_free (valve); @@ -1327,6 +1361,8 @@ ml_pipeline_valve_set_open (ml_pipeline_valve_h h, bool open) gboolean drop = FALSE; handle_init (valve, valve, h); + check_feature_state (); + g_object_get (G_OBJECT (elem->element), "drop", &drop, NULL); if ((open != false) != (drop != FALSE)) { diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c index 0553f0e..4f230a8 100644 --- a/api/capi/src/nnstreamer-capi-single.c +++ b/api/capi/src/nnstreamer-capi-single.c @@ -65,6 +65,8 @@ ml_single_open (ml_single_h * single, const char *model, bool available = false; bool valid = false; + check_feature_state (); + /* Validate the params */ if (!single) { ml_loge ("The given param, single is invalid."); @@ -327,6 +329,8 @@ ml_single_close (ml_single_h single) ml_single *single_h; int status; + check_feature_state (); + if (!single) { ml_loge ("The given param, single is invalid."); return ML_ERROR_INVALID_PARAMETER; @@ -373,6 +377,8 @@ ml_single_invoke (ml_single_h single, GstFlowReturn ret; int i, status = ML_ERROR_NONE; + check_feature_state (); + if (!single || !input || !output) { ml_loge ("The given param is invalid."); return ML_ERROR_INVALID_PARAMETER; @@ -463,6 +469,8 @@ ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info) gchar *val; guint rank; + check_feature_state (); + if (!single || !info) return ML_ERROR_INVALID_PARAMETER; @@ -514,6 +522,8 @@ ml_single_get_output_info 
(ml_single_h single, ml_tensors_info_h * info) gchar *val; guint rank; + check_feature_state (); + if (!single || !info) return ML_ERROR_INVALID_PARAMETER; diff --git a/api/capi/src/nnstreamer-capi-util.c b/api/capi/src/nnstreamer-capi-util.c index b9b47c5..8e7396d 100644 --- a/api/capi/src/nnstreamer-capi-util.c +++ b/api/capi/src/nnstreamer-capi-util.c @@ -27,6 +27,13 @@ #include "nnstreamer.h" #include "nnstreamer-capi-private.h" +#if defined(__TIZEN__) + #include +#endif + +/** -1: Not checked yet, 0: Not supported, 1: Supported */ +static int feature_enabled = -1; + /** * @brief Allocates a tensors information handle with default value. */ @@ -35,6 +42,8 @@ ml_tensors_info_create (ml_tensors_info_h * info) { ml_tensors_info_s *tensors_info; + check_feature_state (); + if (!info) return ML_ERROR_INVALID_PARAMETER; @@ -52,6 +61,8 @@ ml_tensors_info_destroy (ml_tensors_info_h info) { ml_tensors_info_s *tensors_info; + check_feature_state (); + tensors_info = (ml_tensors_info_s *) info; if (!tensors_info) @@ -119,6 +130,8 @@ ml_tensors_info_validate (const ml_tensors_info_h info, bool * valid) ml_tensors_info_s *tensors_info; guint i; + check_feature_state (); + if (!valid) return ML_ERROR_INVALID_PARAMETER; @@ -150,6 +163,8 @@ ml_tensors_info_set_count (ml_tensors_info_h info, unsigned int count) { ml_tensors_info_s *tensors_info; + check_feature_state (); + if (!info || count > ML_TENSOR_SIZE_LIMIT) return ML_ERROR_INVALID_PARAMETER; @@ -167,6 +182,8 @@ ml_tensors_info_get_count (ml_tensors_info_h info, unsigned int *count) { ml_tensors_info_s *tensors_info; + check_feature_state (); + if (!info || !count) return ML_ERROR_INVALID_PARAMETER; @@ -185,6 +202,8 @@ ml_tensors_info_set_tensor_name (ml_tensors_info_h info, { ml_tensors_info_s *tensors_info; + check_feature_state (); + if (!info) return ML_ERROR_INVALID_PARAMETER; @@ -213,6 +232,8 @@ ml_tensors_info_get_tensor_name (ml_tensors_info_h info, { ml_tensors_info_s *tensors_info; + check_feature_state (); + 
if (!info || !name) return ML_ERROR_INVALID_PARAMETER; @@ -235,6 +256,8 @@ ml_tensors_info_set_tensor_type (ml_tensors_info_h info, { ml_tensors_info_s *tensors_info; + check_feature_state (); + if (!info) return ML_ERROR_INVALID_PARAMETER; @@ -257,6 +280,8 @@ ml_tensors_info_get_tensor_type (ml_tensors_info_h info, { ml_tensors_info_s *tensors_info; + check_feature_state (); + if (!info || !type) return ML_ERROR_INVALID_PARAMETER; @@ -280,6 +305,8 @@ ml_tensors_info_set_tensor_dimension (ml_tensors_info_h info, ml_tensors_info_s *tensors_info; guint i; + check_feature_state (); + if (!info) return ML_ERROR_INVALID_PARAMETER; @@ -305,6 +332,8 @@ ml_tensors_info_get_tensor_dimension (ml_tensors_info_h info, ml_tensors_info_s *tensors_info; guint i; + check_feature_state (); + if (!info) return ML_ERROR_INVALID_PARAMETER; @@ -416,6 +445,8 @@ ml_tensors_data_destroy (ml_tensors_data_h data) ml_tensors_data_s *_data; guint i; + check_feature_state (); + if (!data) return ML_ERROR_INVALID_PARAMETER; @@ -443,6 +474,8 @@ ml_tensors_data_create (const ml_tensors_info_h info, ml_tensors_info_s *tensors_info; gint i; + check_feature_state (); + if (!info || !data) return ML_ERROR_INVALID_PARAMETER; @@ -486,6 +519,8 @@ ml_tensors_data_get_tensor_data (ml_tensors_data_h data, unsigned int index, { ml_tensors_data_s *_data; + check_feature_state (); + if (!data) return ML_ERROR_INVALID_PARAMETER; @@ -509,6 +544,8 @@ ml_tensors_data_set_tensor_data (ml_tensors_data_h data, unsigned int index, { ml_tensors_data_s *_data; + check_feature_state (); + if (!data) return ML_ERROR_INVALID_PARAMETER; @@ -533,6 +570,8 @@ ml_tensors_info_clone (ml_tensors_info_h dest, const ml_tensors_info_h src) ml_tensors_info_s *dest_info, *src_info; guint i, j; + check_feature_state (); + dest_info = (ml_tensors_info_s *) dest; src_info = (ml_tensors_info_s *) src; @@ -742,6 +781,8 @@ int ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, bool * available) { + check_feature_state (); + 
if (!available) return ML_ERROR_INVALID_PARAMETER; @@ -777,3 +818,50 @@ ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, done: return ML_ERROR_NONE; } + +/** + * @brief Checks whether machine_learning.inference feature is enabled or not. + */ +int ml_get_feature_enabled (void) +{ +#if defined(__TIZEN__) + if (0 == feature_enabled) { + ml_loge ("machine_learning.inference NOT supported"); + return ML_ERROR_NOT_SUPPORTED; + } else if (-1 == feature_enabled) { + bool ml_inf_supported = false; + if (0 == system_info_get_platform_bool(ML_INF_FEATURE_PATH, &ml_inf_supported)) { + if (false == ml_inf_supported) { + ml_loge ("machine_learning.inference NOT supported"); + ml_set_feature_status (0); + return ML_ERROR_NOT_SUPPORTED; + } + + ml_set_feature_status (1); + } else { + ml_loge ("failed to get feature value of machine_learning.inference"); + return ML_ERROR_NOT_SUPPORTED; + } + } +#endif + return ML_ERROR_NONE; +} + +/** + * @brief Set the feature status of machine_learning.inference. 
+ */ +int ml_set_feature_status (int status) +{ + GMutex mutex; + g_mutex_init (&mutex); + g_mutex_lock (&mutex); + + /** Update feature status + * -1: Not checked yet, 0: Not supported, 1: Supported + */ + feature_enabled = status; + + g_mutex_unlock (&mutex); + g_mutex_clear (&mutex); + return ML_ERROR_NONE; +} diff --git a/debian/rules b/debian/rules index 2ea08f5..690e41d 100755 --- a/debian/rules +++ b/debian/rules @@ -35,7 +35,7 @@ override_dh_auto_clean: override_dh_auto_configure: mkdir -p build - meson --buildtype=plain --prefix=/usr --sysconfdir=/etc --libdir=lib/$(DEB_HOST_MULTIARCH) --bindir=lib/nnstreamer/bin --includedir=include -Dinstall-example=true -Denable-tensorflow=$(enable_tf) -Denable-capi=true build + meson --buildtype=plain --prefix=/usr --sysconfdir=/etc --libdir=lib/$(DEB_HOST_MULTIARCH) --bindir=lib/nnstreamer/bin --includedir=include -Dinstall-example=true -Denable-tensorflow=$(enable_tf) -Denable-capi=true -Denable-tizen=false build override_dh_auto_build: ninja -C build diff --git a/packaging/nnstreamer.spec b/packaging/nnstreamer.spec index 6c1d6a4..ff414ba 100644 --- a/packaging/nnstreamer.spec +++ b/packaging/nnstreamer.spec @@ -64,6 +64,7 @@ BuildRequires: lcov # BuildRequires: taos-ci-unittest-coverage-assessment %endif %if %{with tizen} +BuildRequires: pkgconfig(capi-system-info) BuildRequires: pkgconfig(capi-base-common) BuildRequires: pkgconfig(dlog) BuildRequires: gst-plugins-bad-devel diff --git a/tests/tizen_capi/unittest_tizen_capi.cpp b/tests/tizen_capi/unittest_tizen_capi.cpp index 34827b9..3ad9657 100644 --- a/tests/tizen_capi/unittest_tizen_capi.cpp +++ b/tests/tizen_capi/unittest_tizen_capi.cpp @@ -12,6 +12,7 @@ #include #include #include /* GStatBuf */ +#include /** * @brief Struct to check the pipeline state changes. @@ -1617,5 +1618,7 @@ main (int argc, char **argv) { testing::InitGoogleTest (&argc, argv); + ml_set_feature_status(1); + return RUN_ALL_TESTS (); } -- 2.7.4