From: Parichay Kapoor Date: Fri, 8 Nov 2019 07:29:13 +0000 (+0900) Subject: [nnfw/single] Enable nnfw with single API X-Git-Tag: accepted/tizen/unified/20191213.115126~13 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=849b7aa9d60cac0f3adbb139c664c4efe3370f06;p=platform%2Fupstream%2Fnnstreamer.git [nnfw/single] Enable nnfw with single API - Enable nnfw with single API - nnfw and tensorflow-lite both support tflite extensions Added meson option to set default behavior which prioritizes tensorflow-lite V2: - Add check for availability of nnfw after assigning nnfw in case of ML_NNFW_TYPE_ANY Signed-off-by: Parichay Kapoor --- diff --git a/api/capi/src/nnstreamer-capi-single.c b/api/capi/src/nnstreamer-capi-single.c index 0066771..8489d6a 100644 --- a/api/capi/src/nnstreamer-capi-single.c +++ b/api/capi/src/nnstreamer-capi-single.c @@ -488,8 +488,12 @@ ml_single_open (ml_single_h * single, const char *model, } break; case ML_NNFW_TYPE_MVNC: - g_object_set (filter_obj, "framework", "movidius-ncsdk2", "model", model, NULL); /** @todo Verify this! (this code is not tested) */ + g_object_set (filter_obj, "framework", "movidius-ncsdk2", "model", model, NULL); + break; + case ML_NNFW_TYPE_NNFW: + /* We can get the tensor meta from tf-lite model. */ + g_object_set (filter_obj, "framework", "nnfw", "model", model, NULL); break; default: /** @todo Add other fw later. */ diff --git a/api/capi/src/nnstreamer-capi-util.c b/api/capi/src/nnstreamer-capi-util.c index 699c30a..bf1498d 100644 --- a/api/capi/src/nnstreamer-capi-util.c +++ b/api/capi/src/nnstreamer-capi-util.c @@ -908,7 +908,7 @@ ml_initialize_gstreamer (void) * @param[in/out] nnfw The type of NNFW. * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful - * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_NOT_SUPPORTED Not supported, or framework to support this model file is unavailable in the environment. 
* @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid. */ int @@ -932,8 +932,27 @@ ml_validate_model_file (const char *model, ml_nnfw_type_e * nnfw) switch (*nnfw) { case ML_NNFW_TYPE_ANY: if (g_str_has_suffix (path_down, ".tflite")) { - ml_logi ("The given model [%s] is supposed a tensorflow-lite model.", model); - *nnfw = ML_NNFW_TYPE_TENSORFLOW_LITE; + /** + * .tflite is supported by both tensorflow and nnfw. + * Priority decided with ini file. + */ + gboolean nnfw_runtime_priority = nnsconf_get_custom_value_bool ( + "nnfw-runtime", "prioritize_tflite_ext", FALSE); + bool available_nnfw = FALSE, available_tflite = FALSE; + + ml_check_nnfw_availability (ML_NNFW_TYPE_NNFW, ML_NNFW_HW_ANY, + &available_nnfw); + ml_check_nnfw_availability (ML_NNFW_TYPE_TENSORFLOW_LITE, + ML_NNFW_HW_ANY, &available_tflite); + + if ((nnfw_runtime_priority && available_nnfw) || + (!nnfw_runtime_priority && !available_tflite)) { + ml_logi ("The given model [%s] is supposed a nnfw model.", model); + *nnfw = ML_NNFW_TYPE_NNFW; + } else { + ml_logi ("The given model [%s] is supposed a tensorflow-lite model.", model); + *nnfw = ML_NNFW_TYPE_TENSORFLOW_LITE; + } } else if (g_str_has_suffix (path_down, ".pb")) { ml_logi ("The given model [%s] is supposed a tensorflow model.", model); *nnfw = ML_NNFW_TYPE_TENSORFLOW; @@ -944,6 +963,14 @@ ml_validate_model_file (const char *model, ml_nnfw_type_e * nnfw) ml_loge ("The given model [%s] has unknown extension.", model); status = ML_ERROR_INVALID_PARAMETER; } + + if (status == ML_ERROR_NONE) { + bool available = false; + ml_check_nnfw_availability (*nnfw, ML_NNFW_HW_ANY, &available); + if (available == false) + status = ML_ERROR_NOT_SUPPORTED; + } + break; case ML_NNFW_TYPE_CUSTOM_FILTER: if (!g_str_has_suffix (path_down, NNSTREAMER_SO_FILE_EXTENSION)) { @@ -964,10 +991,27 @@ ml_validate_model_file (const char *model, ml_nnfw_type_e * nnfw) } break; case ML_NNFW_TYPE_NNFW: - /** @todo Need to check method for NNFW */ - ml_loge ("NNFW is 
not supported."); - status = ML_ERROR_NOT_SUPPORTED; + { + gchar *model_path = NULL; + gchar *meta = NULL; + + if (!g_str_has_suffix (path_down, ".tflite")) { + ml_loge ("The given model [%s] has invalid extension.", model); + status = ML_ERROR_INVALID_PARAMETER; + break; + } + + model_path = g_path_get_dirname (model); + meta = g_build_filename (model_path, "metadata", "MANIFEST", NULL); + if (!g_file_test (meta, G_FILE_TEST_IS_REGULAR)) { + ml_loge ("The given model path [%s] is missing metadata.", model_path); + status = ML_ERROR_INVALID_PARAMETER; + } + + g_free (model_path); + g_free (meta); break; + } case ML_NNFW_TYPE_MVNC: /** @todo Need to check method for NCSDK2 */ ml_loge ("Intel Movidius NCSDK2 is not supported."); @@ -1014,8 +1058,7 @@ ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, } break; case ML_NNFW_TYPE_NNFW: - { - /** @todo Need to check method for NNFW */ + if (nnstreamer_filter_find ("nnfw") == NULL) { ml_logw ("NNFW is not supported."); goto done; } diff --git a/ext/nnstreamer/tensor_filter/meson.build b/ext/nnstreamer/tensor_filter/meson.build index 362567a..c6b1ea8 100644 --- a/ext/nnstreamer/tensor_filter/meson.build +++ b/ext/nnstreamer/tensor_filter/meson.build @@ -13,7 +13,6 @@ if get_option('enable-nnfw-runtime') endif nnstreamer_filter_nnfw_deps = [glib_dep, gst_dep, nnstreamer_dep, nnfw_dep] - nnfw_plugin_lib = shared_library('nnstreamer_filter_nnfw', nnstreamer_filter_nnfw_sources, dependencies: nnstreamer_filter_nnfw_deps, diff --git a/meson.build b/meson.build index 1e03eb4..9142921 100644 --- a/meson.build +++ b/meson.build @@ -270,6 +270,7 @@ if get_option('enable-test') nnstreamer_test_conf.set('ENABLE_SYMBOLIC_LINK', false) nnstreamer_test_conf.set('TORCH_USE_GPU', false) nnstreamer_test_conf.set('ELEMENT_RESTRICTION_CONFIG', '') + nnstreamer_test_conf.set('NNFW_RUNTIME_PRIORITIZE', false) configure_file(input: 'nnstreamer.ini.in', output: 'nnstreamer-test.ini', install: get_option('install-test'), @@ -285,6 
+286,7 @@ nnstreamer_install_conf.merge_from(nnstreamer_conf) nnstreamer_install_conf.set('ENABLE_ENV_VAR', get_option('enable-env-var')) nnstreamer_install_conf.set('ENABLE_SYMBOLIC_LINK', get_option('enable-symbolic-link')) nnstreamer_install_conf.set('TORCH_USE_GPU', get_option('enable-pytorch-use-gpu')) +nnstreamer_install_conf.set('NNFW_RUNTIME_PRIORITIZE', get_option('nnfw-runtime-prioritize')) # Element restriction restriction_config = '' diff --git a/meson_options.txt b/meson_options.txt index 3be1043..d116315 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -19,5 +19,6 @@ option('enable-element-restriction', type: 'boolean', value: false) # true to re option('restricted-elements', type: 'string', value: '') option('enable-tflite-nnapi-delegation', type: 'boolean', value: false) # true to enable tensorflow-lite to delegate nnapi interpretation to nnfw backend in tizen option('enable-nnfw-runtime', type: 'boolean', value: false) # true to enable nnfw tensor filter element +option('nnfw-runtime-prioritize', type: 'boolean', value: false) # true to set higher priority for nnfw for tflite extension files in auto mode option('enable-cppfilter', type: 'boolean', value: true) option('enable-tizen-sensor', type: 'boolean', value: false) diff --git a/nnstreamer.ini.in b/nnstreamer.ini.in index f1bc9b8..1789909 100644 --- a/nnstreamer.ini.in +++ b/nnstreamer.ini.in @@ -19,4 +19,8 @@ enable_nnapi=False [pytorch] enable_use_gpu=@TORCH_USE_GPU@ +# Set 1 or True if you want to prioritize nnfw over tensorflow lite for .tflite extension model files when automatically selecting framework for tensor filter. +[nnfw-runtime] +prioritize_tflite_ext=@NNFW_RUNTIME_PRIORITIZE@ + @ELEMENT_RESTRICTION_CONFIG@