From: Hyoung Joo Ahn
Date: Wed, 7 Aug 2019 02:07:32 +0000 (+0900)
Subject: [Filter/Tensorflow] remove the unused option for memory optimization
X-Git-Tag: accepted/tizen/unified/20190812.235554~15
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2efa3c843730c2d7e12a0fe19a6c0d3b60e950dc;p=platform%2Fupstream%2Fnnstreamer.git

[Filter/Tensorflow] remove the unused option for memory optimization

After changing tensorflow to use the C API, `mem_optmz` is no longer used.

Signed-off-by: Hyoung Joo Ahn
---

diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.c b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.c
index a6be9fc..fe82b7a 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.c
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow.c
@@ -70,7 +70,6 @@ static int
 tf_loadModelFile (const GstTensorFilterProperties * prop, void **private_data)
 {
   tf_data *tf;
-  gboolean tf_mem_optmz;
 
   if (*private_data != NULL) {
     tf = *private_data;
@@ -81,15 +80,12 @@ tf_loadModelFile (const GstTensorFilterProperties * prop, void **private_data)
     }
   }
 
-  tf_mem_optmz = nnsconf_get_custom_value_bool ("tensorflow",
-      "enable_mem_optimization", FALSE);
-
   tf = g_new0 (tf_data, 1); /** initialize tf Fill Zero! */
   *private_data = tf;
   tf->tf_private_data = tf_core_new (prop->model_file);
   if (tf->tf_private_data) {
-    if (tf_core_init (tf->tf_private_data, prop, tf_mem_optmz)) {
+    if (tf_core_init (tf->tf_private_data, prop)) {
       g_printerr ("failed to initailize the object: tensorflow");
       return -2;
     }
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc
index ea0f023..fe1eb79 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.cc
@@ -531,8 +531,7 @@ tf_core_delete (void * tf)
  * @return 0 if OK. non-zero if error.
  */
 int
-tf_core_init (void * tf, const GstTensorFilterProperties * prop,
-    const gboolean tf_mem_optmz)
+tf_core_init (void * tf, const GstTensorFilterProperties * prop)
 {
   TFCore *c = (TFCore *) tf;
   return c->init (prop);
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h
index 16116d9..e613b89 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_core.h
@@ -98,8 +98,7 @@ extern "C"
 
   void *tf_core_new (const char * _model_path);
   void tf_core_delete (void * tf);
-  int tf_core_init (void * tf, const GstTensorFilterProperties * prop,
-      const gboolean tf_mem_optmz);
+  int tf_core_init (void * tf, const GstTensorFilterProperties * prop);
   const char *tf_core_getModelPath (void * tf);
   int tf_core_getInputDim (void * tf, GstTensorsInfo * info);
   int tf_core_getOutputDim (void * tf, GstTensorsInfo * info);
diff --git a/meson.build b/meson.build
index 0438906..cf15757 100644
--- a/meson.build
+++ b/meson.build
@@ -251,7 +251,6 @@ nnstreamer_install_conf.merge_from(nnstreamer_conf)
 
 nnstreamer_install_conf.set('ENABLE_ENV_VAR', get_option('enable-env-var'))
 nnstreamer_install_conf.set('ENABLE_SYMBOLIC_LINK', get_option('enable-symbolic-link'))
-nnstreamer_install_conf.set('TF_MEM_OPTMZ', get_option('enable-tensorflow-mem-optmz'))
 nnstreamer_install_conf.set('TORCH_USE_GPU', get_option('enable-pytorch-use-gpu'))
 
 # Install .ini
diff --git a/meson_options.txt b/meson_options.txt
index cc9904c..90f9903 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -2,7 +2,6 @@ option('enable-test', type: 'boolean', value: true)
 option('install-test', type: 'boolean', value: false)
 option('enable-tensorflow-lite', type: 'boolean', value: true)
 option('enable-tensorflow', type: 'boolean', value: true)
-option('enable-tensorflow-mem-optmz', type: 'boolean', value: true)
 option('enable-caffe2', type: 'boolean', value: true)
 option('enable-pytorch', type: 'boolean', value: true)
 option('enable-pytorch-use-gpu', type: 'boolean', value: false) # default value, can be specified at run time
diff --git a/nnstreamer.ini.in b/nnstreamer.ini.in
index 3856021..f613156 100644
--- a/nnstreamer.ini.in
+++ b/nnstreamer.ini.in
@@ -13,8 +13,6 @@ decoders=@SUBPLUGIN_INSTALL_PREFIX@/decoders/
 
 # Set 1 or True if you want to eliminate memcpy of tensorflow input frames for faster executions.
 # It may break in some special cases (running tensorflow & nnstreamer in a chroot of a AWS VM); in such a case, keep it 0 or FALSE.
-[tensorflow]
-enable_mem_optimization=@TF_MEM_OPTMZ@
 
 # Set 1 or True if you want to use NNAPI with tensorflow-lite, which enables to use NNAPI backend, which may use GPU or NPU/TPU.
 [tensorflowlite]
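
Note (not part of the patch): the sketch below only illustrates the simplified call sequence after this change, namely that tf_core_init() now takes the core handle and the properties struct alone, with no tf_mem_optmz flag and no nnsconf_get_custom_value_bool() lookup. tf_core_new(), tf_core_init(), tf_core_delete(), and GstTensorFilterProperties come from the headers touched above; the wrapper function example_load() and its error handling are hypothetical, and the snippet builds only against the in-tree nnstreamer headers.

/* Illustrative sketch only: mirrors the post-patch flow of tf_loadModelFile().
 * example_load() is a hypothetical helper, not part of nnstreamer. */
#include "tensor_filter_tensorflow_core.h"

static int
example_load (const GstTensorFilterProperties * prop, void **core)
{
  /* Create the TFCore wrapper for the given model file. */
  void *c = tf_core_new (prop->model_file);

  if (c == NULL)
    return -1;

  /* The memory-optimization flag is gone: tf_core_init() now takes only the
   * core handle and the filter properties. Non-zero means failure. */
  if (tf_core_init (c, prop) != 0) {
    tf_core_delete (c);
    return -2;
  }

  *core = c;
  return 0;
}

As the diff shows, the enable-tensorflow-mem-optmz meson option and the [tensorflow] section of nnstreamer.ini existed only to feed that flag, so they are dropped together with the parameter.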