[Filter/TF-Lite] Enable NNAPI with configurations
author    MyungJoo Ham <myungjoo.ham@samsung.com>
          Fri, 29 Mar 2019 01:45:41 +0000 (10:45 +0900)
committer MyungJoo Ham <myungjoo.ham@samsung.com>
          Fri, 29 Mar 2019 05:06:21 +0000 (14:06 +0900)
This reworks #1013 with nnstreamer configuration support.

Set "enable_nnapi" to TRUE, 1, or ON in the .ini file, or set
NNSTREAMER_tensorflowlite_enable_nnapi=1 as an environment
variable, to enable NNAPI for tensorflow-lite.
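For example, the equivalent setting in nnstreamer.ini (any of TRUE, 1,
or ON works, as noted above):

  [tensorflowlite]
  enable_nnapi=TRUE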

CC: @daeinki
Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc
ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h
nnstreamer.ini.in

index cc32503..beaf2f6 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.cc
@@ -25,6 +25,7 @@
 #include <algorithm>
 
 #include <nnstreamer_plugin_api.h>
+#include <nnstreamer_conf.h>
 #include "tensor_filter_tensorflow_lite_core.h"
 
 /**
@@ -43,6 +44,7 @@
 TFLiteCore::TFLiteCore (const char * _model_path)
 {
   model_path = _model_path;
+  use_nnapi = nnsconf_get_custom_value_bool ("tensorflowlite", "enable_nnapi", FALSE);
 
   gst_tensors_info_init (&inputTensorMeta);
   gst_tensors_info_init (&outputTensorMeta);
@@ -127,6 +129,9 @@ TFLiteCore::loadModel ()
       return -2;
     }
 
+    /* Choose the inference path (NNAPI or CPU) for tensorflow-lite */
+    interpreter->UseNNAPI (use_nnapi);
+
     /** set allocation type to dynamic for in/out tensors */
     int tensor_idx;
 
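For reference, a minimal sketch of the lookup semantics the new
nnsconf_get_custom_value_bool() call relies on, assuming the
environment-variable-overrides-ini behavior described in the commit
message. This is an illustration only, not the actual nnstreamer_conf
implementation; example_get_bool is a hypothetical name.

  #include <glib.h>

  /* Hypothetical sketch: the NNSTREAMER_<group>_<key> environment
   * variable wins, then the [group] section of nnstreamer.ini (lookup
   * omitted here), then the given default value. */
  static gboolean
  example_get_bool (const gchar * group, const gchar * key, gboolean def)
  {
    gchar *envname = g_strdup_printf ("NNSTREAMER_%s_%s", group, key);
    const gchar *val = g_getenv (envname);

    g_free (envname);
    if (val == NULL)
      return def; /* fall back to the .ini file (not shown) */

    return (g_ascii_strcasecmp (val, "1") == 0 ||
        g_ascii_strcasecmp (val, "TRUE") == 0 ||
        g_ascii_strcasecmp (val, "ON") == 0);
  }

  int
  main (void)
  {
    g_print ("enable_nnapi = %d\n",
        example_get_bool ("tensorflowlite", "enable_nnapi", FALSE));
    return 0;
  }
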
index 6b8304d..78cea50 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite_core.h
@@ -54,6 +54,7 @@ public:
 private:
 
   const char *model_path;
+  bool use_nnapi;
 
   GstTensorsInfo inputTensorMeta;  /**< The tensor info of input tensors */
   GstTensorsInfo outputTensorMeta;  /**< The tensor info of output tensors */
index 71caaa5..308f2ef 100644
--- a/nnstreamer.ini.in
+++ b/nnstreamer.ini.in
@@ -9,3 +9,7 @@ decoders=@SUBPLUGIN_INSTALL_PREFIX@/decoders/
 # It may break in some special cases (running tensorflow & nnstreamer in a chroot of an AWS VM); in such a case, keep it 0 or FALSE.
 [tensorflow]
 mem_optmz=@TF_MEM_OPTMZ@
+
+# Set 1 or TRUE to use NNAPI with tensorflow-lite; this enables the NNAPI backend, which may use a GPU or NPU/TPU.
+[tensorflowlite]
+enable_nnapi=FALSE
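
As a usage sketch, NNAPI can also be enabled per-process through the
environment variable; the pipeline below is illustrative (the model
file name, caps, and sink are placeholders):

  $ NNSTREAMER_tensorflowlite_enable_nnapi=1 gst-launch-1.0 \
      videotestsrc num-buffers=10 ! videoconvert ! videoscale ! \
      video/x-raw,format=RGB,width=224,height=224 ! tensor_converter ! \
      tensor_filter framework=tensorflow-lite model=mobilenet.tflite ! \
      fakesink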