[TF2/Filter] Added a framework to support TF2-Lite (2.3.0)
author    Geunsik Lim <geunsik.lim@samsung.com>
          Fri, 28 Aug 2020 04:45:29 +0000 (13:45 +0900)
committer MyungJoo Ham <myungjoo.ham@samsung.com>
          Wed, 23 Sep 2020 23:58:19 +0000 (08:58 +0900)
This commit additionally supports TensorFlow Lite 2.3.0 (TF2-Lite), so that
TensorFlow 2.x based network models (e.g., ASR) can be run on the
Tizen 6.x platform.
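
For reference, the new subplugin is selected by its framework name,
"tensorflow2-lite". A minimal usage sketch (the model and image paths are
illustrative and follow the bundled unit test):

  gst-launch-1.0 filesrc location=orange.png ! pngdec ! videoscale ! \
      imagefreeze ! videoconvert ! video/x-raw,format=RGB,framerate=0/1 ! \
      tensor_converter ! \
      tensor_filter framework=tensorflow2-lite model=mobilenet_v1_1.0_224_quant.tflite ! \
      filesink location=tensorfilter.out.log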

**Changelog**

  * Version 3:
    * Removed unnecessary tflite_nnapi delegation (@jaeyun-jung)

  * Version 2:
    * Added Debian packaging for TF-2.3.0 in tensorflow2 (review.tizen.org)
      * https://review.tizen.org/gerrit/#/c/243189/ (Reviewed, Merged)
      * https://launchpad.net/~nnstreamer/+archive/ubuntu/ppa-build-test (Published)
    * Added a unit test with SSAT

  * Version 1:
    * Added the tflite2-support feature to the meson build procedure
    * Added TF2-Lite statements to the .spec file for Tizen
    * Generated two subplugin libraries from tensor_filter_tensorflow_lite.cc
      by building it with different "-DTFLITE_VERSION" flags (see the build
      sketch after this list)
    * Added the nnstreamer-tensorflow2-lite package
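
  A minimal sketch of the resulting build steps, assuming the TF-Lite and
  TF2-Lite development packages are installed (paths follow the unit test
  and the .spec layout in this commit):

    # Enable both subplugins explicitly (both options default to 'auto').
    meson build -Dtflite-support=enabled -Dtflite2-support=enabled
    ninja -C build
    # The same source file is compiled twice with different -DTFLITE_VERSION values.
    ls build/ext/nnstreamer/tensor_filter/libnnstreamer_filter_tensorflow*-lite.so
    #   libnnstreamer_filter_tensorflow-lite.so
    #   libnnstreamer_filter_tensorflow2-lite.so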

Signed-off-by: Geunsik Lim <geunsik.lim@samsung.com>
ext/nnstreamer/tensor_filter/meson.build
ext/nnstreamer/tensor_filter/tensor_filter_tensorflow_lite.cc
meson.build
meson_options.txt
packaging/nnstreamer.spec
tests/meson.build
tests/nnstreamer_filter_tensorflow2_lite/checkLabel.py [new file with mode: 0644]
tests/nnstreamer_filter_tensorflow2_lite/runTest.sh [new file with mode: 0644]

index 032f40d..723efb5 100644 (file)
@@ -114,6 +114,7 @@ if tflite_support_is_available
   if cxx.has_type('kTfLiteComplex64', prefix : '#include <tensorflow/contrib/lite/model.h>')
     tflite_compile_args += '-DTFLITE_COMPLEX64=1'
   endif
+  tflite_compile_args += '-DTFLITE_VERSION=1'
 
   tflite_extra_dep = declare_dependency(
     compile_args : tflite_compile_args,
@@ -136,6 +137,53 @@ if tflite_support_is_available
   )
 endif
 
+if tflite2_support_is_available
+  filter_sub_tflite2_sources = ['tensor_filter_tensorflow_lite.cc']
+
+  nnstreamer_filter_tflite2_sources = []
+  foreach s : filter_sub_tflite2_sources
+    nnstreamer_filter_tflite2_sources += join_paths(meson.current_source_dir(), s)
+  endforeach
+
+  nnstreamer_filter_tflite2_deps = tflite_support_deps + [thread_dep, libdl_dep, glib_dep, gst_dep, nnstreamer_dep]
+
+  tflite2_compile_args = []
+
+  if cxx.has_type('kTfLiteInt8', prefix : '#include <tensorflow2/contrib/lite/model.h>')
+    tflite2_compile_args += '-DTFLITE_INT8=1'
+  endif
+  if cxx.has_type('kTfLiteInt16', prefix : '#include <tensorflow2/contrib/lite/model.h>')
+    tflite2_compile_args += '-DTFLITE_INT16=1'
+  endif
+  if cxx.has_type('kTfLiteFloat16', prefix : '#include <tensorflow2/contrib/lite/model.h>')
+    tflite2_compile_args += '-DTFLITE_FLOAT16=1'
+  endif
+  if cxx.has_type('kTfLiteComplex64', prefix : '#include <tensorflow2/contrib/lite/model.h>')
+    tflite2_compile_args += '-DTFLITE_COMPLEX64=1'
+  endif
+  tflite2_compile_args += '-DTFLITE_VERSION=2'
+
+  tflite2_extra_dep = declare_dependency(
+    compile_args : tflite2_compile_args,
+  )
+
+  nnstreamer_filter_tflite2_deps += tflite2_extra_dep
+
+  shared_library('nnstreamer_filter_tensorflow2-lite',
+    nnstreamer_filter_tflite2_sources,
+    dependencies: nnstreamer_filter_tflite2_deps,
+    install: true,
+    install_dir: filter_subplugin_install_dir
+  )
+
+  static_library('nnstreamer_filter_tensorflow2-lite',
+    nnstreamer_filter_tflite2_sources,
+    dependencies: nnstreamer_filter_tflite2_deps,
+    install: true,
+    install_dir: nnstreamer_libdir
+  )
+endif
+
 if pytorch_support_is_available
   filter_sub_torch_sources = ['tensor_filter_pytorch.cc']
 
index 44cce95..efd29ac 100644 (file)
@@ -21,7 +21,9 @@
  * @author HyoungJoo Ahn <hello.ahn@samsung.com>
  * @bug    No known bugs except for NYI items
  *
- * This is the per-NN-framework plugin (tensorflow-lite) for tensor_filter.
+ * This is the per-NN-framework plugin (tensorflow-lite, tensorflow2-lite)
+ * for tensor_filter. The meson build system generates two .so files
+ * (one for TF-Lite and one for TF2-Lite) from this source code.
  */
 
 #include <unistd.h>
@@ -1074,7 +1076,11 @@ tflite_checkAvailability (accl_hw hw)
   return -ENOENT;
 }
 
+#if TFLITE_VERSION == 1
 static gchar filter_subplugin_tensorflow_lite[] = "tensorflow-lite";
+#else
+static gchar filter_subplugin_tensorflow_lite[] = "tensorflow2-lite";
+#endif
 
 static GstTensorFilterFramework NNS_support_tensorflow_lite = {
   .version = GST_TENSOR_FILTER_FRAMEWORK_V0,
index a0222a9..46460a6 100644 (file)
@@ -245,6 +245,10 @@ features = {
     'target': 'tensorflow-lite',
     'project_args': { 'ENABLE_TENSORFLOW_LITE': 1 }
   },
+  'tflite2-support': {
+    'target': 'tensorflow2-lite',
+    'project_args': { 'ENABLE_TENSORFLOW2_LITE': 1 }
+  },
   'pytorch-support': {
     'target': 'pytorch',
     'project_args': { 'ENABLE_PYTORCH': 1 }
index c2ea273..d77f996 100644 (file)
@@ -3,6 +3,7 @@ option('video-support', type: 'feature', value: 'enabled')
 option('audio-support', type: 'feature', value: 'enabled')
 option('tf-support', type: 'feature', value: 'auto')
 option('tflite-support', type: 'feature', value: 'auto')
+option('tflite2-support', type: 'feature', value: 'auto')
 option('pytorch-support', type: 'feature', value: 'auto')
 option('caffe2-support', type: 'feature', value: 'auto')
 option('python2-support', type: 'feature', value: 'auto')
@@ -38,4 +39,4 @@ option('enable-openvino', type: 'boolean', value: false)
 option('enable-vivante', type: 'boolean', value: false)
 option('enable-tizen-feature-check', type: 'boolean', value: true)
 option('enable-tizen-privilege-check', type: 'boolean', value: true)
-option('framework-priority-tflite', type: 'string', value: 'tensorflow-lite,nnfw,armnn,edgetpu', description: 'A comma separated prioritized list of neural network frameworks to open a .tflite file')
+option('framework-priority-tflite', type: 'string', value: 'tensorflow-lite,tensorflow2-lite,nnfw,armnn,edgetpu', description: 'A comma separated prioritized list of neural network frameworks to open a .tflite file')
index 2eee1da..2ce290f 100644 (file)
@@ -4,6 +4,7 @@
 %define                nnstexampledir  /usr/lib/nnstreamer/bin
 %define                tensorflow_support 0
 %define                tensorflow_lite_support 1
+%define                tensorflow2_lite_support 1
 %define                armnn_support 0
 %define                vivante_support 0
 %define                flatbuf_support 1
@@ -132,6 +133,10 @@ BuildRequires: flatbuffers-devel
 # for tensorflow-lite
 BuildRequires: tensorflow-lite-devel
 %endif
+%if 0%{?tensorflow2_lite_support}
+# for tensorflow2-lite
+BuildRequires: tensorflow2-lite-devel
+%endif
 # custom_example_opencv filter requires opencv-devel
 BuildRequires: opencv-devel
 # For './testAll.sh' time limit.
@@ -255,6 +260,16 @@ Requires:  nnstreamer = %{version}-%{release}
 NNStreamer's tensor_fliter subplugin of TensorFlow Lite.
 %endif
 
+# for tensorflow2-lite
+%if 0%{?tensorflow2_lite_support}
+%package tensorflow2-lite
+Summary:       NNStreamer TensorFlow2 Lite Support
+Requires:      nnstreamer = %{version}-%{release}
+# tensorflow2-lite provides a .a file that is embedded into the subplugin. No dependency on tflite.
+%description tensorflow2-lite
+NNStreamer's tensor_fliter subplugin of TensorFlow2 Lite.
+%endif
+
 %if 0%{?python_support}
 %package python2
 Summary:  NNStreamer Python Custom Filter Support
@@ -498,6 +513,7 @@ NNStreamer developer utilities include nnstreamer configuration checker.
 %else
 %define enable_tf -Dtf-support=disabled
 %endif
+
 # Support tensorflow-lite
 %if 0%{?tensorflow_lite_support}
 %define enable_tf_lite -Dtflite-support=enabled
@@ -505,6 +521,13 @@ NNStreamer developer utilities include nnstreamer configuration checker.
 %define enable_tf_lite -Dtflite-support=disabled
 %endif
 
+# Support tensorflow2-lite
+%if 0%{?tensorflow2_lite_support}
+%define enable_tf2_lite -Dtflite2-support=enabled
+%else
+%define enable_tf2_lite -Dtflite2-support=disabled
+%endif
+
 # Support pytorch
 %if 0%{?pytorch_support}
 %define enable_pytorch -Dpytorch-support=enabled
@@ -580,7 +603,7 @@ mkdir -p build
 meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} --libdir=%{_libdir} \
        --bindir=%{nnstexampledir} --includedir=%{_includedir} -Dinstall-example=true \
        %{enable_api} %{enable_tizen} %{element_restriction} -Denable-env-var=false -Denable-symbolic-link=false \
-       %{enable_tf_lite} %{enable_tf} %{enable_pytorch} %{enable_caffe2} %{enable_python} \
+       %{enable_tf_lite} %{enable_tf2_lite} %{enable_tf} %{enable_pytorch} %{enable_caffe2} %{enable_python} \
        %{enable_nnfw_runtime} %{enable_mvncsdk2} %{enable_openvino} %{enable_armnn} %{enable_edgetpu}  %{enable_vivante} %{enable_flatbuf} \
        %{enable_tizen_privilege_check} %{enable_tizen_feature_check} %{enable_tizen_sensor} %{enable_test} %{enable_test_coverage} %{install_test} \
        build
@@ -721,6 +744,14 @@ cp -r result %{buildroot}%{_datadir}/nnstreamer/unittest/
 %{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_tensorflow-lite.so
 %endif
 
+# for tensorflow2-lite
+%if 0%{?tensorflow2_lite_support}
+%files tensorflow2-lite
+%manifest nnstreamer.manifest
+%defattr(-,root,root,-)
+%{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_tensorflow2-lite.so
+%endif
+
 %if 0%{?python_support}
 %files python2
 %manifest nnstreamer.manifest
index 1ecbbd8..a6fcb82 100644 (file)
@@ -180,6 +180,9 @@ if get_option('install-test')
     install_subdir('nnstreamer_decoder_image_labeling', install_dir: unittest_tests_install_dir)
     install_subdir('nnstreamer_filter_reload', install_dir: unittest_tests_install_dir)
   endif
+  if tflite2_support_is_available
+    install_subdir('nnstreamer_filter_tensorflow2_lite', install_dir: unittest_tests_install_dir)
+  endif
   if have_python2 or have_python3
     install_subdir('nnstreamer_filter_python', install_dir: unittest_tests_install_dir)
   endif
diff --git a/tests/nnstreamer_filter_tensorflow2_lite/checkLabel.py b/tests/nnstreamer_filter_tensorflow2_lite/checkLabel.py
new file mode 100644 (file)
index 0000000..8f80449
--- /dev/null
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+##
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2018 Samsung Electronics
+#
+# @file checkLabel.py
+# @brief Check the result label of tensorflow-lite model
+# @author HyoungJoo Ahn <hello.ahn@samsung.com>
+
+import sys
+import os
+import struct
+import string
+sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
+from gen24bBMP import convert_to_bytes
+
+
+def readbyte (filename):
+  F = open(filename, 'rb')
+  readbyte = F.read()
+  F.close()
+  return readbyte
+
+
+def readlabel (filename):
+  F = open(filename, 'r')
+  line = F.readlines()
+  F.close()
+  return line
+
+onehot = readbyte(sys.argv[1])
+onehot = [convert_to_bytes(x) for x in onehot]
+idx = onehot.index(max(onehot))
+
+label_list = readlabel(sys.argv[2])
+label = label_list[idx].strip()
+
+answer = sys.argv[3].strip()
+exit(label != answer)
diff --git a/tests/nnstreamer_filter_tensorflow2_lite/runTest.sh b/tests/nnstreamer_filter_tensorflow2_lite/runTest.sh
new file mode 100644 (file)
index 0000000..45a17c5
--- /dev/null
@@ -0,0 +1,195 @@
+#!/usr/bin/env bash
+##
+## SPDX-License-Identifier: LGPL-2.1-only
+##
+## @file runTest.sh
+## @author MyungJoo Ham <myungjoo.ham@gmail.com>
+## @date Nov 01 2018
+## @brief SSAT Test Cases for NNStreamer
+##
+if [[ "$SSATAPILOADED" != "1" ]]; then
+    SILENT=0
+    INDEPENDENT=1
+    search="ssat-api.sh"
+    source $search
+    printf "${Blue}Independent Mode${NC}
+"
+fi
+
+# This is compatible with SSAT (https://github.com/myungjoo/SSAT)
+testInit $1
+
+# NNStreamer and plugins path for test
+PATH_TO_PLUGIN="../../build"
+
+if [[ -d $PATH_TO_PLUGIN ]]; then
+    ini_path="${PATH_TO_PLUGIN}/ext/nnstreamer/tensor_filter"
+    if [[ -d ${ini_path} ]]; then
+        check=$(ls ${ini_path} | grep tensorflow2-lite.so)
+        if [[ ! $check ]]; then
+            echo "Cannot find tensorflow2-lite shared lib"
+            report
+            exit
+        fi
+    else
+        echo "Cannot find ${ini_path}"
+    fi
+else
+    ini_file="/etc/nnstreamer.ini"
+    if [[ -f ${ini_file} ]]; then
+        path=$(grep "^filters" ${ini_file})
+        key=${path%=*}
+        value=${path##*=}
+
+        if [[ $key != "filters" ]]; then
+            echo "String Error"
+            report
+            exit
+        fi
+
+        if [[ -d ${value} ]]; then
+            check=$(ls ${value} | grep tensorflow2-lite.so)
+            if [[ ! $check ]]; then
+                echo "Cannot find tensorflow2-lite shared lib"
+                report
+                exit
+            fi
+        else
+            echo "Cannot find ${value}"
+            report
+            exit
+        fi
+    else
+        echo "Cannot identify nnstreamer.ini"
+        report
+        exit
+    fi
+fi
+
+PATH_TO_MODEL="../test_models/models/mobilenet_v1_1.0_224_quant.tflite"
+PATH_TO_LABEL="../test_models/labels/labels.txt"
+PATH_TO_IMAGE="../test_models/data/orange.png"
+
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_IMAGE} ! pngdec ! videoscale ! imagefreeze ! videoconvert ! video/x-raw,format=RGB,framerate=0/1 ! tensor_converter ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MODEL} ! filesink location=tensorfilter.out.log" 1 0 0 $PERFORMANCE
+python checkLabel.py tensorfilter.out.log ${PATH_TO_LABEL} orange
+testResult $? 1 "Golden test comparison" 0 1
+
+# Fail test for invalid input properties
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_IMAGE} ! pngdec ! videoscale ! imagefreeze ! videoconvert ! video/x-raw,format=RGB,framerate=0/1 ! tensor_converter ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MODEL} input=7:1 inputtype=float32 ! filesink location=tensorfilter.out.log" 2F_n 0 1 $PERFORMANCE
+
+# Fail test for invalid output properties
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_IMAGE} ! pngdec ! videoscale ! imagefreeze ! videoconvert ! video/x-raw,format=RGB,framerate=0/1 ! tensor_converter ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MODEL} output=1:7 outputtype=int8 ! filesink location=tensorfilter.out.log" 3F_n 0 1 $PERFORMANCE
+
+PATH_TO_MULTI_TENSOR_OUTPUT_MODEL="../test_models/models/multi_person_mobilenet_v1_075_float.tflite"
+
+# Simple tests for multi-tensor output model
+# This should emit error because of invalid width and height size
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num_buffers=4 ! videoconvert ! videoscale ! video/x-raw,format=RGB,width=353,height=257 ! tensor_converter ! tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MULTI_TENSOR_OUTPUT_MODEL} ! fakesink" 4_n 0 1 $PERFORMANCE
+
+# This won't fail, but not much meaningful
+gstTest "--gst-plugin-path=${PATH_TO_PLUGIN} videotestsrc num_buffers=4 ! videoconvert ! videoscale ! video/x-raw,format=RGB,width=257,height=353 ! tensor_converter ! tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MULTI_TENSOR_OUTPUT_MODEL} ! fakesink" 5 0 0 $PERFORMANCE
+
+# Test the backend setting done with tensorflow2-lite
+# This also performs tests for generic backend configuration parsing
+function run_pipeline() {
+    gst-launch-1.0 --gst-plugin-path=${PATH_TO_PLUGIN} filesrc location=${PATH_TO_IMAGE} ! pngdec ! videoscale ! imagefreeze ! videoconvert ! video/x-raw,format=RGB,framerate=0/1 ! tensor_converter ! tensor_filter framework=tensorflow2-lite model=${PATH_TO_MODEL} accelerator=$1 ! filesink location=tensorfilter.out.log 2>info
+}
+
+arch=$(uname -m)
+if [ "$arch" = "aarch64" ] || [ "$arch" = "armv7l" ]; then
+  auto_accl="cpu.neon"
+elif [ "$arch" = "x86_64" ]; then
+  auto_accl="cpu.simd"
+else
+  auto_accl="cpu"
+fi
+
+# Property reading test for nnapi
+run_pipeline true:cpu,npu,gpu
+cat info | grep "nnapi = 1, accl = cpu$"
+testResult $? 2-1 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!cpu
+cat info | grep "nnapi = 1, accl = ${auto_accl}$"
+testResult $? 2-2 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!npu,gpu
+cat info | grep "nnapi = 1, accl = gpu$"
+testResult $? 2-3 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!npu,gpu,abcd
+cat info | grep "nnapi = 1, accl = gpu$"
+testResult $? 2-4 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!npu,!abcd,gpu
+cat info | grep "nnapi = 1, accl = gpu$"
+testResult $? 2-5 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:auto
+cat info | grep "nnapi = 1, accl = ${auto_accl}$"
+testResult $? 2-6 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:default,gpu
+cat info | grep "nnapi = 1, accl = cpu$"
+testResult $? 2-7 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!cpu,default
+cat info | grep "nnapi = 1, accl = cpu$"
+testResult $? 2-8 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!default
+cat info | grep "nnapi = 1, accl = ${auto_accl}$"
+testResult $? 2-9 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:npu.srcn
+cat info | grep "nnapi = 1, accl = npu$"
+testResult $? 2-10 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline false:abcd
+cat info | grep "nnapi = 0, accl = none$"
+testResult $? 2-11 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline false
+cat info | grep "nnapi = 0, accl = none$"
+testResult $? 2-12 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:
+cat info | grep "nnapi = 1, accl = ${auto_accl}$"
+testResult $? 2-13 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true
+cat info | grep "nnapi = 1, accl = ${auto_accl}$"
+testResult $? 2-14 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline auto
+cat info | grep "nnapi = 0, accl = none$"
+testResult $? 2-15 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:!npu,abcd,gpu
+cat info | grep "nnapi = 1, accl = gpu$"
+testResult $? 2-16 "NNAPI activation test" 0 1
+
+# Property reading test for nnapi
+run_pipeline true:${auto_accl},cpu
+cat info | grep "nnapi = 1, accl = ${auto_accl}$"
+testResult $? 2-17 "NNAPI activation test" 0 1
+
+# Cleanup
+rm info
+
+report