return new File[]{model};
}
+ /**
+ * Gets the File instance of the SNPE test model (quantized Inception v3, .dlc).
+ *
+ * The model is expected at /nnstreamer/snpe_data/inception_v3_quantized.dlc
+ * under the device's external storage; the test fails immediately if it is
+ * missing so later pipeline construction does not produce a confusing error.
+ *
+ * @return the model file (guaranteed to exist when this method returns)
+ */
+ public static File getSNPEModel() {
+ String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+ File model = new File(root + "/nnstreamer/snpe_data/inception_v3_quantized.dlc");
+
+ if (!model.exists()) {
+ fail();
+ }
+
+ return model;
+ }
+
/**
* Verifies the byte buffer is direct buffer with native order.
*
fail();
}
}
+
+ /**
+ * Tests the SNPE tensor-filter inside a full pipeline.
+ *
+ * Pushes 10 dummy float32 3:299:299:1 buffers into appsrc and verifies the
+ * sink callback receives exactly one tensor of 4004 bytes per invoke
+ * (presumably the 1001-class float32 output of Inception v3 — confirm
+ * against the model). Skips silently when the SNPE framework is unavailable
+ * on the device.
+ */
+ @Test
+ public void testSNPE() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getSNPEModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:299:299:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snpe " + "model=" + model.getAbsolutePath() + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (
+ Pipeline pipe = new Pipeline(desc);
+ TensorsInfo info = new TensorsInfo()
+ ) {
+ info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,299,299,1});
+
+ /* register sink callback: flags mInvalidState on any malformed output */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ } else {
+ ByteBuffer output = data.getTensorData(0);
+
+ /* expect a single 4004-byte output buffer per invoke */
+ if (!APITestCommon.isValidBuffer(output, 4004)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer (dummy, zero-filled allocation) */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(100);
+ }
+
+ /* sleep 500 ms to let the last invoke complete before stopping */
+ Thread.sleep(500);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
}
fail();
}
}
+
+ /**
+ * Tests the SNPE framework through the single-shot API.
+ *
+ * Opens the quantized Inception v3 model, invokes it 5 times with dummy
+ * input allocated from the model's own input info, and checks each output
+ * is one tensor of 4004 bytes (float32 1:1001). Skips silently when the
+ * SNPE framework is unavailable on the device.
+ */
+ @Test
+ public void testSNPE() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ File model = APITestCommon.getSNPEModel();
+
+ SingleShot single = new SingleShot(model, NNStreamer.NNFWType.SNPE);
+ TensorsInfo in = single.getInputInfo();
+
+ /* use a generous 60 sec timeout so slow devices do not fail the test */
+ single.setTimeout(60000);
+
+ /* single-shot invoke */
+ for (int i = 0; i < 5; i++) {
+ /* input data (dummy, zero-filled allocation) */
+ TensorsData input = in.allocate();
+
+ /* invoke */
+ TensorsData output = single.invoke(input);
+
+ /* check output: 1 tensor (float32 1:1001) */
+ assertEquals(1, output.getTensorsCount());
+ assertEquals(4004, output.getTensorData(0).capacity());
+
+ Thread.sleep(30);
+ }
+
+ single.close();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
}
--- /dev/null
+#------------------------------------------------------
+# SNPE (The Snapdragon Neural Processing Engine)
+#
+# This mk file defines snpe module with prebuilt shared library.
+# (snpe-sdk, arm64-v8a only)
+# The SDK contents (include/ and lib/) are expected to have been copied
+# into $(LOCAL_PATH)/snpe before the NDK build runs.
+# See Qualcomm Neural Processing SDK for AI (https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk) for the details.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+SNPE_DIR := $(LOCAL_PATH)/snpe
+SNPE_INCLUDES := $(SNPE_DIR)/include/zdl/
+
+# Only arm64-v8a is supported; any other ABI aborts the build.
+ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+SNPE_LIB_PATH := $(SNPE_DIR)/lib/aarch64-android-clang6.0
+# NOTE(review): SNPE_DSP_LIB_PATH is defined but not referenced below —
+# confirm the DSP runtime libraries are packaged elsewhere.
+SNPE_DSP_LIB_PATH := $(SNPE_DIR)/lib/dsp
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# snpe-sdk (prebuilt shared library)
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libSNPE
+LOCAL_SRC_FILES := $(SNPE_LIB_PATH)/libSNPE.so
+
+include $(PREBUILT_SHARED_LIBRARY)
+
+#------------------------------------------------------
+# tensor-filter sub-plugin for snpe
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := snpe
+LOCAL_SRC_FILES := $(NNSTREAMER_FILTER_SNPE_SRCS)
+LOCAL_CXXFLAGS += -std=c++11 -frtti -fexceptions -Wno-exceptions -O2 -DNDEBUG $(NNS_API_FLAGS)
+LOCAL_C_INCLUDES := $(NNSTREAMER_INCLUDES) $(SNPE_INCLUDES) $(GST_HEADERS_COMMON)
+LOCAL_SHARED_LIBRARIES := libSNPE
+LOCAL_STATIC_LIBRARIES := nnstreamer
+
+include $(BUILD_STATIC_LIBRARY)
#if defined (ENABLE_NNFW)
extern void init_filter_nnfw (void);
#endif
+#if defined (ENABLE_SNPE)
+extern void _init_filter_snpe (void);
+#endif
/**
* @brief External function from GStreamer Android.
#endif
break;
case 3: /* NNFWType.SNPE */
- /** @todo add ML_NNFW_TYPE_SNPE (for android only?) */
+ *nnfw = ML_NNFW_TYPE_SNPE;
#if !defined (ENABLE_SNPE)
nns_logw ("SNPE is not supported.");
is_supported = FALSE;
init_filter_nnfw ();
#endif
#if defined (ENABLE_SNPE)
- /** @todo register snpe sub-plugin */
+ _init_filter_snpe ();
#endif
nns_is_initilaized = TRUE;
jclass clazz, jint fw_type)
{
ml_nnfw_type_e nnfw;
-
if (!nns_get_nnfw_type (fw_type, &nnfw)) {
return JNI_FALSE;
}
if [[ $enable_snpe == "yes" ]]; then
sed -i "s|ENABLE_SNPE := false|ENABLE_SNPE := true|" external/Android-nnstreamer-prebuilt.mk
sed -i "s|ENABLE_SNPE := false|ENABLE_SNPE := true|" api/src/main/jni/Android.mk
- cp -r $SNPE_DIRECTORY/* api/src/main/jni
+ mkdir api/src/main/jni/snpe
+ cp -r $SNPE_DIRECTORY/include api/src/main/jni/snpe
+ cp -r $SNPE_DIRECTORY/lib api/src/main/jni/snpe
fi
# Update tf-lite option
ML_NNFW_TYPE_VIVANTE = 7, /**< VeriSilicon's Vivante. (Since 6.0) */
ML_NNFW_TYPE_EDGE_TPU = 8, /**< Google Coral Edge TPU (USB). (Since 6.0) */
ML_NNFW_TYPE_ARMNN = 9, /**< Arm Neural Network framework (support for caffe and tensorflow-lite). (Since 6.0) */
+ ML_NNFW_TYPE_SNPE = 10, /**< Qualcomm SNPE (Snapdragon Neural Processing Engine), with .dlc model file. (Since 6.0) */
ML_NNFW_TYPE_SNAP = 0x2001, /**< SNAP (Samsung Neural Acceleration Platform), only for Android. (Since 6.0) */
} ml_nnfw_type_e;
#endif
/* SNAP requires multiple files, set supported if model file exists. */
break;
+ case ML_NNFW_TYPE_SNPE:
+#if !defined(__ANDROID__)
+ ml_loge ("Given framework, SNPE is not supported yet for non Android (arm64-v8a).");
+ status = ML_ERROR_NOT_SUPPORTED;
+ break;
+#endif
+ if (g_ascii_strcasecmp (file_ext[0], ".dlc") != 0) {
+ status = ML_ERROR_INVALID_PARAMETER;
+ }
+ break;
case ML_NNFW_TYPE_ARMNN:
if (g_ascii_strcasecmp (file_ext[0], ".caffemodel") != 0 &&
g_ascii_strcasecmp (file_ext[0], ".tflite") != 0 &&
[ML_NNFW_TYPE_EDGE_TPU] = "edgetpu",
[ML_NNFW_TYPE_ARMNN] = "armnn",
[ML_NNFW_TYPE_SNAP] = "snap",
+ [ML_NNFW_TYPE_SNPE] = "snpe",
NULL
};
NNSTREAMER_FILTER_CAFFE2_SRCS := \
$(NNSTREAMER_EXT_HOME)/tensor_filter/tensor_filter_caffe2.cc
+# filter snpe
+NNSTREAMER_FILTER_SNPE_SRCS := \
+ $(NNSTREAMER_EXT_HOME)/tensor_filter/tensor_filter_snpe.cc
+
# decoder boundingbox
NNSTREAMER_DECODER_BB_SRCS := \
$(NNSTREAMER_EXT_HOME)/tensor_decoder/tensordec-boundingbox.c \