Upload nnstreamer android files.
TODO: Set up the Android build in CI and add a build script.
Signed-off-by: Jaeyun <jy1210.jung@samsung.com>
--- /dev/null
+// Top-level build file where you can add configuration options common to all sub-projects/modules.
+
+buildscript {
+ repositories {
+ google()
+ // NOTE(review): JCenter was sunset in 2021 and is read-only; Maven Central is the
+ // recommended replacement. jcenter() is kept last as a read-only fallback.
+ mavenCentral()
+ jcenter()
+ }
+ dependencies {
+ classpath 'com.android.tools.build:gradle:3.3.1'
+ // add dependency (bintray)
+
+ // NOTE: Do not place your application dependencies here; they belong
+ // in the individual module build.gradle files
+ }
+}
+
+allprojects {
+ repositories {
+ google()
+ mavenCentral()
+ jcenter()
+ }
+}
+
+// Removes the root build directory on `gradle clean`.
+task clean(type: Delete) {
+ delete rootProject.buildDir
+}
--- /dev/null
+// Module build script for the NNStreamer Android library (AAR with native GStreamer/NNStreamer code).
+apply plugin: 'com.android.library'
+// add plugin (bintray)
+
+android {
+ compileSdkVersion 28
+ buildToolsVersion '28.0.3'
+ defaultConfig {
+ minSdkVersion 28
+ targetSdkVersion 28
+ versionCode 1
+ versionName "1.0"
+ testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
+ externalNativeBuild {
+ ndkBuild {
+ // Locate the unpacked GStreamer Android binaries: the gradle property
+ // 'gstAndroidRoot' takes precedence over the GSTREAMER_ROOT_ANDROID
+ // environment variable.
+ def gstRoot
+
+ if (project.hasProperty('gstAndroidRoot'))
+ gstRoot = project.gstAndroidRoot
+ else
+ gstRoot = System.env.GSTREAMER_ROOT_ANDROID
+
+ if (gstRoot == null)
+ throw new GradleException('GSTREAMER_ROOT_ANDROID must be set, or "gstAndroidRoot" must be defined in your gradle.properties in the top level directory of the unpacked universal GStreamer Android binaries')
+
+ // Locate the NNStreamer sources: the gradle property 'nnstreamerRoot'
+ // takes precedence over the NNSTREAMER_ROOT environment variable.
+ def nnsRoot
+
+ if (project.hasProperty('nnstreamerRoot'))
+ nnsRoot = project.nnstreamerRoot
+ else
+ nnsRoot = System.env.NNSTREAMER_ROOT
+
+ if (nnsRoot == null)
+ throw new GradleException('NNSTREAMER_ROOT must be set, or "nnstreamerRoot" must be defined in your gradle.properties')
+
+ // Paths handed to ndk-build (consumed by src/main/jni/Application.mk and Android.mk).
+ arguments "NDK_APPLICATION_MK=src/main/jni/Application.mk",
+ "GSTREAMER_JAVA_SRC_DIR=src/main/java",
+ "GSTREAMER_ROOT_ANDROID=$gstRoot",
+ "GSTREAMER_ASSETS_DIR=src/main/assets",
+ "NNSTREAMER_ROOT=$nnsRoot"
+
+ targets "nnstreamer-native"
+
+ abiFilters 'armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64'
+ }
+ }
+ }
+ buildTypes {
+ debug {
+ testCoverageEnabled true
+ }
+ release {
+ minifyEnabled false
+ proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
+ }
+ }
+ externalNativeBuild {
+ ndkBuild {
+ path 'src/main/jni/Android.mk'
+ }
+ }
+ productFlavors {
+ }
+ sourceSets {
+ main {
+ // Optionally bundle vendor runtime libraries when the corresponding
+ // gradle property points at a directory of prebuilt .so files.
+ if (project.hasProperty('SNPE_EXT_LIBRARY_PATH')) {
+ jniLibs.srcDirs += project.properties['SNPE_EXT_LIBRARY_PATH']
+ println 'Set jniLibs.srcDirs includes libraries for SNPE'
+ }
+
+ if (project.hasProperty('NNFW_EXT_LIBRARY_PATH')) {
+ jniLibs.srcDirs += project.properties['NNFW_EXT_LIBRARY_PATH']
+ println 'Set jniLibs.srcDirs includes libraries for NNFW'
+ }
+ }
+ }
+ packagingOptions {
+ // Do not strip the prebuilt *_skel.so libraries; this avoids build warning messages.
+ doNotStrip "*/arm64-v8a/*_skel.so"
+ }
+}
+
+dependencies {
+ implementation fileTree(include: ['*.jar'], dir: 'libs')
+ implementation 'com.android.support:appcompat-v7:28.0.0'
+ testImplementation 'junit:junit:4.12'
+ androidTestImplementation 'com.android.support.test:rules:1.0.2'
+ androidTestImplementation 'com.android.support.test:runner:1.0.2'
+ androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
+}
--- /dev/null
+package org.nnsuite.nnstreamer;
+
+import android.Manifest;
+import android.content.Context;
+import android.os.Environment;
+import android.support.test.InstrumentationRegistry;
+import android.support.test.rule.GrantPermissionRule;
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.file.Files;
+
+import static org.junit.Assert.*;
+
+/**
+ * Common definition to test NNStreamer API.
+ *
+ * Instrumented test, which will execute on an Android device.
+ *
+ * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
+ */
+@RunWith(AndroidJUnit4.class)
+public class APITestCommon {
+    /* Set once per process so the native library is initialized only one time. */
+    private static boolean mInitialized = false;
+
+    /**
+     * Initializes NNStreamer API library once per test process.
+     * Fails the running test if initialization throws.
+     */
+    public static void initNNStreamer() {
+        if (!mInitialized) {
+            try {
+                Context context = InstrumentationRegistry.getTargetContext();
+                mInitialized = NNStreamer.initialize(context);
+            } catch (Exception e) {
+                fail();
+            }
+        }
+    }
+
+    /**
+     * Gets the context for the test application.
+     */
+    public static Context getContext() {
+        return InstrumentationRegistry.getTargetContext();
+    }
+
+    /**
+     * Grants required runtime permissions (external-storage read access for model files).
+     */
+    public static GrantPermissionRule grantPermissions() {
+        return GrantPermissionRule.grant(Manifest.permission.READ_EXTERNAL_STORAGE);
+    }
+
+    /**
+     * Reads a raw binary file into a newly allocated TensorsData described by the given info.
+     * Fails the running test if the file is missing or its length differs from the tensor size.
+     *
+     * @param path Absolute path of the raw data file
+     * @param info Tensors information describing a single tensor
+     *
+     * @return TensorsData instance filled with the file content
+     */
+    private static TensorsData readRawData(String path, TensorsInfo info) {
+        File raw = new File(path);
+
+        if (!raw.exists()) {
+            fail();
+        }
+
+        int size = info.getTensorSize(0);
+        TensorsData data = TensorsData.allocate(info);
+
+        try {
+            byte[] content = Files.readAllBytes(raw.toPath());
+            if (content.length != size) {
+                fail();
+            }
+
+            ByteBuffer buffer = TensorsData.allocateByteBuffer(size);
+            buffer.put(content);
+
+            data.setTensorData(0, buffer);
+        } catch (Exception e) {
+            fail();
+        }
+
+        return data;
+    }
+
+    /**
+     * Gets the File object of tensorflow-lite model.
+     * Note that, to invoke model in the storage, the permission READ_EXTERNAL_STORAGE is required.
+     */
+    public static File getTFLiteImgModel() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+        File model = new File(root + "/nnstreamer/test/imgclf/mobilenet_v1_1.0_224_quant.tflite");
+        File meta = new File(root + "/nnstreamer/test/imgclf/metadata/MANIFEST");
+
+        if (!model.exists() || !meta.exists()) {
+            fail();
+        }
+
+        return model;
+    }
+
+    /**
+     * Reads raw image file (orange) and returns TensorsData instance (uint8, dims 3:224:224:1).
+     */
+    public static TensorsData readRawImageData() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        TensorsInfo info = new TensorsInfo();
+        info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{3,224,224,1});
+
+        return readRawData(root + "/nnstreamer/test/orange.raw", info);
+    }
+
+    /**
+     * Gets the label index with max score, for tensorflow-lite image classification.
+     *
+     * @param buffer Direct byte buffer expected to hold 1001 uint8 scores
+     *
+     * @return Index of the maximum score, or -1 if the buffer is invalid or all scores are zero
+     */
+    public static int getMaxScore(ByteBuffer buffer) {
+        int index = -1;
+        int maxScore = 0;
+
+        if (isValidBuffer(buffer, 1001)) {
+            for (int i = 0; i < 1001; i++) {
+                /* convert unsigned byte */
+                int score = (buffer.get(i) & 0xFF);
+
+                if (score > maxScore) {
+                    maxScore = score;
+                    index = i;
+                }
+            }
+        }
+
+        return index;
+    }
+
+    /**
+     * Reads raw float image file (orange) and returns TensorsData instance (float32, dims 224:224:3:1).
+     */
+    public static TensorsData readRawImageDataPytorch() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        TensorsInfo info = new TensorsInfo();
+        info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{224, 224, 3, 1});
+
+        return readRawData(root + "/nnstreamer/pytorch_data/orange_float.raw", info);
+    }
+
+    /**
+     * Gets the label index with max score for float buffer with given length.
+     *
+     * @param buffer Direct byte buffer holding float scores (4 bytes each)
+     * @param length Number of float values in the buffer
+     *
+     * @return Index of the maximum score, or -1 if the buffer is invalid
+     */
+    public static int getMaxScoreFloatBuffer(ByteBuffer buffer, int length) {
+        int index = -1;
+        float maxScore = -Float.MAX_VALUE;
+
+        if (isValidBuffer(buffer, 4 * length)) {
+            for (int i = 0; i < length; i++) {
+                /* convert to float */
+                float score = buffer.getFloat(i * 4);
+
+                if (score > maxScore) {
+                    maxScore = score;
+                    index = i;
+                }
+            }
+        }
+
+        return index;
+    }
+
+    /**
+     * Reads raw float image file (plastic_cup) and returns TensorsData instance (float32, dims 3:299:299:1).
+     */
+    public static TensorsData readRawImageDataSNPE() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        TensorsInfo info = new TensorsInfo();
+        info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3, 299, 299, 1});
+
+        return readRawData(root + "/nnstreamer/snpe_data/plastic_cup.raw", info);
+    }
+
+    /**
+     * Gets the path string of tensorflow-lite add.tflite model.
+     */
+    public static String getTFLiteAddModelPath() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+        return root + "/nnstreamer/test/add";
+    }
+
+    /**
+     * Gets the File object of tensorflow-lite add.tflite model.
+     * Note that, to invoke model in the storage, the permission READ_EXTERNAL_STORAGE is required.
+     */
+    public static File getTFLiteAddModel() {
+        String path = getTFLiteAddModelPath();
+        File model = new File(path + "/add.tflite");
+        File meta = new File(path + "/metadata/MANIFEST");
+
+        if (!model.exists() || !meta.exists()) {
+            fail();
+        }
+
+        return model;
+    }
+
+    /**
+     * Computing-unit options passed to the SNAP framework as part of the custom option string.
+     */
+    public enum SNAPComputingUnit {
+        CPU("ComputingUnit:CPU"),
+        GPU("ComputingUnit:GPU,GpuCacheSource:/sdcard/nnstreamer/"),
+        DSP("ComputingUnit:DSP"),
+        NPU("ComputingUnit:NPU");
+
+        private final String computingUnit;
+
+        SNAPComputingUnit(String computingUnit) {
+            this.computingUnit = computingUnit;
+        }
+
+        public String getOptionString() {
+            return computingUnit;
+        }
+    }
+
+    /**
+     * Gets the File objects of Caffe model for SNAP.
+     * Note that, to invoke model in the storage, the permission READ_EXTERNAL_STORAGE is required.
+     */
+    public static File[] getSNAPCaffeModel() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        File model = new File(root + "/nnstreamer/snap_data/prototxt/squeezenet.prototxt");
+        File weight = new File(root + "/nnstreamer/snap_data/model/squeezenet.caffemodel");
+
+        if (!model.exists() || !weight.exists()) {
+            fail();
+        }
+
+        return new File[]{model, weight};
+    }
+
+    /**
+     * Gets the option string to run Caffe model for SNAP.
+     *
+     * CPU: "custom=ModelFWType:CAFFE,ExecutionDataType:FLOAT32,ComputingUnit:CPU"
+     * GPU: "custom=ModelFWType:CAFFE,ExecutionDataType:FLOAT32,ComputingUnit:GPU,GpuCacheSource:/sdcard/nnstreamer/"
+     */
+    public static String getSNAPCaffeOption(SNAPComputingUnit CUnit) {
+        String option = "ModelFWType:CAFFE,ExecutionDataType:FLOAT32,InputFormat:NHWC,OutputFormat:NCHW,";
+        option = option + CUnit.getOptionString();
+
+        return option;
+    }
+
+    /**
+     * Gets the option string to run Tensorflow model for SNAP.
+     *
+     * CPU: "custom=ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,ComputingUnit:CPU"
+     * GPU: Not supported for Tensorflow model
+     * DSP: "custom=ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,ComputingUnit:DSP"
+     * NPU: "custom=ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,ComputingUnit:NPU"
+     */
+    public static String getSNAPTensorflowOption(SNAPComputingUnit CUnit) {
+        String option = "ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,InputFormat:NHWC,OutputFormat:NHWC,";
+        option = option + CUnit.getOptionString();
+        return option;
+    }
+
+    /**
+     * Gets the File objects of Tensorflow model for SNAP.
+     * Note that, to invoke model in the storage, the permission READ_EXTERNAL_STORAGE is required.
+     */
+    public static File[] getSNAPTensorflowModel(SNAPComputingUnit CUnit) {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+        String model_path = "/nnstreamer/snap_data/model/";
+
+        /* each computing unit requires a differently converted model file */
+        switch (CUnit) {
+            case CPU:
+                model_path = model_path + "yolo_new.pb";
+                break;
+            case DSP:
+                model_path = model_path + "yolo_new_tf_quantized.dlc";
+                break;
+            case NPU:
+                model_path = model_path + "yolo_new_tf_quantized_hta.dlc";
+                break;
+            case GPU:
+            default:
+                /* GPU is not supported for Tensorflow model */
+                fail();
+        }
+
+        File model = new File(root + model_path);
+        if (!model.exists()) {
+            fail();
+        }
+
+        return new File[]{model};
+    }
+
+    /**
+     * Gets the File objects of SNPE model.
+     */
+    public static File getSNPEModel() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        File model = new File(root + "/nnstreamer/snpe_data/inception_v3_quantized.dlc");
+
+        if (!model.exists()) {
+            fail();
+        }
+
+        return model;
+    }
+
+    /**
+     * Get the File object of SNPE model for testing multiple output.
+     * The model is converted to dlc format with SNPE SDK and it's from
+     * https://github.com/nnsuite/testcases/tree/master/DeepLearningModels/tensorflow/ssdlite_mobilenet_v2
+     */
+    public static File getMultiOutputSNPEModel() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        File model = new File(root + "/nnstreamer/snpe_data/ssdlite_mobilenet_v2.dlc");
+
+        if (!model.exists()) {
+            fail();
+        }
+
+        return model;
+    }
+
+    /**
+     * Gets the File objects of Pytorch model.
+     */
+    public static File getPytorchModel() {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+
+        File model = new File(root + "/nnstreamer/pytorch_data/mobilenetv2-quant_core-nnapi.pt");
+
+        if (!model.exists()) {
+            fail();
+        }
+
+        return model;
+    }
+
+    /**
+     * Verifies the byte buffer is direct buffer with native order.
+     *
+     * @param buffer The byte buffer
+     * @param expected The expected capacity
+     *
+     * @return True if the byte buffer is valid.
+     */
+    public static boolean isValidBuffer(ByteBuffer buffer, int expected) {
+        if (buffer != null && buffer.isDirect() && buffer.order() == ByteOrder.nativeOrder()) {
+            int capacity = buffer.capacity();
+
+            return (capacity == expected);
+        }
+
+        return false;
+    }
+
+    @Before
+    public void setUp() {
+        initNNStreamer();
+    }
+
+    @Test
+    public void useAppContext() {
+        Context context = InstrumentationRegistry.getTargetContext();
+
+        assertEquals("org.nnsuite.nnstreamer.test", context.getPackageName());
+    }
+
+    @Test
+    public void testInitWithInvalidCtx_n() {
+        try {
+            NNStreamer.initialize(null);
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void enumTensorType() {
+        assertEquals(NNStreamer.TensorType.INT32, NNStreamer.TensorType.valueOf("INT32"));
+        assertEquals(NNStreamer.TensorType.UINT32, NNStreamer.TensorType.valueOf("UINT32"));
+        assertEquals(NNStreamer.TensorType.INT16, NNStreamer.TensorType.valueOf("INT16"));
+        assertEquals(NNStreamer.TensorType.UINT16, NNStreamer.TensorType.valueOf("UINT16"));
+        assertEquals(NNStreamer.TensorType.INT8, NNStreamer.TensorType.valueOf("INT8"));
+        assertEquals(NNStreamer.TensorType.UINT8, NNStreamer.TensorType.valueOf("UINT8"));
+        assertEquals(NNStreamer.TensorType.FLOAT64, NNStreamer.TensorType.valueOf("FLOAT64"));
+        assertEquals(NNStreamer.TensorType.FLOAT32, NNStreamer.TensorType.valueOf("FLOAT32"));
+        assertEquals(NNStreamer.TensorType.INT64, NNStreamer.TensorType.valueOf("INT64"));
+        assertEquals(NNStreamer.TensorType.UINT64, NNStreamer.TensorType.valueOf("UINT64"));
+        assertEquals(NNStreamer.TensorType.UNKNOWN, NNStreamer.TensorType.valueOf("UNKNOWN"));
+    }
+
+    @Test
+    public void enumNNFWType() {
+        assertEquals(NNStreamer.NNFWType.TENSORFLOW_LITE, NNStreamer.NNFWType.valueOf("TENSORFLOW_LITE"));
+        assertEquals(NNStreamer.NNFWType.SNAP, NNStreamer.NNFWType.valueOf("SNAP"));
+        assertEquals(NNStreamer.NNFWType.NNFW, NNStreamer.NNFWType.valueOf("NNFW"));
+        assertEquals(NNStreamer.NNFWType.SNPE, NNStreamer.NNFWType.valueOf("SNPE"));
+        assertEquals(NNStreamer.NNFWType.PYTORCH, NNStreamer.NNFWType.valueOf("PYTORCH"));
+        assertEquals(NNStreamer.NNFWType.UNKNOWN, NNStreamer.NNFWType.valueOf("UNKNOWN"));
+    }
+}
--- /dev/null
+package org.nnsuite.nnstreamer;
+
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.*;
+
+/**
+ * Testcases for CustomFilter.
+ */
+@RunWith(AndroidJUnit4.class)
+public class APITestCustomFilter {
+    private int mReceived = 0;
+    private boolean mInvalidState = false;
+    private boolean mRegistered = false;
+    private CustomFilter mCustomPassthrough;
+    private CustomFilter mCustomConvert;
+    private CustomFilter mCustomAdd;
+
+    /* Sink callback: after passthrough -> int-to-float -> add(+1.5), each value must equal index + 1.5. */
+    private Pipeline.NewDataCallback mSinkCb = new Pipeline.NewDataCallback() {
+        @Override
+        public void onNewDataReceived(TensorsData data) {
+            if (data == null || data.getTensorsCount() != 1) {
+                mInvalidState = true;
+                return;
+            }
+
+            TensorsInfo info = data.getTensorsInfo();
+
+            if (info == null || info.getTensorsCount() != 1) {
+                mInvalidState = true;
+                return;
+            }
+
+            ByteBuffer output = data.getTensorData(0);
+
+            for (int i = 0; i < 10; i++) {
+                float expected = i + 1.5f;
+
+                if (expected != output.getFloat(i * 4)) {
+                    mInvalidState = true;
+                }
+            }
+
+            mReceived++;
+        }
+    };
+
+    /**
+     * Registers the custom-filters shared by the testcases:
+     * passthrough, int32-to-float32 conversion, and add-constant (+1.5).
+     * Fails the running test if registration throws.
+     */
+    private void registerCustomFilters() {
+        try {
+            TensorsInfo inputInfo = new TensorsInfo();
+            inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+            TensorsInfo outputInfo = inputInfo.clone();
+
+            /* register custom-filter (passthrough) */
+            mCustomPassthrough = CustomFilter.create("custom-passthrough",
+                    inputInfo, outputInfo, new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    return in;
+                }
+            });
+
+            /* register custom-filter (convert data type to float) */
+            outputInfo.setTensorType(0, NNStreamer.TensorType.FLOAT32);
+            mCustomConvert = CustomFilter.create("custom-convert",
+                    inputInfo, outputInfo, new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    TensorsInfo info = in.getTensorsInfo();
+                    ByteBuffer input = in.getTensorData(0);
+
+                    info.setTensorType(0, NNStreamer.TensorType.FLOAT32);
+
+                    TensorsData out = info.allocate();
+                    ByteBuffer output = out.getTensorData(0);
+
+                    for (int i = 0; i < 10; i++) {
+                        float value = (float) input.getInt(i * 4);
+                        output.putFloat(i * 4, value);
+                    }
+
+                    out.setTensorData(0, output);
+                    return out;
+                }
+            });
+
+            /* register custom-filter (add constant) */
+            inputInfo.setTensorType(0, NNStreamer.TensorType.FLOAT32);
+            mCustomAdd = CustomFilter.create("custom-add",
+                    inputInfo, outputInfo, new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    TensorsInfo info = in.getTensorsInfo();
+                    ByteBuffer input = in.getTensorData(0);
+
+                    TensorsData out = info.allocate();
+                    ByteBuffer output = out.getTensorData(0);
+
+                    for (int i = 0; i < 10; i++) {
+                        float value = input.getFloat(i * 4);
+
+                        /* add constant */
+                        value += 1.5f;
+                        output.putFloat(i * 4, value);
+                    }
+
+                    out.setTensorData(0, output);
+                    return out;
+                }
+            });
+
+            mRegistered = true;
+        } catch (Exception e) {
+            /* failed to register custom-filters */
+            fail();
+        }
+    }
+
+    @Before
+    public void setUp() {
+        APITestCommon.initNNStreamer();
+
+        mReceived = 0;
+        mInvalidState = false;
+
+        if (!mRegistered) {
+            registerCustomFilters();
+        }
+    }
+
+    @After
+    public void tearDown() {
+        /* close the filters so their names are unregistered for the next test */
+        if (mRegistered) {
+            mCustomPassthrough.close();
+            mCustomConvert.close();
+            mCustomAdd.close();
+        }
+    }
+
+    @Test
+    public void testGetName() {
+        try {
+            assertEquals("custom-passthrough", mCustomPassthrough.getName());
+            assertEquals("custom-convert", mCustomConvert.getName());
+            assertEquals("custom-add", mCustomAdd.getName());
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testCustomFilters() {
+        String desc = "appsrc name=srcx ! " +
+                "other/tensor,dimension=(string)10:1:1:1,type=(string)int32,framerate=(fraction)0/1 ! " +
+                "tensor_filter framework=custom-easy model=" + mCustomPassthrough.getName() + " ! " +
+                "tensor_filter framework=custom-easy model=" + mCustomConvert.getName() + " ! " +
+                "tensor_filter framework=custom-easy model=" + mCustomAdd.getName() + " ! " +
+                "tensor_sink name=sinkx";
+
+        try (Pipeline pipe = new Pipeline(desc)) {
+            TensorsInfo info = new TensorsInfo();
+            info.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+            /* register sink callback */
+            pipe.registerSinkCallback("sinkx", mSinkCb);
+
+            /* start pipeline */
+            pipe.start();
+
+            /* push input buffer repeatedly */
+            for (int i = 0; i < 2048; i++) {
+                TensorsData in = TensorsData.allocate(info);
+                ByteBuffer input = in.getTensorData(0);
+
+                for (int j = 0; j < 10; j++) {
+                    input.putInt(j * 4, j);
+                }
+
+                in.setTensorData(0, input);
+
+                pipe.inputData("srcx", in);
+                Thread.sleep(20);
+            }
+
+            /* sleep 300 to pass all input buffers to sink */
+            Thread.sleep(300);
+
+            /* stop pipeline */
+            pipe.stop();
+
+            /* check received data from sink */
+            assertFalse(mInvalidState);
+            assertEquals(2048, mReceived);
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testDropBuffer() {
+        TensorsInfo inputInfo = new TensorsInfo();
+        inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10,1,1,1});
+
+        TensorsInfo outputInfo = inputInfo.clone();
+
+        CustomFilter customDrop = CustomFilter.create("custom-drop",
+                inputInfo, outputInfo, new CustomFilter.Callback() {
+            int received = 0;
+
+            @Override
+            public TensorsData invoke(TensorsData in) {
+                received++;
+
+                if (received <= 5) {
+                    return in;
+                }
+
+                /* return null to drop the incoming buffer */
+                return null;
+            }
+        });
+
+        String desc = "appsrc name=srcx ! " +
+                "other/tensor,dimension=(string)10:1:1:1,type=(string)int32,framerate=(fraction)0/1 ! " +
+                "tensor_filter framework=custom-easy model=" + customDrop.getName() + " ! " +
+                "tensor_filter framework=custom-easy model=" + mCustomPassthrough.getName() + " ! " +
+                "tensor_filter framework=custom-easy model=" + mCustomConvert.getName() + " ! " +
+                "tensor_filter framework=custom-easy model=" + mCustomAdd.getName() + " ! " +
+                "tensor_sink name=sinkx";
+
+        try (Pipeline pipe = new Pipeline(desc)) {
+            TensorsInfo info = new TensorsInfo();
+            info.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10,1,1,1});
+
+            /* register sink callback */
+            pipe.registerSinkCallback("sinkx", mSinkCb);
+
+            /* start pipeline */
+            pipe.start();
+
+            /* push input buffer repeatedly */
+            for (int i = 0; i < 24; i++) {
+                TensorsData in = TensorsData.allocate(info);
+                ByteBuffer input = in.getTensorData(0);
+
+                for (int j = 0; j < 10; j++) {
+                    input.putInt(j * 4, j);
+                }
+
+                in.setTensorData(0, input);
+
+                pipe.inputData("srcx", in);
+                Thread.sleep(20);
+            }
+
+            /* sleep 300 to pass input buffers to sink */
+            Thread.sleep(300);
+
+            /* stop pipeline */
+            pipe.stop();
+
+            /* check received data from sink */
+            assertFalse(mInvalidState);
+            assertEquals(5, mReceived);
+        } catch (Exception e) {
+            fail();
+        } finally {
+            /* close the locally registered filter so the name "custom-drop"
+             * is unregistered and can be reused by subsequent test runs */
+            customDrop.close();
+        }
+    }
+
+    @Test
+    public void testRegisterNullName_n() {
+        TensorsInfo inputInfo = new TensorsInfo();
+        inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+        TensorsInfo outputInfo = inputInfo.clone();
+
+        try {
+            CustomFilter.create(null, inputInfo, outputInfo,
+                    new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    return in;
+                }
+            });
+
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void testRegisterNullInputInfo_n() {
+        TensorsInfo outputInfo = new TensorsInfo();
+        outputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+        try {
+            CustomFilter.create("custom-invalid-info", null, outputInfo,
+                    new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    return in;
+                }
+            });
+
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void testRegisterNullOutputInfo_n() {
+        TensorsInfo inputInfo = new TensorsInfo();
+        inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+        try {
+            CustomFilter.create("custom-invalid-info", inputInfo, null,
+                    new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    return in;
+                }
+            });
+
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void testRegisterNullCallback_n() {
+        TensorsInfo inputInfo = new TensorsInfo();
+        inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+        TensorsInfo outputInfo = inputInfo.clone();
+
+        try {
+            CustomFilter.create("custom-invalid-cb", inputInfo, outputInfo, null);
+
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void testRegisterDuplicatedName_n() {
+        TensorsInfo inputInfo = new TensorsInfo();
+        inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+        TensorsInfo outputInfo = inputInfo.clone();
+
+        try {
+            CustomFilter.create(mCustomPassthrough.getName(), inputInfo, outputInfo,
+                    new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    return in;
+                }
+            });
+
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void testRegisterPreservedName_n() {
+        TensorsInfo inputInfo = new TensorsInfo();
+        inputInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{10});
+
+        TensorsInfo outputInfo = inputInfo.clone();
+
+        try {
+            CustomFilter.create("auto", inputInfo, outputInfo,
+                    new CustomFilter.Callback() {
+                @Override
+                public TensorsData invoke(TensorsData in) {
+                    return in;
+                }
+            });
+
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+}
--- /dev/null
+package org.nnsuite.nnstreamer;
+
+import android.os.Environment;
+import android.support.test.rule.GrantPermissionRule;
+import android.support.test.runner.AndroidJUnit4;
+import android.view.SurfaceView;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import static org.junit.Assert.*;
+
+/**
+ * Testcases for Pipeline.
+ */
+@RunWith(AndroidJUnit4.class)
+public class APITestPipeline {
+ private int mReceived = 0;
+ private boolean mInvalidState = false;
+ private Pipeline.State mPipelineState = Pipeline.State.NULL;
+
+ private Pipeline.NewDataCallback mSinkCb = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null ||
+ data.getTensorsCount() != 1 ||
+ data.getTensorData(0).capacity() != 200) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ /* validate received data (unit8 2:10:10:1) */
+ if (info == null ||
+ info.getTensorsCount() != 1 ||
+ info.getTensorName(0) != null ||
+ info.getTensorType(0) != NNStreamer.TensorType.UINT8 ||
+ !Arrays.equals(info.getTensorDimension(0), new int[]{2,10,10,1})) {
+ /* received data is invalid */
+ mInvalidState = true;
+ }
+
+ mReceived++;
+ }
+ };
+
+ @Rule
+ public GrantPermissionRule mPermissionRule = APITestCommon.grantPermissions();
+
+ @Before
+ public void setUp() {
+ APITestCommon.initNNStreamer();
+
+ mReceived = 0;
+ mInvalidState = false;
+ mPipelineState = Pipeline.State.NULL;
+ }
+
+ @Test
+ public void enumPipelineState() {
+ assertEquals(Pipeline.State.UNKNOWN, Pipeline.State.valueOf("UNKNOWN"));
+ assertEquals(Pipeline.State.NULL, Pipeline.State.valueOf("NULL"));
+ assertEquals(Pipeline.State.READY, Pipeline.State.valueOf("READY"));
+ assertEquals(Pipeline.State.PAUSED, Pipeline.State.valueOf("PAUSED"));
+ assertEquals(Pipeline.State.PLAYING, Pipeline.State.valueOf("PLAYING"));
+ }
+
+ @Test
+ public void testAvailableElement() {
+ try {
+ assertTrue(Pipeline.isElementAvailable("tensor_converter"));
+ assertTrue(Pipeline.isElementAvailable("tensor_filter"));
+ assertTrue(Pipeline.isElementAvailable("tensor_transform"));
+ assertTrue(Pipeline.isElementAvailable("tensor_sink"));
+ assertTrue(Pipeline.isElementAvailable("join"));
+ assertTrue(Pipeline.isElementAvailable("amcsrc"));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testAvailableElementNullName_n() {
+ try {
+ Pipeline.isElementAvailable(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testAvailableElementEmptyName_n() {
+ try {
+ Pipeline.isElementAvailable("");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testAvailableElementInvalidName_n() {
+ try {
+ assertFalse(Pipeline.isElementAvailable("invalid-element"));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testConstructInvalidElement_n() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "invalidelement ! tensor_converter ! tensor_sink";
+
+ try {
+ new Pipeline(desc);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testConstructNullDescription_n() {
+ try {
+ new Pipeline(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testConstructEmptyDescription_n() {
+ try {
+ new Pipeline("");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testConstructNullStateCb() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "tensor_converter ! tensor_sink";
+
+ try (Pipeline pipe = new Pipeline(desc, null)) {
+ Thread.sleep(100);
+ assertEquals(Pipeline.State.PAUSED, pipe.getState());
+ Thread.sleep(100);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testConstructWithStateCb() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "tensor_converter ! tensor_sink";
+
+ /* pipeline state callback */
+ Pipeline.StateChangeCallback stateCb = new Pipeline.StateChangeCallback() {
+ @Override
+ public void onStateChanged(Pipeline.State state) {
+ mPipelineState = state;
+ }
+ };
+
+ try (Pipeline pipe = new Pipeline(desc, stateCb)) {
+ Thread.sleep(100);
+ assertEquals(Pipeline.State.PAUSED, mPipelineState);
+
+ /* start pipeline */
+ pipe.start();
+ Thread.sleep(300);
+
+ assertEquals(Pipeline.State.PLAYING, mPipelineState);
+
+ /* stop pipeline */
+ pipe.stop();
+ Thread.sleep(300);
+
+ assertEquals(Pipeline.State.PAUSED, mPipelineState);
+ Thread.sleep(100);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testGetState() {
+ String desc = "videotestsrc ! videoconvert ! video/x-raw,format=RGB ! " +
+ "tensor_converter ! tensor_sink";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+ Thread.sleep(300);
+
+ assertEquals(Pipeline.State.PLAYING, pipe.getState());
+
+ /* stop pipeline */
+ pipe.stop();
+ Thread.sleep(300);
+
+ assertEquals(Pipeline.State.PAUSED, pipe.getState());
+ Thread.sleep(100);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /*
+ * Negative cases for sink data-callback (un)registration.
+ * The "_n" suffix marks tests that expect the Pipeline API to reject
+ * the invalid argument (null/empty/unknown name, null callback,
+ * or a callback that was never registered) with an exception.
+ */
+ @Test
+ public void testRegisterNullDataCb_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback("sinkx", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testRegisterDataCbInvalidName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback("invalid_sink", mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testRegisterDataCbNullName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback(null, mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testRegisterDataCbEmptyName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.registerSinkCallback("", mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testUnregisterNullDataCb_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.unregisterSinkCallback("sinkx", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testUnregisterDataCbNullName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.unregisterSinkCallback(null, mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testUnregisterDataCbEmptyName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.unregisterSinkCallback("", mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /* Unregistering a callback that was never registered must fail. */
+ @Test
+ public void testUnregisteredDataCb_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /* Unregistering a different callback than the registered one must fail. */
+ @Test
+ public void testUnregisterInvalidCb_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* register callback */
+ Pipeline.NewDataCallback cb1 = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ mReceived++;
+ }
+ };
+
+ pipe.registerSinkCallback("sinkx", cb1);
+
+ /* unregistered callback */
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Positive case: a sink callback stops receiving data after it is
+ * unregistered. Ten dummy buffers are pushed with the callback
+ * attached, then the callback is removed and ten more buffers are
+ * pushed; only the first batch must be delivered (mReceived == 10).
+ */
+ @Test
+ public void testRemoveDataCb() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* pause pipeline and unregister sink callback */
+ Thread.sleep(100);
+ pipe.stop();
+
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
+ Thread.sleep(100);
+
+ /* start pipeline again */
+ pipe.start();
+
+ /* push input buffer again; no callback is attached anymore */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(10, mReceived);
+ } catch (Exception e) {
+ /* report the cause instead of swallowing it (bare fail() hides the reason) */
+ fail("unexpected exception: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Registering the same callback instance twice must count as a single
+ * registration: 10 pushed buffers yield 10 callbacks (not 20), and a
+ * single unregister fully removes it (second batch adds nothing).
+ */
+ @Test
+ public void testDuplicatedDataCb() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+
+ /* try to register same cb */
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* pause pipeline and unregister sink callback */
+ Thread.sleep(100);
+ pipe.stop();
+
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
+ Thread.sleep(100);
+
+ /* start pipeline again */
+ pipe.start();
+
+ /* push input buffer again */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(10, mReceived);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Multiple distinct callbacks on one sink all receive each buffer:
+ * 3 callbacks x 10 buffers = 30, then after removing two of them the
+ * remaining callback receives the second batch of 10 (total 40).
+ */
+ @Test
+ public void testMultipleDataCb() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* register three callbacks */
+ Pipeline.NewDataCallback cb1 = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ mReceived++;
+ }
+ };
+
+ Pipeline.NewDataCallback cb2 = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ mReceived++;
+ }
+ };
+
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+ pipe.registerSinkCallback("sinkx", cb1);
+ pipe.registerSinkCallback("sinkx", cb2);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* pause pipeline and unregister sink callback */
+ Thread.sleep(100);
+ pipe.stop();
+
+ pipe.unregisterSinkCallback("sinkx", mSinkCb);
+ pipe.unregisterSinkCallback("sinkx", cb1);
+ Thread.sleep(100);
+
+ /* start pipeline again */
+ pipe.start();
+
+ /* push input buffer again; only cb2 is still registered */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+ }
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(40, mReceived);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Pushes uint8 frames through tensor_transform (typecast to float32,
+ * add 0.5) and validates each output value. Frame i carries bytes
+ * (j * 2 + i) for j in [0,5), so the expected output element j is
+ * j * 2 + i + 0.5f; mReceived serves as the frame index i in the
+ * callback since frames arrive in push order.
+ */
+ @Test
+ public void testPushToTensorTransform() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)5:1:1:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_transform mode=arithmetic option=typecast:float32,add:0.5 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{5,1,1,1});
+
+ /* register callback */
+ Pipeline.NewDataCallback cb1 = new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data != null) {
+ TensorsInfo info = data.getTensorsInfo();
+ ByteBuffer buffer = data.getTensorData(0);
+
+ /* validate received data (float32 5:1:1:1) */
+ if (info == null ||
+ info.getTensorsCount() != 1 ||
+ info.getTensorType(0) != NNStreamer.TensorType.FLOAT32 ||
+ !Arrays.equals(info.getTensorDimension(0), new int[]{5,1,1,1})) {
+ /* received data is invalid */
+ mInvalidState = true;
+ }
+
+ for (int i = 0; i < 5; i++) {
+ /* element i of frame mReceived: (i * 2 + frame) typecast + 0.5 */
+ float expected = i * 2 + mReceived + 0.5f;
+
+ if (expected != buffer.getFloat(i * 4)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ }
+ };
+
+ pipe.registerSinkCallback("sinkx", cb1);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ TensorsData in = info.allocate();
+ ByteBuffer buffer = in.getTensorData(0);
+
+ for (int j = 0; j < 5; j++) {
+ buffer.put(j, (byte) (j * 2 + i));
+ }
+
+ in.setTensorData(0, buffer);
+
+ pipe.inputData("srcx", in);
+ Thread.sleep(50);
+ }
+
+ /* pause pipeline and unregister sink callback */
+ Thread.sleep(200);
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(10, mReceived);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Runs the TensorFlow-Lite image model inside tensor_filter with dummy
+ * input and checks that inference results arrive at the sink. The
+ * output buffer is expected to hold 1001 bytes (uint8 scores, one per
+ * class). Skipped when the TF-Lite framework is unavailable.
+ */
+ @Test
+ public void testRunModel() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getTFLiteImgModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:224:224:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=tensorflow-lite model=" + model.getAbsolutePath() + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{3,224,224,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ } else {
+ ByteBuffer output = data.getTensorData(0);
+
+ /* model output: 1001 class scores */
+ if (!APITestCommon.isValidBuffer(output, 1001)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 15; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(100);
+ }
+
+ /* sleep 500 to invoke */
+ Thread.sleep(500);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Feeds a real raw image into the TF-Lite image model and checks that
+ * the top-scoring label index is 951 (orange). Skipped when the
+ * TF-Lite framework is unavailable.
+ */
+ @Test
+ public void testClassificationResult() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getTFLiteImgModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:224:224:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=tensorflow-lite model=" + model.getAbsolutePath() + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{3,224,224,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ ByteBuffer buffer = data.getTensorData(0);
+ int labelIndex = APITestCommon.getMaxScore(buffer);
+
+ /* check label index (orange) */
+ if (labelIndex != 951) {
+ mInvalidState = true;
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ TensorsData in = APITestCommon.readRawImageData();
+ pipe.inputData("srcx", in);
+
+ /* sleep 1000 to invoke */
+ Thread.sleep(1000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Stress test: repeatedly pushes dummy buffers into appsrc to verify
+ * the pipeline keeps consuming input without stalling or corrupting
+ * data.
+ *
+ * NOTE(review): 2048 iterations x 20 ms sleep means this test runs
+ * for roughly 41 seconds — confirm this duration is intended.
+ */
+ @Test
+ public void testInputBuffer() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer repeatedly */
+ for (int i = 0; i < 2048; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(20);
+ }
+
+ /* sleep 300 to pass input buffers to sink */
+ Thread.sleep(300);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Pushes raw RGB video frames (320x240) into appsrc and verifies that
+ * tensor_converter produces uint8 tensors with dimension 3:320:240:1
+ * (channels:width:height:batch).
+ */
+ @Test
+ public void testInputVideo() {
+ String desc = "appsrc name=srcx ! " +
+ "video/x-raw,format=RGB,width=320,height=240,framerate=(fraction)0/1 ! " +
+ "tensor_converter ! tensor_sink name=sinkx";
+
+ /* For media format, set meta with exact buffer size. */
+ TensorsInfo info = new TensorsInfo();
+ /* input data : RGB 320x240 */
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{3 * 320 * 240});
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ /* check received data */
+ TensorsInfo info = data.getTensorsInfo();
+ NNStreamer.TensorType type = info.getTensorType(0);
+ int[] dimension = info.getTensorDimension(0);
+
+ if (type != NNStreamer.TensorType.UINT8) {
+ mInvalidState = true;
+ }
+
+ /* expected dimension: 3 channels, 320x240, batch 1 */
+ if (dimension[0] != 3 || dimension[1] != 320 ||
+ dimension[2] != 240 || dimension[3] != 1) {
+ mInvalidState = true;
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(30);
+ }
+
+ /* sleep 200 to invoke */
+ Thread.sleep(200);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Pushes raw mono S16LE audio (16 kHz) into appsrc and verifies that
+ * tensor_converter packs 500 frames per tensor as int16 with
+ * dimension 1:500:1:1 (channels:frames).
+ */
+ @Test
+ public void testInputAudio() {
+ String desc = "appsrc name=srcx ! " +
+ "audio/x-raw,format=S16LE,rate=16000,channels=1 ! " +
+ "tensor_converter frames-per-tensor=500 ! tensor_sink name=sinkx";
+
+ /* For media format, set meta with exact buffer size. */
+ TensorsInfo info = new TensorsInfo();
+ /* input data : 16k sample rate, mono, signed 16bit little-endian, 500 samples */
+ info.addTensorInfo(NNStreamer.TensorType.INT16, new int[]{500});
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ /* check received data */
+ TensorsInfo info = data.getTensorsInfo();
+ NNStreamer.TensorType type = info.getTensorType(0);
+ int[] dimension = info.getTensorDimension(0);
+
+ if (type != NNStreamer.TensorType.INT16) {
+ mInvalidState = true;
+ }
+
+ /* expected dimension: 1 channel, 500 samples */
+ if (dimension[0] != 1 || dimension[1] != 500 ||
+ dimension[2] != 1 || dimension[3] != 1) {
+ mInvalidState = true;
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(30);
+ }
+
+ /* sleep 200 to invoke */
+ Thread.sleep(200);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /*
+ * Negative cases for Pipeline.inputData(): invalid/null/empty source
+ * name, null data, and data whose size does not match the negotiated
+ * caps must all be rejected with an exception.
+ */
+ @Test
+ public void testInputInvalidName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* start pipeline */
+ pipe.start();
+
+ pipe.inputData("invalid_src", TensorsData.allocate(info));
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testInputNullName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* start pipeline */
+ pipe.start();
+
+ pipe.inputData(null, TensorsData.allocate(info));
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testInputEmptyName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* start pipeline */
+ pipe.start();
+
+ pipe.inputData("", TensorsData.allocate(info));
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testInputNullData_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ pipe.inputData("srcx", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testInputInvalidData_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* 4:10:10:2 does not match the negotiated 2:10:10:1 caps */
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{4,10,10,2});
+
+ TensorsData in = TensorsData.allocate(info);
+
+ /* push data with invalid size */
+ pipe.inputData("srcx", in);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Verifies output-selector pad switching: sinkx listens on src_0, and
+ * after the 10th buffer the selector is switched to src_1, so sinkx
+ * must receive exactly the first 10 of the 15 pushed buffers.
+ */
+ @Test
+ public void testSelectSwitch() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 15; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(50);
+
+ if (i == 9) {
+ /* select pad */
+ pipe.selectSwitchPad("outs", "src_1");
+ }
+ }
+
+ /* sleep 300 to pass all input buffers to sink */
+ Thread.sleep(300);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(10, mReceived);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Verifies that getSwitchPads() enumerates the two source pads of the
+ * output-selector in order (src_0, src_1).
+ */
+ @Test
+ public void testGetSwitchPad() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* get pad list of output-selector */
+ String[] pads = pipe.getSwitchPads("outs");
+
+ assertEquals(2, pads.length);
+ assertEquals("src_0", pads[0]);
+ assertEquals("src_1", pads[1]);
+ } catch (Exception e) {
+ /* report the cause instead of swallowing it (bare fail() hides the reason) */
+ fail("unexpected exception: " + e.getMessage());
+ }
+ }
+
+ /*
+ * Negative cases for switch-pad APIs: getSwitchPads() and
+ * selectSwitchPad() must reject invalid, null, or empty switch/pad
+ * names with an exception.
+ */
+ @Test
+ public void testGetSwitchInvalidName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* get pad list with invalid switch name */
+ pipe.getSwitchPads("invalid_outs");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testGetSwitchNullName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* get pad list with null param */
+ pipe.getSwitchPads(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testGetSwitchEmptyName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* get pad list with empty name */
+ pipe.getSwitchPads("");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSelectInvalidPad_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* select invalid pad name */
+ pipe.selectSwitchPad("outs", "invalid_src");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSelectNullPad_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* null pad name */
+ pipe.selectSwitchPad("outs", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSelectEmptyPad_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* empty pad name */
+ pipe.selectSwitchPad("outs", "");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSelectNullSwitchName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* null switch name */
+ pipe.selectSwitchPad(null, "src_1");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSelectEmptySwitchName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "output-selector name=outs " +
+ "outs.src_0 ! tensor_sink name=sinkx async=false " +
+ "outs.src_1 ! tensor_sink async=false";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* empty switch name */
+ pipe.selectSwitchPad("", "src_1");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Verifies valve control: sinkx sits behind a valve on one tee branch;
+ * the valve is closed after the 10th buffer, so sinkx must receive
+ * exactly 10 of the 15 pushed buffers.
+ */
+ @Test
+ public void testControlValve() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tee name=t " +
+ "t. ! queue ! tensor_sink " +
+ "t. ! queue ! valve name=valvex ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,10,10,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", mSinkCb);
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 15; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", info.allocate());
+ Thread.sleep(50);
+
+ if (i == 9) {
+ /* close valve */
+ pipe.controlValve("valvex", false);
+ }
+ }
+
+ /* sleep 300 to pass all input buffers to sink */
+ Thread.sleep(300);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertEquals(10, mReceived);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /*
+ * Negative cases for controlValve(): invalid, null, or empty valve
+ * names must be rejected with an exception.
+ */
+ @Test
+ public void testControlInvalidValve_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tee name=t " +
+ "t. ! queue ! tensor_sink " +
+ "t. ! queue ! valve name=valvex ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* control valve with invalid name */
+ pipe.controlValve("invalid_valve", false);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testControlNullValveName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tee name=t " +
+ "t. ! queue ! tensor_sink " +
+ "t. ! queue ! valve name=valvex ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* control valve with null name */
+ pipe.controlValve(null, false);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testControlEmptyValveName_n() {
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)2:10:10:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tee name=t " +
+ "t. ! queue ! tensor_sink " +
+ "t. ! queue ! valve name=valvex ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* start pipeline */
+ pipe.start();
+
+ /* control valve with empty name */
+ pipe.controlValve("", false);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /*
+ * Surface tests for video sinks (glimagesink). All of them are skipped
+ * when the glimagesink element is not available in the GStreamer build.
+ */
+ /**
+ * Positive case: setting a null surface releases the old window
+ * without error while the pipeline keeps running.
+ */
+ @Test
+ public void testSetNullSurface() {
+ if (!Pipeline.isElementAvailable("glimagesink")) {
+ /* cannot run the test */
+ return;
+ }
+
+ String desc = "videotestsrc ! videoconvert ! glimagesink name=vsink";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.start();
+ Thread.sleep(500);
+
+ /* Setting null surface will release old window */
+ pipe.setSurface("vsink", null);
+
+ Thread.sleep(500);
+ pipe.stop();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testSetSurfaceNullName_n() {
+ if (!Pipeline.isElementAvailable("glimagesink")) {
+ /* cannot run the test */
+ return;
+ }
+
+ String desc = "videotestsrc ! videoconvert ! glimagesink name=vsink";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.start();
+ Thread.sleep(500);
+
+ pipe.setSurface(null, null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSetSurfaceEmptyName_n() {
+ if (!Pipeline.isElementAvailable("glimagesink")) {
+ /* cannot run the test */
+ return;
+ }
+
+ String desc = "videotestsrc ! videoconvert ! glimagesink name=vsink";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.start();
+ Thread.sleep(500);
+
+ pipe.setSurface("", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative case: a holder whose surface is not yet valid is rejected. */
+ @Test
+ public void testSetInvalidSurface_n() {
+ if (!Pipeline.isElementAvailable("glimagesink")) {
+ /* cannot run the test */
+ return;
+ }
+
+ /* invalid surface */
+ SurfaceView surfaceView = new SurfaceView(APITestCommon.getContext());
+ if (surfaceView.getHolder().getSurface().isValid()) {
+ fail();
+ }
+
+ String desc = "videotestsrc ! videoconvert ! glimagesink name=vsink";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ pipe.start();
+ Thread.sleep(500);
+
+ pipe.setSurface("vsink", surfaceView.getHolder());
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Decodes a video file through the amcsrc (Android MediaCodec source)
+ * element, scales it to RGB 320x240 (3 * 320 * 240 = 230400 bytes per
+ * frame), and verifies tensors arrive at the sink — including after a
+ * stop/restart cycle of the same pipeline.
+ *
+ * NOTE(review): requires /nnstreamer/test/test_video.mp4 on external
+ * storage of the test device — confirm the fixture is provisioned.
+ */
+ @Test
+ public void testAMCsrc() {
+ String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+ String media = root + "/nnstreamer/test/test_video.mp4";
+
+ String desc = "amcsrc location=" + media + " ! " +
+ "videoconvert ! videoscale ! video/x-raw,format=RGB,width=320,height=240 ! " +
+ "tensor_converter ! tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ } else {
+ ByteBuffer output = data.getTensorData(0);
+
+ /* one RGB 320x240 frame = 230400 bytes */
+ if (!APITestCommon.isValidBuffer(output, 230400)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* sleep 2 seconds to invoke */
+ Thread.sleep(2000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+
+ /* sleep 1 second and restart */
+ Thread.sleep(1000);
+ mReceived = 0;
+
+ pipe.start();
+
+ /* sleep 2 seconds to invoke */
+ Thread.sleep(2000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Run SNAP with Caffe model.
+ *
+ * Builds a tensor_filter pipeline using the SNAP framework with the
+ * Caffe model files from APITestCommon, pushes 10 dummy float32
+ * 3:224:224:1 inputs, and checks that each output buffer holds 4000
+ * bytes (1000 float32 class scores).
+ *
+ * @param computingUnit selects the SNAP custom option string (CPU/GPU)
+ * via APITestCommon.getSNAPCaffeOption()
+ */
+ private void runSNAPCaffe(APITestCommon.SNAPComputingUnit computingUnit) {
+ File[] models = APITestCommon.getSNAPCaffeModel();
+ String option = APITestCommon.getSNAPCaffeOption(computingUnit);
+
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:224:224:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snap " +
+ "model=" + models[0].getAbsolutePath() + "," + models[1].getAbsolutePath() + " " +
+ "input=3:224:224:1 inputtype=float32 inputlayout=NHWC inputname=data " +
+ "output=1:1:1000:1 outputtype=float32 outputlayout=NCHW outputname=prob " +
+ "custom=" + option + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (
+ Pipeline pipe = new Pipeline(desc);
+ TensorsInfo info = new TensorsInfo()
+ ) {
+ info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
+
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ } else {
+ ByteBuffer output = data.getTensorData(0);
+
+ /* 1000 float32 scores = 4000 bytes */
+ if (!APITestCommon.isValidBuffer(output, 4000)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(100);
+ }
+
+ /* sleep 500 to invoke */
+ Thread.sleep(500);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /* SNAP/Caffe on CPU; skipped when the SNAP framework is unavailable. */
+ @Test
+ public void testSNAPCaffeCPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNAPCaffe(APITestCommon.SNAPComputingUnit.CPU);
+ }
+
+ /* SNAP/Caffe on GPU; skipped when the SNAP framework is unavailable. */
+ @Test
+ public void testSNAPCaffeGPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNAPCaffe(APITestCommon.SNAPComputingUnit.GPU);
+ }
+
+ /**
+ * Run SNAP with Tensorflow model in a pipeline.
+ * Pushes 10 dummy NHWC float32 buffers (3:224:224:1) into appsrc and checks
+ * that the sink receives buffers of 4004 bytes (1001 float32 scores).
+ * mInvalidState/mReceived are instance fields updated by the sink callback
+ * and checked after the pipeline stops.
+ *
+ * @param computingUnit the SNAP computing unit passed via the custom option
+ */
+ private void runSNAPTensorflow(APITestCommon.SNAPComputingUnit computingUnit) {
+ File[] model = APITestCommon.getSNAPTensorflowModel(computingUnit);
+ String option = APITestCommon.getSNAPTensorflowOption(computingUnit);
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:224:224:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snap " +
+ "model=" + model[0].getAbsolutePath() + " " +
+ "input=3:224:224:1 inputtype=float32 inputlayout=NHWC inputname=input " +
+ "output=1001:1 outputtype=float32 outputlayout=NHWC outputname=MobilenetV1/Predictions/Reshape_1:0 " +
+ "custom=" + option + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (
+ Pipeline pipe = new Pipeline(desc);
+ TensorsInfo info = new TensorsInfo()
+ ) {
+ info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
+
+ /* register sink callback (flags mInvalidState on any unexpected data) */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ } else {
+ ByteBuffer output = data.getTensorData(0);
+
+ /* 4004 bytes = 1001 float32 class scores */
+ if (!APITestCommon.isValidBuffer(output, 4004)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(100);
+ }
+
+ /* sleep 500 to invoke */
+ Thread.sleep(500);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Runs the SNAP Tensorflow pipeline test on the CPU computing unit.
+ */
+ @Test
+ public void testSNAPTensorflowCPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNAPTensorflow(APITestCommon.SNAPComputingUnit.CPU);
+ }
+
+ /**
+ * Runs the SNAP Tensorflow pipeline test on the DSP runtime
+ * (Snapdragon-only, see hardware check below).
+ */
+ @Test
+ public void testSNAPTensorflowDSP() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ if (!android.os.Build.HARDWARE.equals("qcom")) {
+ /*
+ * Tensorflow model using DSP runtime can only be executed on
+ * Snapdragon SoC. Cannot run this test on exynos.
+ */
+ return;
+ }
+
+ runSNAPTensorflow(APITestCommon.SNAPComputingUnit.DSP);
+ }
+
+ /**
+ * Runs the SNAP Tensorflow pipeline test on the NPU runtime
+ * (Snapdragon-only, see hardware check below).
+ */
+ @Test
+ public void testSNAPTensorflowNPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ if (!android.os.Build.HARDWARE.equals("qcom")) {
+ /*
+ * Tensorflow model using NPU runtime can only be executed on
+ * Snapdragon. Cannot run this test on exynos.
+ */
+ return;
+ }
+
+ runSNAPTensorflow(APITestCommon.SNAPComputingUnit.NPU);
+ }
+
+ /**
+ * Runs the add.tflite model through tensor_filter with the NNFW framework:
+ * feeds a single float 1.5 and expects 3.5 (the model adds 2.0) at the sink.
+ */
+ @Test
+ public void testNNFWTFLite() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.NNFW)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getTFLiteAddModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)1:1:1:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=nnfw model=" + model.getAbsolutePath() + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ TensorsInfo inputInfo = new TensorsInfo();
+ inputInfo.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{1,1,1,1});
+
+ /* sink callback: expect exactly one tensor holding the value 3.5f */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ /* check received data */
+ float result = data.getTensorData(0).getFloat(0);
+ if (result != 3.5f) {
+ mInvalidState = true;
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* feed one input buffer holding 1.5f */
+ TensorsData input = inputInfo.allocate();
+ ByteBuffer raw = input.getTensorData(0);
+ raw.putFloat(0, 1.5f);
+ input.setTensorData(0, raw);
+
+ pipe.inputData("srcx", input);
+
+ /* give the filter 1000 ms to invoke */
+ Thread.sleep(1000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* verify results collected by the sink callback */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Runs the SNPE inception model in a pipeline with dummy input
+ * (float32 3:299:299:1) and checks the sink receives buffers of
+ * 4004 bytes (1001 float32 scores).
+ */
+ @Test
+ public void testSNPE() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getSNPEModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:299:299:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snpe " + "model=" + model.getAbsolutePath() + " ! " +
+ "tensor_sink name=sinkx";
+
+ try (
+ Pipeline pipe = new Pipeline(desc);
+ TensorsInfo info = new TensorsInfo()
+ ) {
+ info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,299,299,1});
+
+ /* register sink callback (flags mInvalidState on unexpected data) */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 1) {
+ mInvalidState = true;
+ } else {
+ ByteBuffer output = data.getTensorData(0);
+
+ /* 4004 bytes = 1001 float32 class scores */
+ if (!APITestCommon.isValidBuffer(output, 4004)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(100);
+ }
+
+ /* sleep 500 to invoke */
+ Thread.sleep(500);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Runs a SNPE pipeline whose model produces two output tensors and checks
+ * both sink buffers: tensor 0 must be 1917 x 91 float32 (class scores) and
+ * tensor 1 must be 1917 x 4 float32 (box coordinates). Input is dummy
+ * float32 3:300:300:1 pushed 10 times into appsrc.
+ *
+ * @param desc the complete pipeline description to launch
+ */
+ private void runSNPEMultipleOutput(String desc) {
+ try (
+ Pipeline pipe = new Pipeline(desc);
+ TensorsInfo info = new TensorsInfo()
+ ) {
+ info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,300,300,1});
+
+ /* register sink callback (flags mInvalidState on unexpected data) */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 2) {
+ mInvalidState = true;
+ return;
+ }
+
+ TensorsInfo info = data.getTensorsInfo();
+
+ if (info == null || info.getTensorsCount() != 2) {
+ mInvalidState = true;
+ } else {
+ /* first output: 1917 anchors x 91 classes, float32 */
+ ByteBuffer output = data.getTensorData(0);
+ if (!APITestCommon.isValidBuffer(output, 1917 * 91 * 4)) {
+ mInvalidState = true;
+ }
+
+ /* second output: 1917 anchors x 4 box coords, float32 */
+ output = data.getTensorData(1);
+ if (!APITestCommon.isValidBuffer(output, 1917 * 4 * 4)) {
+ mInvalidState = true;
+ }
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ pipe.inputData("srcx", TensorsData.allocate(info));
+ Thread.sleep(100);
+ }
+
+ /* sleep 1000ms to invoke */
+ Thread.sleep(1000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ /* keep the cause visible in the test report instead of a bare fail() */
+ fail("SNPE multi-output pipeline failed: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Multi-output SNPE model with the output layers declared explicitly
+ * via the output/outputtype/outputname filter properties.
+ */
+ @Test
+ public void testSNPEMultipleOutputWithTensorInfo() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getMultiOutputSNPEModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:300:300:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snpe " + "model=" + model.getAbsolutePath() +
+ " output=91:1917:1:1,4:1:1917:1 outputtype=float32,float32 outputname=concat,concat_1 ! " +
+ "tensor_sink name=sinkx";
+
+ runSNPEMultipleOutput(desc);
+ }
+
+ /**
+ * Multi-output SNPE model with the output layers selected through the
+ * custom property (OutputLayer:...) instead of explicit tensor info.
+ */
+ @Test
+ public void testSNPEMultipleOutputWithCustomProp() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getMultiOutputSNPEModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:300:300:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snpe " + "model=" + model.getAbsolutePath() +
+ " custom=OutputLayer:concat;concat_1 ! " +
+ "tensor_sink name=sinkx";
+
+ runSNPEMultipleOutput(desc);
+ }
+
+ /**
+ * Run SNPE with inception model with given runtime.
+ * Feeds the bundled raw image and checks that the max-score label index
+ * equals 648 (measuring cup).
+ *
+ * @param runtime runtime name passed to the filter as custom=Runtime:...
+ */
+ private void runSNPEInception(String runtime) {
+ File model = APITestCommon.getSNPEModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:299:299:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+ "tensor_filter framework=snpe model=" + model.getAbsolutePath() +
+ " custom=Runtime:" + runtime + " ! " +
+ "tensor_sink name=sinkx";
+
+ /* expected label is measuring_cup (648) */
+ final int expected_label = 648;
+ try (
+ Pipeline pipe = new Pipeline(desc)
+ ) {
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ /* arg-max over the 1001 float32 scores */
+ ByteBuffer buffer = data.getTensorData(0);
+ int labelIndex = APITestCommon.getMaxScoreFloatBuffer(buffer, 1001);
+
+ /* check label index (measuring cup) */
+ if (labelIndex != expected_label) {
+ mInvalidState = true;
+ }
+
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ TensorsData in = APITestCommon.readRawImageDataSNPE();
+ pipe.inputData("srcx", in);
+
+ /* sleep 1000 msec to invoke */
+ Thread.sleep(1000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /** SNPE inception classification on the CPU runtime. */
+ @Test
+ public void testSNPEClassificationResultCPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNPEInception("CPU");
+ }
+
+ /** SNPE inception classification on the GPU runtime. */
+ @Test
+ public void testSNPEClassificationResultGPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNPEInception("GPU");
+ }
+
+ /** SNPE inception classification on the DSP runtime. */
+ @Test
+ public void testSNPEClassificationResultDSP() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNPEInception("DSP");
+ }
+
+ /** SNPE inception classification on the NPU runtime. */
+ @Test
+ public void testSNPEClassificationResultNPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNPEInception("NPU");
+ }
+
+ /**
+ * Runs a pytorch classification model in a pipeline: the raw uint8 image is
+ * converted to NCHW (dimchg 0:2) and normalized to [-1, 1] before the
+ * filter; the max-score label must be 950 (orange).
+ */
+ @Test
+ public void testPytorchClassificationResult() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.PYTORCH)) {
+ /* cannot run the test */
+ return;
+ }
+
+ File model = APITestCommon.getPytorchModel();
+ String desc = "appsrc name=srcx ! " +
+ "other/tensor,dimension=(string)3:224:224:1,type=(string)uint8,framerate=(fraction)0/1 ! " +
+ "tensor_transform mode=dimchg option=0:2 ! " +
+ "tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! " +
+ "tensor_filter framework=pytorch model=" + model.getAbsolutePath() + " " +
+ "input=224:224:3:1 inputtype=float32 output=1000:1 outputtype=float32 ! " +
+ "tensor_sink name=sinkx";
+
+ /* expected label is orange (950) */
+ final int expected_label = 950;
+
+ try (Pipeline pipe = new Pipeline(desc)) {
+ /* register sink callback */
+ pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+ @Override
+ public void onNewDataReceived(TensorsData data) {
+ if (data == null || data.getTensorsCount() != 1) {
+ mInvalidState = true;
+ return;
+ }
+
+ /* arg-max over the 1000 float32 scores */
+ ByteBuffer buffer = data.getTensorData(0);
+ int labelIndex = APITestCommon.getMaxScoreFloatBuffer(buffer, 1000);
+
+ /* check label index (orange) */
+ if (labelIndex != expected_label) {
+ mInvalidState = true;
+ }
+ mReceived++;
+ }
+ });
+
+ /* start pipeline */
+ pipe.start();
+
+ /* push input buffer */
+ TensorsData in = APITestCommon.readRawImageData();
+ pipe.inputData("srcx", in);
+
+ /* sleep 1000 to invoke */
+ Thread.sleep(1000);
+
+ /* stop pipeline */
+ pipe.stop();
+
+ /* check received data from sink */
+ assertFalse(mInvalidState);
+ assertTrue(mReceived > 0);
+ } catch (Exception e) {
+ fail();
+ }
+
+ }
+}
--- /dev/null
+package org.nnsuite.nnstreamer;
+
+import android.os.Environment;
+import android.support.test.rule.GrantPermissionRule;
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.*;
+
+/**
+ * Testcases for SingleShot.
+ */
+@RunWith(AndroidJUnit4.class)
+public class APITestSingleShot {
+ /* Grants the runtime permissions the testcases need (presumably external
+ * storage for model files; see APITestCommon.grantPermissions — TODO confirm). */
+ @Rule
+ public GrantPermissionRule mPermissionRule = APITestCommon.grantPermissions();
+
+ /**
+ * Initializes the nnstreamer API before each testcase.
+ */
+ @Before
+ public void setUp() {
+ APITestCommon.initNNStreamer();
+ }
+
+ /** Negative: Options must reject a null framework type. */
+ @Test
+ public void testOptionsInvalidFW_n () {
+ try {
+ new SingleShot.Options(null, APITestCommon.getTFLiteImgModel());
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: Options must reject the UNKNOWN framework type. */
+ @Test
+ public void testOptionsUnknownFW_n () {
+ try {
+ new SingleShot.Options(NNStreamer.NNFWType.UNKNOWN, APITestCommon.getTFLiteImgModel());
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: Options must reject a null model file. */
+ @Test
+ public void testOptionsInvalidModelFile_n () {
+ try {
+ File f = null;
+ new SingleShot.Options(NNStreamer.NNFWType.TENSORFLOW_LITE, f);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Checks that getInputInfo() reports the image model's input tensor
+ * (uint8 3:224:224:1). The instance is closed in a finally block so the
+ * native handle is released even when an assertion fails (the original
+ * skipped close() on assertion failure).
+ */
+ @Test
+ public void testGetInputInfo() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ try {
+ TensorsInfo info = single.getInputInfo();
+
+ /* input: uint8 3:224:224:1 */
+ assertEquals(1, info.getTensorsCount());
+ assertEquals(NNStreamer.TensorType.UINT8, info.getTensorType(0));
+ assertArrayEquals(new int[]{3,224,224,1}, info.getTensorDimension(0));
+ } finally {
+ single.close();
+ }
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Checks that getOutputInfo() reports the image model's output tensor
+ * (uint8 1001:1:1:1). The instance is closed in a finally block so the
+ * native handle is released even when an assertion fails (the original
+ * skipped close() on assertion failure).
+ */
+ @Test
+ public void testGetOutputInfo() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ try {
+ TensorsInfo info = single.getOutputInfo();
+
+ /* output: uint8 1001:1 */
+ assertEquals(1, info.getTensorsCount());
+ assertEquals(NNStreamer.TensorType.UINT8, info.getTensorType(0));
+ assertArrayEquals(new int[]{1001,1,1,1}, info.getTensorDimension(0));
+ } finally {
+ single.close();
+ }
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Negative: setInputInfo(null) must throw.
+ * NOTE(review): the SingleShot instance is never closed on the expected
+ * path — consider releasing it to avoid leaking the native handle.
+ */
+ @Test
+ public void testSetNullInputInfo_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+ single.setInputInfo(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Negative: setInputInfo() with a dimension the model cannot take
+ * (uint8 2:2:2:2 against the image model) must throw.
+ */
+ @Test
+ public void testSetInvalidInputInfo_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ TensorsInfo newInfo = new TensorsInfo();
+ newInfo.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,2,2,2});
+
+ single.setInputInfo(newInfo);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Checks that setInputInfo() resizes the add.tflite model from dimension 1
+ * to dimension 10 and that both input and output info follow the change.
+ */
+ @Test
+ public void testSetInputInfo() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteAddModel());
+ TensorsInfo info = single.getInputInfo();
+
+ /* input: float32 with dimension 1 */
+ assertEquals(1, info.getTensorsCount());
+ assertEquals(NNStreamer.TensorType.FLOAT32, info.getTensorType(0));
+ assertArrayEquals(new int[]{1,1,1,1}, info.getTensorDimension(0));
+
+ TensorsInfo newInfo = new TensorsInfo();
+ newInfo.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{10});
+
+ single.setInputInfo(newInfo);
+
+ info = single.getInputInfo();
+ /* input: float32 with dimension 10 */
+ assertEquals(1, info.getTensorsCount());
+ assertEquals(NNStreamer.TensorType.FLOAT32, info.getTensorType(0));
+ assertArrayEquals(new int[]{10,1,1,1}, info.getTensorDimension(0));
+
+ info = single.getOutputInfo();
+ /* output: float32 with dimension 10 (output follows the input resize) */
+ assertEquals(1, info.getTensorsCount());
+ assertEquals(NNStreamer.TensorType.FLOAT32, info.getTensorType(0));
+ assertArrayEquals(new int[]{10,1,1,1}, info.getTensorDimension(0));
+
+ single.close();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Repeatedly invokes the image model with dummy input and checks the
+ * output size (1001 uint8 bytes). 600 iterations with a 30 ms sleep keep
+ * the instance busy for roughly 18 seconds.
+ */
+ @Test
+ public void testInvoke() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+ TensorsInfo info = single.getInputInfo();
+
+ /* let's ignore timeout (set 10 sec) */
+ single.setTimeout(10000);
+
+ /* single-shot invoke */
+ for (int i = 0; i < 600; i++) {
+ /* dummy input */
+ TensorsData out = single.invoke(info.allocate());
+
+ /* output: uint8 1001:1 */
+ assertEquals(1, out.getTensorsCount());
+ assertEquals(1001, out.getTensorData(0).capacity());
+
+ Thread.sleep(30);
+ }
+
+ single.close();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Run image classification and validate result.
+ * Invokes the tflite image model once with the bundled raw image and
+ * asserts that the max-score label index is 951 (orange).
+ *
+ * @param fw the framework to run the model with
+ * @param custom framework-specific custom option string (may be null)
+ */
+ private void runImageClassification(NNStreamer.NNFWType fw, String custom) {
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel(), fw, custom);
+
+ try {
+ /* single-shot invoke */
+ TensorsData in = APITestCommon.readRawImageData();
+ TensorsData out = single.invoke(in);
+ int labelIndex = APITestCommon.getMaxScore(out.getTensorData(0));
+
+ /* check label index (orange) — assertEquals reports the wrong value,
+ * unlike the original bare fail() */
+ assertEquals(951, labelIndex);
+ } finally {
+ /* release the native instance even when invoke or the assert fails */
+ single.close();
+ }
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Image classification with tensorflow-lite: default delegate, then the
+ * NNAPI and GPU delegates via the custom option.
+ */
+ @Test
+ public void testClassificationResultTFLite() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runImageClassification(NNStreamer.NNFWType.TENSORFLOW_LITE, null);
+ runImageClassification(NNStreamer.NNFWType.TENSORFLOW_LITE, "Delegate:NNAPI");
+ runImageClassification(NNStreamer.NNFWType.TENSORFLOW_LITE, "Delegate:GPU");
+ }
+
+ /** Image classification with the NNFW framework. */
+ @Test
+ public void testClassificationResultNNFW() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.NNFW)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runImageClassification(NNStreamer.NNFWType.NNFW, null);
+ }
+
+ /**
+ * Run dynamic invoke with add.tflite model.
+ * For each i in [1,4] the input is resized to i:1:1:1 and every element j
+ * is fed as (j + 1.5); the model adds 2.0, so the output must be (j + 3.5).
+ *
+ * @param fw the framework to run the model with
+ */
+ private void runInvokeDynamic(NNStreamer.NNFWType fw) {
+ try {
+ File model = APITestCommon.getTFLiteAddModel();
+ SingleShot single = new SingleShot(model, fw);
+
+ try {
+ TensorsInfo info = single.getInputInfo();
+
+ /* single-shot invoke */
+ for (int i = 1; i < 5; i++) {
+ /* change input information */
+ info.setTensorDimension(0, new int[]{i,1,1,1});
+ single.setInputInfo(info);
+
+ TensorsData input = TensorsData.allocate(info);
+ ByteBuffer inBuffer = input.getTensorData(0);
+
+ /* use Float.BYTES consistently for the element stride
+ * (the original mixed a literal 4 with Float.BYTES) */
+ for (int j = 0; j < i; j++) {
+ inBuffer.putFloat(j * Float.BYTES, j + 1.5f);
+ }
+
+ input.setTensorData(0, inBuffer);
+
+ /* invoke */
+ TensorsData output = single.invoke(input);
+
+ /* output: float32 i:1:1:1 */
+ assertEquals(1, output.getTensorsCount());
+
+ ByteBuffer outBuffer = output.getTensorData(0);
+ assertEquals(i * Float.BYTES, outBuffer.capacity());
+
+ for (int j = 0; j < i; j++) {
+ float expected = j + 3.5f;
+ assertEquals(expected, outBuffer.getFloat(j * Float.BYTES), 0.0f);
+ }
+
+ Thread.sleep(30);
+ }
+ } finally {
+ /* release the native instance even when invoke or an assert fails */
+ single.close();
+ }
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /** Dynamic-dimension invoke with tensorflow-lite. */
+ @Test
+ public void testInvokeDynamicTFLite() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runInvokeDynamic(NNStreamer.NNFWType.TENSORFLOW_LITE);
+ }
+
+ /** Dynamic-dimension invoke with the NNFW framework. */
+ @Test
+ public void testInvokeDynamicNNFW() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.NNFW)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runInvokeDynamic(NNStreamer.NNFWType.NNFW);
+ }
+
+ /**
+ * Negative: invoke() must throw when the 1 ms timeout elapses before the
+ * model finishes.
+ * NOTE(review): the SingleShot instance is never closed on the expected
+ * path — consider releasing it to avoid leaking the native handle.
+ */
+ @Test
+ public void testInvokeTimeout_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+ TensorsInfo info = single.getInputInfo();
+
+ /* timeout 1ms (not enough time to invoke the model) */
+ single.setTimeout(1);
+
+ /* dummy input */
+ single.invoke(TensorsData.allocate(info));
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: constructing SingleShot with a null model file must throw. */
+ @Test
+ public void testNullFile_n() {
+ try {
+ File f = null;
+ new SingleShot(f);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: the multi-file constructor must reject a null model array. */
+ @Test
+ public void testNullFiles_n() {
+ try {
+ new SingleShot(null, null, null, NNStreamer.NNFWType.TENSORFLOW_LITE, null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: constructing SingleShot with a non-existent path must throw. */
+ @Test
+ public void testInvalidFile_n() {
+ String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+ File model = new File(root + "/invalid_path/invalid.tflite");
+
+ try {
+ new SingleShot(model);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: input info with the wrong type (uint16 instead of uint8) must be rejected. */
+ @Test
+ public void testInvalidInputType_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ /* input: uint8 3:224:224:1 */
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT16, new int[]{3,224,224,1});
+
+ try {
+ new SingleShot(APITestCommon.getTFLiteImgModel(), info, null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: input info with a wrong dimension (2 channels instead of 3) must be rejected. */
+ @Test
+ public void testInvalidInputDimension_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ /* input: uint8 3:224:224:1 */
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{2,224,224});
+
+ try {
+ new SingleShot(APITestCommon.getTFLiteImgModel(), info, null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: output info with the wrong type (int16 instead of uint8) must be rejected. */
+ @Test
+ public void testInvalidOutputType_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ /* output: uint8 1001:1 */
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.INT16, new int[]{1001,1});
+
+ try {
+ new SingleShot(APITestCommon.getTFLiteImgModel(), null, info);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: output info with a wrong dimension (1001:2 instead of 1001:1) must be rejected. */
+ @Test
+ public void testInvalidOutputDimension_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ /* output: uint8 1001:1 */
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{1001,2,1,1});
+
+ try {
+ new SingleShot(APITestCommon.getTFLiteImgModel(), null, info);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: invoke(null) must throw. */
+ @Test
+ public void testInvokeNullData_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.invoke(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: invoke() with a 100-byte buffer against the image model must throw. */
+ @Test
+ public void testInvokeInvalidData_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ /* input data size: 3 * 224 * 224 */
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{100});
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.invoke(TensorsData.allocate(info));
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: setTimeout() with a negative value must throw. */
+ @Test
+ public void testSetInvalidTimeout_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.setTimeout(-1);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: getValue() with an empty property name must throw. */
+ @Test
+ public void testGetInvalidPropertyName_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.getValue("");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: getValue() with an unknown property name must throw. */
+ @Test
+ public void testGetUnknownPropertyName_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.getValue("unknown_prop");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: getValue(null) must throw. */
+ @Test
+ public void testGetNullPropertyName_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.getValue(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: setValue() with an unknown property name must throw. */
+ @Test
+ public void testSetUnknownPropertyName_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.setValue("unknown_prop", "unknown");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: setValue(null, ...) must throw. */
+ @Test
+ public void testSetNullPropertyName_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.setValue(null, "ANY");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: setValue() with an empty property name must throw. */
+ @Test
+ public void testSetEmptyPropertyName_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.setValue("", "ANY");
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /** Negative: setValue() with a null property value must throw. */
+ @Test
+ public void testSetNullPropertyValue_n() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot single = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ single.setValue("inputlayout", null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ /**
+ * Checks that the "input" dimension property of add.tflite can be updated.
+ */
+ @Test
+ public void testSetPropertyDimension() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot shot = new SingleShot(APITestCommon.getTFLiteAddModel());
+
+ shot.setValue("input", "5:1:1:1");
+ shot.close();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Checks that the "input"/"output" dimension properties report the
+ * image model's shapes.
+ */
+ @Test
+ public void testGetPropertyDimension() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.TENSORFLOW_LITE)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ SingleShot shot = new SingleShot(APITestCommon.getTFLiteImgModel());
+
+ assertEquals("3:224:224:1", shot.getValue("input"));
+ assertEquals("1001:1:1:1", shot.getValue("output"));
+
+ shot.close();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /**
+ * Run SNAP with Caffe model.
+ * Invokes the model 10 times with dummy NHWC float32 input
+ * (data, 3:224:224:1) and checks the NCHW float32 output
+ * (prob, 1:1:1000:1 = 4000 bytes).
+ *
+ * @param CUnit the SNAP computing unit passed via the custom option
+ */
+ private void runSNAPCaffe(APITestCommon.SNAPComputingUnit CUnit) {
+ File[] models = APITestCommon.getSNAPCaffeModel();
+ String option = APITestCommon.getSNAPCaffeOption(CUnit);
+
+ try {
+ TensorsInfo in = new TensorsInfo();
+ in.addTensorInfo("data", NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
+
+ TensorsInfo out = new TensorsInfo();
+ out.addTensorInfo("prob", NNStreamer.TensorType.FLOAT32, new int[]{1,1,1000,1});
+
+ SingleShot single = new SingleShot(models, in, out, NNStreamer.NNFWType.SNAP, option);
+
+ try {
+ /* let's ignore timeout (set 60 sec) */
+ single.setTimeout(60000);
+
+ /* single-shot invoke */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ TensorsData output = single.invoke(in.allocate());
+
+ /* output: float32 1:1:1000:1 (NCHW format) */
+ assertEquals(1, output.getTensorsCount());
+ assertEquals(4000, output.getTensorData(0).capacity());
+
+ Thread.sleep(30);
+ }
+ } finally {
+ /* the original never closed the instance; release the native handle */
+ single.close();
+ }
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /** SNAP Caffe single-shot invoke on the CPU computing unit. */
+ @Test
+ public void testSNAPCaffeCPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNAPCaffe(APITestCommon.SNAPComputingUnit.CPU);
+ }
+
+ /** SNAP Caffe single-shot invoke on the GPU computing unit. */
+ @Test
+ public void testSNAPCaffeGPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNAPCaffe(APITestCommon.SNAPComputingUnit.GPU);
+ }
+
+ /**
+ * Run SNAP with Tensorflow model.
+ * Invokes the model 10 times with dummy float32 input (input, 3:224:224:1)
+ * and checks the float32 output (1001:1 = 4004 bytes).
+ *
+ * @param CUnit the SNAP computing unit passed via the custom option
+ */
+ private void runSNAPTensorflow(APITestCommon.SNAPComputingUnit CUnit) {
+ File[] model = APITestCommon.getSNAPTensorflowModel(CUnit);
+ String option = APITestCommon.getSNAPTensorflowOption(CUnit);
+
+ try {
+ TensorsInfo in = new TensorsInfo();
+ in.addTensorInfo("input", NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
+
+ TensorsInfo out = new TensorsInfo();
+ out.addTensorInfo("MobilenetV1/Predictions/Reshape_1:0", NNStreamer.TensorType.FLOAT32, new int[]{1001, 1});
+
+ SingleShot single = new SingleShot(model, in, out, NNStreamer.NNFWType.SNAP, option);
+
+ try {
+ /* let's ignore timeout (set 60 sec) */
+ single.setTimeout(60000);
+
+ /* single-shot invoke */
+ for (int i = 0; i < 10; i++) {
+ /* dummy input */
+ TensorsData output = single.invoke(in.allocate());
+
+ /* output: float32 1:1001 */
+ assertEquals(1, output.getTensorsCount());
+ assertEquals(4004, output.getTensorData(0).capacity());
+
+ Thread.sleep(30);
+ }
+ } finally {
+ /* the original never closed the instance; release the native handle */
+ single.close();
+ }
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ /** SNAP Tensorflow single-shot invoke on the CPU computing unit. */
+ @Test
+ public void testSNAPTensorflowCPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ runSNAPTensorflow(APITestCommon.SNAPComputingUnit.CPU);
+ }
+
+ /** SNAP Tensorflow single-shot invoke on the DSP runtime (Snapdragon-only). */
+ @Test
+ public void testSNAPTensorflowDSP() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ if (!android.os.Build.HARDWARE.equals("qcom")) {
+ /*
+ * Tensorflow model using DSP runtime can only be executed on
+ * Snapdragon SoC. Cannot run this test on exynos.
+ */
+ return;
+ }
+
+ runSNAPTensorflow(APITestCommon.SNAPComputingUnit.DSP);
+ }
+
+ /** SNAP Tensorflow single-shot invoke on the NPU runtime (Snapdragon-only). */
+ @Test
+ public void testSNAPTensorflowNPU() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+ /* cannot run the test */
+ return;
+ }
+
+ if (!android.os.Build.HARDWARE.equals("qcom")) {
+ /*
+ * Tensorflow model using NPU runtime can only be executed on
+ * Snapdragon. Cannot run this test on exynos.
+ */
+ return;
+ }
+
+ runSNAPTensorflow(APITestCommon.SNAPComputingUnit.NPU);
+ }
+
+ /**
+ * Single-shot invoke of add.tflite with the NNFW framework: for each i,
+ * feeds (i + 1.5) and expects (i + 3.5) since the model adds 2.0.
+ */
+ @Test
+ public void testNNFWTFLite() {
+ if (!NNStreamer.isAvailable(NNStreamer.NNFWType.NNFW)) {
+ /* cannot run the test */
+ return;
+ }
+
+ try {
+ File model = APITestCommon.getTFLiteAddModel();
+
+ SingleShot single = new SingleShot(model, NNStreamer.NNFWType.NNFW);
+ TensorsInfo in = single.getInputInfo();
+
+ /* let's ignore timeout (set 60 sec) */
+ single.setTimeout(60000);
+
+ /* single-shot invoke */
+ for (int i = 0; i < 5; i++) {
+ /* input data */
+ TensorsData input = in.allocate();
+
+ ByteBuffer buffer = input.getTensorData(0);
+ buffer.putFloat(0, i + 1.5f);
+
+ input.setTensorData(0, buffer);
+
+ /* invoke */
+ TensorsData output = single.invoke(input);
+
+ /* check output */
+ float expected = i + 3.5f;
+ assertEquals(expected, output.getTensorData(0).getFloat(0), 0.0f);
+
+ Thread.sleep(30);
+ }
+
+ single.close();
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+    @Test
+    public void testNNFWOpenDir() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.NNFW)) {
+            /* cannot run the test */
+            return;
+        }
+
+        try {
+            /* constructing with the model path should succeed */
+            File modelPath = new File(APITestCommon.getTFLiteAddModelPath());
+            new SingleShot(modelPath, NNStreamer.NNFWType.NNFW);
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testNNFWOpenInvalidDir_n() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.NNFW)) {
+            /* cannot run the test */
+            return;
+        }
+
+        /* opening a non-existing directory must throw */
+        File invalid = new File(APITestCommon.getTFLiteAddModelPath() + "/invaliddir");
+        try {
+            new SingleShot(invalid, NNStreamer.NNFWType.NNFW);
+            fail();
+        } catch (Exception e) {
+            /* expected */
+        }
+    }
+
+    @Test
+    public void testSNPE() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+            /* cannot run the test */
+            return;
+        }
+
+        try {
+            SingleShot session = new SingleShot(APITestCommon.getSNPEModel(), NNStreamer.NNFWType.SNPE);
+            TensorsInfo inputInfo = session.getInputInfo();
+
+            /* let's ignore timeout (set 60 sec) */
+            session.setTimeout(60000);
+
+            /* repeat single-shot invocations */
+            for (int round = 0; round < 5; ++round) {
+                TensorsData output = session.invoke(inputInfo.allocate());
+
+                /* check output: 1 tensor (float32 1:1001) */
+                assertEquals(1, output.getTensorsCount());
+                assertEquals(4004, output.getTensorData(0).capacity());
+
+                Thread.sleep(30);
+            }
+
+            session.close();
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testSNPEClassificationResult() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNPE)) {
+            /* cannot run the test */
+            return;
+        }
+
+        /* expected label is measuring_cup (648) */
+        final int expectedLabel = 648;
+
+        try {
+            File model = APITestCommon.getSNPEModel();
+
+            SingleShot single = new SingleShot(model, NNStreamer.NNFWType.SNPE);
+
+            /* let's ignore timeout (set 10 sec) */
+            single.setTimeout(10000);
+
+            /* single-shot invoke */
+            TensorsData in = APITestCommon.readRawImageDataSNPE();
+            TensorsData out = single.invoke(in);
+            int labelIndex = APITestCommon.getMaxScoreFloatBuffer(out.getTensorData(0), 1001);
+
+            /* check label index (measuring cup);
+             * assertEquals reports both values on mismatch, unlike a bare fail() */
+            assertEquals(expectedLabel, labelIndex);
+
+            single.close();
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testPytorchClassificationResult() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.PYTORCH)) {
+            /* cannot run the test */
+            return;
+        }
+
+        /* expected label is orange (950) */
+        final int expectedLabel = 950;
+
+        try {
+            File model = APITestCommon.getPytorchModel();
+
+            TensorsInfo in = new TensorsInfo();
+            in.addTensorInfo("", NNStreamer.TensorType.FLOAT32, new int[]{224, 224, 3, 1});
+
+            TensorsInfo out = new TensorsInfo();
+            out.addTensorInfo("", NNStreamer.TensorType.FLOAT32, new int[]{1000, 1});
+
+            SingleShot single = new SingleShot(new File[]{model}, in, out, NNStreamer.NNFWType.PYTORCH, "");
+
+            /* invoke */
+            TensorsData output = single.invoke(APITestCommon.readRawImageDataPytorch());
+
+            int labelIndex = APITestCommon.getMaxScoreFloatBuffer(output.getTensorData(0), 1000);
+
+            /* check label index (orange);
+             * assertEquals reports both values on mismatch, unlike a bare fail() */
+            assertEquals(expectedLabel, labelIndex);
+
+            single.close();
+        } catch (Exception e) {
+            fail();
+        }
+    }
+}
--- /dev/null
+package org.nnsuite.nnstreamer;
+
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import static org.junit.Assert.*;
+
+/**
+ * Testcases for TensorsData.
+ */
+@RunWith(AndroidJUnit4.class)
+public class APITestTensorsData {
+ /* allocated in setUp(): three uint8 tensors of 100, 200 and 300 bytes */
+ private TensorsData mData;
+
+ @Before
+ public void setUp() {
+ APITestCommon.initNNStreamer();
+
+ TensorsInfo info = new TensorsInfo();
+
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{100});
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{200});
+ info.addTensorInfo(NNStreamer.TensorType.UINT8, new int[]{300});
+
+ mData = TensorsData.allocate(info);
+ }
+
+ @After
+ public void tearDown() {
+ mData.close();
+ }
+
+ @Test
+ public void testAllocateByteBuffer() {
+ try {
+ ByteBuffer buffer = TensorsData.allocateByteBuffer(300);
+
+ assertTrue(APITestCommon.isValidBuffer(buffer, 300));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testAllocate() {
+ try {
+ TensorsInfo info = new TensorsInfo();
+
+ info.addTensorInfo(NNStreamer.TensorType.INT16, new int[]{2});
+ info.addTensorInfo(NNStreamer.TensorType.UINT16, new int[]{2,2});
+ info.addTensorInfo(NNStreamer.TensorType.UINT32, new int[]{2,2,2});
+
+ TensorsData data = TensorsData.allocate(info);
+
+ /* index 0: 2 int16 */
+ assertTrue(APITestCommon.isValidBuffer(data.getTensorData(0), 4));
+
+ /* index 1: 2:2 uint16 */
+ assertTrue(APITestCommon.isValidBuffer(data.getTensorData(1), 8));
+
+ /* index 2: 2:2:2 uint32 */
+ assertTrue(APITestCommon.isValidBuffer(data.getTensorData(2), 32));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testAllocateEmptyInfo_n() {
+ /* allocating from info with no tensors must throw */
+ try {
+ TensorsInfo info = new TensorsInfo();
+
+ TensorsData.allocate(info);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testAllocateNullInfo_n() {
+ try {
+ TensorsData.allocate(null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testGetData() {
+ try {
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(0), 100));
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(1), 200));
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(2), 300));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testSetData() {
+ try {
+ ByteBuffer buffer = TensorsData.allocateByteBuffer(200);
+ mData.setTensorData(1, buffer);
+
+ /* replacing one tensor keeps the others untouched */
+ assertEquals(3, mData.getTensorsCount());
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(0), 100));
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(1), 200));
+ assertTrue(APITestCommon.isValidBuffer(mData.getTensorData(2), 300));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testSetNullByteBuffer_n() {
+ try {
+ ByteBuffer buffer = null;
+
+ mData.setTensorData(0, buffer);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSetInvalidOrderByteBuffer_n() {
+ try {
+ /* big-endian byte order (allocateDirect's default) is rejected */
+ ByteBuffer buffer = ByteBuffer.allocateDirect(100);
+
+ mData.setTensorData(0, buffer);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSetNonDirectByteBuffer_n() {
+ try {
+ /* non-direct byte buffer */
+ ByteBuffer buffer = ByteBuffer.allocate(100).order(ByteOrder.nativeOrder());
+
+ mData.setTensorData(0, buffer);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testGetInvalidIndex_n() {
+ /* only three tensors exist (index 0-2) */
+ try {
+ mData.getTensorData(5);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSetInvalidIndex_n() {
+ try {
+ ByteBuffer buffer = TensorsData.allocateByteBuffer(500);
+
+ mData.setTensorData(5, buffer);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testSetInvalidSizeByteBuffer_n() {
+ try {
+ /* 500 bytes does not match the size of tensor 1 (200 bytes) */
+ ByteBuffer buffer = TensorsData.allocateByteBuffer(500);
+
+ mData.setTensorData(1, buffer);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testAllocateInvalidSize_n() {
+ try {
+ TensorsData.allocateByteBuffer(-1);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testAllocateZeroSize_n() {
+ try {
+ TensorsData.allocateByteBuffer(0);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testGetInfo() {
+ try {
+ TensorsInfo info = new TensorsInfo();
+ info.addTensorInfo("name1", NNStreamer.TensorType.INT64, new int[]{10,1,1,1});
+ info.addTensorInfo("name2", NNStreamer.TensorType.UINT64, new int[]{20,1,1,1});
+
+ /* allocate data, info is cloned */
+ TensorsData data = TensorsData.allocate(info);
+
+ /* update info */
+ info.setTensorName(0, "test1");
+ info.setTensorType(0, NNStreamer.TensorType.INT16);
+ info.setTensorDimension(0, new int[]{1,1,1,1});
+
+ info.setTensorName(1, "test2");
+ info.setTensorType(1, NNStreamer.TensorType.UINT16);
+ info.setTensorDimension(1, new int[]{2,2,1,1});
+
+ info.addTensorInfo("test3", NNStreamer.TensorType.FLOAT64, new int[]{3,3,3,1});
+
+ assertEquals(3, info.getTensorsCount());
+
+ /* check cloned info: must not reflect the updates above */
+ TensorsInfo cloned = data.getTensorsInfo();
+
+ assertEquals(2, cloned.getTensorsCount());
+
+ assertEquals("name1", cloned.getTensorName(0));
+ assertEquals(NNStreamer.TensorType.INT64, cloned.getTensorType(0));
+ assertArrayEquals(new int[]{10,1,1,1}, cloned.getTensorDimension(0));
+
+ assertEquals("name2", cloned.getTensorName(1));
+ assertEquals(NNStreamer.TensorType.UINT64, cloned.getTensorType(1));
+ assertArrayEquals(new int[]{20,1,1,1}, cloned.getTensorDimension(1));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+}
--- /dev/null
+package org.nnsuite.nnstreamer;
+
+import android.support.test.runner.AndroidJUnit4;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import static org.junit.Assert.*;
+
+/**
+ * Testcases for TensorsInfo.
+ */
+@RunWith(AndroidJUnit4.class)
+public class APITestTensorsInfo {
+ /* info under test, created empty in setUp() */
+ private TensorsInfo mInfo;
+
+ @Before
+ public void setUp() {
+ APITestCommon.initNNStreamer();
+ mInfo = new TensorsInfo();
+ }
+
+ @After
+ public void tearDown() {
+ mInfo.close();
+ }
+
+ @Test
+ public void testAddInfo() {
+ try {
+ mInfo.addTensorInfo("name1", NNStreamer.TensorType.INT8, new int[]{1});
+ assertEquals(1, mInfo.getTensorsCount());
+
+ mInfo.addTensorInfo("name2", NNStreamer.TensorType.UINT8, new int[]{2,2});
+ assertEquals(2, mInfo.getTensorsCount());
+
+ mInfo.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,3,3});
+ assertEquals(3, mInfo.getTensorsCount());
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testGetInfo() {
+ try {
+ testAddInfo();
+
+ /* dimensions shorter than the max rank are padded with 1 */
+ assertEquals("name1", mInfo.getTensorName(0));
+ assertEquals(NNStreamer.TensorType.INT8, mInfo.getTensorType(0));
+ assertArrayEquals(new int[]{1,1,1,1}, mInfo.getTensorDimension(0));
+
+ assertEquals("name2", mInfo.getTensorName(1));
+ assertEquals(NNStreamer.TensorType.UINT8, mInfo.getTensorType(1));
+ assertArrayEquals(new int[]{2,2,1,1}, mInfo.getTensorDimension(1));
+
+ /* tensor added without a name has null name */
+ assertNull(mInfo.getTensorName(2));
+ assertEquals(NNStreamer.TensorType.FLOAT32, mInfo.getTensorType(2));
+ assertArrayEquals(new int[]{3,3,3,1}, mInfo.getTensorDimension(2));
+
+ assertEquals(3, mInfo.getTensorsCount());
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testClone() {
+ try {
+ testAddInfo();
+
+ /* clone */
+ TensorsInfo cloned = mInfo.clone();
+
+ /* update info */
+ mInfo.setTensorName(0, "updated1");
+ mInfo.setTensorType(0, NNStreamer.TensorType.INT16);
+ mInfo.setTensorDimension(0, new int[]{10,1,1,1});
+
+ mInfo.setTensorName(1, "updated2");
+ mInfo.setTensorType(1, NNStreamer.TensorType.UINT16);
+ mInfo.setTensorDimension(1, new int[]{20,1,1,1});
+
+ mInfo.setTensorName(2, "updated3");
+ mInfo.setTensorType(2, NNStreamer.TensorType.FLOAT64);
+ mInfo.setTensorDimension(2, new int[]{30,1,1,1});
+
+ mInfo.addTensorInfo("updated4", NNStreamer.TensorType.INT64, new int[]{40,1,1,1});
+
+ /* check cloned info: must not reflect the updates above */
+ assertEquals("name1", cloned.getTensorName(0));
+ assertEquals(NNStreamer.TensorType.INT8, cloned.getTensorType(0));
+ assertArrayEquals(new int[]{1,1,1,1}, cloned.getTensorDimension(0));
+
+ assertEquals("name2", cloned.getTensorName(1));
+ assertEquals(NNStreamer.TensorType.UINT8, cloned.getTensorType(1));
+ assertArrayEquals(new int[]{2,2,1,1}, cloned.getTensorDimension(1));
+
+ assertNull(cloned.getTensorName(2));
+ assertEquals(NNStreamer.TensorType.FLOAT32, cloned.getTensorType(2));
+ assertArrayEquals(new int[]{3,3,3,1}, cloned.getTensorDimension(2));
+
+ assertEquals(3, cloned.getTensorsCount());
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testGetSize() {
+ try {
+ testAddInfo();
+
+ /* index 0: 1 int8 */
+ assertEquals(1, mInfo.getTensorSize(0));
+
+ /* index 1: 2:2 uint8 */
+ assertEquals(4, mInfo.getTensorSize(1));
+
+ /* index 2: 3:3:3 float32 (27 * 4 bytes) */
+ assertEquals(108, mInfo.getTensorSize(2));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testAllocate() {
+ try {
+ testAddInfo();
+
+ TensorsData data = mInfo.allocate();
+
+ /* buffer capacities match the tensor sizes checked in testGetSize() */
+ assertEquals(3, data.getTensorsCount());
+ assertEquals(1, data.getTensorData(0).capacity());
+ assertEquals(4, data.getTensorData(1).capacity());
+ assertEquals(108, data.getTensorData(2).capacity());
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testAllocateEmpty_n() {
+ try {
+ TensorsInfo info = new TensorsInfo();
+
+ info.allocate();
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testUpdateInfo() {
+ try {
+ testAddInfo();
+
+ mInfo.setTensorName(2, "name3");
+ assertEquals("name1", mInfo.getTensorName(0));
+ assertEquals("name2", mInfo.getTensorName(1));
+ assertEquals("name3", mInfo.getTensorName(2));
+
+ mInfo.setTensorType(2, NNStreamer.TensorType.INT64);
+ assertEquals(NNStreamer.TensorType.INT8, mInfo.getTensorType(0));
+ assertEquals(NNStreamer.TensorType.UINT8, mInfo.getTensorType(1));
+ assertEquals(NNStreamer.TensorType.INT64, mInfo.getTensorType(2));
+
+ mInfo.setTensorDimension(2, new int[]{2,3});
+ assertArrayEquals(new int[]{1,1,1,1}, mInfo.getTensorDimension(0));
+ assertArrayEquals(new int[]{2,2,1,1}, mInfo.getTensorDimension(1));
+ assertArrayEquals(new int[]{2,3,1,1}, mInfo.getTensorDimension(2));
+ } catch (Exception e) {
+ fail();
+ }
+ }
+
+ @Test
+ public void testAddUnknownType_n() {
+ try {
+ mInfo.addTensorInfo(NNStreamer.TensorType.UNKNOWN, new int[]{2,2,2,2});
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+
+ assertEquals(0, mInfo.getTensorsCount());
+ }
+
+ @Test
+ public void testAddInvalidRank_n() {
+ /* rank 5 exceeds NNStreamer.TENSOR_RANK_LIMIT (4) */
+ try {
+ mInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{2,2,2,2,2});
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+
+ assertEquals(0, mInfo.getTensorsCount());
+ }
+
+ @Test
+ public void testAddInvalidDimension_n() {
+ try {
+ mInfo.addTensorInfo(NNStreamer.TensorType.INT32, new int[]{1,1,-1});
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+
+ assertEquals(0, mInfo.getTensorsCount());
+ }
+
+ @Test
+ public void testAddNullDimension_n() {
+ try {
+ mInfo.addTensorInfo(NNStreamer.TensorType.UINT8, null);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+
+ assertEquals(0, mInfo.getTensorsCount());
+ }
+
+ @Test
+ public void testGetInvalidIndex_n() {
+ try {
+ mInfo.getTensorType(0);
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+
+ @Test
+ public void testAddMaxInfo_n() {
+ /* TENSOR_SIZE_LIMIT + 1 additions: the last one must throw */
+ try {
+ for (int i = 0; i <= NNStreamer.TENSOR_SIZE_LIMIT; i++) {
+ mInfo.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{2,2,2,2});
+ }
+ fail();
+ } catch (Exception e) {
+ /* expected */
+ }
+ }
+}
--- /dev/null
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="org.nnsuite.nnstreamer" >
+
+ <!-- OpenGL ES 2.0 feature (NOTE(review): presumably required by GStreamer's GL support; verify) -->
+ <uses-feature android:glEsVersion="0x00020000"/>
+ <!-- required to open machine learning model files in the device storage -->
+ <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
+
+ <!-- extractNativeLibs keeps the native (GStreamer/NNStreamer) libraries extracted on install;
+ largeHeap requests a larger heap, presumably for model/tensor buffers (TODO confirm) -->
+ <application
+ android:extractNativeLibs="true"
+ android:largeHeap="true" >
+ </application>
+</manifest>
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+package org.nnsuite.nnstreamer;
+
+import android.support.annotation.NonNull;
+
+/**
+ * Provides interfaces to create a custom-filter in the pipeline.<br>
+ * <br>
+ * To register a new custom-filter, an application should call
+ * {@link #create(String, TensorsInfo, TensorsInfo, Callback)}
+ * before constructing the pipeline.
+ */
+public final class CustomFilter implements AutoCloseable {
+ /* native handle returned by nativeInitialize(); 0 after close() */
+ private long mHandle = 0;
+ private String mName = null;
+ /* kept for the lifetime of the filter; invoked from native via invoke() */
+ private Callback mCallback = null;
+
+ private native long nativeInitialize(String name, TensorsInfo in, TensorsInfo out);
+ private native void nativeDestroy(long handle);
+
+ /**
+ * Interface definition for a callback to be invoked while processing the pipeline.
+ *
+ * @see #create(String, TensorsInfo, TensorsInfo, Callback)
+ */
+ public interface Callback {
+ /**
+ * Called synchronously while processing the pipeline.
+ *
+ * NNStreamer filter invokes the given custom-filter callback while processing the pipeline.
+ * Note that, if it is unnecessary to execute the input data, return null to drop the buffer.
+ *
+ * @param in The input data (a single frame, tensor/tensors)
+ *
+ * @return The output data (a single frame, tensor/tensors)
+ */
+ TensorsData invoke(TensorsData in);
+ }
+
+ /**
+ * Creates new custom-filter with input and output tensors information.
+ *
+ * NNStreamer processes the tensors with 'custom-easy' framework which can execute without the model file.
+ * Note that if given name is duplicated in the pipeline or same name already exists,
+ * the registration will be failed and throw an exception.
+ *
+ * @param name The name of custom-filter
+ * @param in The input tensors information
+ * @param out The output tensors information
+ * @param callback The function to be called while processing the pipeline
+ *
+ * @return {@link CustomFilter} instance
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to initialize custom-filter
+ */
+ public static CustomFilter create(@NonNull String name, @NonNull TensorsInfo in,
+ @NonNull TensorsInfo out, @NonNull Callback callback) {
+ return new CustomFilter(name, in, out, callback);
+ }
+
+ /**
+ * Gets the name of custom-filter.
+ *
+ * @return The name of custom-filter
+ */
+ public String getName() {
+ return mName;
+ }
+
+ /**
+ * Internal constructor to create and register a custom-filter.
+ *
+ * @param name The name of custom-filter
+ * @param in The input tensors information
+ * @param out The output tensors information
+ * @param callback The function to be called while processing the pipeline
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to initialize custom-filter
+ */
+ private CustomFilter(String name, TensorsInfo in, TensorsInfo out, Callback callback) {
+ if (name == null) {
+ throw new IllegalArgumentException("Given name is null");
+ }
+
+ if (in == null || out == null) {
+ throw new IllegalArgumentException("Given info is null");
+ }
+
+ if (callback == null) {
+ throw new IllegalArgumentException("Given callback is null");
+ }
+
+ /* native side returns 0 on registration failure */
+ mHandle = nativeInitialize(name, in, out);
+ if (mHandle == 0) {
+ throw new IllegalStateException("Failed to initialize custom-filter " + name);
+ }
+
+ mName = name;
+ mCallback = callback;
+ }
+
+ /**
+ * Internal method called from native while processing the pipeline.
+ */
+ private TensorsData invoke(TensorsData in) {
+ TensorsData out = null;
+
+ /* a null result drops the buffer (see Callback#invoke) */
+ if (mCallback != null) {
+ out = mCallback.invoke(in);
+ }
+
+ return out;
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ /* safety net: release the native handle if close() was not called */
+ try {
+ close();
+ } finally {
+ super.finalize();
+ }
+ }
+
+ @Override
+ public void close() {
+ /* idempotent: handle is cleared so nativeDestroy() runs only once */
+ if (mHandle != 0) {
+ nativeDestroy(mHandle);
+ mHandle = 0;
+ }
+ }
+
+ /**
+ * Private constructor to prevent the instantiation.
+ */
+ private CustomFilter() {}
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+package org.nnsuite.nnstreamer;
+
+import android.content.Context;
+import android.os.Build;
+
+import org.freedesktop.gstreamer.GStreamer;
+
+import java.util.Locale;
+
+/**
+ * Defines the types and limits in NNStreamer.<br>
+ * To use NNStreamer, an application should call {@link #initialize(Context)} with its context.<br>
+ * <br>
+ * NNStreamer is a set of GStreamer plugins that allow GStreamer developers to adopt neural network models easily and efficiently
+ * and neural network developers to manage stream pipelines and their filters easily and efficiently.<br>
+ * <br>
+ * Note that, to open a machine learning model in the storage,
+ * the permission {@code Manifest.permission.READ_EXTERNAL_STORAGE} is required before constructing the pipeline.
+ * <br>
+ * See <a href="https://github.com/nnstreamer/nnstreamer">NNStreamer repository</a> for the details.
+ */
+public final class NNStreamer {
+ /**
+ * The maximum rank that NNStreamer supports.
+ */
+ public static final int TENSOR_RANK_LIMIT = 4;
+
+ /**
+ * The maximum number of tensor that {@link TensorsData} instance may have.
+ */
+ public static final int TENSOR_SIZE_LIMIT = 16;
+
+ /**
+ * The enumeration for supported frameworks in NNStreamer.
+ *
+ * @see #isAvailable(NNFWType)
+ */
+ public enum NNFWType {
+ /**
+ * <a href="https://www.tensorflow.org/lite">TensorFlow Lite</a> is an open source
+ * deep learning framework for on-device inference.
+ */
+ TENSORFLOW_LITE,
+ /**
+ * SNAP (Samsung Neural Acceleration Platform)
+ * supports <a href="https://developer.samsung.com/neural">Samsung Neural SDK</a>
+ * (Version 2.0, run only on Samsung devices).<br>
+ * To construct a pipeline with SNAP, developer should set the custom option string
+ * to specify the neural network and data format.<br>
+ * <br>
+ * Custom options<br>
+ * - ModelFWType: the type of model (TensorFlow/Caffe)<br>
+ * - ExecutionDataType: the execution data type for SNAP (default float32)<br>
+ * - ComputingUnit: the computing unit to execute the model (default CPU)<br>
+ * - CpuThreadCount: the number of CPU threads to be executed (optional, default 4 if ComputingUnit is CPU)<br>
+ * - GpuCacheSource: the absolute path to GPU Kernel caching (mandatory if ComputingUnit is GPU)
+ */
+ SNAP,
+ /**
+ * NNFW is on-device neural network inference framework, which is developed by SR (Samsung Research).<br>
+ * See <a href="https://github.com/Samsung/ONE">ONE (On-device Neural Engine) repository</a> for the details.
+ */
+ NNFW,
+ /**
+ * <a href="https://developer.qualcomm.com/docs/snpe/index.html">SNPE</a> (Snapdragon Neural Processing Engine)
+ * is a Qualcomm Snapdragon software accelerated runtime for the execution of deep neural networks.<br>
+ * <br>
+ * Custom options<br>
+ * - Runtime: the computing unit to execute the model (default CPU)<br>
+ * - CPUFallback: CPU fallback mode (default false)
+ */
+ SNPE,
+ /**
+ * <a href="https://pytorch.org/mobile/home/">PyTorch Mobile</a>
+ * is an on-device solution for the open source machine learning framework, PyTorch.
+ */
+ PYTORCH,
+ /**
+ * Unknown framework (usually error)
+ */
+ UNKNOWN
+ }
+
+ /**
+ * The enumeration for possible data type of tensor in NNStreamer.
+ */
+ public enum TensorType {
+ /** Integer 32bit */ INT32,
+ /** Unsigned integer 32bit */ UINT32,
+ /** Integer 16bit */ INT16,
+ /** Unsigned integer 16bit */ UINT16,
+ /** Integer 8bit */ INT8,
+ /** Unsigned integer 8bit */ UINT8,
+ /** Float 64bit */ FLOAT64,
+ /** Float 32bit */ FLOAT32,
+ /** Integer 64bit */ INT64,
+ /** Unsigned integer 64bit */ UINT64,
+ /** Unknown data type (usually error) */ UNKNOWN
+ }
+
+ private static native boolean nativeInitialize(Context context);
+ private static native boolean nativeCheckNNFWAvailability(int fw);
+ private static native String nativeGetVersion();
+
+ /**
+ * Initializes GStreamer and NNStreamer, registering the plugins and loading necessary libraries.
+ *
+ * @param context The application context
+ *
+ * @return true if successfully initialized
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ */
+ public static boolean initialize(Context context) {
+ if (context == null) {
+ throw new IllegalArgumentException("Given context is invalid");
+ }
+
+ try {
+ System.loadLibrary("gstreamer_android");
+ System.loadLibrary("nnstreamer-native");
+
+ GStreamer.init(context);
+ } catch (Exception e) {
+ e.printStackTrace();
+ return false;
+ }
+
+ return nativeInitialize(context);
+ }
+
+ /**
+ * Checks the neural network framework is available.
+ *
+ * @param fw The neural network framework
+ *
+ * @return true if the neural network framework is available
+ */
+ public static boolean isAvailable(NNFWType fw) {
+ boolean available = nativeCheckNNFWAvailability(fw.ordinal());
+
+ /* sub-plugin for given framework is available */
+ if (available) {
+ String manufacturer = Build.MANUFACTURER.toLowerCase(Locale.getDefault());
+ String hardware = Build.HARDWARE.toLowerCase(Locale.getDefault());
+
+ switch (fw) {
+ case SNPE:
+ if (!hardware.startsWith("qcom")) {
+ available = false;
+ }
+ break;
+ case SNAP:
+ if (!manufacturer.equals("samsung") ||
+ !(hardware.startsWith("qcom") || hardware.startsWith("exynos"))) {
+ available = false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return available;
+ }
+
+ /**
+ * Gets the version string of NNStreamer.
+ *
+ * @return The version string
+ */
+ public static String getVersion() {
+ return nativeGetVersion();
+ }
+
+ /**
+ * Private constructor to prevent the instantiation.
+ */
+ private NNStreamer() {}
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+package org.nnsuite.nnstreamer;
+
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+import android.view.Surface;
+import android.view.SurfaceHolder;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+/**
+ * Provides interfaces to create and execute stream pipelines with neural networks.<br>
+ * <br>
+ * {@link Pipeline} allows the following operations with NNStreamer:<br>
+ * - Create a stream pipeline with NNStreamer plugins, GStreamer plugins.<br>
+ * - Interfaces to push data to the pipeline from the application.<br>
+ * - Interfaces to pull data from the pipeline to the application.<br>
+ * - Interfaces to start/stop/destroy the pipeline.<br>
+ * - Interfaces to control switches and valves in the pipeline.<br>
+ */
+public final class Pipeline implements AutoCloseable {
+ private long mHandle = 0;
+ private HashMap<String, ArrayList<NewDataCallback>> mSinkCallbacks = new HashMap<>();
+ private StateChangeCallback mStateCallback = null;
+
+ private static native boolean nativeCheckElementAvailability(String element);
+ private native long nativeConstruct(String description, boolean addStateCb);
+ private native void nativeDestroy(long handle);
+ private native boolean nativeStart(long handle);
+ private native boolean nativeStop(long handle);
+ private native int nativeGetState(long handle);
+ private native boolean nativeInputData(long handle, String name, TensorsData data);
+ private native String[] nativeGetSwitchPads(long handle, String name);
+ private native boolean nativeSelectSwitchPad(long handle, String name, String pad);
+ private native boolean nativeControlValve(long handle, String name, boolean open);
+ private native boolean nativeAddSinkCallback(long handle, String name);
+ private native boolean nativeRemoveSinkCallback(long handle, String name);
+ private native boolean nativeInitializeSurface(long handle, String name, Object surface);
+ private native boolean nativeFinalizeSurface(long handle, String name);
+
+ /**
+ * Interface definition for a callback to be invoked when a sink node receives new data.
+ *
+ * @see #registerSinkCallback(String, NewDataCallback)
+ */
+ public interface NewDataCallback {
+ /**
+ * Called when a sink node receives new data.
+ *
+ * If an application wants to accept data outputs of an NNStreamer stream, use this callback to get data from the stream.
+ * Note that this is synchronously called and the buffer may be deallocated after the callback is finished.
+ * Thus, if you need the data afterwards, copy the data to another buffer and return fast.
+ * Do not spend too much time in the callback. It is recommended to use very small tensors at sinks.
+ *
+ * @param data The output data (a single frame, tensor/tensors)
+ */
+ void onNewDataReceived(TensorsData data);
+ }
+
+ /**
+ * Interface definition for a callback to be invoked when the pipeline state is changed.
+ * This callback can be registered only when constructing the pipeline.
+ *
+ * @see State
+ * @see #start()
+ * @see #stop()
+ * @see Pipeline#Pipeline(String, StateChangeCallback)
+ */
+ public interface StateChangeCallback {
+ /**
+ * Called when the pipeline state is changed.
+ *
+ * If an application wants to get the change of pipeline state, use this callback.
+ * This callback can be registered when constructing the pipeline.
+ * This is synchronously called, so do not spend too much time in the callback.
+ *
+ * @param state The changed state
+ */
+ void onStateChanged(Pipeline.State state);
+ }
+
+ /**
+ * The enumeration for pipeline state.
+ * Refer to <a href="https://gstreamer.freedesktop.org/documentation/plugin-development/basics/states.html">GStreamer states</a> for the details.
+ */
+ public enum State {
+ /* NOTE(review): the order presumably matches the int mapping used by
+ * nativeGetState()/convertPipelineState() -- verify against the JNI layer
+ * before reordering. */
+ /**
+ * Unknown state.
+ */
+ UNKNOWN,
+ /**
+ * Initial state of the pipeline.
+ */
+ NULL,
+ /**
+ * The pipeline is ready to go to PAUSED.
+ */
+ READY,
+ /**
+ * The pipeline is stopped, ready to accept and process data.
+ */
+ PAUSED,
+ /**
+ * The pipeline is started and the data is flowing.
+ */
+ PLAYING
+ }
+
+ /**
+ * Creates a new {@link Pipeline} instance with the given pipeline description.
+ *
+ * @param description The pipeline description. Refer to GStreamer manual or
+ * <a href="https://github.com/nnstreamer/nnstreamer">NNStreamer</a> documentation for examples and the grammar.
+ *
+ * @throws IllegalArgumentException if given param is null
+ * @throws IllegalStateException if failed to construct the pipeline
+ */
+ public Pipeline(@NonNull String description) {
+ this(description, null);
+ }
+
+ /**
+ * Creates a new {@link Pipeline} instance with the given pipeline description.
+ *
+ * @param description The pipeline description. Refer to GStreamer manual or
+ * <a href="https://github.com/nnstreamer/nnstreamer">NNStreamer</a> documentation for examples and the grammar.
+ * @param callback The function to be called when the pipeline state is changed.
+ * You may set null if it is not required.
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to construct the pipeline
+ */
+ public Pipeline(@NonNull String description, @Nullable StateChangeCallback callback) {
+ if (description == null || description.isEmpty()) {
+ throw new IllegalArgumentException("Given description is invalid");
+ }
+
+ /* store the callback before constructing; the boolean below tells the
+ * native layer whether to forward state changes (see nativeConstruct) */
+ mStateCallback = callback;
+
+ mHandle = nativeConstruct(description, (callback != null));
+ if (mHandle == 0) {
+ throw new IllegalStateException("Failed to construct the pipeline");
+ }
+ }
+
+ /**
+ * Checks the element is registered and available on the pipeline.
+ *
+ * @param element The name of GStreamer element
+ *
+ * @return true if the element is available
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ */
+ public static boolean isElementAvailable(@NonNull String element) {
+ if (element == null || element.isEmpty()) {
+ throw new IllegalArgumentException("Given element is invalid");
+ }
+
+ return nativeCheckElementAvailability(element);
+ }
+
+ /**
+ * Starts the pipeline, asynchronously.
+ * The pipeline state would be changed to {@link State#PLAYING}.
+ * If you need to get the changed state, add a callback while constructing a pipeline.
+ *
+ * @throws IllegalStateException if failed to start the pipeline
+ *
+ * @see State
+ * @see StateChangeCallback
+ */
+ public void start() {
+ checkPipelineHandle();
+
+ if (!nativeStart(mHandle)) {
+ throw new IllegalStateException("Failed to start the pipeline");
+ }
+ }
+
+ /**
+ * Stops the pipeline, asynchronously.
+ * The pipeline state would be changed to {@link State#PAUSED}.
+ * If you need to get the changed state, add a callback while constructing a pipeline.
+ *
+ * @throws IllegalStateException if failed to stop the pipeline
+ *
+ * @see State
+ * @see StateChangeCallback
+ */
+ public void stop() {
+ checkPipelineHandle();
+
+ if (!nativeStop(mHandle)) {
+ throw new IllegalStateException("Failed to stop the pipeline");
+ }
+ }
+
+ /**
+ * Gets the state of pipeline.
+ *
+ * @return The state of pipeline
+ *
+ * @throws IllegalStateException if the pipeline is not constructed
+ *
+ * @see State
+ * @see StateChangeCallback
+ */
+ public State getState() {
+ checkPipelineHandle();
+
+ /* native reports the state as an int; map it to the State enum */
+ return convertPipelineState(nativeGetState(mHandle));
+ }
+
+ /**
+ * Adds an input data frame to source node.
+ *
+ * @param name The name of source node
+ * @param data The input data (a single frame, tensor/tensors)
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to push data to source node
+ */
+ public void inputData(@NonNull String name, @NonNull TensorsData data) {
+ checkPipelineHandle();
+
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ if (data == null) {
+ throw new IllegalArgumentException("Given data is null");
+ }
+
+ if (!nativeInputData(mHandle, name, data)) {
+ throw new IllegalStateException("Failed to push data to source node " + name);
+ }
+ }
+
+ /**
+ * Gets the pad names of a switch.
+ *
+ * @param name The name of switch node
+ *
+ * @return The list of pad names
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to get the list of pad names
+ */
+ public String[] getSwitchPads(@NonNull String name) {
+ checkPipelineHandle();
+
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ String[] pads = nativeGetSwitchPads(mHandle, name);
+
+ if (pads == null || pads.length == 0) {
+ throw new IllegalStateException("Failed to get the pads in switch " + name);
+ }
+
+ return pads;
+ }
+
+ /**
+ * Controls the switch to select input/output nodes (pads).
+ *
+ * @param name The name of switch node
+ * @param pad The name of the chosen pad to be activated
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to select the switch pad
+ */
+ public void selectSwitchPad(@NonNull String name, @NonNull String pad) {
+ checkPipelineHandle();
+
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ if (pad == null || pad.isEmpty()) {
+ throw new IllegalArgumentException("Given pad is invalid");
+ }
+
+ if (!nativeSelectSwitchPad(mHandle, name, pad)) {
+ throw new IllegalStateException("Failed to select the pad " + pad);
+ }
+ }
+
+ /**
+ * Controls the valve.
+ * Set the flag true to open (let the flow pass), false to close (drop & stop the flow).
+ *
+ * @param name The name of valve node
+ * @param open The flag to control the flow
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to change the valve state
+ */
+ public void controlValve(@NonNull String name, boolean open) {
+ checkPipelineHandle();
+
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ if (!nativeControlValve(mHandle, name, open)) {
+ throw new IllegalStateException("Failed to change the valve " + name);
+ }
+ }
+
+ /**
+ * Registers new data callback to sink node.
+ * The callback can be added in duplicate if an application tries to register multiple callbacks with same name.
+ *
+ * @param name The name of sink node
+ * @param callback The callback for new data
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to register the callback to sink node in the pipeline
+ */
+ public void registerSinkCallback(@NonNull String name, @NonNull NewDataCallback callback) {
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ if (callback == null) {
+ throw new IllegalArgumentException("Given callback is null");
+ }
+
+ synchronized(this) {
+ ArrayList<NewDataCallback> cbList = mSinkCallbacks.get(name);
+
+ if (cbList != null) {
+ /* check the list already includes same callback */
+ if (!cbList.contains(callback)) {
+ cbList.add(callback);
+ }
+ } else {
+ if (nativeAddSinkCallback(mHandle, name)) {
+ cbList = new ArrayList<>();
+ cbList.add(callback);
+ mSinkCallbacks.put(name, cbList);
+ } else {
+ throw new IllegalStateException("Failed to register sink callback to " + name);
+ }
+ }
+ }
+ }
+
+ /**
+ * Unregisters data callback from sink node.
+ *
+ * @param name The name of sink node
+ * @param callback The callback object to be unregistered
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to unregister the callback from sink node
+ */
+ public void unregisterSinkCallback(@NonNull String name, @NonNull NewDataCallback callback) {
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ if (callback == null) {
+ throw new IllegalArgumentException("Given callback is null");
+ }
+
+ synchronized(this) {
+ ArrayList<NewDataCallback> cbList = mSinkCallbacks.get(name);
+
+ if (cbList == null || !cbList.contains(callback)) {
+ throw new IllegalStateException("Failed to unregister sink callback from " + name);
+ }
+
+ cbList.remove(callback);
+ if (cbList.isEmpty()) {
+ /* remove callback */
+ mSinkCallbacks.remove(name);
+ nativeRemoveSinkCallback(mHandle, name);
+ }
+ }
+ }
+
+ /**
+ * Sets a surface to video sink element.
+ * If {@code holder} is null, this will stop using the old surface.
+ *
+ * @param name The name of video sink element
+ * @param holder The surface holder instance
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if failed to set the surface to video sink
+ */
+ public void setSurface(@NonNull String name, @Nullable SurfaceHolder holder) {
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given name is invalid");
+ }
+
+ if (holder == null) {
+ nativeFinalizeSurface(mHandle, name);
+ } else {
+ Surface surface = holder.getSurface();
+
+ if (surface == null || !surface.isValid()) {
+ throw new IllegalArgumentException("The surface is not available");
+ }
+
+ if (!nativeInitializeSurface(mHandle, name, surface)) {
+ throw new IllegalStateException("Failed to set the surface to " + name);
+ }
+ }
+ }
+
+ /**
+ * Internal method called from native when a new data is available.
+ */
+ private void newDataReceived(String name, TensorsData data) {
+ synchronized(this) {
+ ArrayList<NewDataCallback> cbList = mSinkCallbacks.get(name);
+
+ if (cbList != null) {
+ for (int i = 0; i < cbList.size(); i++) {
+ cbList.get(i).onNewDataReceived(data);
+ }
+ }
+ }
+ }
+
+ /**
+ * Internal method called from native when the state of pipeline is changed.
+ */
+ private void stateChanged(int value) {
+ synchronized(this) {
+ if (mStateCallback != null) {
+ mStateCallback.onStateChanged(convertPipelineState(value));
+ }
+ }
+ }
+
+ /**
+ * Internal method to get the pipeline state from int value.
+ */
+ private State convertPipelineState(int value) {
+ State state = State.UNKNOWN;
+
+ switch (value) {
+ case 1:
+ state = State.NULL;
+ break;
+ case 2:
+ state = State.READY;
+ break;
+ case 3:
+ state = State.PAUSED;
+ break;
+ case 4:
+ state = State.PLAYING;
+ break;
+ default:
+ /* invalid or unknown state */
+ break;
+ }
+
+ return state;
+ }
+
+ /**
+ * Internal method to check native handle.
+ *
+ * @throws IllegalStateException if the pipeline is not constructed
+ */
+ private void checkPipelineHandle() {
+ if (mHandle == 0) {
+ throw new IllegalStateException("The pipeline is not constructed");
+ }
+ }
+
    /**
     * Safety net: releases native resources if the application forgot to call {@link #close()}.
     * Applications should not rely on finalization; call {@link #close()} explicitly.
     */
    @Override
    protected void finalize() throws Throwable {
        try {
            close();
        } finally {
            /* always chain to the superclass finalizer */
            super.finalize();
        }
    }
+
+ @Override
+ public void close() {
+ synchronized(this) {
+ mSinkCallbacks.clear();
+ mStateCallback = null;
+ }
+
+ if (mHandle != 0) {
+ nativeDestroy(mHandle);
+ mHandle = 0;
+ }
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+package org.nnsuite.nnstreamer;
+
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+
+import java.io.File;
+
+/**
+ * Provides interfaces to invoke a neural network model with a single instance of input data.<br>
+ * This function is a syntactic sugar of NNStreamer Pipeline API with simplified features;
+ * thus, users are supposed to use NNStreamer Pipeline API directly if they want more advanced features.<br>
+ * The user is expected to preprocess the input data for the given neural network model.<br>
+ * <br>
+ * {@link SingleShot} allows the following operations with NNStreamer:<br>
+ * - Open a machine learning model.<br>
+ * - Interfaces to enter a single instance of input data to the opened model.<br>
+ * - Utility functions to get the information of opened model.<br>
+ */
+public final class SingleShot implements AutoCloseable {
+ private long mHandle = 0;
+
+ private native long nativeOpen(String[] models, TensorsInfo inputInfo, TensorsInfo outputInfo, int fw, String custom);
+ private native void nativeClose(long handle);
+ private native TensorsData nativeInvoke(long handle, TensorsData inputData);
+ private native TensorsInfo nativeGetInputInfo(long handle);
+ private native TensorsInfo nativeGetOutputInfo(long handle);
+ private native boolean nativeSetProperty(long handle, String name, String value);
+ private native String nativeGetProperty(long handle, String name);
+ private native boolean nativeSetInputInfo(long handle, TensorsInfo inputInfo);
+ private native boolean nativeSetTimeout(long handle, int timeout);
+
+ /**
+ * Creates a new {@link SingleShot} instance with the given model for TensorFlow Lite.
+ * If the model has flexible data dimensions, the pipeline will not be constructed and this will make an exception.
+ *
+ * @param model The path to the neural network model file
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to construct the pipeline
+ */
+ public SingleShot(@NonNull File model) {
+ this(new File[]{model}, null, null, NNStreamer.NNFWType.TENSORFLOW_LITE, null);
+ }
+
+ /**
+ * Creates a new {@link SingleShot} instance with the given model.
+ * If the model has flexible data dimensions, the pipeline will not be constructed and this will make an exception.
+ *
+ * @param model The {@link File} object to the neural network model file
+ * @param fw The neural network framework
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to construct the pipeline
+ *
+ * @see NNStreamer#isAvailable(NNStreamer.NNFWType)
+ */
+ public SingleShot(@NonNull File model, NNStreamer.NNFWType fw) {
+ this(new File[]{model}, null, null, fw, null);
+ }
+
+ /**
+ * Creates a new {@link SingleShot} instance with the given model and custom option.
+ * If the model has flexible data dimensions, the pipeline will not be constructed and this will make an exception.
+ *
+ * @param model The {@link File} object to the neural network model file
+ * @param fw The neural network framework
+ * @param custom The custom option string to open the neural network
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to construct the pipeline
+ *
+ * @see NNStreamer#isAvailable(NNStreamer.NNFWType)
+ */
+ public SingleShot(@NonNull File model, NNStreamer.NNFWType fw, @Nullable String custom) {
+ this(new File[]{model}, null, null, fw, custom);
+ }
+
+ /**
+ * Creates a new {@link SingleShot} instance with the given model for TensorFlow Lite.
+ * The input and output tensors information are required if the given model has flexible data dimensions,
+ * where the information MUST be given before executing the model.
+ * However, once it's given, the dimension cannot be changed for the given model handle.
+ * You may set null if it's not required.
+ *
+ * @param model The {@link File} object to the neural network model file
+ * @param inputInfo The input tensors information
+ * @param outputInfo The output tensors information
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to construct the pipeline
+ */
+ public SingleShot(@NonNull File model, @Nullable TensorsInfo inputInfo, @Nullable TensorsInfo outputInfo) {
+ this(new File[]{model}, inputInfo, outputInfo, NNStreamer.NNFWType.TENSORFLOW_LITE, null);
+ }
+
+ /**
+ * Creates a new {@link SingleShot} instance with the given files and custom option.
+ *
+ * Unlike other constructors, this handles multiple files and custom option string
+ * when the neural network requires various options and model files.
+ *
+ * @param models The array of {@link File} objects to the neural network model files
+ * @param inputInfo The input tensors information
+ * @param outputInfo The output tensors information
+ * @param fw The neural network framework
+ * @param custom The custom option string to open the neural network
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to construct the pipeline
+ *
+ * @see NNStreamer#isAvailable(NNStreamer.NNFWType)
+ */
+ public SingleShot(@NonNull File[] models, @Nullable TensorsInfo inputInfo, @Nullable TensorsInfo outputInfo,
+ NNStreamer.NNFWType fw, @Nullable String custom) {
+ this(new Options(fw, models, inputInfo, outputInfo, custom));
+ }
+
+ /**
+ * Creates a new {@link SingleShot} instance with the given {@link Options}.
+ *
+ * @param options The {@link Options} object configuring the instance
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to construct the pipeline
+ */
+ public SingleShot(@NonNull Options options) {
+ File[] models = options.getModels();
+ NNStreamer.NNFWType fw = options.getNNFWType();
+ TensorsInfo inputInfo = options.getInputInfo();
+ TensorsInfo outputInfo = options.getOutputInfo();
+ String custom = options.getCustom();
+
+ String[] path = new String[models.length];
+ int index = 0;
+
+ for (File model : models) {
+ path[index++] = model.getAbsolutePath();
+ }
+
+ mHandle = nativeOpen(path, inputInfo, outputInfo, fw.ordinal(), custom);
+ if (mHandle == 0) {
+ throw new IllegalStateException("Failed to construct the SingleShot instance");
+ }
+ }
+
+ /**
+ * Invokes the model with the given input data.
+ *
+ * Even if the model has flexible input data dimensions,
+ * input data frames of an instance of a model should share the same dimension.
+ * To change the input information, you should call {@link #setInputInfo(TensorsInfo)} before calling invoke method.
+ *
+ * Note that this will wait for the result until the invoke process is done.
+ * If an application wants to change the time to wait for an output,
+ * set the timeout using {@link #setTimeout(int)}.
+ *
+ * @param in The input data to be inferred (a single frame, tensor/tensors)
+ *
+ * @return The output data (a single frame, tensor/tensors)
+ *
+ * @throws IllegalStateException if this failed to invoke the model
+ * @throws IllegalArgumentException if given param is null
+ */
+ public TensorsData invoke(@NonNull TensorsData in) {
+ checkPipelineHandle();
+
+ if (in == null) {
+ throw new IllegalArgumentException("Given input data is null");
+ }
+
+ TensorsData out = nativeInvoke(mHandle, in);
+ if (out == null) {
+ throw new IllegalStateException("Failed to invoke the model");
+ }
+
+ return out;
+ }
+
+ /**
+ * Gets the information (tensor dimension, type, name and so on) of required input data for the given model.
+ *
+ * @return The tensors information
+ *
+ * @throws IllegalStateException if this failed to get the input information
+ */
+ public TensorsInfo getInputInfo() {
+ checkPipelineHandle();
+
+ TensorsInfo info = nativeGetInputInfo(mHandle);
+ if (info == null) {
+ throw new IllegalStateException("Failed to get the input information");
+ }
+
+ return info;
+ }
+
+ /**
+ * Gets the information (tensor dimension, type, name and so on) of output data for the given model.
+ *
+ * @return The tensors information
+ *
+ * @throws IllegalStateException if this failed to get the output information
+ */
+ public TensorsInfo getOutputInfo() {
+ checkPipelineHandle();
+
+ TensorsInfo info = nativeGetOutputInfo(mHandle);
+ if (info == null) {
+ throw new IllegalStateException("Failed to get the output information");
+ }
+
+ return info;
+ }
+
+ /**
+ * Sets the property value for the given model.
+ * Note that a model/framework may not support to change the property after opening the model.
+ *
+ * @param name The property name
+ * @param value The property value
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ */
+ public void setValue(@NonNull String name, @NonNull String value) {
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given property name is invalid");
+ }
+
+ if (value == null) {
+ throw new IllegalArgumentException("Given property value is invalid");
+ }
+
+ if (!nativeSetProperty(mHandle, name, value)) {
+ throw new IllegalArgumentException("Failed to set the property");
+ }
+ }
+
+ /**
+ * Gets the property value for the given model.
+ *
+ * @param name The property name
+ *
+ * @return The property value
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ */
+ public String getValue(@NonNull String name) {
+ if (name == null || name.isEmpty()) {
+ throw new IllegalArgumentException("Given property name is invalid");
+ }
+
+ String value = nativeGetProperty(mHandle, name);
+
+ if (value == null) {
+ throw new IllegalArgumentException("Failed to get the property");
+ }
+
+ return value;
+ }
+
+ /**
+ * Sets the maximum amount of time to wait for an output, in milliseconds.
+ *
+ * @param timeout The time to wait for an output
+ *
+ * @throws IllegalArgumentException if given param is invalid
+ * @throws IllegalStateException if this failed to set the timeout
+ */
+ public void setTimeout(int timeout) {
+ checkPipelineHandle();
+
+ if (timeout < 0) {
+ throw new IllegalArgumentException("Given timeout is invalid");
+ }
+
+ if (!nativeSetTimeout(mHandle, timeout)) {
+ throw new IllegalStateException("Failed to set the timeout");
+ }
+ }
+
+ /**
+ * Sets the information (tensor dimension, type, name and so on) of input data for the given model.
+ * Updates the output information for the model internally.
+ *
+ * Note that a model/framework may not support changing the information.
+ *
+ * @param in The input tensors information
+ *
+ * @throws IllegalStateException if this failed to set the input information
+ * @throws IllegalArgumentException if given param is null
+ */
+ public void setInputInfo(@NonNull TensorsInfo in) {
+ checkPipelineHandle();
+
+ if (in == null) {
+ throw new IllegalArgumentException("Given input info is null");
+ }
+
+ if (!nativeSetInputInfo(mHandle, in)) {
+ throw new IllegalStateException("Failed to set input tensor info");
+ }
+ }
+
+ /**
+ * Internal method to check native handle.
+ *
+ * @throws IllegalStateException if the pipeline is not constructed
+ */
+ private void checkPipelineHandle() {
+ if (mHandle == 0) {
+ throw new IllegalStateException("The pipeline is not constructed");
+ }
+ }
+
+ @Override
+ protected void finalize() throws Throwable {
+ try {
+ close();
+ } finally {
+ super.finalize();
+ }
+ }
+
+ @Override
+ public void close() {
+ if (mHandle != 0) {
+ nativeClose(mHandle);
+ mHandle = 0;
+ }
+ }
+
+ /**
+ * Provides interfaces to configure SingleShot instance.
+ */
+ public static class Options {
+ private NNStreamer.NNFWType fw = NNStreamer.NNFWType.UNKNOWN;
+ private File[] models;
+ private TensorsInfo inputInfo;
+ private TensorsInfo outputInfo;
+ private String custom;
+
+ /**
+ * Creates a new {@link Options} instance with the given framework and file.
+ *
+ * @param type The type of {@link NNStreamer.NNFWType}
+ * @param model The {@link File} object to the neural network model file
+ *
+ * @throws IllegalArgumentException if given model is invalid
+ * @throws IllegalStateException if given framework is not available
+ */
+ public Options(NNStreamer.NNFWType type, File model) {
+ setNNFWType(type);
+ setModels(new File[]{model});
+ }
+
+ /**
+ * Creates a new {@link Options} instance with the given framework and file.
+ *
+ * @param type The type of {@link NNStreamer.NNFWType}
+ * @param models The array of {@link File} objects to the neural network model files
+ *
+ * @throws IllegalArgumentException if given models is invalid
+ * @throws IllegalStateException if given framework is not available
+ */
+ public Options(NNStreamer.NNFWType type, File[] models) {
+ setNNFWType(type);
+ setModels(models);
+ }
+
+ /**
+ * Creates a new {@link Options} instance with the given parameters.
+ *
+ * @param type The type of {@link NNStreamer.NNFWType}
+ * @param models The array of {@link File} objects to the neural network model files
+ * @param inputInfo The input tensors information
+ * @param outputInfo The output tensors information
+ * @param custom The custom option string to open the neural network instance
+ *
+ * @throws IllegalArgumentException if given models is invalid
+ * @throws IllegalStateException if given framework is not available
+ */
+ public Options(NNStreamer.NNFWType type, File[] models, TensorsInfo inputInfo, TensorsInfo outputInfo, String custom) {
+ setNNFWType(type);
+ setModels(models);
+ setInputInfo(inputInfo);
+ setOutputInfo(outputInfo);
+ setCustom(custom);
+ }
+
+ public NNStreamer.NNFWType getNNFWType() {
+ return fw;
+ }
+
+ public void setNNFWType(NNStreamer.NNFWType fw) {
+ if (!NNStreamer.isAvailable(fw)) {
+ throw new IllegalStateException("Given framework " + fw.name() + " is not available");
+ }
+ this.fw = fw;
+ }
+
+ public File[] getModels() {
+ return models;
+ }
+
+ public void setModels(File[] models) {
+ if (models == null) {
+ throw new IllegalArgumentException("Given model is invalid");
+ }
+
+ for (File model : models) {
+ if (model == null || !model.exists()) {
+ throw new IllegalArgumentException("Given model is invalid");
+ }
+ }
+
+ this.models = models;
+ }
+
+ public TensorsInfo getInputInfo() {
+ return inputInfo;
+ }
+
+ public void setInputInfo(TensorsInfo inputInfo) {
+ this.inputInfo = inputInfo;
+ }
+
+ public TensorsInfo getOutputInfo() {
+ return outputInfo;
+ }
+
+ public void setOutputInfo(TensorsInfo outputInfo) {
+ this.outputInfo = outputInfo;
+ }
+
+ public String getCustom() {
+ return custom;
+ }
+
+ public void setCustom(String custom) {
+ this.custom = custom;
+ }
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+package org.nnsuite.nnstreamer;
+
+import android.support.annotation.NonNull;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.ArrayList;
+
+/**
+ * Provides interfaces to handle tensor data frame.
+ */
+public final class TensorsData implements AutoCloseable {
+ private TensorsInfo mInfo = null;
+ private ArrayList<ByteBuffer> mDataList = new ArrayList<>();
+
+ /**
+ * Allocates a new direct byte buffer with the native byte order.
+ *
+ * @param size The byte size of the buffer
+ *
+ * @return The new byte buffer
+ *
+ * @throws IllegalArgumentException if given size is invalid
+ */
+ public static ByteBuffer allocateByteBuffer(int size) {
+ if (size <= 0) {
+ throw new IllegalArgumentException("Given size is invalid");
+ }
+
+ return ByteBuffer.allocateDirect(size).order(ByteOrder.nativeOrder());
+ }
+
+ /**
+ * Allocates a new {@link TensorsData} instance with the given tensors information.
+ *
+ * @param info The tensors information
+ *
+ * @return {@link TensorsData} instance
+ *
+ * @throws IllegalArgumentException if given info is invalid
+ */
+ public static TensorsData allocate(@NonNull TensorsInfo info) {
+ if (info == null || info.getTensorsCount() == 0) {
+ throw new IllegalArgumentException("Given info is invalid");
+ }
+
+ TensorsData data = new TensorsData(info);
+ int count = info.getTensorsCount();
+
+ for (int i = 0; i < count; i++) {
+ data.addTensorData(allocateByteBuffer(info.getTensorSize(i)));
+ }
+
+ return data;
+ }
+
+ /**
+ * Gets the tensors information.
+ *
+ * @return {@link TensorsInfo} instance cloned from current tensors information.
+ */
+ public TensorsInfo getTensorsInfo() {
+ return mInfo.clone();
+ }
+
+ /**
+ * Sets the tensors information.
+ *
+ * @param info The tensors information
+ *
+ * @throws IllegalArgumentException if given info is null
+ */
+ private void setTensorsInfo(@NonNull TensorsInfo info) {
+ if (info == null || info.getTensorsCount() == 0) {
+ throw new IllegalArgumentException("Given info is invalid");
+ }
+
+ mInfo = info.clone();
+ }
+
+ /**
+ * Gets the number of tensors in tensors data.
+ *
+ * @return The number of tensors
+ */
+ public int getTensorsCount() {
+ return mDataList.size();
+ }
+
+ /**
+ * Adds a new tensor data.
+ *
+ * @param data The tensor data to be added
+ *
+ * @throws IllegalArgumentException if given data is invalid
+ * @throws IndexOutOfBoundsException when the maximum number of tensors in the list
+ */
+ private void addTensorData(@NonNull ByteBuffer data) {
+ int index = getTensorsCount();
+
+ checkByteBuffer(index, data);
+ mDataList.add(data);
+ }
+
+ /**
+ * Gets a tensor data of given index.
+ *
+ * @param index The index of the tensor in the list
+ *
+ * @return The tensor data
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ public ByteBuffer getTensorData(int index) {
+ checkIndexBounds(index);
+ return mDataList.get(index);
+ }
+
+ /**
+ * Sets a tensor data.
+ *
+ * @param index The index of the tensor in the list
+ * @param data The tensor data
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ * @throws IllegalArgumentException if given data is invalid
+ */
+ public void setTensorData(int index, @NonNull ByteBuffer data) {
+ checkIndexBounds(index);
+ checkByteBuffer(index, data);
+
+ mDataList.set(index, data);
+ }
+
+ /**
+ * Internal method called from native to get the array of tensor data.
+ */
+ private Object[] getDataArray() {
+ return mDataList.toArray();
+ }
+
+ /**
+ * Internal method to check the index.
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ private void checkIndexBounds(int index) {
+ if (index < 0 || index >= getTensorsCount()) {
+ throw new IndexOutOfBoundsException("Invalid index [" + index + "] of the tensors");
+ }
+ }
+
+ /**
+ * Internal method to check byte buffer.
+ *
+ * @throws IllegalArgumentException if given data is invalid
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ private void checkByteBuffer(int index, ByteBuffer data) {
+ if (data == null) {
+ throw new IllegalArgumentException("Given data is null");
+ }
+
+ if (!data.isDirect()) {
+ throw new IllegalArgumentException("Given data is not a direct buffer");
+ }
+
+ if (data.order() != ByteOrder.nativeOrder()) {
+ /* Default byte order of ByteBuffer in java is big-endian, it should be a little-endian. */
+ throw new IllegalArgumentException("Given data has invalid byte order");
+ }
+
+ if (index >= NNStreamer.TENSOR_SIZE_LIMIT) {
+ throw new IndexOutOfBoundsException("Max size of the tensors is " + NNStreamer.TENSOR_SIZE_LIMIT);
+ }
+
+ /* compare to tensors info */
+ if (mInfo != null) {
+ int count = mInfo.getTensorsCount();
+
+ if (index >= count) {
+ throw new IndexOutOfBoundsException("Current information has " + count + " tensors");
+ }
+
+ int size = mInfo.getTensorSize(index);
+
+ if (data.capacity() != size) {
+ throw new IllegalArgumentException("Invalid buffer size, required size is " + size);
+ }
+ }
+ }
+
+ @Override
+ public void close() {
+ mDataList.clear();
+ mInfo = null;
+ }
+
+ /**
+ * Private constructor to prevent the instantiation.
+ */
+ private TensorsData(TensorsInfo info) {
+ setTensorsInfo(info);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/*
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ */
+
+package org.nnsuite.nnstreamer;
+
+import android.support.annotation.NonNull;
+import android.support.annotation.Nullable;
+
+import java.util.ArrayList;
+
+/**
+ * Provides interfaces to handle tensors information.
+ *
+ * @see NNStreamer#TENSOR_RANK_LIMIT
+ * @see NNStreamer#TENSOR_SIZE_LIMIT
+ * @see NNStreamer.TensorType
+ */
+public final class TensorsInfo implements AutoCloseable, Cloneable {
+ private ArrayList<TensorInfo> mInfoList = new ArrayList<>();
+
+ /**
+ * Allocates a new {@link TensorsData} instance with the tensors information.
+ *
+ * @return {@link TensorsData} instance
+ *
+ * @throws IllegalStateException if tensors info is empty
+ */
+ public TensorsData allocate() {
+ if (getTensorsCount() == 0) {
+ throw new IllegalStateException("Empty tensor info");
+ }
+
+ return TensorsData.allocate(this);
+ }
+
+ /**
+ * Creates a new {@link TensorsInfo} instance cloned from the current tensors information.
+ *
+ * @return {@link TensorsInfo} instance
+ */
+ @Override
+ public TensorsInfo clone() {
+ TensorsInfo cloned = new TensorsInfo();
+
+ for (TensorInfo info : mInfoList) {
+ cloned.addTensorInfo(info.getName(), info.getType(), info.getDimension());
+ }
+
+ return cloned;
+ }
+
    /**
     * Gets the number of tensors.
     * The maximum number of tensors is {@link NNStreamer#TENSOR_SIZE_LIMIT}.
     *
     * @return The number of tensors
     */
    public int getTensorsCount() {
        return mInfoList.size();
    }
+
+ /**
+ * Adds a new tensor information.
+ *
+ * @param type The tensor data type
+ * @param dimension The tensor dimension
+ *
+ * @throws IndexOutOfBoundsException when the maximum number of tensors in the list
+ * @throws IllegalArgumentException if given param is null or invalid
+ */
+ public void addTensorInfo(NNStreamer.TensorType type, @NonNull int[] dimension) {
+ addTensorInfo(null, type, dimension);
+ }
+
+ /**
+ * Adds a new tensor information.
+ *
+ * @param name The tensor name
+ * @param type The tensor data type
+ * @param dimension The tensor dimension
+ *
+ * @throws IndexOutOfBoundsException when the maximum number of tensors in the list
+ * @throws IllegalArgumentException if given param is null or invalid
+ */
+ public void addTensorInfo(@Nullable String name, NNStreamer.TensorType type, @NonNull int[] dimension) {
+ int index = getTensorsCount();
+
+ if (index >= NNStreamer.TENSOR_SIZE_LIMIT) {
+ throw new IndexOutOfBoundsException("Max number of the tensors is " + NNStreamer.TENSOR_SIZE_LIMIT);
+ }
+
+ mInfoList.add(new TensorInfo(name, type, dimension));
+ }
+
+ /**
+ * Sets the tensor name.
+ *
+ * @param index The index of the tensor information in the list
+ * @param name The tensor name
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ public void setTensorName(int index, String name) {
+ checkIndexBounds(index);
+ mInfoList.get(index).setName(name);
+ }
+
+ /**
+ * Gets the tensor name of given index.
+ *
+ * @param index The index of the tensor information in the list
+ *
+ * @return The tensor name
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ public String getTensorName(int index) {
+ checkIndexBounds(index);
+ return mInfoList.get(index).getName();
+ }
+
+ /**
+ * Sets the tensor data type.
+ *
+ * @param index The index of the tensor information in the list
+ * @param type The tensor type
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ * @throws IllegalArgumentException if the given type is unknown or unsupported type
+ */
+ public void setTensorType(int index, NNStreamer.TensorType type) {
+ checkIndexBounds(index);
+ mInfoList.get(index).setType(type);
+ }
+
+ /**
+ * Gets the tensor data type of given index.
+ *
+ * @param index The index of the tensor information in the list
+ *
+ * @return The tensor data type
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ public NNStreamer.TensorType getTensorType(int index) {
+ checkIndexBounds(index);
+ return mInfoList.get(index).getType();
+ }
+
+ /**
+ * Sets the tensor dimension.
+ *
+ * Unspecified ranks (up to NNStreamer.TENSOR_RANK_LIMIT) are padded with 1.
+ *
+ * @param index The index of the tensor information in the list
+ * @param dimension The tensor dimension
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ * @throws IllegalArgumentException if the given dimension is null or invalid
+ */
+ public void setTensorDimension(int index, @NonNull int[] dimension) {
+ checkIndexBounds(index);
+ mInfoList.get(index).setDimension(dimension);
+ }
+
+ /**
+ * Gets the tensor dimension of given index.
+ *
+ * NOTE(review): this returns the internal dimension array, not a copy;
+ * mutating the returned array changes this object's state -- consider
+ * returning a clone if callers must not share state.
+ *
+ * @param index The index of the tensor information in the list
+ *
+ * @return The tensor dimension
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ public int[] getTensorDimension(int index) {
+ checkIndexBounds(index);
+ return mInfoList.get(index).getDimension();
+ }
+
+ /**
+ * Calculates the byte size of tensor data.
+ *
+ * The size is the element size of the data type multiplied by all dimension values.
+ *
+ * @param index The index of the tensor information in the list
+ *
+ * @return The byte size of tensor
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ * @throws IllegalStateException if data type or dimension is invalid
+ */
+ public int getTensorSize(int index) {
+ checkIndexBounds(index);
+
+ /* TensorInfo.getSize() returns 0 when the stored type is unknown. */
+ int size = mInfoList.get(index).getSize();
+ if (size <= 0) {
+ throw new IllegalStateException("Unknown data type or invalid dimension");
+ }
+
+ return size;
+ }
+
+ /**
+ * Internal method called from native to add new info.
+ *
+ * @param name The tensor name
+ * @param type The tensor type as an int value, mapped via TensorInfo.convertType()
+ * @param dimension The tensor dimension
+ */
+ private void appendInfo(String name, int type, int[] dimension) {
+ addTensorInfo(name, TensorInfo.convertType(type), dimension);
+ }
+
+ /**
+ * Internal method called from native to get the array of tensor info.
+ *
+ * @return The array of TensorInfo objects held in the list
+ */
+ private Object[] getInfoArray() {
+ return mInfoList.toArray();
+ }
+
+ /**
+ * Internal method to check the index.
+ *
+ * @param index The index of the tensor information in the list
+ *
+ * @throws IndexOutOfBoundsException if the given index is invalid
+ */
+ private void checkIndexBounds(int index) {
+ if (index < 0 || index >= getTensorsCount()) {
+ throw new IndexOutOfBoundsException("Invalid index [" + index + "] of the tensors");
+ }
+ }
+
+ /* Releases the resources held by this object: drops all tensor information. */
+ @Override
+ public void close() {
+ mInfoList.clear();
+ }
+
+ /**
+ * Internal class for tensor information.
+ *
+ * The tensor type is stored as the enum ordinal (a plain int) so the native
+ * code can read and write it directly.
+ */
+ private static class TensorInfo {
+ private String name = null;
+ /* Stored as an int; getType() maps it back through convertType(). */
+ private int type = NNStreamer.TensorType.UNKNOWN.ordinal();
+ /* Always TENSOR_RANK_LIMIT entries; unspecified ranks are filled with 1. */
+ private int[] dimension = new int[NNStreamer.TENSOR_RANK_LIMIT];
+
+ public TensorInfo(@Nullable String name, NNStreamer.TensorType type, @NonNull int[] dimension) {
+ setName(name);
+ setType(type);
+ setDimension(dimension);
+ }
+
+ public void setName(@Nullable String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return this.name;
+ }
+
+ public void setType(NNStreamer.TensorType type) {
+ if (type == NNStreamer.TensorType.UNKNOWN) {
+ throw new IllegalArgumentException("Given tensor type is unknown or unsupported type");
+ }
+
+ /* NOTE: the round-trip through getType() requires the TensorType enum
+ * declaration order to match the int values handled in convertType(). */
+ this.type = type.ordinal();
+ }
+
+ public NNStreamer.TensorType getType() {
+ return convertType(this.type);
+ }
+
+ public void setDimension(@NonNull int[] dimension) {
+ if (dimension == null) {
+ throw new IllegalArgumentException("Given tensor dimension is null");
+ }
+
+ int rank = dimension.length;
+
+ if (rank > NNStreamer.TENSOR_RANK_LIMIT) {
+ throw new IllegalArgumentException("Max size of the tensor rank is " + NNStreamer.TENSOR_RANK_LIMIT);
+ }
+
+ for (int dim : dimension) {
+ if (dim <= 0) {
+ throw new IllegalArgumentException("The dimension should be a positive value");
+ }
+ }
+
+ System.arraycopy(dimension, 0, this.dimension, 0, rank);
+
+ /* fill default value */
+ for (int i = rank; i < NNStreamer.TENSOR_RANK_LIMIT; i++) {
+ this.dimension[i] = 1;
+ }
+ }
+
+ public int[] getDimension() {
+ /* NOTE(review): exposes the internal array; callers share state with this object. */
+ return this.dimension;
+ }
+
+ public int getSize() {
+ int size;
+
+ /* element size in bytes for the stored data type */
+ switch (convertType(this.type)) {
+ case INT32:
+ case UINT32:
+ case FLOAT32:
+ size = 4;
+ break;
+ case INT16:
+ case UINT16:
+ size = 2;
+ break;
+ case INT8:
+ case UINT8:
+ size = 1;
+ break;
+ case INT64:
+ case UINT64:
+ case FLOAT64:
+ size = 8;
+ break;
+ default:
+ /* unknown type */
+ return 0;
+ }
+
+ /* total size = element size * all dimension values (unused ranks are 1) */
+ for (int i = 0; i < NNStreamer.TENSOR_RANK_LIMIT; i++) {
+ size *= this.dimension[i];
+ }
+
+ return size;
+ }
+
+ /**
+ * Gets the tensor type from int value.
+ *
+ * NOTE(review): the int values presumably mirror the tensor-type
+ * enumeration used by the native code -- confirm when updating either side.
+ */
+ public static NNStreamer.TensorType convertType(int value) {
+ NNStreamer.TensorType type = NNStreamer.TensorType.UNKNOWN;
+
+ switch (value) {
+ case 0:
+ type = NNStreamer.TensorType.INT32;
+ break;
+ case 1:
+ type = NNStreamer.TensorType.UINT32;
+ break;
+ case 2:
+ type = NNStreamer.TensorType.INT16;
+ break;
+ case 3:
+ type = NNStreamer.TensorType.UINT16;
+ break;
+ case 4:
+ type = NNStreamer.TensorType.INT8;
+ break;
+ case 5:
+ type = NNStreamer.TensorType.UINT8;
+ break;
+ case 6:
+ type = NNStreamer.TensorType.FLOAT64;
+ break;
+ case 7:
+ type = NNStreamer.TensorType.FLOAT32;
+ break;
+ case 8:
+ type = NNStreamer.TensorType.INT64;
+ break;
+ case 9:
+ type = NNStreamer.TensorType.UINT64;
+ break;
+ default:
+ /* unknown type */
+ break;
+ }
+
+ return type;
+ }
+ }
+}
--- /dev/null
+#------------------------------------------------------
+# flatbuffers
+#
+# This mk file defines the flatbuffers-module with the prebuilt static library.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+# FLATBUF_VER is substituted at configure time; fail fast if it was not replaced.
+FLATBUF_VER := @FLATBUF_VER@
+ifeq ($(FLATBUF_VER),@FLATBUF_VER@)
+$(error 'FLATBUF_VER' is not properly set)
+endif
+
+ifeq ($(shell which flatc),)
+$(error No 'flatc' in your PATH, install flatbuffers-compiler from ppa:nnstreamer/ppa)
+else
+SYS_FLATC_VER := $(word 3, $(shell flatc --version))
+endif
+
+# The system flatc must exactly match the version of the prebuilt static library.
+ifneq ($(SYS_FLATC_VER), $(FLATBUF_VER))
+$(error Found 'flatc' v$(SYS_FLATC_VER), but required v$(FLATBUF_VER))
+endif
+
+FLATBUF_DIR := $(LOCAL_PATH)/flatbuffers
+FLATBUF_INCLUDES := $(FLATBUF_DIR)/include
+GEN_FLATBUF_HEADER := $(shell flatc --cpp -o $(LOCAL_PATH) $(NNSTREAMER_ROOT)/ext/nnstreamer/include/nnstreamer.fbs )
+FLATBUF_HEADER_GEN := $(wildcard $(LOCAL_PATH)/nnstreamer_generated.h)
+# $(wildcard) expands to an empty string (not the literal '') when the file is
+# missing, so compare against empty to detect a failed header generation.
+ifeq ($(FLATBUF_HEADER_GEN),)
+$(error Failed to generate the header file, '$(LOCAL_PATH)/nnstreamer_generated.h')
+endif
+
+FLATBUF_LIB_PATH := $(FLATBUF_DIR)/lib/$(TARGET_ARCH_ABI)
+ifeq ($(wildcard $(FLATBUF_LIB_PATH)), )
+$(error The given ABI is not supported by the flatbuffers-module: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# libflatbuffers.a (prebuilt static library)
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := flatbuffers-lib
+LOCAL_SRC_FILES := $(FLATBUF_LIB_PATH)/libflatbuffers.a
+
+include $(PREBUILT_STATIC_LIBRARY)
+
+#------------------------------------------------------
+# tensor-decoder sub-plugin for flatbuffers
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := flatbuffers-subplugin
+LOCAL_SRC_FILES := $(NNSTREAMER_DECODER_FLATBUF_SRCS)
+LOCAL_C_INCLUDES := $(LOCAL_PATH) $(FLATBUF_INCLUDES) $(NNSTREAMER_INCLUDES) $(GST_HEADERS_COMMON)
+LOCAL_STATIC_LIBRARIES := flatbuffers-lib
+
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+#------------------------------------------------------
+# Define GStreamer plugins and extra dependencies
+#------------------------------------------------------
+
+ifndef NNSTREAMER_API_OPTION
+$(error NNSTREAMER_API_OPTION is not defined!)
+endif
+
+# Default to the ndk-build scripts shipped with the GStreamer Android binaries.
+ifndef GSTREAMER_NDK_BUILD_PATH
+GSTREAMER_NDK_BUILD_PATH := $(GSTREAMER_ROOT)/share/gst-android/ndk-build
+endif
+
+include $(GSTREAMER_NDK_BUILD_PATH)/plugins.mk
+
+# 'all' pulls in every GStreamer plugin category; 'lite' keeps only the core
+# plugins; 'single' (single-shot API only) needs no GStreamer plugins at all.
+ifeq ($(NNSTREAMER_API_OPTION),all)
+GST_REQUIRED_PLUGINS := $(GSTREAMER_PLUGINS_CORE) \
+ $(GSTREAMER_PLUGINS_CODECS) \
+ $(GSTREAMER_PLUGINS_ENCODING) \
+ $(GSTREAMER_PLUGINS_NET) \
+ $(GSTREAMER_PLUGINS_PLAYBACK) \
+ $(GSTREAMER_PLUGINS_VIS) \
+ $(GSTREAMER_PLUGINS_SYS) \
+ $(GSTREAMER_PLUGINS_EFFECTS) \
+ $(GSTREAMER_PLUGINS_CAPTURE) \
+ $(GSTREAMER_PLUGINS_CODECS_GPL) \
+ $(GSTREAMER_PLUGINS_CODECS_RESTRICTED) \
+ $(GSTREAMER_PLUGINS_NET_RESTRICTED) \
+ $(GSTREAMER_PLUGINS_GES)
+GST_REQUIRED_DEPS := gstreamer-video-1.0 gstreamer-audio-1.0 gstreamer-app-1.0
+GST_REQUIRED_LIBS :=
+else ifeq ($(NNSTREAMER_API_OPTION),lite)
+# Build with core plugins
+GST_REQUIRED_PLUGINS := $(GSTREAMER_PLUGINS_CORE)
+GST_REQUIRED_DEPS := gstreamer-video-1.0 gstreamer-audio-1.0 gstreamer-app-1.0
+GST_REQUIRED_LIBS :=
+else ifeq ($(NNSTREAMER_API_OPTION),single)
+GST_REQUIRED_PLUGINS :=
+GST_REQUIRED_DEPS :=
+GST_REQUIRED_LIBS :=
+else
+$(error Unknown build option: $(NNSTREAMER_API_OPTION))
+endif
+
--- /dev/null
+#------------------------------------------------------
+# NNFW (On-device neural network inference framework, which is developed by Samsung Research.)
+# https://github.com/Samsung/ONE
+#
+# This mk file defines prebuilt libraries for nnfw module.
+# (nnfw core libraries, arm64-v8a only)
+# You can download specific version of nnfw libraries from https://github.com/Samsung/ONE/releases.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNFW_LIB_PATH
+$(error NNFW_LIB_PATH is not defined!)
+endif
+
+# Accumulates the prebuilt module names for the including makefile.
+NNFW_PREBUILT_LIBS :=
+
+#------------------------------------------------------
+# nnfw prebuilt shared libraries
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := nnfw-libnnfw-dev
+LOCAL_SRC_FILES := $(NNFW_LIB_PATH)/libnnfw-dev.so
+include $(PREBUILT_SHARED_LIBRARY)
+NNFW_PREBUILT_LIBS += nnfw-libnnfw-dev
--- /dev/null
+#------------------------------------------------------
+# NNFW (On-device neural network inference framework, which is developed by Samsung Research.)
+# https://github.com/Samsung/ONE
+#
+# This mk file defines nnfw module with prebuilt shared library.
+# (nnfw core libraries, arm64-v8a only)
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+NNFW_DIR := $(LOCAL_PATH)/nnfw
+NNFW_INCLUDES := $(NNFW_DIR)/include $(NNFW_DIR)/include/nnfw
+
+ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+NNFW_LIB_PATH := $(NNFW_DIR)/lib
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# nnfw prebuilt shared libraries
+#------------------------------------------------------
+include $(LOCAL_PATH)/Android-nnfw-prebuilt.mk
+
+#------------------------------------------------------
+# tensor-filter sub-plugin for nnfw
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := nnfw-subplugin
+LOCAL_SRC_FILES := $(NNSTREAMER_FILTER_NNFW_SRCS)
+LOCAL_CFLAGS := -O3 -fPIC $(NNS_API_FLAGS)
+LOCAL_C_INCLUDES := $(NNFW_INCLUDES) $(NNSTREAMER_INCLUDES) $(GST_HEADERS_COMMON)
+# NOTE(review): unlike the snap/snpe sub-plugins, 'nnstreamer' is not listed in
+# LOCAL_STATIC_LIBRARIES here -- confirm this is intentional.
+LOCAL_SHARED_LIBRARIES := $(NNFW_PREBUILT_LIBS)
+
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+#------------------------------------------------------
+# nnstreamer
+#
+# This mk file defines nnstreamer module with prebuilt shared libraries.
+# ABI: armeabi-v7a, arm64-v8a
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+NNSTREAMER_DIR := $(LOCAL_PATH)/nnstreamer
+
+NNSTREAMER_INCLUDES := $(NNSTREAMER_DIR)/include
+NNSTREAMER_LIB_PATH := $(NNSTREAMER_DIR)/lib/$(TARGET_ARCH_ABI)
+
+# Feature toggles for optional prebuilt runtimes bundled with the library.
+# NOTE(review): ENABLE_TF_LITE is declared but not referenced below, unlike the
+# SNAP/NNFW/SNPE flags -- TODO confirm whether a tf-lite section is missing.
+ENABLE_TF_LITE := false
+ENABLE_SNAP := false
+ENABLE_NNFW := false
+ENABLE_SNPE := false
+
+#------------------------------------------------------
+# define required libraries for nnstreamer
+#------------------------------------------------------
+NNSTREAMER_LIBS := nnstreamer-native gst-android cpp-shared
+
+#------------------------------------------------------
+# nnstreamer-native
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := nnstreamer-native
+LOCAL_SRC_FILES := $(NNSTREAMER_LIB_PATH)/libnnstreamer-native.so
+include $(PREBUILT_SHARED_LIBRARY)
+
+#------------------------------------------------------
+# gstreamer android
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := gst-android
+LOCAL_SRC_FILES := $(NNSTREAMER_LIB_PATH)/libgstreamer_android.so
+include $(PREBUILT_SHARED_LIBRARY)
+
+#------------------------------------------------------
+# c++ shared
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := cpp-shared
+LOCAL_SRC_FILES := $(NNSTREAMER_LIB_PATH)/libc++_shared.so
+include $(PREBUILT_SHARED_LIBRARY)
+
+#------------------------------------------------------
+# SNAP (arm64-v8a only)
+#------------------------------------------------------
+ifeq ($(ENABLE_SNAP),true)
+SNAP_LIB_PATH := $(NNSTREAMER_LIB_PATH)
+include $(LOCAL_PATH)/Android-snap-prebuilt.mk
+
+NNSTREAMER_LIBS += $(SNAP_PREBUILT_LIBS)
+endif
+
+#------------------------------------------------------
+# NNFW (arm64-v8a only)
+#------------------------------------------------------
+ifeq ($(ENABLE_NNFW),true)
+NNFW_LIB_PATH := $(NNSTREAMER_LIB_PATH)
+include $(LOCAL_PATH)/Android-nnfw-prebuilt.mk
+
+NNSTREAMER_LIBS += $(NNFW_PREBUILT_LIBS)
+endif
+
+#------------------------------------------------------
+# SNPE
+#------------------------------------------------------
+ifeq ($(ENABLE_SNPE),true)
+SNPE_LIB_PATH := $(NNSTREAMER_LIB_PATH)
+include $(LOCAL_PATH)/Android-snpe-prebuilt.mk
+
+NNSTREAMER_LIBS += $(SNPE_PREBUILT_LIBS)
+endif
+
+# Remove any duplicates.
+NNSTREAMER_LIBS := $(sort $(NNSTREAMER_LIBS))
--- /dev/null
+#------------------------------------------------------
+# nnstreamer
+#
+# Builds the nnstreamer core as a static library from sources listed in
+# $(NNSTREAMER_ROOT)/jni/nnstreamer.mk.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+NNSTREAMER_SRC_FILES := \
+    $(NNSTREAMER_COMMON_SRCS)
+
+ifeq ($(NNSTREAMER_API_OPTION),single)
+# single-shot only
+NNSTREAMER_SRC_FILES += \
+    $(NNSTREAMER_SINGLE_SRCS)
+else
+# capi and nnstreamer plugins
+NNSTREAMER_SRC_FILES += \
+    $(NNSTREAMER_CAPI_SRCS) \
+    $(NNSTREAMER_PLUGINS_SRCS) \
+    $(NNSTREAMER_SOURCE_AMC_SRCS) \
+    $(NNSTREAMER_DECODER_BB_SRCS) \
+    $(NNSTREAMER_DECODER_DV_SRCS) \
+    $(NNSTREAMER_DECODER_IL_SRCS) \
+    $(NNSTREAMER_DECODER_PE_SRCS) \
+    $(NNSTREAMER_DECODER_IS_SRCS) \
+    $(NNSTREAMER_JOIN_SRCS)
+endif
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := nnstreamer
+# $(sort) also removes duplicate source entries.
+LOCAL_SRC_FILES := $(sort $(NNSTREAMER_SRC_FILES))
+LOCAL_C_INCLUDES := $(NNS_API_INCLUDES)
+LOCAL_CFLAGS := -O3 -fPIC $(NNS_API_FLAGS)
+LOCAL_CXXFLAGS := -std=c++11 -O3 -fPIC -frtti -fexceptions $(NNS_API_FLAGS)
+
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+#------------------------------------------------------
+# PyTorch
+#
+# This mk file defines PyTorch module with prebuilt static library.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+# To support NNAPI, which is not available in the latest stable release (1.7.1) of PyTorch,
+# this module uses commit ID 5c3788d5d76f64f6708e0b79f40b1cf45276625a for PyTorch
+# (https://github.com/pytorch/pytorch @ 5c3788d5d76f64f6708e0b79f40b1cf45276625a)
+# After a release of PyTorch which includes NNAPI support, this will be updated.
+PYTORCH_VERSION := 1.8.0
+
+PYTORCH_FLAGS := \
+    -DPYTORCH_VERSION=$(PYTORCH_VERSION) \
+    -DPYTORCH_VER_ATLEAST_1_2_0=1
+
+PYTORCH_DIR := $(LOCAL_PATH)/pytorch
+PYTORCH_INCLUDES := $(PYTORCH_DIR)/include
+
+ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+PYTORCH_LIB_PATH := $(PYTORCH_DIR)/lib/arm64
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# pytorch (prebuilt static library)
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libc10
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libc10.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libclog
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libclog.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libcpuinfo
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libcpuinfo.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libeigen_blas
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libeigen_blas.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libnnpack
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libnnpack.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libpthreadpool
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libpthreadpool.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libpytorch_qnnpack
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libpytorch_qnnpack.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libXNNPACK
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libXNNPACK.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libtorch_cpu
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libtorch_cpu.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := pytorch-libtorch
+LOCAL_SRC_FILES := $(PYTORCH_LIB_PATH)/libtorch.a
+include $(PREBUILT_STATIC_LIBRARY)
+
+#------------------------------------------------------
+# tensor-filter sub-plugin for pytorch
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := pytorch-subplugin
+LOCAL_SRC_FILES := $(NNSTREAMER_FILTER_PYTORCH_SRCS)
+LOCAL_CXXFLAGS := -std=c++14 -O3 -fPIC -frtti -fexceptions $(NNS_API_FLAGS) $(PYTORCH_FLAGS)
+LOCAL_C_INCLUDES := $(PYTORCH_INCLUDES) $(NNSTREAMER_INCLUDES) $(GST_HEADERS_COMMON)
+# WHOLE_STATIC is required so no PyTorch object (e.g. op registrations) is dropped by the linker.
+LOCAL_WHOLE_STATIC_LIBRARIES := pytorch-libeigen_blas pytorch-libnnpack pytorch-libpytorch_qnnpack pytorch-libXNNPACK pytorch-libtorch_cpu pytorch-libtorch pytorch-libc10 pytorch-libcpuinfo pytorch-libpthreadpool pytorch-libclog
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+#------------------------------------------------------
+# SNAP (Samsung Neural Acceleration Platform)
+#
+# This mk file defines prebuilt libraries for snap module.
+# (snap-sdk, arm64-v8a only)
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef SNAP_LIB_PATH
+$(error SNAP_LIB_PATH is not defined!)
+endif
+
+# Accumulates the prebuilt module names for the including makefile.
+SNAP_PREBUILT_LIBS :=
+
+#------------------------------------------------------
+# snap-sdk (prebuilt shared library)
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := snap-sdk
+LOCAL_SRC_FILES := $(SNAP_LIB_PATH)/libsnap_vndk.so
+include $(PREBUILT_SHARED_LIBRARY)
+SNAP_PREBUILT_LIBS += snap-sdk
--- /dev/null
+#------------------------------------------------------
+# SNAP (Samsung Neural Acceleration Platform)
+#
+# This mk file defines snap module with prebuilt shared library.
+# (snap-sdk, arm64-v8a only)
+# See Samsung Neural SDK (https://developer.samsung.com/neural) for the details.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+SNAP_DIR := $(LOCAL_PATH)/snap
+SNAP_INCLUDES := $(SNAP_DIR)/include
+
+# The SNAP SDK ships 64-bit ARM binaries only.
+ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+SNAP_LIB_PATH := $(SNAP_DIR)/lib
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# snap-sdk (prebuilt shared library)
+#------------------------------------------------------
+include $(LOCAL_PATH)/Android-snap-prebuilt.mk
+
+#------------------------------------------------------
+# tensor-filter sub-plugin for snap
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := snap-subplugin
+LOCAL_SRC_FILES := $(NNSTREAMER_FILTER_SNAP_SRCS)
+LOCAL_CXXFLAGS := -std=c++11 -O3 -fPIC -frtti -fexceptions $(NNS_API_FLAGS)
+LOCAL_C_INCLUDES := $(SNAP_INCLUDES) $(NNSTREAMER_INCLUDES) $(GST_HEADERS_COMMON)
+LOCAL_STATIC_LIBRARIES := nnstreamer
+LOCAL_SHARED_LIBRARIES := $(SNAP_PREBUILT_LIBS)
+
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+#------------------------------------------------------
+# SNPE (The Snapdragon Neural Processing Engine)
+#
+# This mk file defines prebuilt libraries for snpe module.
+# (snpe-sdk, arm64-v8a only)
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef SNPE_LIB_PATH
+$(error SNPE_LIB_PATH is not defined!)
+endif
+
+# Accumulates the prebuilt module names for the including makefile.
+SNPE_PREBUILT_LIBS :=
+
+#------------------------------------------------------
+# snpe-sdk (prebuilt shared library)
+#------------------------------------------------------
+include $(CLEAR_VARS)
+LOCAL_MODULE := libSNPE
+LOCAL_SRC_FILES := $(SNPE_LIB_PATH)/libSNPE.so
+include $(PREBUILT_SHARED_LIBRARY)
+SNPE_PREBUILT_LIBS += libSNPE
--- /dev/null
+#------------------------------------------------------
+# SNPE (The Snapdragon Neural Processing Engine)
+#
+# This mk file defines snpe module with prebuilt shared library.
+# (snpe-sdk, arm64-v8a only)
+# See Qualcomm Neural Processing SDK for AI (https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk) for the details.
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+SNPE_DIR := $(LOCAL_PATH)/snpe
+SNPE_INCLUDES := $(SNPE_DIR)/include/zdl/
+
+# Only the 64-bit ARM binaries of the SNPE SDK are bundled.
+ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+SNPE_LIB_PATH := $(SNPE_DIR)/lib
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# snpe-sdk (prebuilt shared library)
+#------------------------------------------------------
+include $(LOCAL_PATH)/Android-snpe-prebuilt.mk
+
+#------------------------------------------------------
+# tensor-filter sub-plugin for snpe
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := snpe-subplugin
+LOCAL_SRC_FILES := $(NNSTREAMER_FILTER_SNPE_SRCS)
+LOCAL_CXXFLAGS := -std=c++11 -O3 -fPIC -frtti -fexceptions $(NNS_API_FLAGS)
+LOCAL_C_INCLUDES := $(SNPE_INCLUDES) $(NNSTREAMER_INCLUDES) $(GST_HEADERS_COMMON)
+LOCAL_STATIC_LIBRARIES := nnstreamer
+LOCAL_SHARED_LIBRARIES := $(SNPE_PREBUILT_LIBS)
+
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+#------------------------------------------------------
+# tensorflow-lite
+#
+# This mk file defines tensorflow-lite module with prebuilt static library.
+# To build and run the example with gstreamer binaries, we built a static library (e.g., libtensorflow-lite.a)
+# for Android/Tensorflow-lite from the Tensorflow repository of the Tizen software platform.
+# - [Tizen] Tensorflow git repository:
+#   * Repository: https://review.tizen.org/gerrit/p/platform/upstream/tensorflow
+#------------------------------------------------------
+LOCAL_PATH := $(call my-dir)
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+TFLITE_VERSION := 1.13.1
+
+# Split "major.minor.micro" into its three components.
+_TFLITE_VERSIONS = $(subst ., , $(TFLITE_VERSION))
+TFLITE_VERSION_MAJOR := $(word 1, $(_TFLITE_VERSIONS))
+TFLITE_VERSION_MINOR := $(word 2, $(_TFLITE_VERSIONS))
+TFLITE_VERSION_MICRO := $(word 3, $(_TFLITE_VERSIONS))
+
+TFLITE_FLAGS := \
+    -DTFLITE_SUBPLUGIN_NAME=\"tensorflow-lite\" \
+    -DTFLITE_VERSION=$(TFLITE_VERSION) \
+    -DTFLITE_VERSION_MAJOR=$(TFLITE_VERSION_MAJOR) \
+    -DTFLITE_VERSION_MINOR=$(TFLITE_VERSION_MINOR) \
+    -DTFLITE_VERSION_MICRO=$(TFLITE_VERSION_MICRO)
+
+# Define types and features in tensorflow-lite sub-plugin.
+# FLOAT16/COMPLEX64 for tensorflow-lite >= 2, and INT8/INT16 for tensorflow-lite >=1.13
+# GPU-delegate supported on tensorflow-lite >= 2
+# NNAPI-delegate supported on tensorflow-lite >= 1.14
+TFLITE_ENABLE_GPU_DELEGATE := false
+TFLITE_ENABLE_NNAPI_DELEGATE := false
+TFLITE_EXPORT_LDLIBS :=
+
+ifeq ($(shell test $(TFLITE_VERSION_MAJOR) -ge 2; echo $$?),0)
+TFLITE_ENABLE_GPU_DELEGATE := true
+TFLITE_ENABLE_NNAPI_DELEGATE := true
+
+TFLITE_FLAGS += -DTFLITE_INT8=1 -DTFLITE_INT16=1 -DTFLITE_FLOAT16=1 -DTFLITE_COMPLEX64=1
+else
+# 1.x series: check the minor version for feature availability.
+ifeq ($(shell test $(TFLITE_VERSION_MINOR) -ge 14; echo $$?),0)
+TFLITE_ENABLE_NNAPI_DELEGATE := true
+endif
+
+ifeq ($(shell test $(TFLITE_VERSION_MINOR) -ge 13; echo $$?),0)
+TFLITE_FLAGS += -DTFLITE_INT8=1 -DTFLITE_INT16=1
+endif
+endif
+
+ifeq ($(TFLITE_ENABLE_NNAPI_DELEGATE),true)
+TFLITE_FLAGS += -DTFLITE_NNAPI_DELEGATE_SUPPORTED=1
+endif
+
+ifeq ($(TFLITE_ENABLE_GPU_DELEGATE),true)
+TFLITE_FLAGS += -DTFLITE_GPU_DELEGATE_SUPPORTED=1
+TFLITE_EXPORT_LDLIBS += -lEGL -lGLESv2
+endif
+
+TF_LITE_DIR := $(LOCAL_PATH)/tensorflow-lite
+TF_LITE_INCLUDES := $(TF_LITE_DIR)/include
+
+ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
+TF_LITE_LIB_PATH := $(TF_LITE_DIR)/lib/armv7
+else ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+TF_LITE_LIB_PATH := $(TF_LITE_DIR)/lib/arm64
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# tensorflow-lite (prebuilt static library)
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := tensorflow-lite-lib
+LOCAL_SRC_FILES := $(TF_LITE_LIB_PATH)/libtensorflow-lite.a
+# Keep the tf-lite symbols out of the final shared object's export table.
+LOCAL_EXPORT_LDFLAGS := -Wl,--exclude-libs,libtensorflow-lite.a
+
+include $(PREBUILT_STATIC_LIBRARY)
+
+#------------------------------------------------------
+# tensor-filter sub-plugin for tensorflow-lite
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := tensorflow-lite-subplugin
+LOCAL_SRC_FILES := $(NNSTREAMER_FILTER_TFLITE_SRCS)
+LOCAL_CXXFLAGS := -std=c++11 -O3 -fPIC -frtti -fexceptions $(NNS_API_FLAGS) $(TFLITE_FLAGS)
+LOCAL_C_INCLUDES := $(TF_LITE_INCLUDES) $(NNSTREAMER_INCLUDES) $(GST_HEADERS_COMMON)
+LOCAL_EXPORT_LDLIBS := $(TFLITE_EXPORT_LDLIBS)
+LOCAL_STATIC_LIBRARIES := tensorflow-lite-lib cpufeatures
+
+include $(BUILD_STATIC_LIBRARY)
--- /dev/null
+LOCAL_PATH := $(call my-dir)
+
+ifndef GSTREAMER_ROOT_ANDROID
+$(error GSTREAMER_ROOT_ANDROID is not defined!)
+endif
+
+ifndef NNSTREAMER_ROOT
+$(error NNSTREAMER_ROOT is not defined!)
+endif
+
+# Map the NDK ABI name to the matching directory of the universal GStreamer binaries.
+ifeq ($(TARGET_ARCH_ABI),armeabi-v7a)
+GSTREAMER_ROOT        := $(GSTREAMER_ROOT_ANDROID)/armv7
+else ifeq ($(TARGET_ARCH_ABI),arm64-v8a)
+GSTREAMER_ROOT        := $(GSTREAMER_ROOT_ANDROID)/arm64
+else ifeq ($(TARGET_ARCH_ABI),x86)
+GSTREAMER_ROOT        := $(GSTREAMER_ROOT_ANDROID)/x86
+else ifeq ($(TARGET_ARCH_ABI),x86_64)
+GSTREAMER_ROOT        := $(GSTREAMER_ROOT_ANDROID)/x86_64
+else
+$(error Target arch ABI not supported: $(TARGET_ARCH_ABI))
+endif
+
+#------------------------------------------------------
+# API build option
+#------------------------------------------------------
+include $(NNSTREAMER_ROOT)/jni/nnstreamer.mk
+
+# all | lite | single (see Android-gst-plugins.mk for what each pulls in)
+NNSTREAMER_API_OPTION := all
+
+# tensorflow-lite (nnstreamer tf-lite subplugin)
+ENABLE_TF_LITE := false
+
+# SNAP (Samsung Neural Acceleration Platform)
+ENABLE_SNAP := false
+
+# NNFW (On-device neural network inference framework, Samsung Research)
+ENABLE_NNFW := false
+
+# SNPE (Snapdragon Neural Processing Engine)
+ENABLE_SNPE := false
+
+# PyTorch
+ENABLE_PYTORCH := false
+
+# Decoder sub-plugin for flatbuffers support
+ENABLE_DECODER_FLATBUF := false
+
+ifeq ($(ENABLE_SNAP),true)
+ifeq ($(ENABLE_SNPE),true)
+$(error DO NOT enable SNAP and SNPE both. The app would fail to use DSP or NPU runtime.)
+endif
+endif
+
+# Common options
+NNS_API_INCLUDES := \
+    $(NNSTREAMER_INCLUDES) \
+    $(NNSTREAMER_CAPI_INCLUDES) \
+    $(GST_HEADERS_COMMON)
+
+NNS_API_FLAGS := -DVERSION=\"$(NNSTREAMER_VERSION)\"
+NNS_SUBPLUGINS :=
+
+ifeq ($(NNSTREAMER_API_OPTION),single)
+NNS_API_FLAGS += -DNNS_SINGLE_ONLY=1
+endif
+
+#------------------------------------------------------
+# external libs and sub-plugins
+#------------------------------------------------------
+# Each enabled framework adds its compile flag, registers its sub-plugin module
+# and includes the mk file that defines that module.
+ifeq ($(ENABLE_TF_LITE),true)
+NNS_API_FLAGS += -DENABLE_TENSORFLOW_LITE=1
+NNS_SUBPLUGINS += tensorflow-lite-subplugin
+
+include $(LOCAL_PATH)/Android-tensorflow-lite.mk
+endif
+
+ifeq ($(ENABLE_SNAP),true)
+NNS_API_FLAGS += -DENABLE_SNAP=1
+NNS_SUBPLUGINS += snap-subplugin
+
+include $(LOCAL_PATH)/Android-snap.mk
+endif
+
+ifeq ($(ENABLE_NNFW),true)
+NNS_API_FLAGS += -DENABLE_NNFW=1
+NNS_SUBPLUGINS += nnfw-subplugin
+
+include $(LOCAL_PATH)/Android-nnfw.mk
+endif
+
+ifeq ($(ENABLE_SNPE),true)
+NNS_API_FLAGS += -DENABLE_SNPE=1
+NNS_SUBPLUGINS += snpe-subplugin
+
+include $(LOCAL_PATH)/Android-snpe.mk
+endif
+
+ifeq ($(ENABLE_PYTORCH),true)
+NNS_API_FLAGS += -DENABLE_PYTORCH=1
+NNS_SUBPLUGINS += pytorch-subplugin
+
+include $(LOCAL_PATH)/Android-pytorch.mk
+endif
+
+ifeq ($(ENABLE_DECODER_FLATBUF),true)
+include $(LOCAL_PATH)/Android-dec-flatbuf.mk
+NNS_API_FLAGS += -DENABLE_DEC_FLATBUF=1
+NNS_SUBPLUGINS += flatbuffers-subplugin
+endif
+
+#------------------------------------------------------
+# nnstreamer
+#------------------------------------------------------
+include $(LOCAL_PATH)/Android-nnstreamer.mk
+
+# Remove any duplicates.
+NNS_SUBPLUGINS := $(sort $(NNS_SUBPLUGINS))
+
+#------------------------------------------------------
+# native code for api
+#------------------------------------------------------
+include $(CLEAR_VARS)
+
+# The module name must match the ndkBuild 'targets' entry in build.gradle.
+LOCAL_MODULE := nnstreamer-native
+
+LOCAL_SRC_FILES := \
+    nnstreamer-native-api.c \
+    nnstreamer-native-singleshot.c
+
+ifneq ($(NNSTREAMER_API_OPTION),single)
+LOCAL_SRC_FILES += \
+    nnstreamer-native-customfilter.c \
+    nnstreamer-native-pipeline.c
+endif
+
+LOCAL_C_INCLUDES := $(NNS_API_INCLUDES)
+LOCAL_CFLAGS := -O3 -fPIC $(NNS_API_FLAGS)
+LOCAL_STATIC_LIBRARIES := nnstreamer $(NNS_SUBPLUGINS)
+LOCAL_SHARED_LIBRARIES := gstreamer_android
+LOCAL_LDLIBS := -llog -landroid
+
+ifneq ($(NNSTREAMER_API_OPTION),single)
+# For amcsrc element
+LOCAL_LDLIBS += -lmediandk
+endif
+
+include $(BUILD_SHARED_LIBRARY)
+
+#------------------------------------------------------
+# gstreamer for android
+#------------------------------------------------------
+# Override the ndk-build path before including gstreamer-1.0.mk below.
+GSTREAMER_NDK_BUILD_PATH := $(GSTREAMER_ROOT)/share/gst-android/ndk-build/
+include $(LOCAL_PATH)/Android-gst-plugins.mk
+
+GSTREAMER_PLUGINS         := $(GST_REQUIRED_PLUGINS)
+GSTREAMER_EXTRA_DEPS      := $(GST_REQUIRED_DEPS) gio-2.0 gmodule-2.0
+GSTREAMER_EXTRA_LIBS      := $(GST_REQUIRED_LIBS) -liconv
+
+ifeq ($(NNSTREAMER_API_OPTION),all)
+GSTREAMER_EXTRA_LIBS += -lcairo
+endif
+
+GSTREAMER_INCLUDE_FONTS := no
+GSTREAMER_INCLUDE_CA_CERTIFICATES := no
+
+include $(GSTREAMER_NDK_BUILD_PATH)/gstreamer-1.0.mk
+
+#------------------------------------------------------
+# NDK cpu-features
+#------------------------------------------------------
+$(call import-module, android/cpufeatures)
--- /dev/null
+# Set target ABI in build.gradle (externalNativeBuild - abiFilters)
+APP_ABI := armeabi-v7a arm64-v8a x86 x86_64
+# Use the shared C++ runtime (libc++_shared.so) across all native modules.
+APP_STL := c++_shared
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ *
+ * @file nnstreamer-native-api.c
+ * @date 10 July 2019
+ * @brief Native code for NNStreamer API
+ * @author Jaeyun Jung <jy1210.jung@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include "nnstreamer-native.h"
+
/* nnstreamer plugins and sub-plugins declaration */
#if !defined (NNS_SINGLE_ONLY)
GST_PLUGIN_STATIC_DECLARE (nnstreamer);
GST_PLUGIN_STATIC_DECLARE (amcsrc);
GST_PLUGIN_STATIC_DECLARE (join);
/* tensor-decoder sub-plugin initializers, registered in
 * nnstreamer_native_initialize(). Names are abbreviated; presumably
 * dv=direct-video, bb=bounding-box, il=image-labeling, is=image-segment
 * -- TODO confirm against the decoder sub-plugin sources. */
extern void init_dv (void);
extern void init_bb (void);
extern void init_il (void);
extern void init_pose (void);
extern void init_is (void);
#if defined (ENABLE_DEC_FLATBUF)
extern void init_fb (void);
#endif /* ENABLE_DEC_FLATBUF */
#endif

/* tensor-filter sub-plugin initializers (always available) */
extern void init_filter_cpp (void);
extern void init_filter_custom (void);
extern void init_filter_custom_easy (void);

/* optional tensor-filter sub-plugins, enabled per build flag */
#if defined (ENABLE_TENSORFLOW_LITE)
extern void init_filter_tflite (void);
#endif
#if defined (ENABLE_SNAP)
extern void init_filter_snap (void);
#endif
#if defined (ENABLE_NNFW)
extern void init_filter_nnfw (void);
#endif
#if defined (ENABLE_SNPE)
/* SNPE needs the JNI env and app context at init time */
extern void init_filter_snpe (JNIEnv * env, jobject context);
#endif
#if defined (ENABLE_PYTORCH)
extern void init_filter_torch (void);
#endif

/**
 * @brief External function from GStreamer Android.
 */
extern void gst_android_init (JNIEnv * env, jobject context);

/**
 * @brief Global lock for native functions (guards one-time initialization).
 */
G_LOCK_DEFINE_STATIC (nns_native_lock);
+
+/**
+ * @brief Attach thread with Java VM.
+ */
+static JNIEnv *
+nns_attach_current_thread (pipeline_info_s * pipe_info)
+{
+ JNIEnv *env;
+ JavaVM *jvm;
+ JavaVMAttachArgs args;
+
+ g_assert (pipe_info);
+ jvm = pipe_info->jvm;
+
+ args.version = pipe_info->version;
+ args.name = NULL;
+ args.group = NULL;
+
+ if ((*jvm)->AttachCurrentThread (jvm, &env, &args) < 0) {
+ nns_loge ("Failed to attach current thread.");
+ return NULL;
+ }
+
+ return env;
+}
+
+/**
+ * @brief Get JNI environment.
+ */
+JNIEnv *
+nns_get_jni_env (pipeline_info_s * pipe_info)
+{
+ JNIEnv *env;
+
+ g_assert (pipe_info);
+
+ if ((env = pthread_getspecific (pipe_info->jni_env)) == NULL) {
+ env = nns_attach_current_thread (pipe_info);
+ pthread_setspecific (pipe_info->jni_env, env);
+ }
+
+ return env;
+}
+
+/**
+ * @brief Free element handle pointer.
+ */
+void
+nns_free_element_data (gpointer data)
+{
+ element_data_s *item = (element_data_s *) data;
+
+ if (item) {
+ /* release private data */
+ if (item->priv_data) {
+ JNIEnv *env = nns_get_jni_env (item->pipe_info);
+ item->priv_destroy_func (item->priv_data, env);
+ }
+
+ switch (item->type) {
+#if !defined (NNS_SINGLE_ONLY)
+ case NNS_ELEMENT_TYPE_SRC:
+ ml_pipeline_src_release_handle ((ml_pipeline_src_h) item->handle);
+ break;
+ case NNS_ELEMENT_TYPE_SINK:
+ ml_pipeline_sink_unregister ((ml_pipeline_sink_h) item->handle);
+ break;
+ case NNS_ELEMENT_TYPE_VALVE:
+ ml_pipeline_valve_release_handle ((ml_pipeline_valve_h) item->handle);
+ break;
+ case NNS_ELEMENT_TYPE_SWITCH:
+ ml_pipeline_switch_release_handle ((ml_pipeline_switch_h) item->handle);
+ break;
+ case NNS_ELEMENT_TYPE_VIDEO_SINK:
+ ml_pipeline_element_release_handle ((ml_pipeline_element_h) item->handle);
+ break;
+#endif
+ default:
+ nns_logw ("Given element type %d is unknown.", item->type);
+ if (item->handle)
+ g_free (item->handle);
+ break;
+ }
+
+ g_free (item->name);
+ g_free (item);
+ }
+}
+
+/**
+ * @brief Construct TensorsData class info.
+ */
+static void
+nns_construct_tdata_class_info (JNIEnv * env, data_class_info_s * info)
+{
+ jclass cls;
+
+ cls = (*env)->FindClass (env, NNS_CLS_TDATA);
+ info->cls = (*env)->NewGlobalRef (env, cls);
+ (*env)->DeleteLocalRef (env, cls);
+
+ info->mid_init = (*env)->GetMethodID (env, info->cls, "<init>",
+ "(L" NNS_CLS_TINFO ";)V");
+ info->mid_alloc = (*env)->GetStaticMethodID (env, info->cls, "allocate",
+ "(L" NNS_CLS_TINFO ";)L" NNS_CLS_TDATA ";");
+ info->mid_get_array = (*env)->GetMethodID (env, info->cls, "getDataArray",
+ "()[Ljava/lang/Object;");
+ info->mid_get_info = (*env)->GetMethodID (env, info->cls, "getTensorsInfo",
+ "()L" NNS_CLS_TINFO ";");
+}
+
+/**
+ * @brief Destroy TensorsData class info.
+ */
+static void
+nns_destroy_tdata_class_info (JNIEnv * env, data_class_info_s * info)
+{
+ if (info->cls)
+ (*env)->DeleteGlobalRef (env, info->cls);
+}
+
+/**
+ * @brief Construct TensorsInfo class info.
+ */
+static void
+nns_construct_tinfo_class_info (JNIEnv * env, info_class_info_s * info)
+{
+ jclass cls;
+
+ cls = (*env)->FindClass (env, NNS_CLS_TINFO);
+ info->cls = (*env)->NewGlobalRef (env, cls);
+ (*env)->DeleteLocalRef (env, cls);
+
+ cls = (*env)->FindClass (env, NNS_CLS_TINFO "$TensorInfo");
+ info->cls_info = (*env)->NewGlobalRef (env, cls);
+ (*env)->DeleteLocalRef (env, cls);
+
+ info->mid_init = (*env)->GetMethodID (env, info->cls, "<init>", "()V");
+ info->mid_add_info = (*env)->GetMethodID (env, info->cls, "appendInfo",
+ "(Ljava/lang/String;I[I)V");
+ info->mid_get_array = (*env)->GetMethodID (env, info->cls, "getInfoArray",
+ "()[Ljava/lang/Object;");
+
+ info->fid_info_name = (*env)->GetFieldID (env, info->cls_info, "name",
+ "Ljava/lang/String;");
+ info->fid_info_type = (*env)->GetFieldID (env, info->cls_info, "type", "I");
+ info->fid_info_dim = (*env)->GetFieldID (env, info->cls_info, "dimension",
+ "[I");
+}
+
+/**
+ * @brief Destroy TensorsInfo class info.
+ */
+static void
+nns_destroy_tinfo_class_info (JNIEnv * env, info_class_info_s * info)
+{
+ if (info->cls_info)
+ (*env)->DeleteGlobalRef (env, info->cls_info);
+ if (info->cls)
+ (*env)->DeleteGlobalRef (env, info->cls);
+}
+
+/**
+ * @brief Construct pipeline info.
+ */
+gpointer
+nns_construct_pipe_info (JNIEnv * env, jobject thiz, gpointer handle,
+ nns_pipe_type_e type)
+{
+ pipeline_info_s *pipe_info;
+ jclass cls;
+
+ pipe_info = g_new0 (pipeline_info_s, 1);
+ g_return_val_if_fail (pipe_info != NULL, NULL);
+
+ pipe_info->pipeline_type = type;
+ pipe_info->pipeline_handle = handle;
+ pipe_info->element_handles =
+ g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
+ nns_free_element_data);
+ g_mutex_init (&pipe_info->lock);
+
+ (*env)->GetJavaVM (env, &pipe_info->jvm);
+ g_assert (pipe_info->jvm);
+ pthread_key_create (&pipe_info->jni_env, NULL);
+
+ pipe_info->version = (*env)->GetVersion (env);
+ pipe_info->instance = (*env)->NewGlobalRef (env, thiz);
+
+ cls = (*env)->GetObjectClass (env, pipe_info->instance);
+ pipe_info->cls = (*env)->NewGlobalRef (env, cls);
+ (*env)->DeleteLocalRef (env, cls);
+
+ nns_construct_tdata_class_info (env, &pipe_info->data_cls_info);
+ nns_construct_tinfo_class_info (env, &pipe_info->info_cls_info);
+
+ return pipe_info;
+}
+
+/**
+ * @brief Destroy pipeline info.
+ */
+void
+nns_destroy_pipe_info (pipeline_info_s * pipe_info, JNIEnv * env)
+{
+ g_return_if_fail (pipe_info != NULL);
+
+ g_mutex_lock (&pipe_info->lock);
+ if (pipe_info->priv_data) {
+ if (pipe_info->priv_destroy_func)
+ pipe_info->priv_destroy_func (pipe_info->priv_data, env);
+ else
+ g_free (pipe_info->priv_data);
+
+ pipe_info->priv_data = NULL;
+ }
+
+ g_hash_table_destroy (pipe_info->element_handles);
+ pipe_info->element_handles = NULL;
+ g_mutex_unlock (&pipe_info->lock);
+
+ switch (pipe_info->pipeline_type) {
+#if !defined (NNS_SINGLE_ONLY)
+ case NNS_PIPE_TYPE_PIPELINE:
+ ml_pipeline_destroy (pipe_info->pipeline_handle);
+ break;
+ case NNS_PIPE_TYPE_CUSTOM:
+ ml_pipeline_custom_easy_filter_unregister (pipe_info->pipeline_handle);
+ break;
+#endif
+ case NNS_PIPE_TYPE_SINGLE:
+ ml_single_close (pipe_info->pipeline_handle);
+ break;
+ default:
+ nns_logw ("Given pipe type %d is unknown.", pipe_info->pipeline_type);
+ if (pipe_info->pipeline_handle)
+ g_free (pipe_info->pipeline_handle);
+ break;
+ }
+
+ g_mutex_clear (&pipe_info->lock);
+
+ nns_destroy_tdata_class_info (env, &pipe_info->data_cls_info);
+ nns_destroy_tinfo_class_info (env, &pipe_info->info_cls_info);
+ (*env)->DeleteGlobalRef (env, pipe_info->cls);
+ (*env)->DeleteGlobalRef (env, pipe_info->instance);
+
+ pthread_key_delete (pipe_info->jni_env);
+ g_free (pipe_info);
+}
+
+/**
+ * @brief Set private data in pipeline info. If destroy_func is NULL, priv_data will be released using g_free().
+ */
+void
+nns_set_priv_data (pipeline_info_s * pipe_info, gpointer data,
+ nns_priv_destroy destroy_func)
+{
+ g_return_if_fail (pipe_info != NULL);
+
+ g_mutex_lock (&pipe_info->lock);
+ pipe_info->priv_data = data;
+ pipe_info->priv_destroy_func = destroy_func;
+ g_mutex_unlock (&pipe_info->lock);
+}
+
+/**
+ * @brief Get element data of given name.
+ */
+element_data_s *
+nns_get_element_data (pipeline_info_s * pipe_info, const gchar * name)
+{
+ element_data_s *item;
+
+ g_return_val_if_fail (pipe_info, NULL);
+ g_return_val_if_fail (name, NULL);
+
+ g_mutex_lock (&pipe_info->lock);
+ item = g_hash_table_lookup (pipe_info->element_handles, name);
+ g_mutex_unlock (&pipe_info->lock);
+
+ return item;
+}
+
+/**
+ * @brief Get element handle of given name and type.
+ */
+gpointer
+nns_get_element_handle (pipeline_info_s * pipe_info, const gchar * name,
+ const nns_element_type_e type)
+{
+ element_data_s *item = nns_get_element_data (pipe_info, name);
+
+ /* check element type */
+ return (item && item->type == type) ? item->handle : NULL;
+}
+
+/**
+ * @brief Remove element data of given name.
+ */
+gboolean
+nns_remove_element_data (pipeline_info_s * pipe_info, const gchar * name)
+{
+ gboolean ret;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (name, FALSE);
+
+ g_mutex_lock (&pipe_info->lock);
+ ret = g_hash_table_remove (pipe_info->element_handles, name);
+ g_mutex_unlock (&pipe_info->lock);
+
+ return ret;
+}
+
+/**
+ * @brief Add new element data of given name.
+ */
+gboolean
+nns_add_element_data (pipeline_info_s * pipe_info, const gchar * name,
+ element_data_s * item)
+{
+ gboolean ret;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (name && item, FALSE);
+
+ g_mutex_lock (&pipe_info->lock);
+ ret = g_hash_table_insert (pipe_info->element_handles, g_strdup (name), item);
+ g_mutex_unlock (&pipe_info->lock);
+
+ return ret;
+}
+
+/**
+ * @brief Create new data object with given tensors info. Caller should unref the result object.
+ */
+gboolean
+nns_create_tensors_data_object (pipeline_info_s * pipe_info, JNIEnv * env,
+ jobject obj_info, jobject * result)
+{
+ data_class_info_s *dcls_info;
+ jobject obj_data;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (env, FALSE);
+ g_return_val_if_fail (result, FALSE);
+ g_return_val_if_fail (obj_info, FALSE);
+
+ dcls_info = &pipe_info->data_cls_info;
+ *result = NULL;
+
+ obj_data = (*env)->CallStaticObjectMethod (env, dcls_info->cls,
+ dcls_info->mid_alloc, obj_info);
+ if ((*env)->ExceptionCheck (env) || !obj_data) {
+ nns_loge ("Failed to allocate object for tensors data.");
+ (*env)->ExceptionClear (env);
+
+ if (obj_data)
+ (*env)->DeleteLocalRef (env, obj_data);
+
+ return FALSE;
+ }
+
+ *result = obj_data;
+ return TRUE;
+}
+
+/**
+ * @brief Convert tensors data to TensorsData object.
+ */
+gboolean
+nns_convert_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env,
+ ml_tensors_data_h data_h, jobject obj_info, jobject * result)
+{
+ guint i;
+ data_class_info_s *dcls_info;
+ jobject obj_data = NULL;
+ jobjectArray data_arr;
+ ml_tensors_data_s *data;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (env, FALSE);
+ g_return_val_if_fail (data_h, FALSE);
+ g_return_val_if_fail (result, FALSE);
+ g_return_val_if_fail (obj_info, FALSE);
+
+ dcls_info = &pipe_info->data_cls_info;
+ data = (ml_tensors_data_s *) data_h;
+ *result = NULL;
+
+ if (!nns_create_tensors_data_object (pipe_info, env, obj_info, &obj_data))
+ return FALSE;
+
+ data_arr = (*env)->CallObjectMethod (env, obj_data, dcls_info->mid_get_array);
+
+ for (i = 0; i < data->num_tensors; i++) {
+ jobject tensor = (*env)->GetObjectArrayElement (env, data_arr, i);
+ gpointer data_ptr = (*env)->GetDirectBufferAddress (env, tensor);
+
+ memcpy (data_ptr, data->tensors[i].tensor, data->tensors[i].size);
+ (*env)->DeleteLocalRef (env, tensor);
+ }
+
+ (*env)->DeleteLocalRef (env, data_arr);
+
+ *result = obj_data;
+ return TRUE;
+}
+
+/**
+ * @brief Parse tensors data from TensorsData object.
+ */
+gboolean
+nns_parse_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env,
+ jobject obj_data, gboolean clone, ml_tensors_data_h * data_h,
+ ml_tensors_info_h * info_h)
+{
+ guint i;
+ data_class_info_s *dcls_info;
+ ml_tensors_data_s *data;
+ jobjectArray data_arr;
+ gboolean failed = FALSE;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (env, FALSE);
+ g_return_val_if_fail (obj_data, FALSE);
+ g_return_val_if_fail (data_h, FALSE);
+
+ if (*data_h == NULL &&
+ ml_tensors_data_create_no_alloc (NULL, data_h) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create handle for tensors data.");
+ return FALSE;
+ }
+
+ dcls_info = &pipe_info->data_cls_info;
+ data = (ml_tensors_data_s *) (*data_h);
+
+ data_arr = (*env)->CallObjectMethod (env, obj_data, dcls_info->mid_get_array);
+
+ /* number of tensors data */
+ data->num_tensors = (unsigned int) (*env)->GetArrayLength (env, data_arr);
+
+ /* set tensor data */
+ for (i = 0; i < data->num_tensors; i++) {
+ jobject tensor = (*env)->GetObjectArrayElement (env, data_arr, i);
+
+ if (tensor) {
+ gsize data_size = (gsize) (*env)->GetDirectBufferCapacity (env, tensor);
+ gpointer data_ptr = (*env)->GetDirectBufferAddress (env, tensor);
+
+ if (clone) {
+ if (data->tensors[i].tensor == NULL)
+ data->tensors[i].tensor = g_malloc (data_size);
+
+ memcpy (data->tensors[i].tensor, data_ptr, data_size);
+ } else {
+ data->tensors[i].tensor = data_ptr;
+ }
+
+ data->tensors[i].size = data_size;
+
+ (*env)->DeleteLocalRef (env, tensor);
+ } else {
+ nns_loge ("Failed to get array element in tensors data object.");
+ failed = TRUE;
+ goto done;
+ }
+ }
+
+ /* parse tensors info in data class */
+ if (info_h) {
+ jobject obj_info =
+ (*env)->CallObjectMethod (env, obj_data, dcls_info->mid_get_info);
+
+ if (obj_info) {
+ nns_parse_tensors_info (pipe_info, env, obj_info, info_h);
+ (*env)->DeleteLocalRef (env, obj_info);
+ }
+ }
+
+done:
+ (*env)->DeleteLocalRef (env, data_arr);
+
+ if (failed) {
+ ml_tensors_data_destroy (*data_h);
+ *data_h = NULL;
+ }
+
+ return !failed;
+}
+
+/**
+ * @brief Convert tensors info to TensorsInfo object.
+ */
+gboolean
+nns_convert_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env,
+ ml_tensors_info_h info_h, jobject * result)
+{
+ guint i;
+ info_class_info_s *icls_info;
+ ml_tensors_info_s *info;
+ jobject obj_info = NULL;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (env, FALSE);
+ g_return_val_if_fail (info_h, FALSE);
+ g_return_val_if_fail (result, FALSE);
+
+ icls_info = &pipe_info->info_cls_info;
+ info = (ml_tensors_info_s *) info_h;
+
+ obj_info = (*env)->NewObject (env, icls_info->cls, icls_info->mid_init);
+ if (!obj_info) {
+ nns_loge ("Failed to allocate object for tensors info.");
+ goto done;
+ }
+
+ for (i = 0; i < info->num_tensors; i++) {
+ jstring name = NULL;
+ jint type;
+ jintArray dimension;
+
+ if (info->info[i].name)
+ name = (*env)->NewStringUTF (env, info->info[i].name);
+
+ type = (jint) info->info[i].type;
+
+ dimension = (*env)->NewIntArray (env, ML_TENSOR_RANK_LIMIT);
+ (*env)->SetIntArrayRegion (env, dimension, 0, ML_TENSOR_RANK_LIMIT,
+ (jint *) info->info[i].dimension);
+
+ (*env)->CallVoidMethod (env, obj_info, icls_info->mid_add_info,
+ name, type, dimension);
+
+ if (name)
+ (*env)->DeleteLocalRef (env, name);
+ (*env)->DeleteLocalRef (env, dimension);
+ }
+
+done:
+ *result = obj_info;
+ return (obj_info != NULL);
+}
+
+/**
+ * @brief Parse tensors info from TensorsInfo object.
+ */
+gboolean
+nns_parse_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env,
+ jobject obj_info, ml_tensors_info_h * info_h)
+{
+ guint i;
+ info_class_info_s *icls_info;
+ ml_tensors_info_s *info;
+ jobjectArray info_arr;
+
+ g_return_val_if_fail (pipe_info, FALSE);
+ g_return_val_if_fail (env, FALSE);
+ g_return_val_if_fail (obj_info, FALSE);
+ g_return_val_if_fail (info_h, FALSE);
+
+ if (ml_tensors_info_create (info_h) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create handle for tensors info.");
+ return FALSE;
+ }
+
+ icls_info = &pipe_info->info_cls_info;
+ info = (ml_tensors_info_s *) (*info_h);
+
+ info_arr = (*env)->CallObjectMethod (env, obj_info, icls_info->mid_get_array);
+
+ /* number of tensors info */
+ info->num_tensors = (unsigned int) (*env)->GetArrayLength (env, info_arr);
+
+ /* read tensor info */
+ for (i = 0; i < info->num_tensors; i++) {
+ jobject item = (*env)->GetObjectArrayElement (env, info_arr, i);
+
+ /* tensor name */
+ jstring name_str = (jstring) (*env)->GetObjectField (env, item,
+ icls_info->fid_info_name);
+ if (name_str) {
+ const gchar *name = (*env)->GetStringUTFChars (env, name_str, NULL);
+
+ info->info[i].name = g_strdup (name);
+ (*env)->ReleaseStringUTFChars (env, name_str, name);
+ (*env)->DeleteLocalRef (env, name_str);
+ }
+
+ /* tensor type */
+ info->info[i].type = (ml_tensor_type_e) (*env)->GetIntField (env, item,
+ icls_info->fid_info_type);
+
+ /* tensor dimension */
+ jintArray dimension = (jintArray) (*env)->GetObjectField (env, item,
+ icls_info->fid_info_dim);
+ (*env)->GetIntArrayRegion (env, dimension, 0, ML_TENSOR_RANK_LIMIT,
+ (jint *) info->info[i].dimension);
+ (*env)->DeleteLocalRef (env, dimension);
+
+ (*env)->DeleteLocalRef (env, item);
+ }
+
+ (*env)->DeleteLocalRef (env, info_arr);
+ return TRUE;
+}
+
+/**
+ * @brief Get NNFW from integer value.
+ */
+gboolean
+nns_get_nnfw_type (jint fw_type, ml_nnfw_type_e * nnfw)
+{
+ gboolean is_supported = TRUE;
+
+ if (!nnfw)
+ return FALSE;
+
+ *nnfw = ML_NNFW_TYPE_ANY;
+
+ /* enumeration defined in NNStreamer.java */
+ switch (fw_type) {
+ case 0: /* NNFWType.TENSORFLOW_LITE */
+ *nnfw = ML_NNFW_TYPE_TENSORFLOW_LITE;
+#if !defined (ENABLE_TENSORFLOW_LITE)
+ nns_logw ("tensorflow-lite is not supported.");
+ is_supported = FALSE;
+#endif
+ break;
+ case 1: /* NNFWType.SNAP */
+ *nnfw = ML_NNFW_TYPE_SNAP;
+#if !defined (ENABLE_SNAP)
+ nns_logw ("SNAP is not supported.");
+ is_supported = FALSE;
+#endif
+ break;
+ case 2: /* NNFWType.NNFW */
+ *nnfw = ML_NNFW_TYPE_NNFW;
+#if !defined (ENABLE_NNFW)
+ nns_logw ("NNFW is not supported.");
+ is_supported = FALSE;
+#endif
+ break;
+ case 3: /* NNFWType.SNPE */
+ *nnfw = ML_NNFW_TYPE_SNPE;
+#if !defined (ENABLE_SNPE)
+ nns_logw ("SNPE is not supported.");
+ is_supported = FALSE;
+#endif
+ break;
+ case 4: /* NNFWType.PYTORCH */
+ *nnfw = ML_NNFW_TYPE_PYTORCH;
+#if !defined (ENABLE_PYTORCH)
+ nns_logw ("PYTORCH is not supported.");
+ is_supported = FALSE;
+#endif
+ break;
+ default: /* Unknown */
+ nns_logw ("Unknown NNFW type (%d).", fw_type);
+ is_supported = FALSE;
+ break;
+ }
+
+ return is_supported && ml_nnfw_is_available (*nnfw, ML_NNFW_HW_ANY);
+}
+
+/**
+ * @brief Initialize NNStreamer, register required plugins.
+ */
+jboolean
+nnstreamer_native_initialize (JNIEnv * env, jobject context)
+{
+ jboolean result = JNI_FALSE;
+ static gboolean nns_is_initilaized = FALSE;
+
+ nns_logi ("Called native initialize.");
+
+ G_LOCK (nns_native_lock);
+
+#if !defined (NNS_SINGLE_ONLY)
+ /* single-shot does not require gstreamer */
+ if (!gst_is_initialized ()) {
+ if (env && context) {
+ gst_android_init (env, context);
+ } else {
+ nns_loge ("Invalid params, cannot initialize GStreamer.");
+ goto done;
+ }
+ }
+
+ if (!gst_is_initialized ()) {
+ nns_loge ("GStreamer is not initialized.");
+ goto done;
+ }
+#endif
+
+ if (nns_is_initilaized == FALSE) {
+ /* register nnstreamer plugins */
+#if !defined (NNS_SINGLE_ONLY)
+ GST_PLUGIN_STATIC_REGISTER (nnstreamer);
+
+ /* Android MediaCodec */
+ GST_PLUGIN_STATIC_REGISTER (amcsrc);
+
+ /* GStreamer join element */
+ GST_PLUGIN_STATIC_REGISTER (join);
+
+ /* tensor-decoder sub-plugins */
+ init_dv ();
+ init_bb ();
+ init_il ();
+ init_pose ();
+ init_is ();
+#if defined (ENABLE_DEC_FLATBUF)
+ init_fb ();
+#endif /* ENABLE_DEC_FLATBUF */
+#endif
+
+ /* tensor-filter sub-plugins */
+ init_filter_cpp ();
+ init_filter_custom ();
+ init_filter_custom_easy ();
+
+#if defined (ENABLE_TENSORFLOW_LITE)
+ init_filter_tflite ();
+#endif
+#if defined (ENABLE_SNAP)
+ init_filter_snap ();
+#endif
+#if defined (ENABLE_NNFW)
+ init_filter_nnfw ();
+#endif
+#if defined (ENABLE_SNPE)
+ init_filter_snpe (env, context);
+#endif
+#if defined (ENABLE_PYTORCH)
+ init_filter_torch ();
+#endif
+
+ nns_is_initilaized = TRUE;
+ }
+
+ result = JNI_TRUE;
+
+ /* print version info */
+ gchar *gst_ver = gst_version_string ();
+ gchar *nns_ver = nnstreamer_version_string ();
+
+ nns_logi ("%s %s GLib %d.%d.%d", nns_ver, gst_ver, GLIB_MAJOR_VERSION,
+ GLIB_MINOR_VERSION, GLIB_MICRO_VERSION);
+
+ g_free (gst_ver);
+ g_free (nns_ver);
+
+done:
+ G_UNLOCK (nns_native_lock);
+ return result;
+}
+
+/**
+ * @brief Native method to initialize NNStreamer.
+ */
+static jboolean
+nns_native_initialize (JNIEnv * env, jclass clazz, jobject context)
+{
+ return nnstreamer_native_initialize (env, context);
+}
+
+/**
+ * @brief Native method to check the availability of NNFW.
+ */
+static jboolean
+nns_native_check_nnfw_availability (JNIEnv * env, jclass clazz, jint fw_type)
+{
+ ml_nnfw_type_e nnfw;
+
+ if (!nns_get_nnfw_type (fw_type, &nnfw)) {
+ return JNI_FALSE;
+ }
+
+ return JNI_TRUE;
+}
+
+/**
+ * @brief Native method to get the version string of NNStreamer.
+ */
+static jstring
+nns_native_get_version (JNIEnv * env, jclass clazz)
+{
+ gchar *nns_ver = nnstreamer_version_string ();
+ jstring version = (*env)->NewStringUTF (env, nns_ver);
+
+ g_free (nns_ver);
+ return version;
+}
+
+/**
+ * @brief List of implemented native methods for NNStreamer class.
+ */
+static JNINativeMethod native_methods_nnstreamer[] = {
+ {"nativeInitialize", "(Landroid/content/Context;)Z",
+ (void *) nns_native_initialize},
+ {"nativeCheckNNFWAvailability", "(I)Z",
+ (void *) nns_native_check_nnfw_availability},
+ {"nativeGetVersion", "()Ljava/lang/String;", (void *) nns_native_get_version}
+};
+
+/**
+ * @brief Initialize native library.
+ */
+jint
+JNI_OnLoad (JavaVM * vm, void *reserved)
+{
+ JNIEnv *env = NULL;
+ jclass klass;
+
+ if ((*vm)->GetEnv (vm, (void **) &env, JNI_VERSION_1_4) != JNI_OK) {
+ nns_loge ("On initializing, failed to get JNIEnv.");
+ return 0;
+ }
+
+ klass = (*env)->FindClass (env, NNS_CLS_NNSTREAMER);
+ if (klass) {
+ if ((*env)->RegisterNatives (env, klass, native_methods_nnstreamer,
+ G_N_ELEMENTS (native_methods_nnstreamer))) {
+ nns_loge ("Failed to register native methods for NNStreamer class.");
+ return 0;
+ }
+ }
+
+ if (!nns_native_single_register_natives (env)) {
+ return 0;
+ }
+
+#if !defined (NNS_SINGLE_ONLY)
+ if (!nns_native_pipe_register_natives (env) ||
+ !nns_native_custom_register_natives (env)) {
+ return 0;
+ }
+#endif
+
+ return JNI_VERSION_1_4;
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ *
+ * @file nnstreamer-native-customfilter.c
+ * @date 10 July 2019
+ * @brief Native code for NNStreamer API
+ * @author Jaeyun Jung <jy1210.jung@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include "nnstreamer-native.h"
+
+/**
+ * @brief Private data for CustomFilter class.
+ */
+typedef struct
+{
+ jmethodID mid_invoke;
+ ml_tensors_info_h in_info;
+ jobject in_info_obj;
+} customfilter_priv_data_s;
+
+/**
+ * @brief Release private data in custom filter.
+ */
+static void
+nns_customfilter_priv_free (gpointer data, JNIEnv * env)
+{
+ customfilter_priv_data_s *priv = (customfilter_priv_data_s *) data;
+
+ ml_tensors_info_destroy (priv->in_info);
+ if (priv->in_info_obj)
+ (*env)->DeleteGlobalRef (env, priv->in_info_obj);
+
+ g_free (priv);
+}
+
+/**
+ * @brief Update input info in private data.
+ */
+static gboolean
+nns_customfilter_priv_set_in_info (pipeline_info_s * pipe_info, JNIEnv * env,
+ ml_tensors_info_h in_info)
+{
+ customfilter_priv_data_s *priv;
+ jobject obj_info = NULL;
+
+ priv = (customfilter_priv_data_s *) pipe_info->priv_data;
+
+ if (ml_tensors_info_is_equal (in_info, priv->in_info)) {
+ /* do nothing, tensors info is equal. */
+ return TRUE;
+ }
+
+ if (!nns_convert_tensors_info (pipe_info, env, in_info, &obj_info)) {
+ nns_loge ("Failed to convert tensors info.");
+ return FALSE;
+ }
+
+ ml_tensors_info_free (priv->in_info);
+ ml_tensors_info_clone (priv->in_info, in_info);
+
+ if (priv->in_info_obj)
+ (*env)->DeleteGlobalRef (env, priv->in_info_obj);
+ priv->in_info_obj = (*env)->NewGlobalRef (env, obj_info);
+ (*env)->DeleteLocalRef (env, obj_info);
+ return TRUE;
+}
+
+/**
+ * @brief The mandatory callback for custom-filter execution.
+ * @return 0 if OK. 1 to drop input buffer. Negative value if error.
+ */
+static int
+nns_customfilter_invoke (const ml_tensors_data_h in, ml_tensors_data_h out,
+ void *user_data)
+{
+ pipeline_info_s *pipe_info = NULL;
+ customfilter_priv_data_s *priv;
+ JNIEnv *env;
+ jobject obj_in_data, obj_out_data;
+ int ret = -1;
+
+ /* get pipe info and init */
+ pipe_info = (pipeline_info_s *) user_data;
+ g_return_val_if_fail (pipe_info, -1);
+
+ env = nns_get_jni_env (pipe_info);
+ g_return_val_if_fail (env, -1);
+
+ obj_in_data = obj_out_data = NULL;
+ priv = (customfilter_priv_data_s *) pipe_info->priv_data;
+
+ /* convert to data object */
+ if (!nns_convert_tensors_data (pipe_info, env, in, priv->in_info_obj,
+ &obj_in_data)) {
+ nns_loge ("Failed to convert input data to data-object.");
+ goto done;
+ }
+
+ /* call invoke callback */
+ obj_out_data = (*env)->CallObjectMethod (env, pipe_info->instance,
+ priv->mid_invoke, obj_in_data);
+
+ if ((*env)->ExceptionCheck (env)) {
+ nns_loge ("Failed to call the custom-invoke callback.");
+ (*env)->ExceptionClear (env);
+ goto done;
+ }
+
+ if (obj_out_data == NULL) {
+ /* drop current buffer */
+ ret = 1;
+ goto done;
+ }
+
+ if (!nns_parse_tensors_data (pipe_info, env, obj_out_data, TRUE, &out, NULL)) {
+ nns_loge ("Failed to parse output data.");
+ goto done;
+ }
+
+ /* callback finished */
+ ret = 0;
+
+done:
+ if (obj_in_data)
+ (*env)->DeleteLocalRef (env, obj_in_data);
+ if (obj_out_data)
+ (*env)->DeleteLocalRef (env, obj_out_data);
+
+ return ret;
+}
+
+/**
+ * @brief Native method for custom filter.
+ */
+static jlong
+nns_native_custom_initialize (JNIEnv * env, jobject thiz, jstring name,
+ jobject in, jobject out)
+{
+ pipeline_info_s *pipe_info = NULL;
+ customfilter_priv_data_s *priv;
+ ml_custom_easy_filter_h custom;
+ ml_tensors_info_h in_info, out_info;
+ gboolean is_done = FALSE;
+ int status;
+ const char *model_name = (*env)->GetStringUTFChars (env, name, NULL);
+
+ nns_logd ("Try to add custom-filter %s.", model_name);
+ in_info = out_info = NULL;
+
+ pipe_info = nns_construct_pipe_info (env, thiz, NULL, NNS_PIPE_TYPE_CUSTOM);
+ if (pipe_info == NULL) {
+ nns_loge ("Failed to create pipe info.");
+ goto done;
+ }
+
+ priv = g_new0 (customfilter_priv_data_s, 1);
+ priv->mid_invoke = (*env)->GetMethodID (env, pipe_info->cls, "invoke",
+ "(L" NNS_CLS_TDATA ";)L" NNS_CLS_TDATA ";");
+ ml_tensors_info_create (&priv->in_info);
+
+ nns_set_priv_data (pipe_info, priv, nns_customfilter_priv_free);
+
+ if (!nns_parse_tensors_info (pipe_info, env, in, &in_info)) {
+ nns_loge ("Failed to parse input info.");
+ goto done;
+ }
+
+ if (!nns_parse_tensors_info (pipe_info, env, out, &out_info)) {
+ nns_loge ("Failed to parse output info.");
+ goto done;
+ }
+
+ /* update input info */
+ if (!nns_customfilter_priv_set_in_info (pipe_info, env, in_info)) {
+ goto done;
+ }
+
+ status = ml_pipeline_custom_easy_filter_register (model_name,
+ in_info, out_info, nns_customfilter_invoke, pipe_info, &custom);
+ if (status != ML_ERROR_NONE) {
+ nns_loge ("Failed to register custom-filter %s.", model_name);
+ goto done;
+ }
+
+ pipe_info->pipeline_handle = custom;
+ is_done = TRUE;
+
+done:
+ (*env)->ReleaseStringUTFChars (env, name, model_name);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
+
+ if (!is_done) {
+ nns_destroy_pipe_info (pipe_info, env);
+ pipe_info = NULL;
+ }
+
+ return CAST_TO_LONG (pipe_info);
+}
+
+/**
+ * @brief Native method for custom filter.
+ */
+static void
+nns_native_custom_destroy (JNIEnv * env, jobject thiz, jlong handle)
+{
+ pipeline_info_s *pipe_info = NULL;
+
+ pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+ nns_destroy_pipe_info (pipe_info, env);
+}
+
+/**
+ * @brief List of implemented native methods for CustomFilter class.
+ */
+static JNINativeMethod native_methods_customfilter[] = {
+ {"nativeInitialize", "(Ljava/lang/String;L" NNS_CLS_TINFO ";L" NNS_CLS_TINFO ";)J",
+ (void *) nns_native_custom_initialize},
+ {"nativeDestroy", "(J)V", (void *) nns_native_custom_destroy}
+};
+
+/**
+ * @brief Register native methods for CustomFilter class.
+ */
+gboolean
+nns_native_custom_register_natives (JNIEnv * env)
+{
+ jclass klass = (*env)->FindClass (env, NNS_CLS_CUSTOM_FILTER);
+
+ if (klass) {
+ if ((*env)->RegisterNatives (env, klass, native_methods_customfilter,
+ G_N_ELEMENTS (native_methods_customfilter))) {
+ nns_loge ("Failed to register native methods for CustomFilter class.");
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ *
+ * @file nnstreamer-native-pipeline.c
+ * @date 10 July 2019
+ * @brief Native code for NNStreamer API
+ * @author Jaeyun Jung <jy1210.jung@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include "nnstreamer-native.h"
+
+/**
+ * @brief Macro to release native window.
+ */
+#define release_native_window(w) do { \
+ if (w) { \
+ ANativeWindow_release (w); \
+ w = NULL; \
+ } \
+} while (0)
+
+/**
+ * @brief Private data for Pipeline class.
+ */
+typedef struct
+{
+ jmethodID mid_state_cb;
+ jmethodID mid_sink_cb;
+} pipeline_priv_data_s;
+
+/**
+ * @brief Private data for sink node.
+ */
+typedef struct
+{
+ ml_tensors_info_h out_info;
+ jobject out_info_obj;
+} pipeline_sink_priv_data_s;
+
+/**
+ * @brief Private data for video sink.
+ */
+typedef struct
+{
+ ANativeWindow *window;
+ ANativeWindow *old_window;
+} pipeline_video_sink_priv_data_s;
+
+/**
+ * @brief Release private data in pipeline info.
+ */
+static void
+nns_pipeline_priv_free (gpointer data, JNIEnv * env)
+{
+ pipeline_priv_data_s *priv = (pipeline_priv_data_s *) data;
+
+ /* nothing to free */
+ g_free (priv);
+}
+
+/**
+ * @brief Release private data in video sink.
+ */
+static void
+nns_pipeline_video_sink_priv_free (gpointer data, JNIEnv * env)
+{
+ pipeline_video_sink_priv_data_s *priv;
+
+ priv = (pipeline_video_sink_priv_data_s *) data;
+ if (priv) {
+ release_native_window (priv->old_window);
+ release_native_window (priv->window);
+
+ g_free (priv);
+ }
+}
+
+/**
+ * @brief Release private data in sink node.
+ */
+static void
+nns_pipeline_sink_priv_free (gpointer data, JNIEnv * env)
+{
+ pipeline_sink_priv_data_s *priv = (pipeline_sink_priv_data_s *) data;
+
+ ml_tensors_info_destroy (priv->out_info);
+ if (priv->out_info_obj)
+ (*env)->DeleteGlobalRef (env, priv->out_info_obj);
+
+ g_free (priv);
+}
+
+/**
+ * @brief Update output info in sink node data.
+ */
+static gboolean
+nns_pipeline_sink_priv_set_out_info (element_data_s * item, JNIEnv * env,
+ const ml_tensors_info_h out_info)
+{
+ pipeline_sink_priv_data_s *priv;
+ jobject obj_info = NULL;
+
+ if ((priv = item->priv_data) == NULL) {
+ priv = g_new0 (pipeline_sink_priv_data_s, 1);
+ ml_tensors_info_create (&priv->out_info);
+
+ item->priv_data = priv;
+ item->priv_destroy_func = nns_pipeline_sink_priv_free;
+ }
+
+ if (ml_tensors_info_is_equal (out_info, priv->out_info)) {
+ /* do nothing, tensors info is equal. */
+ return TRUE;
+ }
+
+ if (!nns_convert_tensors_info (item->pipe_info, env, out_info, &obj_info)) {
+ nns_loge ("Failed to convert output info.");
+ return FALSE;
+ }
+
+ ml_tensors_info_free (priv->out_info);
+ ml_tensors_info_clone (priv->out_info, out_info);
+
+ if (priv->out_info_obj)
+ (*env)->DeleteGlobalRef (env, priv->out_info_obj);
+ priv->out_info_obj = (*env)->NewGlobalRef (env, obj_info);
+ (*env)->DeleteLocalRef (env, obj_info);
+ return TRUE;
+}
+
+/**
+ * @brief Pipeline state change callback.
+ * @param state new pipeline state
+ * @param user_data pipeline info (pipeline_info_s) registered at construct
+ */
+static void
+nns_pipeline_state_cb (ml_pipeline_state_e state, void *user_data)
+{
+  pipeline_info_s *pipe_info = (pipeline_info_s *) user_data;
+  pipeline_priv_data_s *priv = (pipeline_priv_data_s *) pipe_info->priv_data;
+  JNIEnv *env = nns_get_jni_env (pipe_info);
+
+  if (env == NULL) {
+    nns_logw ("Cannot get jni env in the state callback.");
+    return;
+  }
+
+  /* forward the new state to the Java-side stateChanged() handler */
+  (*env)->CallVoidMethod (env, pipe_info->instance, priv->mid_state_cb,
+      (jint) state);
+
+  if ((*env)->ExceptionCheck (env)) {
+    nns_loge ("Failed to call the state-change callback method.");
+    (*env)->ExceptionClear (env);
+  }
+}
+
+/**
+ * @brief New data callback for sink node.
+ * @param data tensors data handle delivered by the pipeline (borrowed)
+ * @param info tensors info handle describing the data (borrowed)
+ * @param user_data element data of the sink node (element_data_s)
+ */
+static void
+nns_sink_data_cb (const ml_tensors_data_h data, const ml_tensors_info_h info,
+    void *user_data)
+{
+  element_data_s *item;
+  pipeline_info_s *pipe_info;
+  pipeline_priv_data_s *priv;
+  pipeline_sink_priv_data_s *priv_sink;
+  jobject obj_data = NULL;
+  JNIEnv *env;
+
+  item = (element_data_s *) user_data;
+  pipe_info = item->pipe_info;
+
+  /* obtain a JNI env usable on the thread this callback runs on */
+  if ((env = nns_get_jni_env (pipe_info)) == NULL) {
+    nns_logw ("Cannot get jni env in the sink callback.");
+    return;
+  }
+
+  /* cache output tensors info */
+  if (!nns_pipeline_sink_priv_set_out_info (item, env, info)) {
+    return;
+  }
+
+  priv = (pipeline_priv_data_s *) pipe_info->priv_data;
+  priv_sink = (pipeline_sink_priv_data_s *) item->priv_data;
+
+  if (nns_convert_tensors_data (pipe_info, env, data, priv_sink->out_info_obj,
+      &obj_data)) {
+    jstring sink_name = (*env)->NewStringUTF (env, item->name);
+
+    /* invoke the Java-side newDataReceived() handler */
+    (*env)->CallVoidMethod (env, pipe_info->instance, priv->mid_sink_cb,
+        sink_name, obj_data);
+
+    if ((*env)->ExceptionCheck (env)) {
+      nns_loge ("Failed to call the new-data callback method.");
+      (*env)->ExceptionClear (env);
+    }
+
+    /* drop local refs explicitly; this callback may fire per buffer */
+    (*env)->DeleteLocalRef (env, sink_name);
+    (*env)->DeleteLocalRef (env, obj_data);
+  } else {
+    nns_loge ("Failed to convert the result to data object.");
+  }
+}
+
+/**
+ * @brief Get sink handle.
+ * @param pipe_info pipeline info, must not be NULL
+ * @param element_name name of the sink node
+ * @return sink handle, or NULL on failure
+ */
+static void *
+nns_get_sink_handle (pipeline_info_s * pipe_info, const gchar * element_name)
+{
+  const nns_element_type_e etype = NNS_ELEMENT_TYPE_SINK;
+  ml_pipeline_sink_h handle;
+  element_data_s *edata;
+  int ret;
+
+  g_assert (pipe_info);
+
+  /* reuse the handle if this sink node was registered before */
+  handle = (ml_pipeline_sink_h) nns_get_element_handle (pipe_info,
+      element_name, etype);
+  if (handle != NULL)
+    return handle;
+
+  edata = g_new0 (element_data_s, 1);
+  if (edata == NULL) {
+    nns_loge ("Failed to allocate memory for sink handle data.");
+    return NULL;
+  }
+
+  /* register the data callback with the element data as its user data */
+  ret = ml_pipeline_sink_register (pipe_info->pipeline_handle, element_name,
+      nns_sink_data_cb, edata, &handle);
+  if (ret != ML_ERROR_NONE) {
+    nns_loge ("Failed to get sink node %s.", element_name);
+    g_free (edata);
+    return NULL;
+  }
+
+  edata->name = g_strdup (element_name);
+  edata->type = etype;
+  edata->handle = handle;
+  edata->pipe_info = pipe_info;
+
+  if (!nns_add_element_data (pipe_info, element_name, edata)) {
+    nns_loge ("Failed to add sink node %s.", element_name);
+    nns_free_element_data (edata);
+    return NULL;
+  }
+
+  return handle;
+}
+
+/**
+ * @brief Get src handle.
+ * @param pipe_info pipeline info, must not be NULL
+ * @param element_name name of the src node
+ * @return src handle, or NULL on failure
+ */
+static void *
+nns_get_src_handle (pipeline_info_s * pipe_info, const gchar * element_name)
+{
+  const nns_element_type_e etype = NNS_ELEMENT_TYPE_SRC;
+  ml_pipeline_src_h handle;
+  element_data_s *edata;
+  int ret;
+
+  g_assert (pipe_info);
+
+  /* reuse the handle if this src node was registered before */
+  handle = (ml_pipeline_src_h) nns_get_element_handle (pipe_info,
+      element_name, etype);
+  if (handle != NULL)
+    return handle;
+
+  ret = ml_pipeline_src_get_handle (pipe_info->pipeline_handle, element_name,
+      &handle);
+  if (ret != ML_ERROR_NONE) {
+    nns_loge ("Failed to get src node %s.", element_name);
+    return NULL;
+  }
+
+  edata = g_new0 (element_data_s, 1);
+  if (edata == NULL) {
+    nns_loge ("Failed to allocate memory for src handle data.");
+    ml_pipeline_src_release_handle (handle);
+    return NULL;
+  }
+
+  edata->name = g_strdup (element_name);
+  edata->type = etype;
+  edata->handle = handle;
+  edata->pipe_info = pipe_info;
+
+  if (!nns_add_element_data (pipe_info, element_name, edata)) {
+    nns_loge ("Failed to add src node %s.", element_name);
+    nns_free_element_data (edata);
+    return NULL;
+  }
+
+  return handle;
+}
+
+/**
+ * @brief Get switch handle.
+ * @param pipe_info pipeline info, must not be NULL
+ * @param element_name name of the switch element
+ * @return switch handle, or NULL on failure
+ */
+static void *
+nns_get_switch_handle (pipeline_info_s * pipe_info, const gchar * element_name)
+{
+  const nns_element_type_e etype = NNS_ELEMENT_TYPE_SWITCH;
+  ml_pipeline_switch_h handle;
+  ml_pipeline_switch_e switch_type;
+  element_data_s *edata;
+  int ret;
+
+  g_assert (pipe_info);
+
+  /* reuse the handle if this switch was registered before */
+  handle = (ml_pipeline_switch_h) nns_get_element_handle (pipe_info,
+      element_name, etype);
+  if (handle != NULL)
+    return handle;
+
+  /* switch_type is retrieved but not cached; only the handle is kept */
+  ret = ml_pipeline_switch_get_handle (pipe_info->pipeline_handle,
+      element_name, &switch_type, &handle);
+  if (ret != ML_ERROR_NONE) {
+    nns_loge ("Failed to get switch %s.", element_name);
+    return NULL;
+  }
+
+  edata = g_new0 (element_data_s, 1);
+  if (edata == NULL) {
+    nns_loge ("Failed to allocate memory for switch handle data.");
+    ml_pipeline_switch_release_handle (handle);
+    return NULL;
+  }
+
+  edata->name = g_strdup (element_name);
+  edata->type = etype;
+  edata->handle = handle;
+  edata->pipe_info = pipe_info;
+
+  if (!nns_add_element_data (pipe_info, element_name, edata)) {
+    nns_loge ("Failed to add switch %s.", element_name);
+    nns_free_element_data (edata);
+    return NULL;
+  }
+
+  return handle;
+}
+
+/**
+ * @brief Get valve handle.
+ * @param pipe_info pipeline info, must not be NULL
+ * @param element_name name of the valve element
+ * @return valve handle, or NULL on failure
+ */
+static void *
+nns_get_valve_handle (pipeline_info_s * pipe_info, const gchar * element_name)
+{
+  const nns_element_type_e etype = NNS_ELEMENT_TYPE_VALVE;
+  ml_pipeline_valve_h handle;
+  element_data_s *edata;
+  int ret;
+
+  g_assert (pipe_info);
+
+  /* reuse the handle if this valve was registered before */
+  handle = (ml_pipeline_valve_h) nns_get_element_handle (pipe_info,
+      element_name, etype);
+  if (handle != NULL)
+    return handle;
+
+  ret = ml_pipeline_valve_get_handle (pipe_info->pipeline_handle, element_name,
+      &handle);
+  if (ret != ML_ERROR_NONE) {
+    nns_loge ("Failed to get valve %s.", element_name);
+    return NULL;
+  }
+
+  edata = g_new0 (element_data_s, 1);
+  if (edata == NULL) {
+    nns_loge ("Failed to allocate memory for valve handle data.");
+    ml_pipeline_valve_release_handle (handle);
+    return NULL;
+  }
+
+  edata->name = g_strdup (element_name);
+  edata->type = etype;
+  edata->handle = handle;
+  edata->pipe_info = pipe_info;
+
+  if (!nns_add_element_data (pipe_info, element_name, edata)) {
+    nns_loge ("Failed to add valve %s.", element_name);
+    nns_free_element_data (edata);
+    return NULL;
+  }
+
+  return handle;
+}
+
+/**
+ * @brief Get video sink element data in the pipeline.
+ * @param pipe_info pipeline info, must not be NULL
+ * @param element_name name of the video sink element
+ * @return element data, or NULL when the element is missing or not an overlay
+ */
+static element_data_s *
+nns_get_video_sink_data (pipeline_info_s * pipe_info,
+    const gchar * element_name)
+{
+  ml_pipeline_element_h handle;
+  GstElement *vsink;
+  element_data_s *edata;
+  int ret;
+
+  g_assert (pipe_info);
+
+  /* return the cached entry if this element was looked up before */
+  edata = nns_get_element_data (pipe_info, element_name);
+  if (edata != NULL)
+    return edata;
+
+  ret = ml_pipeline_element_get_handle (pipe_info->pipeline_handle,
+      element_name, &handle);
+  if (ret != ML_ERROR_NONE) {
+    nns_loge ("Failed to get the handle of %s.", element_name);
+    return NULL;
+  }
+
+  vsink = ((ml_pipeline_common_elem *) handle)->element->element;
+
+  /* only overlay-capable elements can accept a native window */
+  if (!GST_IS_VIDEO_OVERLAY (vsink)) {
+    nns_loge ("Given element %s cannot set the window on video sink.",
+        element_name);
+    ml_pipeline_element_release_handle (handle);
+    return NULL;
+  }
+
+  edata = g_new0 (element_data_s, 1);
+  edata->name = g_strdup (element_name);
+  edata->type = NNS_ELEMENT_TYPE_VIDEO_SINK;
+  edata->handle = handle;
+  edata->pipe_info = pipe_info;
+
+  if (!nns_add_element_data (pipe_info, element_name, edata)) {
+    nns_loge ("Failed to add video sink %s.", element_name);
+    nns_free_element_data (edata);
+    return NULL;
+  }
+
+  return edata;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param description pipeline description string
+ * @param add_state_cb if true, register nns_pipeline_state_cb for state changes
+ * @return native pipeline handle (pipeline_info_s cast to jlong), 0 on failure
+ */
+static jlong
+nns_native_pipe_construct (JNIEnv * env, jobject thiz, jstring description,
+    jboolean add_state_cb)
+{
+  pipeline_info_s *pipe_info = NULL;
+  pipeline_priv_data_s *priv;
+  ml_pipeline_h pipe;
+  int status;
+  const char *pipeline = (*env)->GetStringUTFChars (env, description, NULL);
+
+  pipe_info = nns_construct_pipe_info (env, thiz, NULL, NNS_PIPE_TYPE_PIPELINE);
+  if (pipe_info == NULL) {
+    nns_loge ("Failed to create pipe info.");
+    goto done;
+  }
+
+  /* resolve the Java callback method ids once and cache them in priv */
+  priv = g_new0 (pipeline_priv_data_s, 1);
+  priv->mid_state_cb =
+      (*env)->GetMethodID (env, pipe_info->cls, "stateChanged", "(I)V");
+  priv->mid_sink_cb =
+      (*env)->GetMethodID (env, pipe_info->cls, "newDataReceived",
+      "(Ljava/lang/String;L" NNS_CLS_TDATA ";)V");
+
+  nns_set_priv_data (pipe_info, priv, nns_pipeline_priv_free);
+
+  if (add_state_cb)
+    status = ml_pipeline_construct (pipeline, nns_pipeline_state_cb, pipe_info,
+        &pipe);
+  else
+    status = ml_pipeline_construct (pipeline, NULL, NULL, &pipe);
+
+  if (status != ML_ERROR_NONE) {
+    nns_loge ("Failed to create the pipeline.");
+    /* destroying pipe_info also releases priv via nns_pipeline_priv_free */
+    nns_destroy_pipe_info (pipe_info, env);
+    pipe_info = NULL;
+  } else {
+    pipe_info->pipeline_handle = pipe;
+  }
+
+done:
+  (*env)->ReleaseStringUTFChars (env, description, pipeline);
+  return CAST_TO_LONG (pipe_info);
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * Releases the pipeline and every resource held by its info structure.
+ */
+static void
+nns_native_pipe_destroy (JNIEnv * env, jobject thiz, jlong handle)
+{
+  nns_destroy_pipe_info (CAST_TO_TYPE (handle, pipeline_info_s *), env);
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @return JNI_TRUE if the pipeline is started
+ */
+static jboolean
+nns_native_pipe_start (JNIEnv * env, jobject thiz, jlong handle)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+
+  if (ml_pipeline_start (pipe_info->pipeline_handle) != ML_ERROR_NONE) {
+    nns_loge ("Failed to start the pipeline.");
+    return JNI_FALSE;
+  }
+
+  return JNI_TRUE;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @return JNI_TRUE if the pipeline is stopped
+ */
+static jboolean
+nns_native_pipe_stop (JNIEnv * env, jobject thiz, jlong handle)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+
+  if (ml_pipeline_stop (pipe_info->pipeline_handle) != ML_ERROR_NONE) {
+    nns_loge ("Failed to stop the pipeline.");
+    return JNI_FALSE;
+  }
+
+  return JNI_TRUE;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @return current pipeline state, ML_PIPELINE_STATE_UNKNOWN on failure
+ */
+static jint
+nns_native_pipe_get_state (JNIEnv * env, jobject thiz, jlong handle)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  ml_pipeline_state_e state;
+
+  if (ml_pipeline_get_state (pipe_info->pipeline_handle,
+          &state) != ML_ERROR_NONE) {
+    nns_loge ("Failed to get the pipeline state.");
+    state = ML_PIPELINE_STATE_UNKNOWN;
+  }
+
+  return (jint) state;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the src node
+ * @param in TensorsData object to push into the src node
+ * @return JNI_TRUE if the data is handed to the pipeline
+ */
+static jboolean
+nns_native_pipe_input_data (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name, jobject in)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  ml_tensors_data_h in_data = NULL;
+  ml_pipeline_src_h src;
+  jboolean pushed = JNI_FALSE;
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+
+  src = (ml_pipeline_src_h) nns_get_src_handle (pipe_info, element_name);
+  if (src != NULL) {
+    if (nns_parse_tensors_data (pipe_info, env, in, FALSE, &in_data, NULL)) {
+      int status = ml_pipeline_src_input_data (src, in_data,
+          ML_PIPELINE_BUF_POLICY_DO_NOT_FREE);
+
+      if (status == ML_ERROR_NONE)
+        pushed = JNI_TRUE;
+      else
+        nns_loge ("Failed to input tensors data to source node %s.",
+            element_name);
+    } else {
+      nns_loge ("Failed to parse input data.");
+    }
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  /* do not free input tensors (direct access from object) */
+  g_free (in_data);
+  return pushed;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the switch element
+ * @return string array of pad names, NULL on failure or when no pads exist
+ */
+static jobjectArray
+nns_native_pipe_get_switch_pads (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name)
+{
+  pipeline_info_s *pipe_info = NULL;
+  ml_pipeline_switch_h node;
+  int status;
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+  char **pad_list = NULL;
+  guint i, total;
+  jobjectArray result = NULL;
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+
+  node = (ml_pipeline_switch_h) nns_get_switch_handle (pipe_info, element_name);
+  if (node == NULL) {
+    goto done;
+  }
+
+  /* pad_list is a NULL-terminated strv owned by this function */
+  status = ml_pipeline_switch_get_pad_list (node, &pad_list);
+  if (status != ML_ERROR_NONE) {
+    nns_loge ("Failed to get the pad list of switch %s.", element_name);
+    goto done;
+  }
+
+  total = g_strv_length (pad_list);
+
+  /* set string array */
+  if (total > 0) {
+    jclass cls_string = (*env)->FindClass (env, "java/lang/String");
+
+    result = (*env)->NewObjectArray (env, total, cls_string, NULL);
+    if (result == NULL) {
+      nns_loge ("Failed to allocate string array.");
+      (*env)->DeleteLocalRef (env, cls_string);
+      goto done;
+    }
+
+    /* copy each pad name, releasing the local ref after it is stored */
+    for (i = 0; i < total; i++) {
+      jstring pad = (*env)->NewStringUTF (env, pad_list[i]);
+
+      (*env)->SetObjectArrayElement (env, result, i, pad);
+      (*env)->DeleteLocalRef (env, pad);
+    }
+
+    (*env)->DeleteLocalRef (env, cls_string);
+  }
+
+done:
+  /* g_strfreev is NULL-safe, so this covers every exit path */
+  g_strfreev (pad_list);
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return result;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the switch element
+ * @param pad name of the pad to activate
+ * @return JNI_TRUE if the pad is selected
+ */
+static jboolean
+nns_native_pipe_select_switch_pad (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name, jstring pad)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  ml_pipeline_switch_h node;
+  jboolean selected = JNI_FALSE;
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+  const char *pad_name = (*env)->GetStringUTFChars (env, pad, NULL);
+
+  node = (ml_pipeline_switch_h) nns_get_switch_handle (pipe_info, element_name);
+  if (node != NULL) {
+    if (ml_pipeline_switch_select (node, pad_name) == ML_ERROR_NONE)
+      selected = JNI_TRUE;
+    else
+      nns_loge ("Failed to select switch pad %s.", pad_name);
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  (*env)->ReleaseStringUTFChars (env, pad, pad_name);
+  return selected;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the valve element
+ * @param open JNI_TRUE to open the valve, JNI_FALSE to close it
+ * @return JNI_TRUE on success
+ */
+static jboolean
+nns_native_pipe_control_valve (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name, jboolean open)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  ml_pipeline_valve_h node;
+  jboolean controlled = JNI_FALSE;
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+
+  node = (ml_pipeline_valve_h) nns_get_valve_handle (pipe_info, element_name);
+  if (node != NULL) {
+    if (ml_pipeline_valve_set_open (node, (open == JNI_TRUE)) == ML_ERROR_NONE)
+      controlled = JNI_TRUE;
+    else
+      nns_loge ("Failed to control valve %s.", element_name);
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return controlled;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the sink node
+ * @return JNI_TRUE if the sink callback is registered
+ */
+static jboolean
+nns_native_pipe_add_sink_cb (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+  jboolean attached;
+
+  /* acquiring the sink handle also registers the data callback */
+  attached = (nns_get_sink_handle (pipe_info, element_name) != NULL) ?
+      JNI_TRUE : JNI_FALSE;
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return attached;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the sink node
+ * @return JNI_TRUE if the registered sink node is removed
+ */
+static jboolean
+nns_native_pipe_remove_sink_cb (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+  jboolean removed = JNI_FALSE;
+
+  /* only drop the element when a sink with this name was registered */
+  if (nns_get_element_handle (pipe_info, element_name,
+          NNS_ELEMENT_TYPE_SINK) != NULL) {
+    nns_remove_element_data (pipe_info, element_name);
+    removed = JNI_TRUE;
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return removed;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the video sink element
+ * @param surface Java surface object; converted via ANativeWindow_fromSurface
+ * @return JNI_TRUE if the window is attached to the video sink
+ */
+static jboolean
+nns_native_pipe_initialize_surface (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name, jobject surface)
+{
+  pipeline_info_s *pipe_info;
+  element_data_s *edata;
+  jboolean res = JNI_FALSE;
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+
+  edata = nns_get_video_sink_data (pipe_info, element_name);
+  if (edata) {
+    ANativeWindow *native_win;
+    GstElement *vsink;
+    pipeline_video_sink_priv_data_s *priv;
+    gboolean set_window = TRUE;
+
+    /* acquires a reference on the window; released below or in priv_free */
+    native_win = ANativeWindow_fromSurface (env, surface);
+    vsink = ((ml_pipeline_common_elem *) edata->handle)->element->element;
+    priv = (pipeline_video_sink_priv_data_s *) edata->priv_data;
+
+    if (priv == NULL) {
+      /* first surface for this sink: allocate window cache and its cleanup */
+      edata->priv_data = priv = g_new0 (pipeline_video_sink_priv_data_s, 1);
+      edata->priv_destroy_func = nns_pipeline_video_sink_priv_free;
+    }
+
+    if (priv->window) {
+      if (priv->window == native_win) {
+        /* same window as before: just redraw and drop the duplicate ref */
+        set_window = FALSE;
+
+        gst_video_overlay_expose (GST_VIDEO_OVERLAY (vsink));
+        release_native_window (native_win);
+      } else {
+        /**
+         * Video sink may not change new window directly when calling set-window function.
+         * Keep old window handle to prevent invalid handle case in video sink.
+         */
+        release_native_window (priv->old_window);
+        priv->old_window = priv->window;
+        priv->window = NULL;
+      }
+    }
+
+    /* set new native window */
+    if (set_window) {
+      priv->window = native_win;
+      gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (vsink),
+          (guintptr) native_win);
+    }
+
+    res = JNI_TRUE;
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return res;
+}
+
+/**
+ * @brief Native method for pipeline API.
+ * @param name name of the video sink element
+ * @return JNI_TRUE if the window is detached from the video sink
+ */
+static jboolean
+nns_native_pipe_finalize_surface (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name)
+{
+  pipeline_info_s *pipe_info;
+  element_data_s *edata;
+  jboolean res = JNI_FALSE;
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+
+  edata = nns_get_video_sink_data (pipe_info, element_name);
+  if (edata) {
+    GstElement *vsink;
+    pipeline_video_sink_priv_data_s *priv;
+
+    vsink = ((ml_pipeline_common_elem *) edata->handle)->element->element;
+    priv = (pipeline_video_sink_priv_data_s *) edata->priv_data;
+
+    /* clear the overlay handle first, then retire the cached window */
+    gst_video_overlay_set_window_handle (GST_VIDEO_OVERLAY (vsink),
+        (guintptr) NULL);
+    if (priv) {
+      /* keep the detached window as old_window; the sink may still use it
+       * until the new (NULL) handle takes effect */
+      release_native_window (priv->old_window);
+      priv->old_window = priv->window;
+      priv->window = NULL;
+    }
+
+    res = JNI_TRUE;
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return res;
+}
+
+/**
+ * @brief Native method to check the element registration.
+ * @param name element name to query
+ * @return JNI_TRUE if the element is available
+ */
+static jboolean
+nns_native_check_element_availability (JNIEnv * env, jclass clazz, jstring name)
+{
+  const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
+  jboolean res;
+
+  /* GetStringUTFChars may fail (e.g. out of memory); an exception is then
+   * pending and the chars pointer must not be used or released. */
+  if (element_name == NULL) {
+    nns_loge ("Failed to get the element name string.");
+    return JNI_FALSE;
+  }
+
+  res = ml_element_is_available (element_name) ? JNI_TRUE : JNI_FALSE;
+
+  (*env)->ReleaseStringUTFChars (env, name, element_name);
+  return res;
+}
+
+/**
+ * @brief List of implemented native methods for Pipeline class.
+ * Signatures must match the Java-side declarations exactly.
+ */
+static JNINativeMethod native_methods_pipeline[] = {
+  {"nativeCheckElementAvailability", "(Ljava/lang/String;)Z",
+      (void *) nns_native_check_element_availability},
+  {"nativeConstruct", "(Ljava/lang/String;Z)J",
+      (void *) nns_native_pipe_construct},
+  {"nativeDestroy", "(J)V", (void *) nns_native_pipe_destroy},
+  {"nativeStart", "(J)Z", (void *) nns_native_pipe_start},
+  {"nativeStop", "(J)Z", (void *) nns_native_pipe_stop},
+  {"nativeGetState", "(J)I", (void *) nns_native_pipe_get_state},
+  {"nativeInputData", "(JLjava/lang/String;L" NNS_CLS_TDATA ";)Z",
+      (void *) nns_native_pipe_input_data},
+  {"nativeGetSwitchPads", "(JLjava/lang/String;)[Ljava/lang/String;",
+      (void *) nns_native_pipe_get_switch_pads},
+  {"nativeSelectSwitchPad", "(JLjava/lang/String;Ljava/lang/String;)Z",
+      (void *) nns_native_pipe_select_switch_pad},
+  {"nativeControlValve", "(JLjava/lang/String;Z)Z",
+      (void *) nns_native_pipe_control_valve},
+  {"nativeAddSinkCallback", "(JLjava/lang/String;)Z",
+      (void *) nns_native_pipe_add_sink_cb},
+  {"nativeRemoveSinkCallback", "(JLjava/lang/String;)Z",
+      (void *) nns_native_pipe_remove_sink_cb},
+  /* the Object parameter is passed to ANativeWindow_fromSurface, so it is
+   * expected to be an android.view.Surface on the Java side */
+  {"nativeInitializeSurface", "(JLjava/lang/String;Ljava/lang/Object;)Z",
+      (void *) nns_native_pipe_initialize_surface},
+  {"nativeFinalizeSurface", "(JLjava/lang/String;)Z",
+      (void *) nns_native_pipe_finalize_surface}
+};
+
+/**
+ * @brief Register native methods for Pipeline class.
+ * @param env JNI environment
+ * @return TRUE if registration succeeded (or the class is absent in this build)
+ */
+gboolean
+nns_native_pipe_register_natives (JNIEnv * env)
+{
+  jclass klass = (*env)->FindClass (env, NNS_CLS_PIPELINE);
+
+  if (klass == NULL) {
+    /* FindClass throws ClassNotFoundException on failure; clear it so the
+     * pending exception does not break subsequent JNI calls. */
+    if ((*env)->ExceptionCheck (env))
+      (*env)->ExceptionClear (env);
+    nns_logw ("Cannot find the Pipeline class, skip registering its natives.");
+    return TRUE;
+  }
+
+  if ((*env)->RegisterNatives (env, klass, native_methods_pipeline,
+          G_N_ELEMENTS (native_methods_pipeline))) {
+    nns_loge ("Failed to register native methods for Pipeline class.");
+    (*env)->DeleteLocalRef (env, klass);
+    return FALSE;
+  }
+
+  /* release the local class reference acquired by FindClass */
+  (*env)->DeleteLocalRef (env, klass);
+  return TRUE;
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ *
+ * @file nnstreamer-native-singleshot.c
+ * @date 10 July 2019
+ * @brief Native code for NNStreamer API
+ * @author Jaeyun Jung <jy1210.jung@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include "nnstreamer-native.h"
+
+/**
+ * @brief Private data for SingleShot class.
+ */
+typedef struct
+{
+  ml_tensors_info_h out_info;   /* cached output tensors info of the model */
+  jobject out_info_obj;         /* global ref to the Java object built from out_info */
+} singleshot_priv_data_s;
+
+/**
+ * @brief Release private data in pipeline info.
+ * @param data private data (singleshot_priv_data_s), may be NULL
+ * @param env JNI environment, used to drop the cached global reference
+ */
+static void
+nns_singleshot_priv_free (gpointer data, JNIEnv * env)
+{
+  singleshot_priv_data_s *priv = (singleshot_priv_data_s *) data;
+
+  /* guard against NULL, consistent with the pipeline priv-free callbacks */
+  if (priv == NULL)
+    return;
+
+  ml_tensors_info_destroy (priv->out_info);
+  if (priv->out_info_obj)
+    (*env)->DeleteGlobalRef (env, priv->out_info_obj);
+
+  g_free (priv);
+}
+
+/**
+ * @brief Update output info in private data.
+ * @param pipe_info pipeline info holding the single handle and private data
+ * @param env JNI environment
+ * @return TRUE if the cached output info and its Java object are up to date
+ */
+static gboolean
+nns_singleshot_priv_set_info (pipeline_info_s * pipe_info, JNIEnv * env)
+{
+  ml_single_h single;
+  singleshot_priv_data_s *priv;
+  ml_tensors_info_h out_info;
+  jobject obj_info = NULL;
+
+  single = pipe_info->pipeline_handle;
+  priv = (singleshot_priv_data_s *) pipe_info->priv_data;
+
+  /* ml_single_get_output_info allocates out_info; we own it from here */
+  if (ml_single_get_output_info (single, &out_info) != ML_ERROR_NONE) {
+    nns_loge ("Failed to get output info.");
+    return FALSE;
+  }
+
+  if (ml_tensors_info_is_equal (out_info, priv->out_info)) {
+    /* cache already up to date; destroy the temporary handle
+     * (previously leaked on this path) */
+    ml_tensors_info_destroy (out_info);
+    return TRUE;
+  }
+
+  if (!nns_convert_tensors_info (pipe_info, env, out_info, &obj_info)) {
+    nns_loge ("Failed to convert output info.");
+    ml_tensors_info_destroy (out_info);
+    return FALSE;
+  }
+
+  /* replace the cached handle; priv takes ownership of out_info */
+  ml_tensors_info_destroy (priv->out_info);
+  priv->out_info = out_info;
+
+  if (priv->out_info_obj)
+    (*env)->DeleteGlobalRef (env, priv->out_info_obj);
+  priv->out_info_obj = (*env)->NewGlobalRef (env, obj_info);
+  (*env)->DeleteLocalRef (env, obj_info);
+
+  return TRUE;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @param models string array of model file paths
+ * @param in input TensorsInfo object, may be NULL
+ * @param out output TensorsInfo object, may be NULL
+ * @param fw_type framework type id, mapped via nns_get_nnfw_type
+ * @param option custom option string, may be NULL
+ * @return native handle (pipeline_info_s cast to jlong), 0 on failure
+ */
+static jlong
+nns_native_single_open (JNIEnv * env, jobject thiz,
+    jobjectArray models, jobject in, jobject out, jint fw_type, jstring option)
+{
+  pipeline_info_s *pipe_info = NULL;
+  singleshot_priv_data_s *priv;
+  ml_single_h single = NULL;
+  ml_single_preset info = { 0, };
+  gboolean opened = FALSE;
+
+  pipe_info = nns_construct_pipe_info (env, thiz, NULL, NNS_PIPE_TYPE_SINGLE);
+  if (pipe_info == NULL) {
+    nns_loge ("Failed to create pipe info.");
+    goto done;
+  }
+
+  /* parse in/out tensors information */
+  if (in) {
+    if (!nns_parse_tensors_info (pipe_info, env, in, &info.input_info)) {
+      nns_loge ("Failed to parse input tensor.");
+      goto done;
+    }
+  }
+
+  if (out) {
+    if (!nns_parse_tensors_info (pipe_info, env, out, &info.output_info)) {
+      nns_loge ("Failed to parse output tensor.");
+      goto done;
+    }
+  }
+
+  /* nnfw type and hw resource */
+  if (!nns_get_nnfw_type (fw_type, &info.nnfw)) {
+    nns_loge ("Failed, unsupported framework (%d).", fw_type);
+    goto done;
+  }
+
+  info.hw = ML_NNFW_HW_ANY;
+
+  /* parse models: join the paths into one comma-separated string */
+  if (models) {
+    GString *model_str;
+    jsize i, models_count;
+
+    model_str = g_string_new (NULL);
+    models_count = (*env)->GetArrayLength (env, models);
+
+    for (i = 0; i < models_count; i++) {
+      jstring model = (jstring) (*env)->GetObjectArrayElement (env, models, i);
+      const char *model_path = (*env)->GetStringUTFChars (env, model, NULL);
+
+      g_string_append (model_str, model_path);
+      if (i < models_count - 1) {
+        g_string_append (model_str, ",");
+      }
+
+      (*env)->ReleaseStringUTFChars (env, model, model_path);
+      (*env)->DeleteLocalRef (env, model);
+    }
+
+    /* FALSE keeps the buffer; ownership moves to info.models */
+    info.models = g_string_free (model_str, FALSE);
+  } else {
+    nns_loge ("Failed to get model file.");
+    goto done;
+  }
+
+  /* parse option string */
+  if (option) {
+    const char *option_str = (*env)->GetStringUTFChars (env, option, NULL);
+
+    info.custom_option = g_strdup (option_str);
+    (*env)->ReleaseStringUTFChars (env, option, option_str);
+  }
+
+  if (ml_single_open_custom (&single, &info) != ML_ERROR_NONE) {
+    nns_loge ("Failed to create the pipeline.");
+    goto done;
+  }
+
+  pipe_info->pipeline_handle = single;
+
+  /* set private date */
+  priv = g_new0 (singleshot_priv_data_s, 1);
+  ml_tensors_info_create (&priv->out_info);
+  nns_set_priv_data (pipe_info, priv, nns_singleshot_priv_free);
+
+  /* cache the model's output info for later invoke calls */
+  if (!nns_singleshot_priv_set_info (pipe_info, env)) {
+    nns_loge ("Failed to set the metadata.");
+    goto done;
+  }
+
+  opened = TRUE;
+
+done:
+  /* preset fields are owned locally; release them on every path */
+  ml_tensors_info_destroy (info.input_info);
+  ml_tensors_info_destroy (info.output_info);
+  g_free (info.models);
+  g_free (info.custom_option);
+
+  if (!opened) {
+    /* destroying pipe_info also closes the single handle if already set */
+    nns_destroy_pipe_info (pipe_info, env);
+    pipe_info = NULL;
+  }
+
+  return CAST_TO_LONG (pipe_info);
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * Releases the single-shot handle and all cached private data.
+ */
+static void
+nns_native_single_close (JNIEnv * env, jobject thiz, jlong handle)
+{
+  nns_destroy_pipe_info (CAST_TO_TYPE (handle, pipeline_info_s *), env);
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @param in input TensorsData object
+ * @return output TensorsData object, NULL on failure
+ */
+static jobject
+nns_native_single_invoke (JNIEnv * env, jobject thiz, jlong handle, jobject in)
+{
+  pipeline_info_s *pipe_info;
+  singleshot_priv_data_s *priv;
+  ml_single_h single;
+  ml_tensors_data_h in_data, out_data;
+  jobject result = NULL;
+  gboolean failed = FALSE;
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  priv = (singleshot_priv_data_s *) pipe_info->priv_data;
+  single = pipe_info->pipeline_handle;
+  in_data = out_data = NULL;
+
+  if (!nns_parse_tensors_data (pipe_info, env, in, FALSE, &in_data, NULL)) {
+    nns_loge ("Failed to parse input tensors data.");
+    failed = TRUE;
+    goto done;
+  }
+
+  /* create output object and get the direct buffer address */
+  if (!nns_create_tensors_data_object (pipe_info, env, priv->out_info_obj, &result) ||
+      !nns_parse_tensors_data (pipe_info, env, result, FALSE, &out_data, NULL)) {
+    nns_loge ("Failed to create output tensors object.");
+    failed = TRUE;
+    goto done;
+  }
+
+  /* writes directly into the Java object's buffers through out_data */
+  if (ml_single_invoke_fast (single, in_data, out_data) != ML_ERROR_NONE) {
+    nns_loge ("Failed to invoke the model.");
+    failed = TRUE;
+    goto done;
+  }
+
+done:
+  if (failed) {
+    /* drop the half-built result so the caller sees NULL on failure */
+    if (result) {
+      (*env)->DeleteLocalRef (env, result);
+      result = NULL;
+    }
+  }
+
+  /* do not free input/output tensors (direct access from object) */
+  g_free (in_data);
+  g_free (out_data);
+  return result;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @return input TensorsInfo object, NULL on failure
+ */
+static jobject
+nns_native_single_get_input_info (JNIEnv * env, jobject thiz, jlong handle)
+{
+  pipeline_info_s *pipe_info;
+  ml_single_h single;
+  /* initialized to NULL: the 'done' label destroys the handle even when
+   * ml_single_get_input_info fails and leaves it unset */
+  ml_tensors_info_h info = NULL;
+  jobject result = NULL;
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  single = pipe_info->pipeline_handle;
+
+  if (ml_single_get_input_info (single, &info) != ML_ERROR_NONE) {
+    nns_loge ("Failed to get input info.");
+    goto done;
+  }
+
+  if (!nns_convert_tensors_info (pipe_info, env, info, &result)) {
+    nns_loge ("Failed to convert input info.");
+    result = NULL;
+  }
+
+done:
+  ml_tensors_info_destroy (info);
+  return result;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @return output TensorsInfo object, NULL on failure
+ */
+static jobject
+nns_native_single_get_output_info (JNIEnv * env, jobject thiz, jlong handle)
+{
+  pipeline_info_s *pipe_info;
+  ml_single_h single;
+  /* initialized to NULL: the 'done' label destroys the handle even when
+   * ml_single_get_output_info fails and leaves it unset */
+  ml_tensors_info_h info = NULL;
+  jobject result = NULL;
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  single = pipe_info->pipeline_handle;
+
+  if (ml_single_get_output_info (single, &info) != ML_ERROR_NONE) {
+    nns_loge ("Failed to get output info.");
+    goto done;
+  }
+
+  if (!nns_convert_tensors_info (pipe_info, env, info, &result)) {
+    nns_loge ("Failed to convert output info.");
+    result = NULL;
+  }
+
+done:
+  ml_tensors_info_destroy (info);
+  return result;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @param name property name
+ * @param value property value
+ * @return JNI_TRUE if the property is set and the cached output info is updated
+ */
+static jboolean
+nns_native_single_set_prop (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name, jstring value)
+{
+  pipeline_info_s *pipe_info;
+  ml_single_h single;
+  jboolean ret = JNI_FALSE;
+
+  const char *prop_name = (*env)->GetStringUTFChars (env, name, NULL);
+  const char *prop_value = (*env)->GetStringUTFChars (env, value, NULL);
+
+  pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  single = pipe_info->pipeline_handle;
+
+  /* update info when changing the tensors information */
+  if (ml_single_set_property (single, prop_name, prop_value) == ML_ERROR_NONE &&
+      nns_singleshot_priv_set_info (pipe_info, env)) {
+    ret = JNI_TRUE;
+  } else {
+    nns_loge ("Failed to set the property (%s:%s).", prop_name, prop_value);
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, prop_name);
+  /* bugfix: release prop_value against its own jstring (was released
+   * against 'name', which is undefined behavior per the JNI spec) */
+  (*env)->ReleaseStringUTFChars (env, value, prop_value);
+  return ret;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @param name property name
+ * @return property value string, NULL on failure
+ */
+static jstring
+nns_native_single_get_prop (JNIEnv * env, jobject thiz, jlong handle,
+    jstring name)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+  ml_single_h single = pipe_info->pipeline_handle;
+  const char *prop_name = (*env)->GetStringUTFChars (env, name, NULL);
+  char *prop_value = NULL;
+  jstring result = NULL;
+
+  if (ml_single_get_property (single, prop_name, &prop_value) == ML_ERROR_NONE) {
+    if (prop_value == NULL) {
+      /* null string means error in java, return empty string. */
+      prop_value = g_strdup ("");
+    }
+
+    result = (*env)->NewStringUTF (env, prop_value);
+    g_free (prop_value);
+  } else {
+    nns_loge ("Failed to get the property (%s).", prop_name);
+  }
+
+  (*env)->ReleaseStringUTFChars (env, name, prop_name);
+  return result;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ * @param timeout timeout in milliseconds
+ * @return JNI_TRUE if the timeout is set
+ */
+static jboolean
+nns_native_single_set_timeout (JNIEnv * env, jobject thiz, jlong handle,
+    jint timeout)
+{
+  pipeline_info_s *pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+
+  if (ml_single_set_timeout (pipe_info->pipeline_handle,
+          (unsigned int) timeout) != ML_ERROR_NONE) {
+    nns_loge ("Failed to set the timeout.");
+    return JNI_FALSE;
+  }
+
+  nns_logi ("Successfully set the timeout, %d milliseconds.", timeout);
+  return JNI_TRUE;
+}
+
+/**
+ * @brief Native method for single-shot API.
+ */
+static jboolean
+nns_native_single_set_input_info (JNIEnv * env, jobject thiz, jlong handle,
+ jobject in)
+{
+ pipeline_info_s *pipe_info;
+ ml_single_h single;
+ ml_tensors_info_h in_info = NULL;
+ jboolean ret = JNI_FALSE;
+
+ pipe_info = CAST_TO_TYPE (handle, pipeline_info_s *);
+ single = pipe_info->pipeline_handle;
+
+ if (!nns_parse_tensors_info (pipe_info, env, in, &in_info)) {
+ nns_loge ("Failed to parse input tensor.");
+ goto done;
+ }
+
+ if (ml_single_set_input_info (single, in_info) == ML_ERROR_NONE &&
+ nns_singleshot_priv_set_info (pipe_info, env)) {
+ ret = JNI_TRUE;
+ } else {
+ nns_loge ("Failed to set input info.");
+ }
+
+done:
+ ml_tensors_info_destroy (in_info);
+ return ret;
+}
+
+/**
+ * @brief List of implemented native methods for SingleShot class.
+ */
+static JNINativeMethod native_methods_singleshot[] = {
+ {"nativeOpen", "([Ljava/lang/String;L" NNS_CLS_TINFO ";"
+ "L" NNS_CLS_TINFO ";ILjava/lang/String;)J",
+ (void *) nns_native_single_open},
+ {"nativeClose", "(J)V", (void *) nns_native_single_close},
+ {"nativeInvoke", "(JL" NNS_CLS_TDATA ";)L" NNS_CLS_TDATA ";",
+ (void *) nns_native_single_invoke},
+ {"nativeGetInputInfo", "(J)L" NNS_CLS_TINFO ";",
+ (void *) nns_native_single_get_input_info},
+ {"nativeGetOutputInfo", "(J)L" NNS_CLS_TINFO ";",
+ (void *) nns_native_single_get_output_info},
+ {"nativeSetProperty", "(JLjava/lang/String;Ljava/lang/String;)Z",
+ (void *) nns_native_single_set_prop},
+ {"nativeGetProperty", "(JLjava/lang/String;)Ljava/lang/String;",
+ (void *) nns_native_single_get_prop},
+ {"nativeSetTimeout", "(JI)Z", (void *) nns_native_single_set_timeout},
+ {"nativeSetInputInfo", "(JL" NNS_CLS_TINFO ";)Z",
+ (void *) nns_native_single_set_input_info}
+};
+
+/**
+ * @brief Register native methods for SingleShot class.
+ */
+gboolean
+nns_native_single_register_natives (JNIEnv * env)
+{
+ jclass klass = (*env)->FindClass (env, NNS_CLS_SINGLE);
+
+ if (klass) {
+ if ((*env)->RegisterNatives (env, klass, native_methods_singleshot,
+ G_N_ELEMENTS (native_methods_singleshot))) {
+ nns_loge ("Failed to register native methods for SingleShot class.");
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer Android API
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd.
+ *
+ * @file nnstreamer-native.h
+ * @date 10 July 2019
+ * @brief Native code for NNStreamer API
+ * @author Jaeyun Jung <jy1210.jung@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#ifndef __NNSTREAMER_ANDROID_NATIVE_H__
+#define __NNSTREAMER_ANDROID_NATIVE_H__
+
+#include <jni.h>
+#include <android/native_window.h>
+#include <android/native_window_jni.h>
+
+#include <gst/gst.h>
+#include <gst/video/video.h>
+
+#include "nnstreamer.h"
+#include "nnstreamer-single.h"
+#include "nnstreamer-capi-private.h"
+#include "nnstreamer_log.h"
+#include "nnstreamer_plugin_api.h"
+#include "nnstreamer_plugin_api_filter.h"
+
/**
 * @brief Pointer <-> jlong conversion helpers.
 *
 * On 64-bit targets a pointer fits directly into a jlong. On 32-bit
 * targets the value is narrowed through jint first to avoid sign-extension
 * artifacts when round-tripping a 32-bit pointer through a 64-bit jlong.
 */
#if GLIB_SIZEOF_VOID_P == 8
#define CAST_TO_LONG(p) (jlong)(p)
#define CAST_TO_TYPE(l,type) (type)(l)
#else
#define CAST_TO_LONG(p) (jlong)(jint)(p)
#define CAST_TO_TYPE(l,type) (type)(jint)(l)
#endif
+
+/**
+ * @brief NNStreamer package name.
+ */
+#define NNS_PKG "org/nnsuite/nnstreamer"
+#define NNS_CLS_TDATA NNS_PKG "/TensorsData"
+#define NNS_CLS_TINFO NNS_PKG "/TensorsInfo"
+#define NNS_CLS_PIPELINE NNS_PKG "/Pipeline"
+#define NNS_CLS_SINGLE NNS_PKG "/SingleShot"
+#define NNS_CLS_CUSTOM_FILTER NNS_PKG "/CustomFilter"
+#define NNS_CLS_NNSTREAMER NNS_PKG "/NNStreamer"
+
+/**
+ * @brief Callback to destroy private data in pipe info.
+ */
+typedef void (*nns_priv_destroy)(gpointer data, JNIEnv * env);
+
+/**
+ * @brief Pipeline type in native pipe info.
+ */
typedef enum
{
  NNS_PIPE_TYPE_PIPELINE = 0,   /**< handle created by the Pipeline API */
  NNS_PIPE_TYPE_SINGLE,         /**< handle created by the SingleShot API */
  NNS_PIPE_TYPE_CUSTOM,         /**< handle created for a CustomFilter */

  NNS_PIPE_TYPE_UNKNOWN         /**< sentinel: uninitialized or invalid type */
} nns_pipe_type_e;
+
+/**
+ * @brief Element type in native pipe info.
+ */
typedef enum
{
  NNS_ELEMENT_TYPE_SRC = 0,     /**< app-src element (push data into pipeline) */
  NNS_ELEMENT_TYPE_SINK,        /**< sink element (receive data from pipeline) */
  NNS_ELEMENT_TYPE_VALVE,       /**< valve element (open/close a stream) */
  NNS_ELEMENT_TYPE_SWITCH,      /**< input/output selector element */
  NNS_ELEMENT_TYPE_VIDEO_SINK,  /**< video sink element (rendering surface) */

  NNS_ELEMENT_TYPE_UNKNOWN      /**< sentinel: uninitialized or invalid type */
} nns_element_type_e;
+
+/**
+ * @brief Struct for TensorsData class info.
+ */
typedef struct
{
  jclass cls;             /**< global reference to the TensorsData class */
  jmethodID mid_init;     /**< constructor method ID */
  jmethodID mid_alloc;    /**< method ID to allocate tensor data buffers */
  jmethodID mid_get_array;  /**< method ID to get the data array */
  jmethodID mid_get_info;   /**< method ID to get the associated TensorsInfo */
} data_class_info_s;
+
+/**
+ * @brief Struct for TensorsInfo class info.
+ */
typedef struct
{
  jclass cls;             /**< global reference to the TensorsInfo class */
  jmethodID mid_init;     /**< constructor method ID */
  jmethodID mid_add_info; /**< method ID to append one tensor info entry */
  jmethodID mid_get_array;  /**< method ID to get the info array */

  jclass cls_info;        /**< inner per-tensor info class */
  jfieldID fid_info_name; /**< field ID of the tensor name */
  jfieldID fid_info_type; /**< field ID of the tensor data type */
  jfieldID fid_info_dim;  /**< field ID of the tensor dimension */
} info_class_info_s;
+
+/**
+ * @brief Struct for constructed pipeline.
+ */
typedef struct
{
  nns_pipe_type_e pipeline_type;  /**< which API created this handle */
  gpointer pipeline_handle;       /**< underlying ml_pipeline_h or ml_single_h */
  GHashTable *element_handles;    /**< name -> element_data_s mapping */
  GMutex lock;                    /**< protects the element table */

  JavaVM *jvm;                    /**< VM pointer, used to attach worker threads */
  jint version;                   /**< JNI version used when attaching */
  pthread_key_t jni_env;          /**< per-thread cached JNIEnv */

  jobject instance;               /**< global reference to the owning Java object */
  jclass cls;                     /**< global reference to the owning Java class */
  data_class_info_s data_cls_info;  /**< cached TensorsData class info */
  info_class_info_s info_cls_info;  /**< cached TensorsInfo class info */

  gpointer priv_data;             /**< implementation-private data */
  nns_priv_destroy priv_destroy_func;  /**< destructor for priv_data */
} pipeline_info_s;
+
+/**
+ * @brief Struct for element data in pipeline.
+ */
typedef struct
{
  gchar *name;                    /**< element name in the pipeline description */
  nns_element_type_e type;        /**< element type (src/sink/valve/...) */
  gpointer handle;                /**< underlying ml_pipeline_* element handle */
  pipeline_info_s *pipe_info;     /**< back-pointer to the owning pipeline info */

  gpointer priv_data;             /**< implementation-private data */
  nns_priv_destroy priv_destroy_func;  /**< destructor for priv_data */
} element_data_s;
+
+/**
+ * @brief Get JNI environment.
+ */
+extern JNIEnv *
+nns_get_jni_env (pipeline_info_s * pipe_info);
+
+/**
+ * @brief Free element handle pointer.
+ */
+extern void
+nns_free_element_data (gpointer data);
+
+/**
+ * @brief Construct pipeline info.
+ */
+extern gpointer
+nns_construct_pipe_info (JNIEnv * env, jobject thiz, gpointer handle, nns_pipe_type_e type);
+
+/**
+ * @brief Destroy pipeline info.
+ */
+extern void
+nns_destroy_pipe_info (pipeline_info_s * pipe_info, JNIEnv * env);
+
+/**
+ * @brief Set private data in pipeline info.
+ */
+extern void
+nns_set_priv_data (pipeline_info_s * pipe_info, gpointer data, nns_priv_destroy destroy_func);
+
+/**
+ * @brief Get element data of given name.
+ */
+extern element_data_s *
+nns_get_element_data (pipeline_info_s * pipe_info, const gchar * name);
+
+/**
+ * @brief Get element handle of given name and type.
+ */
+extern gpointer
+nns_get_element_handle (pipeline_info_s * pipe_info, const gchar * name, const nns_element_type_e type);
+
+/**
+ * @brief Remove element data of given name.
+ */
+extern gboolean
+nns_remove_element_data (pipeline_info_s * pipe_info, const gchar * name);
+
+/**
+ * @brief Add new element data of given name.
+ */
+extern gboolean
+nns_add_element_data (pipeline_info_s * pipe_info, const gchar * name, element_data_s * item);
+
+/**
+ * @brief Create new data object with given tensors info. Caller should unref the result object.
+ */
+extern gboolean
+nns_create_tensors_data_object (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_info, jobject * result);
+
+/**
+ * @brief Convert tensors data to TensorsData object.
+ */
+extern gboolean
+nns_convert_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env, ml_tensors_data_h data_h, jobject obj_info, jobject * result);
+
+/**
+ * @brief Parse tensors data from TensorsData object.
+ */
+extern gboolean
+nns_parse_tensors_data (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_data, gboolean clone, ml_tensors_data_h * data_h, ml_tensors_info_h * info_h);
+
+/**
+ * @brief Convert tensors info to TensorsInfo object.
+ */
+extern gboolean
+nns_convert_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env, ml_tensors_info_h info_h, jobject * result);
+
+/**
+ * @brief Parse tensors info from TensorsInfo object.
+ */
+extern gboolean
+nns_parse_tensors_info (pipeline_info_s * pipe_info, JNIEnv * env, jobject obj_info, ml_tensors_info_h * info_h);
+
+/**
+ * @brief Get NNFW from integer value.
+ */
+extern gboolean
+nns_get_nnfw_type (jint fw_type, ml_nnfw_type_e * nnfw);
+
+/**
+ * @brief Register native methods for SingleShot class.
+ */
+extern gboolean
+nns_native_single_register_natives (JNIEnv * env);
+
+#if !defined (NNS_SINGLE_ONLY)
+/**
+ * @brief Register native methods for Pipeline class.
+ */
+extern gboolean
+nns_native_pipe_register_natives (JNIEnv * env);
+
+/**
+ * @brief Register native methods for CustomFilter class.
+ */
+extern gboolean
+nns_native_custom_register_natives (JNIEnv * env);
+#endif
+
+#endif /* __NNSTREAMER_ANDROID_NATIVE_H__ */
--- /dev/null
+include ':nnstreamer'