[Android/Test] add SNAP tests using TensorFlow model
authorYongjoo Ahn <yongjoo1.ahn@samsung.com>
Wed, 22 Apr 2020 10:07:02 +0000 (19:07 +0900)
committerMyungJoo Ham <myungjoo.ham@samsung.com>
Sat, 25 Apr 2020 14:08:50 +0000 (23:08 +0900)
- For both single-shot and pipeline APIs, add simple tests to validate SNAP with a TensorFlow model
- Test with CPU, DSP, and NPU runtimes on a recent Snapdragon SoC (S20) with preloaded model files

Signed-off-by: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
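
Outside the JUnit harness, the added helpers reduce to a few API calls. The following is a minimal sketch of running the preloaded SNAP TensorFlow model once through the single-shot API, mirroring the tests in this commit; the class name SnapTensorflowSketch and method invokeOnce() are illustrative only, while the model path, tensor metadata, option string, and API calls are taken from the test code below. It assumes the NNStreamer Android API has been initialized for the application and READ_EXTERNAL_STORAGE has been granted.

import java.io.File;

import android.os.Environment;

import org.nnsuite.nnstreamer.NNStreamer;
import org.nnsuite.nnstreamer.SingleShot;
import org.nnsuite.nnstreamer.TensorsData;
import org.nnsuite.nnstreamer.TensorsInfo;

public class SnapTensorflowSketch {
    /**
     * Invokes the preloaded SNAP TensorFlow model once on the CPU runtime and
     * returns the output buffer size (expected: 1001 float32 values = 4004 bytes).
     */
    public static int invokeOnce() throws Exception {
        /* Model file preloaded on the device, as in APITestCommon.getSNAPTensorflowModel(). */
        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
        File[] model = new File[]{new File(root + "/nnstreamer/snap_data/model/yolo_new.pb")};

        /* Input/output metadata, identical to the values used by the new tests. */
        TensorsInfo in = new TensorsInfo();
        in.addTensorInfo("input", NNStreamer.TensorType.FLOAT32, new int[]{3, 224, 224, 1});

        TensorsInfo out = new TensorsInfo();
        out.addTensorInfo("MobilenetV1/Predictions/Reshape_1:0",
                NNStreamer.TensorType.FLOAT32, new int[]{1001, 1});

        /* CPU runtime; DSP/NPU differ only in the ComputingUnit field
         * (see APITestCommon.getSNAPTensorflowOption()). */
        String option = "ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,"
                + "InputFormat:NHWC,OutputFormat:NHWC,ComputingUnit:CPU";

        SingleShot single = new SingleShot(model, in, out, NNStreamer.NNFWType.SNAP, option);

        /* Zero-filled dummy input, same shape as the tests push. */
        TensorsData output = single.invoke(in.allocate());

        return output.getTensorData(0).capacity();
    }
}
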
api/android/api/src/androidTest/java/org/nnsuite/nnstreamer/APITestCommon.java
api/android/api/src/androidTest/java/org/nnsuite/nnstreamer/APITestPipeline.java
api/android/api/src/androidTest/java/org/nnsuite/nnstreamer/APITestSingleShot.java

index 7364a00..59c2613 100644 (file)
@@ -138,6 +138,23 @@ public class APITestCommon {
         return model;
     }
 
+    public enum SNAPComputingUnit {
+        CPU("ComputingUnit:CPU"),
+        GPU("ComputingUnit:GPU,GpuCacheSource:/sdcard/nnstreamer/"),
+        DSP("ComputingUnit:DSP"),
+        NPU("ComputingUnit:NPU");
+
+        private String computing_unit_option;
+
+        SNAPComputingUnit(String computing_unit_option) {
+            this.computing_unit_option = computing_unit_option;
+        }
+
+        public String getOptionString() {
+            return computing_unit_option;
+        }
+    }
+
     /**
      * Gets the File objects of Caffe model for SNAP.
      * Note that, to invoke model in the storage, the permission READ_EXTERNAL_STORAGE is required.
@@ -161,20 +178,59 @@ public class APITestCommon {
      * CPU: "custom=ModelFWType:CAFFE,ExecutionDataType:FLOAT32,ComputingUnit:CPU"
      * GPU: "custom=ModelFWType:CAFFE,ExecutionDataType:FLOAT32,ComputingUnit:GPU,GpuCacheSource:/sdcard/nnstreamer/"
      */
-    public static String getSNAPCaffeOption(boolean useGPU) {
+    public static String getSNAPCaffeOption(SNAPComputingUnit CUnit) {
         String option = "ModelFWType:CAFFE,ExecutionDataType:FLOAT32,InputFormat:NHWC,OutputFormat:NCHW,";
+        option = option + CUnit.getOptionString();
 
-        if (useGPU) {
-            String root = Environment.getExternalStorageDirectory().getAbsolutePath();
-            option = option + "ComputingUnit:GPU,GpuCacheSource:" + root + "/nnstreamer/";
-        } else {
-            option = option + "ComputingUnit:CPU";
-        }
+        return option;
+    }
 
+    /**
+     * Gets the option string to run a TensorFlow model with SNAP.
+     *
+     * CPU: "custom=ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,ComputingUnit:CPU"
+     * GPU: Not supported for TensorFlow models
+     * DSP: "custom=ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,ComputingUnit:DSP"
+     * NPU: "custom=ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,ComputingUnit:NPU"
+     */
+    public static String getSNAPTensorflowOption(SNAPComputingUnit CUnit) {
+        String option = "ModelFWType:TENSORFLOW,ExecutionDataType:FLOAT32,InputFormat:NHWC,OutputFormat:NHWC,";
+        option = option + CUnit.getOptionString();
         return option;
     }
 
     /**
+     * Gets the File objects of the TensorFlow model for SNAP.
+     * Note that the READ_EXTERNAL_STORAGE permission is required to load the model from storage.
+     */
+    public static File[] getSNAPTensorflowModel(SNAPComputingUnit CUnit) {
+        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
+        String model_path = "/nnstreamer/snap_data/model/";
+
+        switch (CUnit) {
+            case CPU:
+                model_path = model_path + "yolo_new.pb";
+                break;
+            case DSP:
+                model_path = model_path + "yolo_new_tf_quantized.dlc";
+                break;
+            case NPU:
+                model_path = model_path + "yolo_new_tf_quantized_hta.dlc";
+                break;
+            case GPU:
+            default:
+                fail();
+        }
+
+        File model = new File(root + model_path);
+        if (!model.exists()) {
+            fail();
+        }
+
+        return new File[]{model};
+    }
+
+    /**
      * Verifies the byte buffer is direct buffer with native order.
      *
      * @param buffer   The byte buffer
index f912a6e..31445be 100644 (file)
@@ -3,6 +3,7 @@ package org.nnsuite.nnstreamer;
+import android.os.Build;
 import android.os.Environment;
 import android.support.test.rule.GrantPermissionRule;
 import android.support.test.runner.AndroidJUnit4;
 
 import org.junit.Before;
 import org.junit.Rule;
@@ -1127,9 +1128,9 @@ public class APITestPipeline {
     /**
      * Run SNAP with Caffe model.
      */
-    private void runSNAPCaffe(boolean useGPU) {
+    private void runSNAPCaffe(APITestCommon.SNAPComputingUnit CUnit) {
         File[] models = APITestCommon.getSNAPCaffeModel();
-        String option = APITestCommon.getSNAPCaffeOption(useGPU);
+        String option = APITestCommon.getSNAPCaffeOption(CUnit);
 
         String desc = "appsrc name=srcx ! " +
                 "other/tensor,dimension=(string)3:224:224:1,type=(string)float32,framerate=(fraction)0/1 ! " +
@@ -1140,8 +1141,10 @@ public class APITestPipeline {
                     "custom=" + option + " ! " +
                 "tensor_sink name=sinkx";
 
-        try (Pipeline pipe = new Pipeline(desc)) {
-            TensorsInfo info = new TensorsInfo();
+        try (
+            Pipeline pipe = new Pipeline(desc);
+            TensorsInfo info = new TensorsInfo()
+        ) {
             info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
 
             /* register sink callback */
@@ -1197,7 +1200,7 @@ public class APITestPipeline {
             return;
         }
 
-        runSNAPCaffe(false);
+        runSNAPCaffe(APITestCommon.SNAPComputingUnit.CPU);
     }
 
     @Test
@@ -1207,7 +1210,123 @@ public class APITestPipeline {
             return;
         }
 
-        runSNAPCaffe(true);
+        runSNAPCaffe(APITestCommon.SNAPComputingUnit.GPU);
+    }
+
+    /**
+     * Run SNAP with a TensorFlow model.
+     */
+    private void runSNAPTensorflow(APITestCommon.SNAPComputingUnit CUnit) {
+        File[] model = APITestCommon.getSNAPTensorflowModel(CUnit);
+        String option = APITestCommon.getSNAPTensorflowOption(CUnit);
+        String desc = "appsrc name=srcx ! " +
+                "other/tensor,dimension=(string)3:224:224:1,type=(string)float32,framerate=(fraction)0/1 ! " +
+                "tensor_filter framework=snap " +
+                    "model=" + model[0].getAbsolutePath() + " " +
+                    "input=3:224:224:1 inputtype=float32 inputlayout=NHWC inputname=input " +
+                    "output=1001:1 outputtype=float32 outputlayout=NHWC outputname=MobilenetV1/Predictions/Reshape_1:0 " +
+                    "custom=" + option + " ! " +
+                "tensor_sink name=sinkx";
+
+        try (
+            Pipeline pipe = new Pipeline(desc);
+            TensorsInfo info = new TensorsInfo()
+        ) {
+            info.addTensorInfo(NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
+
+            /* register sink callback */
+            pipe.registerSinkCallback("sinkx", new Pipeline.NewDataCallback() {
+                @Override
+                public void onNewDataReceived(TensorsData data) {
+                    if (data == null || data.getTensorsCount() != 1) {
+                        mInvalidState = true;
+                        return;
+                    }
+
+                    TensorsInfo info = data.getTensorsInfo();
+
+                    if (info == null || info.getTensorsCount() != 1) {
+                        mInvalidState = true;
+                    } else {
+                        ByteBuffer output = data.getTensorData(0);
+
+                        if (!APITestCommon.isValidBuffer(output, 4004)) {
+                            mInvalidState = true;
+                        }
+                    }
+
+                    mReceived++;
+                }
+            });
+
+            /* start pipeline */
+            pipe.start();
+
+            /* push input buffer */
+            for (int i = 0; i < 10; i++) {
+                /* dummy input */
+                pipe.inputData("srcx", TensorsData.allocate(info));
+                Thread.sleep(100);
+            }
+
+            /* sleep 500 to invoke */
+            Thread.sleep(500);
+
+            /* stop pipeline */
+            pipe.stop();
+
+            /* check received data from sink */
+            assertFalse(mInvalidState);
+            assertTrue(mReceived > 0);
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testSNAPTensorflowCPU() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+            /* cannot run the test */
+            return;
+        }
+
+        runSNAPTensorflow(APITestCommon.SNAPComputingUnit.CPU);
+    }
+
+    @Test
+    public void testSNAPTensorflowDSP() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+            /* cannot run the test */
+            return;
+        }
+
+        if (!Build.HARDWARE.equals("qcom")) {
+            /*
+             * TensorFlow models using the DSP runtime can only be executed
+             * on Snapdragon SoCs. This test cannot run on Exynos.
+             */
+            return;
+        }
+
+        runSNAPTensorflow(APITestCommon.SNAPComputingUnit.DSP);
+    }
+
+    @Test
+    public void testSNAPTensorflowNPU() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+            /* cannot run the test */
+            return;
+        }
+
+        if (!Build.HARDWARE.equals("qcom")) {
+            /*
+             * TensorFlow models using the NPU runtime can only be executed
+             * on Snapdragon SoCs. This test cannot run on Exynos.
+             */
+            return;
+        }
+
+        runSNAPTensorflow(APITestCommon.SNAPComputingUnit.NPU);
     }
 
     @Test
index 1aa5416..5098ee7 100644 (file)
@@ -3,6 +3,7 @@ package org.nnsuite.nnstreamer;
+import android.os.Build;
 import android.os.Environment;
 import android.support.test.rule.GrantPermissionRule;
 import android.support.test.runner.AndroidJUnit4;
 
 import org.junit.Before;
 import org.junit.Rule;
@@ -599,9 +600,9 @@ public class APITestSingleShot {
     /**
      * Run SNAP with Caffe model.
      */
-    private void runSNAPCaffe(boolean useGPU) {
+    private void runSNAPCaffe(APITestCommon.SNAPComputingUnit CUnit) {
         File[] models = APITestCommon.getSNAPCaffeModel();
-        String option = APITestCommon.getSNAPCaffeOption(useGPU);
+        String option = APITestCommon.getSNAPCaffeOption(CUnit);
 
         try {
             TensorsInfo in = new TensorsInfo();
@@ -638,7 +639,7 @@ public class APITestSingleShot {
             return;
         }
 
-        runSNAPCaffe(false);
+        runSNAPCaffe(APITestCommon.SNAPComputingUnit.CPU);
     }
 
     @Test
@@ -648,7 +649,89 @@ public class APITestSingleShot {
             return;
         }
 
-        runSNAPCaffe(true);
+        runSNAPCaffe(APITestCommon.SNAPComputingUnit.GPU);
+    }
+
+    /**
+     * Run SNAP with a TensorFlow model.
+     */
+    private void runSNAPTensorflow(APITestCommon.SNAPComputingUnit CUnit) {
+        File[] model = APITestCommon.getSNAPTensorflowModel(CUnit);
+        String option = APITestCommon.getSNAPTensorflowOption(CUnit);
+
+        try {
+            TensorsInfo in = new TensorsInfo();
+            in.addTensorInfo("input", NNStreamer.TensorType.FLOAT32, new int[]{3,224,224,1});
+
+            TensorsInfo out = new TensorsInfo();
+            out.addTensorInfo("MobilenetV1/Predictions/Reshape_1:0", NNStreamer.TensorType.FLOAT32, new int[]{1001, 1});
+
+            SingleShot single = new SingleShot(model, in, out, NNStreamer.NNFWType.SNAP, option);
+
+            /* let's ignore timeout (set 60 sec) */
+            single.setTimeout(60000);
+
+            /* single-shot invoke */
+            for (int i = 0; i < 10; i++) {
+                /* dummy input */
+                TensorsData output = single.invoke(in.allocate());
+
+                /* output: float32 1:1001 */
+                assertEquals(1, output.getTensorsCount());
+                assertEquals(4004, output.getTensorData(0).capacity());
+
+                Thread.sleep(30);
+            }
+
+        } catch (Exception e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testSNAPTensorflowCPU() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+            /* cannot run the test */
+            return;
+        }
+
+        runSNAPTensorflow(APITestCommon.SNAPComputingUnit.CPU);
+    }
+
+    @Test
+    public void testSNAPTensorflowDSP() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+            /* cannot run the test */
+            return;
+        }
+
+        if (!Build.HARDWARE.equals("qcom")) {
+            /*
+             * TensorFlow models using the DSP runtime can only be executed
+             * on Snapdragon SoCs. This test cannot run on Exynos.
+             */
+            return;
+        }
+
+        runSNAPTensorflow(APITestCommon.SNAPComputingUnit.DSP);
+    }
+
+    @Test
+    public void testSNAPTensorflowNPU() {
+        if (!NNStreamer.isAvailable(NNStreamer.NNFWType.SNAP)) {
+            /* cannot run the test */
+            return;
+        }
+
+        if (!Build.HARDWARE.equals("qcom")) {
+            /*
+             * TensorFlow models using the NPU runtime can only be executed
+             * on Snapdragon SoCs. This test cannot run on Exynos.
+             */
+            return;
+        }
+
+        runSNAPTensorflow(APITestCommon.SNAPComputingUnit.NPU);
     }
 
     @Test