Add tests for the CAST_EX operation and support for testing extended APIs (#1279)
author장지섭/동작제어Lab(SR)/Engineer/삼성전자 <jiseob.jang@samsung.com>
Wed, 23 May 2018 01:01:59 +0000 (10:01 +0900)
committer오형석/동작제어Lab(SR)/Senior Engineer/삼성전자 <hseok82.oh@samsung.com>
Wed, 23 May 2018 01:01:59 +0000 (10:01 +0900)
This commit adds tests for the CAST_EX operation and adds support for testing the extended APIs.

    - Open the way to test extended operators.
    - Add tests for the CAST_EX operation.

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
externals/nnapi_test_generator/test_generator.py
runtimes/tests/include/NeuralNetworksWrapper.h
runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/generate_test.sh

index 07b503b..922ef77 100755 (executable)
@@ -1,5 +1,6 @@
 #!/usr/bin/python3
 
+# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 # Copyright 2017, The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -30,6 +31,7 @@ import struct
 import sys
 import contextlib
 import pprint
+import re
 
 @contextlib.contextmanager
 def smart_open(filename=None):
@@ -410,7 +412,11 @@ class Operation(Definitions, Uses, Traversable):
   def Definition(self):
     inputs = Operand.print_operands(self.ins);
     outputs = Operand.print_operands(self.outs);
-    return "model->addOperation(ANEURALNETWORKS_"+self.optype+", " + \
+    if re.search('_EX$', self.optype):
+      return "model->addOperationEx(ANEURALNETWORKS_"+self.optype+", " + \
+        "{"+", ".join(inputs)+"}, {" + ", ".join(outputs) + "});"
+    else:
+      return "model->addOperation(ANEURALNETWORKS_"+self.optype+", " + \
         "{"+", ".join(inputs)+"}, {" + ", ".join(outputs) + "});"
 
   # Get Python-ish dump for the op
index c0db78d..5662d6f 100644 (file)
@@ -21,6 +21,7 @@
 #define __NNFW_RT_NEURAL_NETWORKS_WRAPPER_H__
 
 #include "NeuralNetworks.h"
+#include "NeuralNetworksEx.h"
 
 #include <math.h>
 #include <vector>
@@ -169,6 +170,16 @@ public:
             mValid = false;
         }
     }
+
+    void addOperationEx(ANeuralNetworksOperationTypeEx type, const std::vector<uint32_t>& inputs,
+                      const std::vector<uint32_t>& outputs) {
+        if (ANeuralNetworksModel_addOperationEx(mModel, type, static_cast<uint32_t>(inputs.size()),
+                                              inputs.data(), static_cast<uint32_t>(outputs.size()),
+                                              outputs.data()) != ANEURALNETWORKS_NO_ERROR) {
+            mValid = false;
+        }
+    }
+
     void identifyInputsAndOutputs(const std::vector<uint32_t>& inputs,
                                   const std::vector<uint32_t>& outputs) {
         if (ANeuralNetworksModel_identifyInputsAndOutputs(
index f9d5af2..9c735d0 100644 (file)
@@ -2226,3 +2226,31 @@ TEST_F(GeneratedTests, sub) {
             sub::is_ignored,
             sub::examples);
 }
+
+namespace cast_ex_float32_to_int32 {
+std::vector<MixedTypedExample> examples = {
+// Generated cast_ex_float32_to_int32 test
+#include "generated/examples/cast_ex_float32_to_int32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/cast_ex_float32_to_int32.model.cpp"
+} // namespace cast_ex_float32_to_int32
+TEST_F(GeneratedTests, cast_ex_float32_to_int32) {
+    execute(cast_ex_float32_to_int32::CreateModel,
+            cast_ex_float32_to_int32::is_ignored,
+            cast_ex_float32_to_int32::examples);
+}
+
+namespace cast_ex_int32_to_float32 {
+std::vector<MixedTypedExample> examples = {
+// Generated cast_ex_int32_to_float32 test
+#include "generated/examples/cast_ex_int32_to_float32.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/cast_ex_int32_to_float32.model.cpp"
+} // namespace cast_ex_int32_to_float32
+TEST_F(GeneratedTests, cast_ex_int32_to_float32) {
+    execute(cast_ex_int32_to_float32::CreateModel,
+            cast_ex_int32_to_float32::is_ignored,
+            cast_ex_int32_to_float32::examples);
+}
diff --git a/runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/cast_ex_float32_to_int32.example.cpp
new file mode 100644 (file)
index 0000000..bf931ad
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: cast_ex_float32_to_int32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {100.0f, 20.0f, 3.0f, 0.4f, 0.999f, 1.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {100, 20, 3, 0, 0, 1}}},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/cast_ex_int32_to_float32.example.cpp
new file mode 100644 (file)
index 0000000..7a076e1
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: cast_ex_int32_to_float32.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {{0, {100, 200, 300, 400, 500, 600}}},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {100.0f, 200.0f, 300.0f, 400.0f, 500.0f, 600.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp b/runtimes/tests/neural_networks_test/generated/models/cast_ex_float32_to_int32.model.cpp
new file mode 100644 (file)
index 0000000..00a9385
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: cast_ex_float32_to_int32.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type2(Type::TENSOR_INT32, {2, 3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_CAST_EX, {op1, act}, {op2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp b/runtimes/tests/neural_networks_test/generated/models/cast_ex_int32_to_float32.model.cpp
new file mode 100644 (file)
index 0000000..3f9715f
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: cast_ex_int32_to_float32.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type2(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type0(Type::TENSOR_INT32, {2, 3});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op2 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperationEx(ANEURALNETWORKS_CAST_EX, {op1, act}, {op2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_float32_to_int32.mod.py
new file mode 100644 (file)
index 0000000..102dcdf
--- /dev/null
@@ -0,0 +1,16 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{2, 3}")
+act = Int32Scalar("act", 0) # an int32_t scalar fuse_activation
+i2 = Output("op2", "TENSOR_INT32", "{2, 3}")
+model = model.Operation("CAST_EX", i1, act).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [100.0, 20.0, 3.0, 0.4, 0.999, 1.1]}
+
+output0 = {i2: # output 0
+           [100, 20, 3, 0, 0, 1]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/cast_ex_int32_to_float32.mod.py
new file mode 100644 (file)
index 0000000..b20b4ae
--- /dev/null
@@ -0,0 +1,16 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_INT32", "{2, 3}")
+act = Int32Scalar("act", 0) # an int32_t scalar fuse_activation
+i2 = Output("op2", "TENSOR_FLOAT32", "{2, 3}")
+model = model.Operation("CAST_EX", i1, act).To(i2)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [100, 200, 300, 400, 500, 600]}
+
+output0 = {i2: # output 0
+           [100.0, 200.0, 300.0, 400.0, 500.0, 600.0]}
+
+# Instantiate an example
+Example((input0, output0))
index f63aa58..c0676cf 100755 (executable)
@@ -17,6 +17,7 @@
 NNAPI_VERSION="
 V1_0
 V1_1
+Ex
 "
 
 # Process one test spec, and optionally provide the log file argument