Squeeze 2D input test (for neural_networks_test/generated) (#2087)
author윤현식/동작제어Lab(SR)/Principal Engineer/삼성전자 <hyunsik.yoon@samsung.com>
Tue, 31 Jul 2018 07:30:24 +0000 (16:30 +0900)
committer오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Tue, 31 Jul 2018 07:30:24 +0000 (16:30 +0900)
This commit adds a Squeeze test against 2D input.

C++ files are generated by `./runtimes/tests/neural_networks_test/specs/generate_test.sh`

How to run:
`$ LD_LIBRARY_PATH=Product/out/lib Product/out/unittest/runtime_run_android_nn_test --gtest_filter=GeneratedTests.squeeze_2D*`

Signed-off-by: Hyun Sik Yoon <hyunsik.yoon@samsung.com>
runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
runtimes/tests/neural_networks_test/generated/examples/squeeze_2D_float_1_nnfw.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/squeeze_2D_float_1_nnfw.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/squeeze_2D_float_1_nnfw.mod.py [new file with mode: 0644]

index f20216a..65d30d2 100644 (file)
@@ -2339,6 +2339,20 @@ TEST_F(GeneratedTests, space_to_batch_quant8_3) {
             space_to_batch_quant8_3::examples);
 }
 
+namespace squeeze_2D_float_1_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze_2D_float_1_nnfw test
+#include "generated/examples/squeeze_2D_float_1_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze_2D_float_1_nnfw.model.cpp"
+} // namespace squeeze_2D_float_1_nnfw
+TEST_F(GeneratedTests, squeeze_2D_float_1_nnfw) {
+    execute(squeeze_2D_float_1_nnfw::CreateModel,
+            squeeze_2D_float_1_nnfw::is_ignored,
+            squeeze_2D_float_1_nnfw::examples);
+}
+
 namespace squeeze_float_1 {
 std::vector<MixedTypedExample> examples = {
 // Generated squeeze_float_1 test
diff --git a/runtimes/tests/neural_networks_test/generated/examples/squeeze_2D_float_1_nnfw.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/squeeze_2D_float_1_nnfw.example.cpp
new file mode 100644 (file)
index 0000000..31a82ef
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: squeeze_2D_float_1_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/models/squeeze_2D_float_1_nnfw.model.cpp b/runtimes/tests/neural_networks_test/generated/models/squeeze_2D_float_1_nnfw.model.cpp
new file mode 100644 (file)
index 0000000..4d6621e
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: squeeze_2D_float_1_nnfw.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {4});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {1};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/squeeze_2D_float_1_nnfw.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/squeeze_2D_float_1_nnfw.mod.py
new file mode 100644 (file)
index 0000000..8397902
--- /dev/null
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 1}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [1])
+output = Output("output", "TENSOR_FLOAT32", "{4}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.4, 2.3, 3.2, 4.1]}
+
+output0 = {output: # output 0
+           [1.4, 2.3, 3.2, 4.1]}
+
+# Instantiate an example
+Example((input0, output0))