Correction in pad_quant gtest (#3680)
author Shubham Gupta/SNAP /SRI-Bangalore/Engineer/삼성전자 <shub98.gupta@samsung.com>
Fri, 23 Nov 2018 02:38:02 +0000 (08:08 +0530)
committer 오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 23 Nov 2018 02:38:02 +0000 (11:38 +0900)
This patch corrects the expected output buffer values and the name of the pad_quant8 gtest (the quantized PAD output is now padded with the tensor's zero point, 2, instead of 0, and the misspelled "pad_quan8" identifier is renamed to "pad_quant8").

Signed-off-by: shubham <shub98.gupta@samsung.com>
runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
runtimes/tests/neural_networks_test/generated/examples/pad_quant8_nnfw.example.cpp [moved from runtimes/tests/neural_networks_test/generated/examples/pad_quan8_nnfw.example.cpp with 76% similarity]
runtimes/tests/neural_networks_test/generated/models/pad_quant8_nnfw.model.cpp [moved from runtimes/tests/neural_networks_test/generated/models/pad_quan8_nnfw.model.cpp with 92% similarity]
runtimes/tests/neural_networks_test/runtime_run_android_nn_test.skip.armv7l-linux
runtimes/tests/neural_networks_test/specs/V1_1/pad_quant8_nnfw.mod.py [moved from runtimes/tests/neural_networks_test/specs/V1_1/pad_quan8_nnfw.mod.py with 82% similarity]

index 701adc4..5d5c437 100644 (file)
@@ -2451,18 +2451,18 @@ TEST_F(GeneratedTests, pad) {
             pad::examples);
 }
 
-namespace pad_quan8_nnfw {
+namespace pad_quant8_nnfw {
 std::vector<MixedTypedExample> examples = {
-// Generated pad_quan8_nnfw test
-#include "generated/examples/pad_quan8_nnfw.example.cpp"
+// Generated pad_quant8_nnfw test
+#include "generated/examples/pad_quant8_nnfw.example.cpp"
 };
 // Generated model constructor
-#include "generated/models/pad_quan8_nnfw.model.cpp"
-} // namespace pad_quan8_nnfw
-TEST_F(GeneratedTests, pad_quan8_nnfw) {
-    execute(pad_quan8_nnfw::CreateModel,
-            pad_quan8_nnfw::is_ignored,
-            pad_quan8_nnfw::examples);
+#include "generated/models/pad_quant8_nnfw.model.cpp"
+} // namespace pad_quant8_nnfw
+TEST_F(GeneratedTests, pad_quant8_nnfw) {
+    execute(pad_quant8_nnfw::CreateModel,
+            pad_quant8_nnfw::is_ignored,
+            pad_quant8_nnfw::examples);
 }
 
 namespace space_to_batch_float_1 {
@@ -1,4 +1,4 @@
-// Generated file (from: pad_quan8_nnfw.mod.py). Do not edit
+// Generated file (from: pad_quant8_nnfw.mod.py). Do not edit
 // Begin of an example
 {
 //Input(s)
@@ -17,6 +17,6 @@
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
-  {{0, {0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4, 0, 0, 0, 0, 0}}}
+  {{0, {2, 2, 2, 2, 2, 1, 2, 2, 2, 3, 4, 2, 2, 2, 2, 2}}}
 }
 }, // End of an example
@@ -1,4 +1,4 @@
-// Generated file (from: pad_quan8_nnfw.mod.py). Do not edit
+// Generated file (from: pad_quant8_nnfw.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type1(Type::TENSOR_INT32, {4, 2});
   OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 2, 2, 1}, 1.0, 2);
@@ -11,10 +11,10 @@ input0 = {i1: # input 0
            3, 4,]}
 
 output0 = {i3: # output 0
-           [0, 0, 0, 0,
-            0, 1, 2, 0,
-            0, 3, 4, 0,
-            0, 0, 0, 0]}
+           [2, 2, 2, 2,
+            2, 1, 2, 2,
+            2, 3, 4, 2,
+            2, 2, 2, 2]}
 
 # Instantiate an example
 Example((input0, output0))