Update nn_android_runtime_test to p-preview-4 (#1935)
author: Chunseok Lee / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <chunseok.lee@samsung.com>
Fri, 13 Jul 2018 07:49:45 +0000 (16:49 +0900)
committer: Hyeongseok Oh / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <hseok82.oh@samsung.com>
Fri, 13 Jul 2018 07:49:45 +0000 (16:49 +0900)
commit: c73accf066d4f05372
link: https://android.googlesource.com/platform/frameworks/ml/+/android-p-preview-4

Note: all *relaxed* tests are omitted.

Signed-off-by: Chunseok Lee <chunseok.lee@samsung.com>
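
Note: each *.mod.py spec under specs/ is the generation source for the
paired *.model.cpp (model constructor) and *.example.cpp (input/output
data) files listed below. As a hedged sketch of the NNAPI test-generator
DSL only -- the shapes and axis parameter here are illustrative
assumptions, not copied from the upstream spec -- a V1_1 spec such as
mean.mod.py reads roughly:

    model = Model()
    i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")   # assumed shape
    axis = Parameter("axis", "TENSOR_INT32", "{1}", [2])    # assumed reduction axis
    keepDims = Int32Scalar("keepDims", 0)
    output = Output("output", "TENSOR_FLOAT32", "{1, 2}")
    model = model.Operation("MEAN", i1, axis, keepDims).To(output)

    # Data matches mean.example.cpp in this commit:
    # mean of {1, 2} and {3, 4} -> {1.5, 3.5}
    Example(({i1: [1.0, 2.0, 3.0, 4.0]},
             {output: [1.5, 3.5]}))
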
147 files changed:
runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
runtimes/tests/neural_networks_test/generated/examples/batch_to_space.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/batch_to_space_float_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/batch_to_space_quant8_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/div.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/div_broadcast_float.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/floor.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_3.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_4d_simple.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/mean.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/mean_float_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/mean_float_2.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/mean_quant8_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/mean_quant8_2.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/pad.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/pad_float_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_2.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_3.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_2.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_3.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/squeeze.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/squeeze_float_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/squeeze_quant8_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_float_11.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_qaunt8_10.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_qaunt8_11.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_2.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_3.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_4.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_5.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_6.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_7.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_8.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_9.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/sub_broadcast_float.example.cpp
runtimes/tests/neural_networks_test/generated/examples/tanh.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/transpose.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/transpose_float_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/examples/transpose_quant8_1.example.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/batch_to_space.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/batch_to_space_float_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/batch_to_space_quant8_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/div.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/div_broadcast_float.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/floor.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/fully_connected_float_3.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/fully_connected_float_4d_simple.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/mean.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/mean_float_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/mean_float_2.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/mean_quant8_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/mean_quant8_2.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/pad.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/pad_float_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_2.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_3.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_2.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_3.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/squeeze.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/squeeze_float_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/squeeze_quant8_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_1.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_10.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_11.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_2.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_3.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_4.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_5.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_6.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_7.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_8.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_float_9.model.cpp
runtimes/tests/neural_networks_test/generated/models/strided_slice_qaunt8_10.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_qaunt8_11.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_2.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_3.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_4.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_5.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_6.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_7.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_8.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_9.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/sub_broadcast_float.model.cpp
runtimes/tests/neural_networks_test/generated/models/tanh.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/transpose.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/transpose_float_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/generated/models/transpose_quant8_1.model.cpp [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/Ex/sub_broadcast_float.mod.py [deleted file]
runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_3.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space_float_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space_quant8_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/div_broadcast_float.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/fully_connected_float_4d_simple.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/mean.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/mean_float_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/mean_float_2.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/mean_quant8_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/mean_quant8_2.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/pad.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/pad_float_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_2.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_3.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_2.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_3.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/squeeze.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/squeeze_float_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/squeeze_quant8_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_1.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_10.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_11.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_2.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_3.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_4.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_5.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_6.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_7.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_8.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_9.mod.py
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_qaunt8_10.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_qaunt8_11.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_2.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_3.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_4.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_5.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_6.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_7.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_8.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_9.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/sub_broadcast_float.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/transpose.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/transpose_float_1.mod.py [new file with mode: 0644]
runtimes/tests/neural_networks_test/specs/V1_1/transpose_quant8_1.mod.py [new file with mode: 0644]

diff --git a/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp b/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
index 798d8f1..f20216a 100644 (file)
@@ -855,6 +855,20 @@ TEST_F(GeneratedTests, fully_connected_float_2) {
             fully_connected_float_2::examples);
 }
 
+namespace fully_connected_float_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_3 test
+#include "generated/examples/fully_connected_float_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_3.model.cpp"
+} // namespace fully_connected_float_3
+TEST_F(GeneratedTests, fully_connected_float_3) {
+    execute(fully_connected_float_3::CreateModel,
+            fully_connected_float_3::is_ignored,
+            fully_connected_float_3::examples);
+}
+
 namespace fully_connected_float_large {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_float_large test
@@ -2045,6 +2059,62 @@ TEST_F(GeneratedTests, tanh_) {
             tanh_::examples);
 }
 
+namespace batch_to_space_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated batch_to_space_float_1 test
+#include "generated/examples/batch_to_space_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/batch_to_space_float_1.model.cpp"
+} // namespace batch_to_space_float_1
+TEST_F(GeneratedTests, batch_to_space_float_1) {
+    execute(batch_to_space_float_1::CreateModel,
+            batch_to_space_float_1::is_ignored,
+            batch_to_space_float_1::examples);
+}
+
+namespace batch_to_space {
+std::vector<MixedTypedExample> examples = {
+// Generated batch_to_space test
+#include "generated/examples/batch_to_space.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/batch_to_space.model.cpp"
+} // namespace batch_to_space
+TEST_F(GeneratedTests, batch_to_space) {
+    execute(batch_to_space::CreateModel,
+            batch_to_space::is_ignored,
+            batch_to_space::examples);
+}
+
+namespace batch_to_space_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated batch_to_space_quant8_1 test
+#include "generated/examples/batch_to_space_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/batch_to_space_quant8_1.model.cpp"
+} // namespace batch_to_space_quant8_1
+TEST_F(GeneratedTests, batch_to_space_quant8_1) {
+    execute(batch_to_space_quant8_1::CreateModel,
+            batch_to_space_quant8_1::is_ignored,
+            batch_to_space_quant8_1::examples);
+}
+
+namespace div_broadcast_float {
+std::vector<MixedTypedExample> examples = {
+// Generated div_broadcast_float test
+#include "generated/examples/div_broadcast_float.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/div_broadcast_float.model.cpp"
+} // namespace div_broadcast_float
+TEST_F(GeneratedTests, div_broadcast_float) {
+    execute(div_broadcast_float::CreateModel,
+            div_broadcast_float::is_ignored,
+            div_broadcast_float::examples);
+}
+
 namespace div_ {
 std::vector<MixedTypedExample> examples = {
 // Generated div_ test
@@ -2059,6 +2129,258 @@ TEST_F(GeneratedTests, div_) {
             div_::examples);
 }
 
+namespace fully_connected_float_4d_simple {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_4d_simple test
+#include "generated/examples/fully_connected_float_4d_simple.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_4d_simple.model.cpp"
+} // namespace fully_connected_float_4d_simple
+TEST_F(GeneratedTests, fully_connected_float_4d_simple) {
+    execute(fully_connected_float_4d_simple::CreateModel,
+            fully_connected_float_4d_simple::is_ignored,
+            fully_connected_float_4d_simple::examples);
+}
+
+namespace mean_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_float_1 test
+#include "generated/examples/mean_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_float_1.model.cpp"
+} // namespace mean_float_1
+TEST_F(GeneratedTests, mean_float_1) {
+    execute(mean_float_1::CreateModel,
+            mean_float_1::is_ignored,
+            mean_float_1::examples);
+}
+
+namespace mean_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_float_2 test
+#include "generated/examples/mean_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_float_2.model.cpp"
+} // namespace mean_float_2
+TEST_F(GeneratedTests, mean_float_2) {
+    execute(mean_float_2::CreateModel,
+            mean_float_2::is_ignored,
+            mean_float_2::examples);
+}
+
+namespace mean {
+std::vector<MixedTypedExample> examples = {
+// Generated mean test
+#include "generated/examples/mean.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean.model.cpp"
+} // namespace mean
+TEST_F(GeneratedTests, mean) {
+    execute(mean::CreateModel,
+            mean::is_ignored,
+            mean::examples);
+}
+
+namespace mean_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_quant8_1 test
+#include "generated/examples/mean_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_quant8_1.model.cpp"
+} // namespace mean_quant8_1
+TEST_F(GeneratedTests, mean_quant8_1) {
+    execute(mean_quant8_1::CreateModel,
+            mean_quant8_1::is_ignored,
+            mean_quant8_1::examples);
+}
+
+namespace mean_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_quant8_2 test
+#include "generated/examples/mean_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_quant8_2.model.cpp"
+} // namespace mean_quant8_2
+TEST_F(GeneratedTests, mean_quant8_2) {
+    execute(mean_quant8_2::CreateModel,
+            mean_quant8_2::is_ignored,
+            mean_quant8_2::examples);
+}
+
+namespace pad_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated pad_float_1 test
+#include "generated/examples/pad_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/pad_float_1.model.cpp"
+} // namespace pad_float_1
+TEST_F(GeneratedTests, pad_float_1) {
+    execute(pad_float_1::CreateModel,
+            pad_float_1::is_ignored,
+            pad_float_1::examples);
+}
+
+namespace pad {
+std::vector<MixedTypedExample> examples = {
+// Generated pad test
+#include "generated/examples/pad.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/pad.model.cpp"
+} // namespace pad
+TEST_F(GeneratedTests, pad) {
+    execute(pad::CreateModel,
+            pad::is_ignored,
+            pad::examples);
+}
+
+namespace space_to_batch_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_float_1 test
+#include "generated/examples/space_to_batch_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_float_1.model.cpp"
+} // namespace space_to_batch_float_1
+TEST_F(GeneratedTests, space_to_batch_float_1) {
+    execute(space_to_batch_float_1::CreateModel,
+            space_to_batch_float_1::is_ignored,
+            space_to_batch_float_1::examples);
+}
+
+namespace space_to_batch_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_float_2 test
+#include "generated/examples/space_to_batch_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_float_2.model.cpp"
+} // namespace space_to_batch_float_2
+TEST_F(GeneratedTests, space_to_batch_float_2) {
+    execute(space_to_batch_float_2::CreateModel,
+            space_to_batch_float_2::is_ignored,
+            space_to_batch_float_2::examples);
+}
+
+namespace space_to_batch_float_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_float_3 test
+#include "generated/examples/space_to_batch_float_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_float_3.model.cpp"
+} // namespace space_to_batch_float_3
+TEST_F(GeneratedTests, space_to_batch_float_3) {
+    execute(space_to_batch_float_3::CreateModel,
+            space_to_batch_float_3::is_ignored,
+            space_to_batch_float_3::examples);
+}
+
+namespace space_to_batch {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch test
+#include "generated/examples/space_to_batch.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch.model.cpp"
+} // namespace space_to_batch
+TEST_F(GeneratedTests, space_to_batch) {
+    execute(space_to_batch::CreateModel,
+            space_to_batch::is_ignored,
+            space_to_batch::examples);
+}
+
+namespace space_to_batch_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_quant8_1 test
+#include "generated/examples/space_to_batch_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_quant8_1.model.cpp"
+} // namespace space_to_batch_quant8_1
+TEST_F(GeneratedTests, space_to_batch_quant8_1) {
+    execute(space_to_batch_quant8_1::CreateModel,
+            space_to_batch_quant8_1::is_ignored,
+            space_to_batch_quant8_1::examples);
+}
+
+namespace space_to_batch_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_quant8_2 test
+#include "generated/examples/space_to_batch_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_quant8_2.model.cpp"
+} // namespace space_to_batch_quant8_2
+TEST_F(GeneratedTests, space_to_batch_quant8_2) {
+    execute(space_to_batch_quant8_2::CreateModel,
+            space_to_batch_quant8_2::is_ignored,
+            space_to_batch_quant8_2::examples);
+}
+
+namespace space_to_batch_quant8_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_quant8_3 test
+#include "generated/examples/space_to_batch_quant8_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_quant8_3.model.cpp"
+} // namespace space_to_batch_quant8_3
+TEST_F(GeneratedTests, space_to_batch_quant8_3) {
+    execute(space_to_batch_quant8_3::CreateModel,
+            space_to_batch_quant8_3::is_ignored,
+            space_to_batch_quant8_3::examples);
+}
+
+namespace squeeze_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze_float_1 test
+#include "generated/examples/squeeze_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze_float_1.model.cpp"
+} // namespace squeeze_float_1
+TEST_F(GeneratedTests, squeeze_float_1) {
+    execute(squeeze_float_1::CreateModel,
+            squeeze_float_1::is_ignored,
+            squeeze_float_1::examples);
+}
+
+namespace squeeze {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze test
+#include "generated/examples/squeeze.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze.model.cpp"
+} // namespace squeeze
+TEST_F(GeneratedTests, squeeze) {
+    execute(squeeze::CreateModel,
+            squeeze::is_ignored,
+            squeeze::examples);
+}
+
+namespace squeeze_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze_quant8_1 test
+#include "generated/examples/squeeze_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze_quant8_1.model.cpp"
+} // namespace squeeze_quant8_1
+TEST_F(GeneratedTests, squeeze_quant8_1) {
+    execute(squeeze_quant8_1::CreateModel,
+            squeeze_quant8_1::is_ignored,
+            squeeze_quant8_1::examples);
+}
+
 namespace strided_slice_float_10 {
 std::vector<MixedTypedExample> examples = {
 // Generated strided_slice_float_10 test
@@ -2073,6 +2395,20 @@ TEST_F(GeneratedTests, strided_slice_float_10) {
             strided_slice_float_10::examples);
 }
 
+namespace strided_slice_float_11 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_float_11 test
+#include "generated/examples/strided_slice_float_11.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_float_11.model.cpp"
+} // namespace strided_slice_float_11
+TEST_F(GeneratedTests, strided_slice_float_11) {
+    execute(strided_slice_float_11::CreateModel,
+            strided_slice_float_11::is_ignored,
+            strided_slice_float_11::examples);
+}
+
 namespace strided_slice_float_1 {
 std::vector<MixedTypedExample> examples = {
 // Generated strided_slice_float_1 test
@@ -2213,6 +2549,174 @@ TEST_F(GeneratedTests, strided_slice) {
             strided_slice::examples);
 }
 
+namespace strided_slice_qaunt8_10 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_qaunt8_10 test
+#include "generated/examples/strided_slice_qaunt8_10.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_qaunt8_10.model.cpp"
+} // namespace strided_slice_qaunt8_10
+TEST_F(GeneratedTests, strided_slice_qaunt8_10) {
+    execute(strided_slice_qaunt8_10::CreateModel,
+            strided_slice_qaunt8_10::is_ignored,
+            strided_slice_qaunt8_10::examples);
+}
+
+namespace strided_slice_qaunt8_11 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_qaunt8_11 test
+#include "generated/examples/strided_slice_qaunt8_11.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_qaunt8_11.model.cpp"
+} // namespace strided_slice_qaunt8_11
+TEST_F(GeneratedTests, strided_slice_qaunt8_11) {
+    execute(strided_slice_qaunt8_11::CreateModel,
+            strided_slice_qaunt8_11::is_ignored,
+            strided_slice_qaunt8_11::examples);
+}
+
+namespace strided_slice_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_1 test
+#include "generated/examples/strided_slice_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_1.model.cpp"
+} // namespace strided_slice_quant8_1
+TEST_F(GeneratedTests, strided_slice_quant8_1) {
+    execute(strided_slice_quant8_1::CreateModel,
+            strided_slice_quant8_1::is_ignored,
+            strided_slice_quant8_1::examples);
+}
+
+namespace strided_slice_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_2 test
+#include "generated/examples/strided_slice_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_2.model.cpp"
+} // namespace strided_slice_quant8_2
+TEST_F(GeneratedTests, strided_slice_quant8_2) {
+    execute(strided_slice_quant8_2::CreateModel,
+            strided_slice_quant8_2::is_ignored,
+            strided_slice_quant8_2::examples);
+}
+
+namespace strided_slice_quant8_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_3 test
+#include "generated/examples/strided_slice_quant8_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_3.model.cpp"
+} // namespace strided_slice_quant8_3
+TEST_F(GeneratedTests, strided_slice_quant8_3) {
+    execute(strided_slice_quant8_3::CreateModel,
+            strided_slice_quant8_3::is_ignored,
+            strided_slice_quant8_3::examples);
+}
+
+namespace strided_slice_quant8_4 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_4 test
+#include "generated/examples/strided_slice_quant8_4.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_4.model.cpp"
+} // namespace strided_slice_quant8_4
+TEST_F(GeneratedTests, strided_slice_quant8_4) {
+    execute(strided_slice_quant8_4::CreateModel,
+            strided_slice_quant8_4::is_ignored,
+            strided_slice_quant8_4::examples);
+}
+
+namespace strided_slice_quant8_5 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_5 test
+#include "generated/examples/strided_slice_quant8_5.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_5.model.cpp"
+} // namespace strided_slice_quant8_5
+TEST_F(GeneratedTests, strided_slice_quant8_5) {
+    execute(strided_slice_quant8_5::CreateModel,
+            strided_slice_quant8_5::is_ignored,
+            strided_slice_quant8_5::examples);
+}
+
+namespace strided_slice_quant8_6 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_6 test
+#include "generated/examples/strided_slice_quant8_6.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_6.model.cpp"
+} // namespace strided_slice_quant8_6
+TEST_F(GeneratedTests, strided_slice_quant8_6) {
+    execute(strided_slice_quant8_6::CreateModel,
+            strided_slice_quant8_6::is_ignored,
+            strided_slice_quant8_6::examples);
+}
+
+namespace strided_slice_quant8_7 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_7 test
+#include "generated/examples/strided_slice_quant8_7.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_7.model.cpp"
+} // namespace strided_slice_quant8_7
+TEST_F(GeneratedTests, strided_slice_quant8_7) {
+    execute(strided_slice_quant8_7::CreateModel,
+            strided_slice_quant8_7::is_ignored,
+            strided_slice_quant8_7::examples);
+}
+
+namespace strided_slice_quant8_8 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_8 test
+#include "generated/examples/strided_slice_quant8_8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_8.model.cpp"
+} // namespace strided_slice_quant8_8
+TEST_F(GeneratedTests, strided_slice_quant8_8) {
+    execute(strided_slice_quant8_8::CreateModel,
+            strided_slice_quant8_8::is_ignored,
+            strided_slice_quant8_8::examples);
+}
+
+namespace strided_slice_quant8_9 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_9 test
+#include "generated/examples/strided_slice_quant8_9.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_9.model.cpp"
+} // namespace strided_slice_quant8_9
+TEST_F(GeneratedTests, strided_slice_quant8_9) {
+    execute(strided_slice_quant8_9::CreateModel,
+            strided_slice_quant8_9::is_ignored,
+            strided_slice_quant8_9::examples);
+}
+
+namespace sub_broadcast_float {
+std::vector<MixedTypedExample> examples = {
+// Generated sub_broadcast_float test
+#include "generated/examples/sub_broadcast_float.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/sub_broadcast_float.model.cpp"
+} // namespace sub_broadcast_float
+TEST_F(GeneratedTests, sub_broadcast_float) {
+    execute(sub_broadcast_float::CreateModel,
+            sub_broadcast_float::is_ignored,
+            sub_broadcast_float::examples);
+}
+
 namespace sub {
 std::vector<MixedTypedExample> examples = {
 // Generated sub test
@@ -2227,6 +2731,48 @@ TEST_F(GeneratedTests, sub) {
             sub::examples);
 }
 
+namespace transpose_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated transpose_float_1 test
+#include "generated/examples/transpose_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/transpose_float_1.model.cpp"
+} // namespace transpose_float_1
+TEST_F(GeneratedTests, transpose_float_1) {
+    execute(transpose_float_1::CreateModel,
+            transpose_float_1::is_ignored,
+            transpose_float_1::examples);
+}
+
+namespace transpose {
+std::vector<MixedTypedExample> examples = {
+// Generated transpose test
+#include "generated/examples/transpose.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/transpose.model.cpp"
+} // namespace transpose
+TEST_F(GeneratedTests, transpose) {
+    execute(transpose::CreateModel,
+            transpose::is_ignored,
+            transpose::examples);
+}
+
+namespace transpose_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated transpose_quant8_1 test
+#include "generated/examples/transpose_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/transpose_quant8_1.model.cpp"
+} // namespace transpose_quant8_1
+TEST_F(GeneratedTests, transpose_quant8_1) {
+    execute(transpose_quant8_1::CreateModel,
+            transpose_quant8_1::is_ignored,
+            transpose_quant8_1::examples);
+}
+
 namespace cast_ex_float32_to_int32 {
 std::vector<MixedTypedExample> examples = {
 // Generated cast_ex_float32_to_int32 test
@@ -2479,20 +3025,6 @@ TEST_F(GeneratedTests, strided_slice_ex_float_9) {
             strided_slice_ex_float_9::examples);
 }
 
-namespace sub_broadcast_float {
-std::vector<MixedTypedExample> examples = {
-// Generated sub_broadcast_float test
-#include "generated/examples/sub_broadcast_float.example.cpp"
-};
-// Generated model constructor
-#include "generated/models/sub_broadcast_float.model.cpp"
-} // namespace sub_broadcast_float
-TEST_F(GeneratedTests, sub_broadcast_float) {
-    execute(sub_broadcast_float::CreateModel,
-            sub_broadcast_float::is_ignored,
-            sub_broadcast_float::examples);
-}
-
 namespace tensorflowmax_ex_2D_float {
 std::vector<MixedTypedExample> examples = {
 // Generated tensorflowmax_ex_2D_float test
diff --git a/runtimes/tests/neural_networks_test/generated/examples/batch_to_space.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/batch_to_space.example.cpp
new file mode 100644 (file)
index 0000000..1113262
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: batch_to_space.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/batch_to_space_float_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/batch_to_space_float_1.example.cpp
new file mode 100644 (file)
index 0000000..45a7580
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: batch_to_space_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/batch_to_space_quant8_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/batch_to_space_quant8_1.example.cpp
new file mode 100644 (file)
index 0000000..6d1d6f7
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: batch_to_space_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16}}}
+}
+}, // End of an example
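
Note: a quick pure-Python cross-check of the expected ordering in the two
batch_to_space examples above (assumed shapes: input {4, 2, 2, 1}, block
size {2, 2}, output {1, 4, 4, 1}): BATCH_TO_SPACE_ND interleaves the four
2x2 input batches into a single 4x4 plane.

    def batch_to_space_2x2(flat, h=2, w=2):
        # flat holds four row-major h*w input batches (block size 2x2)
        out = []
        for oh in range(h * 2):
            for ow in range(w * 2):
                b = (oh % 2) * 2 + (ow % 2)  # source batch for this output pixel
                out.append(flat[b * h * w + (oh // 2) * w + (ow // 2)])
        return out

    print(batch_to_space_2x2(list(range(1, 17))))
    # -> [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]
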
diff --git a/runtimes/tests/neural_networks_test/generated/examples/div.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/div.example.cpp
new file mode 100644 (file)
index 0000000..23ad4b0
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: div.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0f, -4.0f, 8.0f, -16.0f}}, {1, {2.0f, -2.0f, -4.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, -2.0f, -4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/div_broadcast_float.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/div_broadcast_float.example.cpp
new file mode 100644 (file)
index 0000000..ccbb571
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: div_broadcast_float.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2}}, {1, {1, 1, 2, 2}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 0.5f, 1}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/floor.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/floor.example.cpp
new file mode 100644 (file)
index 0000000..f15d984
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: floor.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1.5f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 1.5f, 10.2f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-2.0f, -1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 10}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_3.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_3.example.cpp
new file mode 100644 (file)
index 0000000..14ee46d
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 2, 1}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {11, 9}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_4d_simple.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_4d_simple.example.cpp
new file mode 100644 (file)
index 0000000..4086bc5
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, -8, 9, -10}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {24, 25, 26, 58, 59, 60}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/mean.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/mean.example.cpp
new file mode 100644 (file)
index 0000000..27b8258
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: mean.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.5f, 3.5f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/mean_float_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/mean_float_1.example.cpp
new file mode 100644 (file)
index 0000000..4c53d77
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: mean_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {12.0f, 13.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
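
Note: the expected {12.0f, 13.0f} above is consistent with reducing over
everything but the innermost axis (assumed input shape {4, 3, 2},
keepDims = 0); a short sanity check:

    vals = [float(v) for v in range(1, 25)]       # row-major {4, 3, 2} input
    evens, odds = vals[0::2], vals[1::2]          # last-axis index 0 / index 1
    print(sum(evens) / len(evens), sum(odds) / len(odds))   # 12.0 13.0
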
diff --git a/runtimes/tests/neural_networks_test/generated/examples/mean_float_2.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/mean_float_2.example.cpp
new file mode 100644 (file)
index 0000000..844dd2a
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: mean_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {10.5f, 12.5f, 14.5f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/mean_quant8_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/mean_quant8_1.example.cpp
new file mode 100644 (file)
index 0000000..652c847
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: mean_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {12, 13}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/mean_quant8_2.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/mean_quant8_2.example.cpp
new file mode 100644 (file)
index 0000000..56dec24
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: mean_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {10, 12, 14}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/pad.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/pad.example.cpp
new file mode 100644 (file)
index 0000000..dbec433
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: pad.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 2.0f, 0.0f, 0.0f, 3.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/pad_float_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/pad_float_1.example.cpp
new file mode 100644 (file)
index 0000000..873149b
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: pad_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch.example.cpp
new file mode 100644 (file)
index 0000000..e226e36
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_1.example.cpp
new file mode 100644 (file)
index 0000000..06d0ff3
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
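
Note: SPACE_TO_BATCH_ND is the inverse permutation of the batch_to_space
examples earlier in this commit; a pure-Python cross-check under the same
assumed shapes (input {1, 4, 4, 1}, block size {2, 2}, no padding, output
{4, 2, 2, 1}) reproduces the expected ordering above.

    def space_to_batch_2x2(flat, h=4, w=4):
        # flat holds one row-major h*w plane; emit one batch per block offset
        out = []
        for b in range(4):
            for oh in range(h // 2):
                for ow in range(w // 2):
                    out.append(flat[(oh * 2 + b // 2) * w + (ow * 2 + b % 2)])
        return out

    print(space_to_batch_2x2(list(range(1, 17))))
    # -> [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]
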
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_2.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_2.example.cpp
new file mode 100644 (file)
index 0000000..a7b0010
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7, 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_3.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_float_3.example.cpp
new file mode 100644 (file)
index 0000000..5198bae
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch_float_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_1.example.cpp
new file mode 100644 (file)
index 0000000..1c86710
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_2.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_2.example.cpp
new file mode 100644 (file)
index 0000000..4e615d0
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7, 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_3.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/space_to_batch_quant8_3.example.cpp
new file mode 100644 (file)
index 0000000..13745ac
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: space_to_batch_quant8_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/squeeze.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/squeeze.example.cpp
new file mode 100644 (file)
index 0000000..bcbc54f
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: squeeze.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/squeeze_float_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/squeeze_float_1.example.cpp
new file mode 100644 (file)
index 0000000..2616d65
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: squeeze_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/squeeze_quant8_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/squeeze_quant8_1.example.cpp
new file mode 100644
index 0000000..53bb0a8
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: squeeze_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_float_11.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_float_11.example.cpp
new file mode 100644
index 0000000..19bb573
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_float_11.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3, 4, 5, 6}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1, 2, 3}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
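cf) Here {1, 2, 3, 4, 5, 6} is a {2, 3} tensor; the slice [0:1, 0:3] keeps row 0, and the new shrink_axis_mask input (bit 0 set in the regenerated model further below) drops the collapsed dimension, so the result is the rank-1 tensor {1, 2, 3} instead of a {1, 3} one.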
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_qaunt8_10.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_qaunt8_10.example.cpp
new file mode 100644
index 0000000..091aa06
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_qaunt8_10.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {4, 5, 6}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_qaunt8_11.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_qaunt8_11.example.cpp
new file mode 100644
index 0000000..8e8c23e
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_qaunt8_11.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_1.example.cpp
new file mode 100644
index 0000000..6eb2d95
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 3}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_2.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_2.example.cpp
new file mode 100644
index 0000000..481fe2e
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 3}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_3.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_3.example.cpp
new file mode 100644
index 0000000..a68e882
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_4.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_4.example.cpp
new file mode 100644
index 0000000..aa486a4
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_4.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_5.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_5.example.cpp
new file mode 100644
index 0000000..db84580
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_5.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_6.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_6.example.cpp
new file mode 100644
index 0000000..232e8c4
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_6.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {2, 3, 4}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_7.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_7.example.cpp
new file mode 100644
index 0000000..86f32fe
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_7.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {3, 2, 1}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_8.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_8.example.cpp
new file mode 100644
index 0000000..fe5026d
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {6, 5, 4}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_9.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/strided_slice_quant8_9.example.cpp
new file mode 100644
index 0000000..dd590e4
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: strided_slice_quant8_9.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {1, 2, 4, 5}}}
+}
+}, // End of an example
index f11c66d..4888c76 100644
@@ -4,7 +4,7 @@
 //Input(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {1, 2, 3, 4}}, {1, {2}}},
+  {{0, {1, 2}}, {1, {1, 2, 3, 4}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
@@ -13,7 +13,7 @@
 //Output(s)
 { // See tools/test_generator/include/TestHarness.h:MixedTyped
   // int -> FLOAT32 map
-  {{0, {-1, 0, 1, 2}}},
+  {{0, {0, 0, -2, -2}}},
   // int -> INT32 map
   {},
   // int -> QUANT8_ASYMM map
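cf) The regenerated values above are consistent with a broadcast SUB: op1 = {1, 2} (shape {1, 2}) broadcast against op2 = {1, 2, 3, 4} (shape {2, 2}) gives 1-1=0, 2-2=0, 1-3=-2, 2-4=-2, i.e. {0, 0, -2, -2}; the smaller operand simply moved from op2 to op1 in p-preview-4.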
diff --git a/runtimes/tests/neural_networks_test/generated/examples/tanh.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/tanh.example.cpp
new file mode 100644
index 0000000..5c12ca0
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: tanh.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-1, 0, 1, 10}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {-0.761594156f, 0, 0.761594156f, 0.999999996f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
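cf) The expected outputs are ordinary hyperbolic tangents rounded to nine digits. A quick check (illustrative, not part of the generated sources):

  #include <cmath>
  #include <cstdio>

  int main() {
    // Prints -0.761594156, 0.000000000, 0.761594156, 0.999999996,
    // matching the FLOAT32 output map above.
    for (double x : {-1.0, 0.0, 1.0, 10.0})
      std::printf("tanh(%g) = %.9f\n", x, std::tanh(x));
  }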
diff --git a/runtimes/tests/neural_networks_test/generated/examples/transpose.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/transpose.example.cpp
new file mode 100644
index 0000000..790923c
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: transpose.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.0f, 3.0f, 2.0f, 4.0f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
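cf) Read as a 2x2 matrix, the data is transposed: {{1, 2}, {3, 4}} becomes {{1, 3}, {2, 4}}. The permutation operand lives in transpose.model.cpp, outside this part of the diff; for a {1, 2, 2, 1} input it would be {0, 2, 1, 3} (an assumption, not shown in the export).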
diff --git a/runtimes/tests/neural_networks_test/generated/examples/transpose_float_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/transpose_float_1.example.cpp
new file mode 100644
index 0000000..31f6799
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: transpose_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104, 5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49, 65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109, 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54, 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/examples/transpose_quant8_1.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/transpose_quant8_1.example.cpp
new file mode 100644
index 0000000..f1bb2fa
--- /dev/null
@@ -0,0 +1,22 @@
+// Generated file (from: transpose_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {{0, {0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104, 5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49, 65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109, 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54, 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}}}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/models/batch_to_space.model.cpp b/runtimes/tests/neural_networks_test/generated/models/batch_to_space.model.cpp
new file mode 100644
index 0000000..6c6d590
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: batch_to_space.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
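cf) All of the new model files share this three-phase shape: declare operand types, bake constant operands in with setOperandValue, then expose the remaining operands via identifyInputsAndOutputs. The harness glue that pairs each model with its example file looks roughly like this (reconstructed from the all_generated_tests.cpp pattern; type and function names are assumptions, not part of this diff):

  // all_generated_tests.cpp, schematically:
  namespace batch_to_space {
  std::vector<MixedTypedExampleType> examples = {
  // contents of batch_to_space.example.cpp are #included here
  };
  // contents of batch_to_space.model.cpp (CreateModel / is_ignored) follow
  }  // namespace batch_to_space

  TEST_F(GeneratedTests, batch_to_space) {
    execute(batch_to_space::CreateModel, batch_to_space::is_ignored,
            batch_to_space::examples);
  }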
diff --git a/runtimes/tests/neural_networks_test/generated/models/batch_to_space_float_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/batch_to_space_float_1.model.cpp
new file mode 100644
index 0000000..e074783
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: batch_to_space_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/batch_to_space_quant8_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/batch_to_space_quant8_1.model.cpp
new file mode 100644
index 0000000..8922740
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: batch_to_space_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/div.model.cpp b/runtimes/tests/neural_networks_test/generated/models/div.model.cpp
new file mode 100644
index 0000000..31213de
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: div.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  auto act = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
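cf) The trailing act operand is the fused-activation code (0 = no activation). Since it is fixed with setOperandValue at model-build time, identifyInputsAndOutputs lists only op1 and op2 as runtime inputs; the same convention holds for every binary-op model in this commit.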
diff --git a/runtimes/tests/neural_networks_test/generated/models/div_broadcast_float.model.cpp b/runtimes/tests/neural_networks_test/generated/models/div_broadcast_float.model.cpp
new file mode 100644
index 0000000..e6f442d
--- /dev/null
@@ -0,0 +1,25 @@
+// Generated file (from: div_broadcast_float.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto act = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type1);
+  // Phase 2, operations
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1, op2},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/floor.model.cpp b/runtimes/tests/neural_networks_test/generated/models/floor.model.cpp
new file mode 100644
index 0000000..2425f47
--- /dev/null
@@ -0,0 +1,19 @@
+// Generated file (from: floor.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_FLOOR, {op1}, {op2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_3.model.cpp b/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_3.model.cpp
new file mode 100644
index 0000000..1527525
--- /dev/null
@@ -0,0 +1,32 @@
+// Generated file (from: fully_connected_float_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type4(Type::INT32, {});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1});
+  OperandType type3(Type::TENSOR_FLOAT32, {2, 1});
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  auto act = model->addOperand(&type4);
+  // Phase 2, operations
+  static float op2_init[] = {2.0f, 4.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 2);
+  static float b0_init[] = {1.0f};
+  model->setOperandValue(b0, b0_init, sizeof(float) * 1);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_4d_simple.model.cpp b/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_4d_simple.model.cpp
new file mode 100644
index 0000000..aa645d9
--- /dev/null
@@ -0,0 +1,32 @@
+// Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type4(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type1(Type::TENSOR_FLOAT32, {3, 10});
+  OperandType type2(Type::TENSOR_FLOAT32, {3});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 5, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  auto act = model->addOperand(&type4);
+  // Phase 2, operations
+  static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 30);
+  static float b0_init[] = {1.0f, 2.0f, 3.0f};
+  model->setOperandValue(b0, b0_init, sizeof(float) * 3);
+  static int32_t act_init[] = {0};
+  model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
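cf) Shape check for the 4-D case: FULLY_CONNECTED flattens the {4, 1, 5, 1} input (20 elements) into batches of input_size 10, the second dimension of the {3, 10} weights, so batch = 20 / 10 = 2 and the output is {2, 3}: two batches by three units.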
diff --git a/runtimes/tests/neural_networks_test/generated/models/mean.model.cpp b/runtimes/tests/neural_networks_test/generated/models/mean.model.cpp
new file mode 100644
index 0000000..7d26f9f
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: mean.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 2, 1});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto keepDims = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis_init[] = {2};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+  static int32_t keepDims_init[] = {0};
+  model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/mean_float_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/mean_float_1.model.cpp
new file mode 100644
index 0000000..7a3ce25
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: mean_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {2});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type1(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto keepDims = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1, 0, -3, -3};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+  static int32_t keepDims_init[] = {0};
+  model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
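cf) The axis vector {1, 0, -3, -3} is intentionally redundant: negative axes wrap around (-3 maps to 0 for a rank-3 input) and duplicates collapse, leaving the effective reduction set {0, 1}. With keepDims = 0, the {4, 3, 2} input therefore reduces to {2}, each output value averaging 4 * 3 = 12 elements.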
diff --git a/runtimes/tests/neural_networks_test/generated/models/mean_float_2.model.cpp b/runtimes/tests/neural_networks_test/generated/models/mean_float_2.model.cpp
new file mode 100644
index 0000000..9838db4
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: mean_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 3, 1});
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 3, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto keepDims = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0, 2};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+  static int32_t keepDims_init[] = {1};
+  model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/mean_quant8_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/mean_quant8_1.model.cpp
new file mode 100644
index 0000000..bbc6c10
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: mean_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {4});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 0.8, 5);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.8, 5);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto keepDims = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis_init[] = {1, 0, -3, -3};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+  static int32_t keepDims_init[] = {0};
+  model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/mean_quant8_2.model.cpp b/runtimes/tests/neural_networks_test/generated/models/mean_quant8_2.model.cpp
new file mode 100644
index 0000000..dec9d81
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: mean_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 3, 1}, 0.8, 5);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.8, 5);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto axis = model->addOperand(&type1);
+  auto keepDims = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t axis_init[] = {0, 2};
+  model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+  static int32_t keepDims_init[] = {1};
+  model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/pad.model.cpp b/runtimes/tests/neural_networks_test/generated/models/pad.model.cpp
new file mode 100644
index 0000000..97e173e
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: pad.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type1(Type::TENSOR_INT32, {4, 2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t op2_init[] = {0, 0, 1, 1, 1, 1, 0, 0};
+  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
+  model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
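cf) Each row of the {4, 2} paddings operand is a (before, after) pair for one input dimension, so {0,0, 1,1, 1,1, 0,0} pads height and width by one on each side: {1, 2, 2, 1} grows to {1, 2+1+1, 2+1+1, 1} = {1, 4, 4, 1}, matching type2.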
diff --git a/runtimes/tests/neural_networks_test/generated/models/pad_float_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/pad_float_1.model.cpp
new file mode 100644
index 0000000..61ae0b7
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: pad_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 7, 1});
+  OperandType type1(Type::TENSOR_INT32, {4, 2});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto op3 = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t op2_init[] = {0, 0, 0, 2, 1, 3, 0, 0};
+  model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
+  model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch.model.cpp
new file mode 100644
index 0000000..4064c94
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+  OperandType type3(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_1.model.cpp
new file mode 100644
index 0000000..f4dfab9
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+  OperandType type3(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_2.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_2.model.cpp
new file mode 100644
index 0000000..44dee00
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 5, 2, 1});
+  OperandType type3(Type::TENSOR_FLOAT32, {6, 2, 2, 1});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 0, 2, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
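cf) Output-shape check for the padded case: paddings {1,0, 2,0} grow the {1, 5, 2, 1} input to {1, 6, 4, 1}; the spatial dims divide by block {3, 2} as 6/3 = 2 and 4/2 = 2, and the batch dimension becomes 1 * 3 * 2 = 6, hence the {6, 2, 2, 1} output type.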
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_3.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch_float_3.model.cpp
new file mode 100644
index 0000000..f2fa990
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch_float_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
+  OperandType type3(Type::TENSOR_FLOAT32, {6, 2, 4, 1});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 1, 2, 4};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_1.model.cpp
new file mode 100644
index 0000000..cfd56c2
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_2.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_2.model.cpp
new file mode 100644
index 0000000..8ab61a1
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {6, 2, 2, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 0, 2, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_3.model.cpp b/runtimes/tests/neural_networks_test/generated/models/space_to_batch_quant8_3.model.cpp
new file mode 100644
index 0000000..7ee3884
--- /dev/null
@@ -0,0 +1,28 @@
+// Generated file (from: space_to_batch_quant8_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {6, 2, 4, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 1, 2, 4};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/squeeze.model.cpp b/runtimes/tests/neural_networks_test/generated/models/squeeze.model.cpp
new file mode 100644
index 0000000..806a10c
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: squeeze.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {1, 2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 2);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/squeeze_float_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/squeeze_float_1.model.cpp
new file mode 100644
index 0000000..2277e38
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: squeeze_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 24, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 24});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/squeeze_quant8_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/squeeze_quant8_1.model.cpp
new file mode 100644
index 0000000..f122d43
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: squeeze_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 24, 1}, 1.0, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 24}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
index 371d084..5f1b875 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {0, 0};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index d37e8e6..fcd2f6d 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index 6fecfd3..1463f13 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1, 0};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {2};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_float_11.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_float_11.model.cpp
new file mode 100644
index 0000000..2197b50
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_float_11.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type3(Type::TENSOR_FLOAT32, {3});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {0, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {1, 3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {1};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
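cf) This new model, together with the regenerated ones around it, is the substantive strided_slice change in p-preview-4: ANEURALNETWORKS_STRIDED_SLICE now takes a seventh input, shrink_axis_mask. Existing models pass an explicit 0 to preserve their old behaviour; strided_slice_float_11 sets bit 0, so the [0:1, 0:3] slice of the {2, 3} input drops its collapsed first dimension and yields the rank-1 {3} output {1, 2, 3} seen in strided_slice_float_11.example.cpp above.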
index 81e8796..47179ca 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {-3};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index 343455a..113c775 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {-5};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index b7524de..af5ffa8 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index a590fc5..a0280d3 100644
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index db1de2b..cb40c85 100644 (file)
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {1};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index d4d12e9..1580128 100644 (file)
@@ -10,6 +10,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type0);
   // Phase 2, operations
   static int32_t begins_init[] = {-1};
@@ -22,7 +23,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index 30c96b5..0dd3884 100644 (file)
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1, -1};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
index a93a1f8..22e0e70 100644 (file)
@@ -11,6 +11,7 @@ void CreateModel(Model *model) {
   auto strides = model->addOperand(&type1);
   auto beginMask = model->addOperand(&type2);
   auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
   auto output = model->addOperand(&type3);
   // Phase 2, operations
   static int32_t begins_init[] = {1, 0};
@@ -23,7 +24,9 @@ void CreateModel(Model *model) {
   model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
   static int32_t endMask_init[] = {0};
   model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
-  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {input},
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_qaunt8_10.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_qaunt8_10.model.cpp
new file mode 100644 (file)
index 0000000..a6eec78
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_qaunt8_10.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {2, 2};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {2};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_qaunt8_11.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_qaunt8_11.model.cpp
new file mode 100644 (file)
index 0000000..170dc7e
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_qaunt8_11.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {0, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {1, 3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {1};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_1.model.cpp
new file mode 100644 (file)
index 0000000..7f8e602
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_2.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_2.model.cpp
new file mode 100644 (file)
index 0000000..e604214
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {-3};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_3.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_3.model.cpp
new file mode 100644 (file)
index 0000000..2cc75a4
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {-5};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_4.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_4.model.cpp
new file mode 100644 (file)
index 0000000..2fe2277
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_4.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {-2};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_5.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_5.model.cpp
new file mode 100644 (file)
index 0000000..1ed3ed1
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_5.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {1};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_6.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_6.model.cpp
new file mode 100644 (file)
index 0000000..73da2fc
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_6.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {1};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_7.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_7.model.cpp
new file mode 100644 (file)
index 0000000..089388b
--- /dev/null
@@ -0,0 +1,39 @@
+// Generated file (from: strided_slice_quant8_7.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t begins_init[] = {-1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {-4};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {-1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_8.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_8.model.cpp
new file mode 100644 (file)
index 0000000..ef55fc1
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_8.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1, -1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {2, -4};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {2, -1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_9.model.cpp b/runtimes/tests/neural_networks_test/generated/models/strided_slice_quant8_9.model.cpp
new file mode 100644 (file)
index 0000000..37bb289
--- /dev/null
@@ -0,0 +1,40 @@
+// Generated file (from: strided_slice_quant8_9.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {2, 2};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {1};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
index f48feaf..cf1f61a 100644 (file)
@@ -1,17 +1,17 @@
 // Generated file (from: sub_broadcast_float.mod.py). Do not edit
 void CreateModel(Model *model) {
   OperandType type2(Type::INT32, {});
-  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
-  OperandType type1(Type::TENSOR_FLOAT32, {1});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
+  OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
   // Phase 1, operands
   auto op1 = model->addOperand(&type0);
   auto op2 = model->addOperand(&type1);
   auto act = model->addOperand(&type2);
-  auto op3 = model->addOperand(&type0);
+  auto op3 = model->addOperand(&type1);
   // Phase 2, operations
   static int32_t act_init[] = {0};
   model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
-  model->addOperationEx(ANEURALNETWORKS_SUB_EX, {op1, op2, act}, {op3});
+  model->addOperation(ANEURALNETWORKS_SUB, {op1, op2, act}, {op3});
   // Phase 3, inputs and outputs
   model->identifyInputsAndOutputs(
     {op1, op2},
diff --git a/runtimes/tests/neural_networks_test/generated/models/tanh.model.cpp b/runtimes/tests/neural_networks_test/generated/models/tanh.model.cpp
new file mode 100644 (file)
index 0000000..bc3cd4a
--- /dev/null
@@ -0,0 +1,19 @@
+// Generated file (from: tanh.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type0);
+  // Phase 2, operations
+  model->addOperation(ANEURALNETWORKS_TANH, {op1}, {op2});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op2});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/transpose.model.cpp b/runtimes/tests/neural_networks_test/generated/models/transpose.model.cpp
new file mode 100644 (file)
index 0000000..e4c7414
--- /dev/null
@@ -0,0 +1,23 @@
+// Generated file (from: transpose.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+  OperandType type1(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto perms = model->addOperand(&type1);
+  auto output = model->addOperand(&type0);
+  // Phase 2, operations
+  static int32_t perms_init[] = {0, 2, 1, 3};
+  model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/transpose_float_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/transpose_float_1.model.cpp
new file mode 100644 (file)
index 0000000..f6d0d08
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: transpose_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 3, 4, 5});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 2, 3, 5});
+  OperandType type1(Type::TENSOR_INT32, {4});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto perms = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t perms_init[] = {2, 0, 1, 3};
+  model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/generated/models/transpose_quant8_1.model.cpp b/runtimes/tests/neural_networks_test/generated/models/transpose_quant8_1.model.cpp
new file mode 100644 (file)
index 0000000..808ad2b
--- /dev/null
@@ -0,0 +1,24 @@
+// Generated file (from: transpose_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {4});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3, 4, 5}, 1.0, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 2, 3, 5}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto perms = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t perms_init[] = {2, 0, 1, 3};
+  model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
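
The new TRANSPOSE models apply a perms vector the same way np.transpose applies an axes tuple; a quick shape check for transpose_float_1 and transpose_quant8_1 (illustrative):

import numpy as np
x = np.empty((2, 3, 4, 5))
# perms {2, 0, 1, 3} moves axis 2 to the front, matching the output type {4, 2, 3, 5}
assert np.transpose(x, (2, 0, 1, 3)).shape == (4, 2, 3, 5)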
diff --git a/runtimes/tests/neural_networks_test/specs/Ex/sub_broadcast_float.mod.py b/runtimes/tests/neural_networks_test/specs/Ex/sub_broadcast_float.mod.py
deleted file mode 100644 (file)
index 2a3ee5e..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-i2 = Input("op2", "TENSOR_FLOAT32", "{1}")
-act = Int32Scalar("act", 0)
-i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-model = model.Operation("SUB_EX", i1, i2, act).To(i3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
-          [1, 2, 3, 4],
-          i2: # input 1
-          [2]}
-
-output0 = {i3: # output 0
-           [-1, 0, 1, 2]}
-
-# Instantiate an example
-Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_3.mod.py b/runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_3.mod.py
new file mode 100644 (file)
index 0000000..804f812
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{2, 2}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 2}", [2, 4])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [1])
+out0 = Output("op3", "TENSOR_FLOAT32", "{2, 1}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+          [1, 2, 2, 1]}
+output0 = {out0: # output 0
+               [11, 9]}
+
+# Instantiate an example
+Example((input0, output0))
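
A quick NumPy check of fully_connected_float_3 above, assuming the standard FULLY_CONNECTED formula output = input · weightsᵀ + bias (activation 0 = none):

import numpy as np
x = np.array([[1.0, 2.0], [2.0, 1.0]])   # input0 as a {2, 2} batch
w = np.array([[2.0, 4.0]])               # op2, {1, 2}
b = np.array([1.0])                      # b0
print((x @ w.T + b).ravel())             # [11. 9.] -- matches output0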
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space.mod.py
new file mode 100644 (file)
index 0000000..bf8f56a
--- /dev/null
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+           [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
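
BATCH_TO_SPACE_ND moves block offsets out of the batch dimension and interleaves them back into H and W. A minimal NumPy sketch under a TF-compatible NHWC layout assumption (batch_to_space_nd is an illustrative helper, not an NNAPI API):

import numpy as np

def batch_to_space_nd(x, block):
    bh, bw = block
    n = x.shape[0] // (bh * bw)
    h, w, c = x.shape[1:]
    x = x.reshape(bh, bw, n, h, w, c)
    x = x.transpose(2, 3, 0, 4, 1, 5)   # block offsets interleave into H and W
    return x.reshape(n, h * bh, w * bw, c)

# Reproduces batch_to_space_float_1 below:
print(batch_to_space_nd(np.arange(1, 17).reshape(4, 2, 2, 1), (2, 2)).ravel())
# [ 1  5  2  6  9 13 10 14  3  7  4  8 11 15 12 16]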
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space_float_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space_float_1.mod.py
new file mode 100644 (file)
index 0000000..019242a
--- /dev/null
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+           [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]}
+
+# Instantiate an example
+Example((input0, output0))
\ No newline at end of file
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space_quant8_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/batch_to_space_quant8_1.mod.py
new file mode 100644 (file)
index 0000000..8c6a727
--- /dev/null
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 2, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 1.0, 0")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+           [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]}
+
+# Instantiate an example
+Example((input0, output0))
\ No newline at end of file
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/div_broadcast_float.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/div_broadcast_float.mod.py
new file mode 100644 (file)
index 0000000..d4e0ea9
--- /dev/null
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT32", "{2, 2}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2],
+          i2: # input 1
+          [1, 1, 2, 2]}
+
+output0 = {i3: # output 0
+           [1, 2, 0.5, 1]}
+
+# Instantiate an example
+Example((input0, output0))
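
DIV uses NumPy-style broadcasting, so the expected values above can be checked directly (illustrative):

import numpy as np
op1 = np.array([[1.0, 2.0]])                 # {1, 2}
op2 = np.array([[1.0, 1.0], [2.0, 2.0]])     # {2, 2}
print((op1 / op2).ravel())                   # [1.  2.  0.5 1. ] -- matches output0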
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/fully_connected_float_4d_simple.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/fully_connected_float_4d_simple.mod.py
new file mode 100644 (file)
index 0000000..b9a6290
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This test is for testing the input requirements of Fully Connected Op:
+# the input's first dimension doesn't have to be the batch size, the
+# input is reshaped as needed.
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 5, 1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{3, 10}", [
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10,  # u = 0
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10,  # u = 1
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10,  # u = 2
+])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{3}", [1, 2, 3])
+out0 = Output("op3", "TENSOR_FLOAT32", "{2, 3}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+          [1, 2, 3, 4, 5, 6, 7,  8, -9, -10,
+           1, 2, 3, 4, 5, 6, 7, -8,  9, -10]}
+output0 = {out0: # output 0
+               [24, 25, 26,
+                58, 59, 60]}
+
+# Instantiate an example
+Example((input0, output0))
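
The comment block above notes that the input is reshaped as needed: {4, 1, 5, 1} holds 20 elements and each weight row expects 10 inputs, so the op effectively sees a {2, 10} batch. A NumPy check under the standard input · weightsᵀ + bias assumption:

import numpy as np
x = np.array([1, 2, 3, 4, 5, 6, 7,  8, -9, -10,
              1, 2, 3, 4, 5, 6, 7, -8,  9, -10], dtype=np.float32)
w = np.tile(np.arange(1.0, 11.0), (3, 1))    # the {3, 10} weights, rows 1..10
b = np.array([1.0, 2.0, 3.0])
print(x.reshape(2, 10) @ w.T + b)            # [[24. 25. 26.] [58. 59. 60.]]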
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/mean.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/mean.mod.py
new file mode 100644 (file)
index 0000000..28bd6af
--- /dev/null
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [2])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 1}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0,
+           3.0, 4.0]}
+
+output0 = {output: # output 0
+          [1.5,
+           3.5]}
+
+# Instantiate an example
+Example((input0, output0))
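
MEAN reduces over the axes listed in the axis tensor; keepDims = 0 drops the reduced axis. (mean_float_1 below also shows that duplicate and negative axes are folded: [1, 0, -3, -3] on a rank-3 input reduces axes {0, 1}.) A one-line NumPy check of the spec above:

import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0]).reshape(1, 2, 2, 1)
print(np.mean(x, axis=2).ravel())   # [1.5 3.5]; keepDims=0 gives shape {1, 2, 1}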
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/mean_float_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/mean_float_1.mod.py
new file mode 100644 (file)
index 0000000..5fde65d
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT32", "{2}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
+           13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0]}
+
+output0 = {output: # output 0
+          [12.0, 13.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/mean_float_2.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/mean_float_2.mod.py
new file mode 100644 (file)
index 0000000..4b71d47
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_FLOAT32", "{1, 3, 1}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0,  2.0,  3.0,  4.0,  5.0,  6.0,  7.0,  8.0,  9.0,  10.0, 11.0, 12.0,
+           13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0]}
+
+output0 = {output: # output 0
+          [10.5, 12.5, 14.5]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/mean_quant8_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/mean_quant8_1.mod.py
new file mode 100644 (file)
index 0000000..666b0c2
--- /dev/null
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 3, 2}, 0.8, 5")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2}, 0.8, 5")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1,  2,  3,  4,  5,  6,  7,  8,
+           9,  10, 11, 12, 13, 14, 15, 16,
+           17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+          [12, 13]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/mean_quant8_2.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/mean_quant8_2.mod.py
new file mode 100644 (file)
index 0000000..23fd87c
--- /dev/null
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 3, 2}, 0.8, 5")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3, 1}, 0.8, 5")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1,  2,  3,  4,  5,  6,  7,  8,
+           9,  10, 11, 12, 13, 14, 15, 16,
+           17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+          [10, 12, 14]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/pad.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/pad.mod.py
new file mode 100644 (file)
index 0000000..54a5a46
--- /dev/null
@@ -0,0 +1,20 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 1, 1, 1, 1, 0, 0])
+i3 = Output("op3", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+model = model.Operation("PAD", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0,
+           3.0, 4.0,]}
+
+output0 = {i3: # output 0
+           [0.0, 0.0, 0.0, 0.0,
+            0.0, 1.0, 2.0, 0.0,
+            0.0, 3.0, 4.0, 0.0,
+            0.0, 0.0, 0.0, 0.0]}
+
+# Instantiate an example
+Example((input0, output0))
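
PAD takes a {rank, 2} tensor of (before, after) padding pairs, one row per input dimension, and pads with zeros. A NumPy check of the spec above (illustrative):

import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0]).reshape(1, 2, 2, 1)
y = np.pad(x, ((0, 0), (1, 1), (1, 1), (0, 0)))   # op2 rows as (before, after) pairs
print(y.ravel())   # the zero-bordered {1, 4, 4, 1} output above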
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/pad_float_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/pad_float_1.mod.py
new file mode 100644 (file)
index 0000000..0817127
--- /dev/null
@@ -0,0 +1,18 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 0, 2, 1, 3, 0, 0])
+i3 = Output("op3", "TENSOR_FLOAT32", "{1, 4, 7, 1}")
+model = model.Operation("PAD", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0, 3.0,
+           4.0, 5.0, 6.0]}
+
+output0 = {i3: # output 0
+           [0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch.mod.py
new file mode 100644 (file)
index 0000000..8c10231
--- /dev/null
@@ -0,0 +1,17 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+           [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
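
SPACE_TO_BATCH_ND goes the other way from BATCH_TO_SPACE_ND: zero-pad the spatial dims, then move the block offsets into the batch. A sketch consistent with the batch_to_space_nd helper earlier (same layout assumption; the batch ordering is only verified against the single-batch inputs used in these specs):

import numpy as np

def space_to_batch_nd(x, block, paddings):
    n, _, _, c = x.shape
    bh, bw = block
    x = np.pad(x, ((0, 0), paddings[0], paddings[1], (0, 0)))
    h, w = x.shape[1] // bh, x.shape[2] // bw
    x = x.reshape(n, h, bh, w, bw, c)
    x = x.transpose(2, 4, 0, 1, 3, 5)   # block offsets become the leading batch
    return x.reshape(n * bh * bw, h, w, c)

# Reproduces space_to_batch_float_1 below:
print(space_to_batch_nd(np.arange(1, 17).reshape(1, 4, 4, 1),
                        (2, 2), ((0, 0), (0, 0))).ravel())
# [ 1  3  9 11  2  4 10 12  5  7 13 15  6  8 14 16]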
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_1.mod.py
new file mode 100644 (file)
index 0000000..890ced8
--- /dev/null
@@ -0,0 +1,17 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+           [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_2.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_2.mod.py
new file mode 100644 (file)
index 0000000..c625900
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 5, 2, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_FLOAT32", "{6, 2, 2, 1}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
+
+output0 = {output: # output 0
+           [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+            0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_3.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_float_3.mod.py
new file mode 100644 (file)
index 0000000..9d7c8b3
--- /dev/null
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 2, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+output = Output("output", "TENSOR_FLOAT32", "{6, 2, 4, 1}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8]}
+
+output0 = {output: # output 0
+           [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+            0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+            0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_1.mod.py
new file mode 100644 (file)
index 0000000..726250d
--- /dev/null
@@ -0,0 +1,17 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{4, 2, 2, 1}, 1.0, 0")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+           [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_2.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_2.mod.py
new file mode 100644 (file)
index 0000000..8adc262
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 5, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{6, 2, 2, 1}, 1.0, 0")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
+
+output0 = {output: # output 0
+           [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+            0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_3.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/space_to_batch_quant8_3.mod.py
new file mode 100644 (file)
index 0000000..e9e88bb
--- /dev/null
@@ -0,0 +1,19 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{6, 2, 4, 1}, 1.0, 0")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8]}
+
+output0 = {output: # output 0
+           [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+            0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+            0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/squeeze.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/squeeze.mod.py
new file mode 100644 (file)
index 0000000..4bf3189
--- /dev/null
@@ -0,0 +1,16 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{2}", [1, 2])
+output = Output("output", "TENSOR_FLOAT32", "{4, 2}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+           [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
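
SQUEEZE removes the size-1 dimensions listed in squeezeDims; a one-line NumPy check (illustrative):

import numpy as np
x = np.arange(8.0).reshape(4, 1, 1, 2)
assert np.squeeze(x, axis=(1, 2)).shape == (4, 2)   # squeezeDims [1, 2]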
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/squeeze_float_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/squeeze_float_1.mod.py
new file mode 100644 (file)
index 0000000..1a54ae7
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 24, 1}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 24}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+           13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+           13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/squeeze_quant8_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/squeeze_quant8_1.mod.py
new file mode 100644 (file)
index 0000000..5710c1d
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 24, 1}, 1.0, 0")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [2])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 24}, 1.0, 0")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+           13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+          [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+           13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+# Instantiate an example
+Example((input0, output0))
index 26c65e7..9bc94d1 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 3])
 strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, 2])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{1, 2}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
index 507dac6..0725cff 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{2}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
index 804f259..178421f 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
 strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 2)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{1, 3}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_11.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_11.mod.py
new file mode 100644 (file)
index 0000000..444ae63
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [1, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_FLOAT32", "{3}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
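cf) Each pre-existing STRIDED_SLICE spec in this commit gains a shrinkAxisMask = 0 operand, preserving its old behaviour while matching the seven-input STRIDED_SLICE signature of p-preview-4. strided_slice_float_11 is the first spec to actually set the mask: bit 0 collapses dimension 0, so the {1, 3} slice becomes {3}. A rough NumPy equivalent (an illustration only, not the runtime's implementation):

    import numpy as np
    x = np.arange(1, 7, dtype=np.float32).reshape(2, 3)
    y = x[0:1:1, 0:3:1]          # begins/ends/strides give shape (1, 3)
    y = np.squeeze(y, axis=0)    # shrinkAxisMask bit 0 drops dimension 0
    assert y.tolist() == [1, 2, 3] and y.shape == (3,)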
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_2.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_2.mod.py
index 483d23e..7dd3d83 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{2}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_3.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_3.mod.py
index 6f71c31..e476bca 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{3}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_4.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_4.mod.py
index 3583465..939cc14 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [-2])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{1}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_5.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_5.mod.py
index 48107de..db73727 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
 beginMask = Int32Scalar("beginMask", 1)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{3}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_6.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_6.mod.py
index d6ce07d..c8d42d9 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 1)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{3}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_7.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_7.mod.py
index cfe10db..668748a 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{1}", [-4])
 strides = Parameter("strides", "TENSOR_INT32", "{1}", [-1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{3}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_8.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_8.mod.py
index 0c2cb79..2c1cc94 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, -4])
 strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, -1])
 beginMask = Int32Scalar("beginMask", 0)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{1, 3}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_9.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_float_9.mod.py
index d1e0ad8..4bafd3d 100644 (file)
@@ -5,10 +5,11 @@ ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
 strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
 beginMask = Int32Scalar("beginMask", 1)
 endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
 
 output = Output("output", "TENSOR_FLOAT32", "{2, 2}")
 
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
 
 # Example 1. Input in operand 0,
 input0 = {i1: # input 0
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_qaunt8_10.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_qaunt8_10.mod.py
new file mode 100644 (file)
index 0000000..fc29552
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 2)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+           [4, 5, 6]}
+
+# Instantiate an example
+Example((input0, output0))
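cf) Here endMask = 2 sets bit 1, so the ends value for dimension 1 (the trailing 2 in [2, 2]) is ignored and the slice runs to the end of that dimension, yielding [4, 5, 6] rather than [4, 5]. Sketched with NumPy (illustration only; with scale 1.0 and zero point 0 the quantized tensor carries the raw uint8 values):

    import numpy as np
    x = np.arange(1, 7, dtype=np.uint8).reshape(2, 3)
    y = x[1:2, 0:]               # endMask bit 1: dim 1 runs to the end, not to 2
    assert y.tolist() == [[4, 5, 6]]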
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_qaunt8_11.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_qaunt8_11.mod.py
new file mode 100644 (file)
index 0000000..d7374ab
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [1, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_1.mod.py
new file mode 100644 (file)
index 0000000..4b76de2
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_2.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_2.mod.py
new file mode 100644 (file)
index 0000000..d6cd6aa
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-3])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_3.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_3.mod.py
new file mode 100644 (file)
index 0000000..411a6fa
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-5])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_4.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_4.mod.py
new file mode 100644 (file)
index 0000000..f8a54f2
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [-2])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_5.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_5.mod.py
new file mode 100644 (file)
index 0000000..4fa42f5
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 1)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_6.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_6.mod.py
new file mode 100644 (file)
index 0000000..bcd8841
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 1)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2, 3, 4]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_7.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_7.mod.py
new file mode 100644 (file)
index 0000000..e1ae9db
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [-4])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [-1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3]}
+
+output0 = {output: # output 0
+           [3, 2, 1]}
+
+# Instantiate an example
+Example((input0, output0))
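cf) strided_slice_quant8_7 exercises negative indexing: begins = [-1], ends = [-4], strides = [-1] walk the length-3 tensor backwards, reversing it. The same indices in NumPy (illustration only):

    import numpy as np
    x = np.array([1, 2, 3], dtype=np.uint8)
    y = x[-1:-4:-1]              # start at the last element, step backwards
    assert y.tolist() == [3, 2, 1]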
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_8.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_8.mod.py
new file mode 100644 (file)
index 0000000..6531dd3
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, -1])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, -4])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, -1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+           [6, 5, 4]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_9.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/strided_slice_quant8_9.mod.py
new file mode 100644 (file)
index 0000000..7f06601
--- /dev/null
@@ -0,0 +1,22 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 1)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+           [1, 2, 4, 5]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/sub_broadcast_float.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/sub_broadcast_float.mod.py
new file mode 100644 (file)
index 0000000..53bdf9e
--- /dev/null
@@ -0,0 +1,19 @@
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT32", "{2, 2}")
+model = model.Operation("SUB", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2],
+          i2: # input 1
+          [1, 2, 3, 4]}
+
+output0 = {i3: # output 0
+           [0, 0, -2, -2]}
+
+# Instantiate an example
+Example((input0, output0))
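cf) SUB broadcasts the {1, 2} operand across the rows of the {2, 2} operand before subtracting, and act = 0 means no fused activation clamps the negative results. A NumPy sketch of the arithmetic (illustration only):

    import numpy as np
    a = np.array([[1, 2]], dtype=np.float32)           # shape (1, 2)
    b = np.array([[1, 2], [3, 4]], dtype=np.float32)   # shape (2, 2)
    out = a - b                  # the single row of 'a' is reused for each row of 'b'
    assert out.tolist() == [[0, 0], [-2, -2]]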
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/transpose.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/transpose.mod.py
new file mode 100644 (file)
index 0000000..49f15a7
--- /dev/null
@@ -0,0 +1,18 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [0, 2, 1, 3])
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0,
+           3.0, 4.0]}
+
+output0 = {output: # output 0
+          [1.0, 3.0,
+           2.0, 4.0]}
+
+# Instantiate an example
+Example((input0, output0))
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/transpose_float_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/transpose_float_1.mod.py
new file mode 100644 (file)
index 0000000..e8f0ea8
--- /dev/null
@@ -0,0 +1,32 @@
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3, 4, 5}")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [2, 0, 1, 3])
+output = Output("output", "TENSOR_FLOAT32", "{4, 2, 3, 5}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,
+           12,  13,  14,  15,  16,  17,  18,  19,  20,  21,  22,  23,
+           24,  25,  26,  27,  28,  29,  30,  31,  32,  33,  34,  35,
+           36,  37,  38,  39,  40,  41,  42,  43,  44,  45,  46,  47,
+           48,  49,  50,  51,  52,  53,  54,  55,  56,  57,  58,  59,
+           60,  61,  62,  63,  64,  65,  66,  67,  68,  69,  70,  71,
+           72,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,
+           84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,  95,
+           96,  97,  98,  99,  100, 101, 102, 103, 104, 105, 106, 107,
+           108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}
+
+output0 = {output: # output 0
+          [0,  1,  2,  3,  4,  20, 21, 22, 23, 24, 40,  41,  42,  43,  44,
+           60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
+           5,  6,  7,  8,  9,  25, 26, 27, 28, 29, 45,  46,  47,  48,  49,
+           65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
+           10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50,  51,  52,  53,  54,
+           70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+           15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55,  56,  57,  58,  59,
+           75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119]}
+
+# Instantiate an example
+Example((input0, output0))
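cf) With perms = [2, 0, 1, 3], output dimension n takes its size from input dimension perms[n], so {2, 3, 4, 5} becomes {4, 2, 3, 5} and output[i][j][k][l] = input[j][k][i][l]. The flattened expectation above can be reproduced with NumPy (illustration only):

    import numpy as np
    x = np.arange(120, dtype=np.float32).reshape(2, 3, 4, 5)
    y = np.transpose(x, (2, 0, 1, 3))   # y[i, j, k, l] == x[j, k, i, l]
    assert y.shape == (4, 2, 3, 5)
    assert y.flatten()[:5].tolist() == [0, 1, 2, 3, 4]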
diff --git a/runtimes/tests/neural_networks_test/specs/V1_1/transpose_quant8_1.mod.py b/runtimes/tests/neural_networks_test/specs/V1_1/transpose_quant8_1.mod.py
new file mode 100644 (file)
index 0000000..6893a62
--- /dev/null
@@ -0,0 +1,32 @@
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3, 4, 5}, 1.0, 0")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [2, 0, 1, 3])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{4, 2, 3, 5}, 1.0, 0")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [0,   1,   2,   3,   4,   5,   6,   7,   8,   9,   10,  11,
+           12,  13,  14,  15,  16,  17,  18,  19,  20,  21,  22,  23,
+           24,  25,  26,  27,  28,  29,  30,  31,  32,  33,  34,  35,
+           36,  37,  38,  39,  40,  41,  42,  43,  44,  45,  46,  47,
+           48,  49,  50,  51,  52,  53,  54,  55,  56,  57,  58,  59,
+           60,  61,  62,  63,  64,  65,  66,  67,  68,  69,  70,  71,
+           72,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,
+           84,  85,  86,  87,  88,  89,  90,  91,  92,  93,  94,  95,
+           96,  97,  98,  99,  100, 101, 102, 103, 104, 105, 106, 107,
+           108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}
+
+output0 = {output: # output 0
+          [0,  1,  2,  3,  4,  20, 21, 22, 23, 24, 40,  41,  42,  43,  44,
+           60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
+           5,  6,  7,  8,  9,  25, 26, 27, 28, 29, 45,  46,  47,  48,  49,
+           65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
+           10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50,  51,  52,  53,  54,
+           70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+           15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55,  56,  57,  58,  59,
+           75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119]}
+
+# Instantiate an example
+Example((input0, output0))