fully_connected_float_2::examples);
}
+namespace fully_connected_float_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_3 test
+#include "generated/examples/fully_connected_float_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_3.model.cpp"
+} // namespace fully_connected_float_3
+TEST_F(GeneratedTests, fully_connected_float_3) {
+ execute(fully_connected_float_3::CreateModel,
+ fully_connected_float_3::is_ignored,
+ fully_connected_float_3::examples);
+}
+
namespace fully_connected_float_large {
std::vector<MixedTypedExample> examples = {
// Generated fully_connected_float_large test
tanh_::examples);
}
+namespace batch_to_space_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated batch_to_space_float_1 test
+#include "generated/examples/batch_to_space_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/batch_to_space_float_1.model.cpp"
+} // namespace batch_to_space_float_1
+TEST_F(GeneratedTests, batch_to_space_float_1) {
+ execute(batch_to_space_float_1::CreateModel,
+ batch_to_space_float_1::is_ignored,
+ batch_to_space_float_1::examples);
+}
+
+namespace batch_to_space {
+std::vector<MixedTypedExample> examples = {
+// Generated batch_to_space test
+#include "generated/examples/batch_to_space.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/batch_to_space.model.cpp"
+} // namespace batch_to_space
+TEST_F(GeneratedTests, batch_to_space) {
+ execute(batch_to_space::CreateModel,
+ batch_to_space::is_ignored,
+ batch_to_space::examples);
+}
+
+namespace batch_to_space_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated batch_to_space_quant8_1 test
+#include "generated/examples/batch_to_space_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/batch_to_space_quant8_1.model.cpp"
+} // namespace batch_to_space_quant8_1
+TEST_F(GeneratedTests, batch_to_space_quant8_1) {
+ execute(batch_to_space_quant8_1::CreateModel,
+ batch_to_space_quant8_1::is_ignored,
+ batch_to_space_quant8_1::examples);
+}
+
+namespace div_broadcast_float {
+std::vector<MixedTypedExample> examples = {
+// Generated div_broadcast_float test
+#include "generated/examples/div_broadcast_float.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/div_broadcast_float.model.cpp"
+} // namespace div_broadcast_float
+TEST_F(GeneratedTests, div_broadcast_float) {
+ execute(div_broadcast_float::CreateModel,
+ div_broadcast_float::is_ignored,
+ div_broadcast_float::examples);
+}
+
namespace div_ {
std::vector<MixedTypedExample> examples = {
// Generated div_ test
div_::examples);
}
+namespace fully_connected_float_4d_simple {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_4d_simple test
+#include "generated/examples/fully_connected_float_4d_simple.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_4d_simple.model.cpp"
+} // namespace fully_connected_float_4d_simple
+TEST_F(GeneratedTests, fully_connected_float_4d_simple) {
+ execute(fully_connected_float_4d_simple::CreateModel,
+ fully_connected_float_4d_simple::is_ignored,
+ fully_connected_float_4d_simple::examples);
+}
+
+namespace mean_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_float_1 test
+#include "generated/examples/mean_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_float_1.model.cpp"
+} // namespace mean_float_1
+TEST_F(GeneratedTests, mean_float_1) {
+ execute(mean_float_1::CreateModel,
+ mean_float_1::is_ignored,
+ mean_float_1::examples);
+}
+
+namespace mean_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_float_2 test
+#include "generated/examples/mean_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_float_2.model.cpp"
+} // namespace mean_float_2
+TEST_F(GeneratedTests, mean_float_2) {
+ execute(mean_float_2::CreateModel,
+ mean_float_2::is_ignored,
+ mean_float_2::examples);
+}
+
+namespace mean {
+std::vector<MixedTypedExample> examples = {
+// Generated mean test
+#include "generated/examples/mean.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean.model.cpp"
+} // namespace mean
+TEST_F(GeneratedTests, mean) {
+ execute(mean::CreateModel,
+ mean::is_ignored,
+ mean::examples);
+}
+
+namespace mean_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_quant8_1 test
+#include "generated/examples/mean_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_quant8_1.model.cpp"
+} // namespace mean_quant8_1
+TEST_F(GeneratedTests, mean_quant8_1) {
+ execute(mean_quant8_1::CreateModel,
+ mean_quant8_1::is_ignored,
+ mean_quant8_1::examples);
+}
+
+namespace mean_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_quant8_2 test
+#include "generated/examples/mean_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_quant8_2.model.cpp"
+} // namespace mean_quant8_2
+TEST_F(GeneratedTests, mean_quant8_2) {
+ execute(mean_quant8_2::CreateModel,
+ mean_quant8_2::is_ignored,
+ mean_quant8_2::examples);
+}
+
+namespace pad_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated pad_float_1 test
+#include "generated/examples/pad_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/pad_float_1.model.cpp"
+} // namespace pad_float_1
+TEST_F(GeneratedTests, pad_float_1) {
+ execute(pad_float_1::CreateModel,
+ pad_float_1::is_ignored,
+ pad_float_1::examples);
+}
+
+namespace pad {
+std::vector<MixedTypedExample> examples = {
+// Generated pad test
+#include "generated/examples/pad.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/pad.model.cpp"
+} // namespace pad
+TEST_F(GeneratedTests, pad) {
+ execute(pad::CreateModel,
+ pad::is_ignored,
+ pad::examples);
+}
+
+namespace space_to_batch_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_float_1 test
+#include "generated/examples/space_to_batch_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_float_1.model.cpp"
+} // namespace space_to_batch_float_1
+TEST_F(GeneratedTests, space_to_batch_float_1) {
+ execute(space_to_batch_float_1::CreateModel,
+ space_to_batch_float_1::is_ignored,
+ space_to_batch_float_1::examples);
+}
+
+namespace space_to_batch_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_float_2 test
+#include "generated/examples/space_to_batch_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_float_2.model.cpp"
+} // namespace space_to_batch_float_2
+TEST_F(GeneratedTests, space_to_batch_float_2) {
+ execute(space_to_batch_float_2::CreateModel,
+ space_to_batch_float_2::is_ignored,
+ space_to_batch_float_2::examples);
+}
+
+namespace space_to_batch_float_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_float_3 test
+#include "generated/examples/space_to_batch_float_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_float_3.model.cpp"
+} // namespace space_to_batch_float_3
+TEST_F(GeneratedTests, space_to_batch_float_3) {
+ execute(space_to_batch_float_3::CreateModel,
+ space_to_batch_float_3::is_ignored,
+ space_to_batch_float_3::examples);
+}
+
+namespace space_to_batch {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch test
+#include "generated/examples/space_to_batch.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch.model.cpp"
+} // namespace space_to_batch
+TEST_F(GeneratedTests, space_to_batch) {
+ execute(space_to_batch::CreateModel,
+ space_to_batch::is_ignored,
+ space_to_batch::examples);
+}
+
+namespace space_to_batch_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_quant8_1 test
+#include "generated/examples/space_to_batch_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_quant8_1.model.cpp"
+} // namespace space_to_batch_quant8_1
+TEST_F(GeneratedTests, space_to_batch_quant8_1) {
+ execute(space_to_batch_quant8_1::CreateModel,
+ space_to_batch_quant8_1::is_ignored,
+ space_to_batch_quant8_1::examples);
+}
+
+namespace space_to_batch_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_quant8_2 test
+#include "generated/examples/space_to_batch_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_quant8_2.model.cpp"
+} // namespace space_to_batch_quant8_2
+TEST_F(GeneratedTests, space_to_batch_quant8_2) {
+ execute(space_to_batch_quant8_2::CreateModel,
+ space_to_batch_quant8_2::is_ignored,
+ space_to_batch_quant8_2::examples);
+}
+
+namespace space_to_batch_quant8_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated space_to_batch_quant8_3 test
+#include "generated/examples/space_to_batch_quant8_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/space_to_batch_quant8_3.model.cpp"
+} // namespace space_to_batch_quant8_3
+TEST_F(GeneratedTests, space_to_batch_quant8_3) {
+ execute(space_to_batch_quant8_3::CreateModel,
+ space_to_batch_quant8_3::is_ignored,
+ space_to_batch_quant8_3::examples);
+}
+
+namespace squeeze_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze_float_1 test
+#include "generated/examples/squeeze_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze_float_1.model.cpp"
+} // namespace squeeze_float_1
+TEST_F(GeneratedTests, squeeze_float_1) {
+ execute(squeeze_float_1::CreateModel,
+ squeeze_float_1::is_ignored,
+ squeeze_float_1::examples);
+}
+
+namespace squeeze {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze test
+#include "generated/examples/squeeze.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze.model.cpp"
+} // namespace squeeze
+TEST_F(GeneratedTests, squeeze) {
+ execute(squeeze::CreateModel,
+ squeeze::is_ignored,
+ squeeze::examples);
+}
+
+namespace squeeze_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze_quant8_1 test
+#include "generated/examples/squeeze_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze_quant8_1.model.cpp"
+} // namespace squeeze_quant8_1
+TEST_F(GeneratedTests, squeeze_quant8_1) {
+ execute(squeeze_quant8_1::CreateModel,
+ squeeze_quant8_1::is_ignored,
+ squeeze_quant8_1::examples);
+}
+
namespace strided_slice_float_10 {
std::vector<MixedTypedExample> examples = {
// Generated strided_slice_float_10 test
strided_slice_float_10::examples);
}
+namespace strided_slice_float_11 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_float_11 test
+#include "generated/examples/strided_slice_float_11.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_float_11.model.cpp"
+} // namespace strided_slice_float_11
+TEST_F(GeneratedTests, strided_slice_float_11) {
+ execute(strided_slice_float_11::CreateModel,
+ strided_slice_float_11::is_ignored,
+ strided_slice_float_11::examples);
+}
+
namespace strided_slice_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated strided_slice_float_1 test
strided_slice::examples);
}
+namespace strided_slice_qaunt8_10 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_qaunt8_10 test
+#include "generated/examples/strided_slice_qaunt8_10.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_qaunt8_10.model.cpp"
+} // namespace strided_slice_qaunt8_10
+TEST_F(GeneratedTests, strided_slice_qaunt8_10) {
+ execute(strided_slice_qaunt8_10::CreateModel,
+ strided_slice_qaunt8_10::is_ignored,
+ strided_slice_qaunt8_10::examples);
+}
+
+namespace strided_slice_qaunt8_11 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_qaunt8_11 test
+#include "generated/examples/strided_slice_qaunt8_11.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_qaunt8_11.model.cpp"
+} // namespace strided_slice_qaunt8_11
+TEST_F(GeneratedTests, strided_slice_qaunt8_11) {
+ execute(strided_slice_qaunt8_11::CreateModel,
+ strided_slice_qaunt8_11::is_ignored,
+ strided_slice_qaunt8_11::examples);
+}
+
+namespace strided_slice_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_1 test
+#include "generated/examples/strided_slice_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_1.model.cpp"
+} // namespace strided_slice_quant8_1
+TEST_F(GeneratedTests, strided_slice_quant8_1) {
+ execute(strided_slice_quant8_1::CreateModel,
+ strided_slice_quant8_1::is_ignored,
+ strided_slice_quant8_1::examples);
+}
+
+namespace strided_slice_quant8_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_2 test
+#include "generated/examples/strided_slice_quant8_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_2.model.cpp"
+} // namespace strided_slice_quant8_2
+TEST_F(GeneratedTests, strided_slice_quant8_2) {
+ execute(strided_slice_quant8_2::CreateModel,
+ strided_slice_quant8_2::is_ignored,
+ strided_slice_quant8_2::examples);
+}
+
+namespace strided_slice_quant8_3 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_3 test
+#include "generated/examples/strided_slice_quant8_3.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_3.model.cpp"
+} // namespace strided_slice_quant8_3
+TEST_F(GeneratedTests, strided_slice_quant8_3) {
+ execute(strided_slice_quant8_3::CreateModel,
+ strided_slice_quant8_3::is_ignored,
+ strided_slice_quant8_3::examples);
+}
+
+namespace strided_slice_quant8_4 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_4 test
+#include "generated/examples/strided_slice_quant8_4.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_4.model.cpp"
+} // namespace strided_slice_quant8_4
+TEST_F(GeneratedTests, strided_slice_quant8_4) {
+ execute(strided_slice_quant8_4::CreateModel,
+ strided_slice_quant8_4::is_ignored,
+ strided_slice_quant8_4::examples);
+}
+
+namespace strided_slice_quant8_5 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_5 test
+#include "generated/examples/strided_slice_quant8_5.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_5.model.cpp"
+} // namespace strided_slice_quant8_5
+TEST_F(GeneratedTests, strided_slice_quant8_5) {
+ execute(strided_slice_quant8_5::CreateModel,
+ strided_slice_quant8_5::is_ignored,
+ strided_slice_quant8_5::examples);
+}
+
+namespace strided_slice_quant8_6 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_6 test
+#include "generated/examples/strided_slice_quant8_6.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_6.model.cpp"
+} // namespace strided_slice_quant8_6
+TEST_F(GeneratedTests, strided_slice_quant8_6) {
+ execute(strided_slice_quant8_6::CreateModel,
+ strided_slice_quant8_6::is_ignored,
+ strided_slice_quant8_6::examples);
+}
+
+namespace strided_slice_quant8_7 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_7 test
+#include "generated/examples/strided_slice_quant8_7.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_7.model.cpp"
+} // namespace strided_slice_quant8_7
+TEST_F(GeneratedTests, strided_slice_quant8_7) {
+ execute(strided_slice_quant8_7::CreateModel,
+ strided_slice_quant8_7::is_ignored,
+ strided_slice_quant8_7::examples);
+}
+
+namespace strided_slice_quant8_8 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_8 test
+#include "generated/examples/strided_slice_quant8_8.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_8.model.cpp"
+} // namespace strided_slice_quant8_8
+TEST_F(GeneratedTests, strided_slice_quant8_8) {
+ execute(strided_slice_quant8_8::CreateModel,
+ strided_slice_quant8_8::is_ignored,
+ strided_slice_quant8_8::examples);
+}
+
+namespace strided_slice_quant8_9 {
+std::vector<MixedTypedExample> examples = {
+// Generated strided_slice_quant8_9 test
+#include "generated/examples/strided_slice_quant8_9.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/strided_slice_quant8_9.model.cpp"
+} // namespace strided_slice_quant8_9
+TEST_F(GeneratedTests, strided_slice_quant8_9) {
+ execute(strided_slice_quant8_9::CreateModel,
+ strided_slice_quant8_9::is_ignored,
+ strided_slice_quant8_9::examples);
+}
+
+namespace sub_broadcast_float {
+std::vector<MixedTypedExample> examples = {
+// Generated sub_broadcast_float test
+#include "generated/examples/sub_broadcast_float.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/sub_broadcast_float.model.cpp"
+} // namespace sub_broadcast_float
+TEST_F(GeneratedTests, sub_broadcast_float) {
+ execute(sub_broadcast_float::CreateModel,
+ sub_broadcast_float::is_ignored,
+ sub_broadcast_float::examples);
+}
+
namespace sub {
std::vector<MixedTypedExample> examples = {
// Generated sub test
sub::examples);
}
+namespace transpose_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated transpose_float_1 test
+#include "generated/examples/transpose_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/transpose_float_1.model.cpp"
+} // namespace transpose_float_1
+TEST_F(GeneratedTests, transpose_float_1) {
+ execute(transpose_float_1::CreateModel,
+ transpose_float_1::is_ignored,
+ transpose_float_1::examples);
+}
+
+namespace transpose {
+std::vector<MixedTypedExample> examples = {
+// Generated transpose test
+#include "generated/examples/transpose.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/transpose.model.cpp"
+} // namespace transpose
+TEST_F(GeneratedTests, transpose) {
+ execute(transpose::CreateModel,
+ transpose::is_ignored,
+ transpose::examples);
+}
+
+namespace transpose_quant8_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated transpose_quant8_1 test
+#include "generated/examples/transpose_quant8_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/transpose_quant8_1.model.cpp"
+} // namespace transpose_quant8_1
+TEST_F(GeneratedTests, transpose_quant8_1) {
+ execute(transpose_quant8_1::CreateModel,
+ transpose_quant8_1::is_ignored,
+ transpose_quant8_1::examples);
+}
+
namespace cast_ex_float32_to_int32 {
std::vector<MixedTypedExample> examples = {
// Generated cast_ex_float32_to_int32 test
strided_slice_ex_float_9::examples);
}
-namespace sub_broadcast_float {
-std::vector<MixedTypedExample> examples = {
-// Generated sub_broadcast_float test
-#include "generated/examples/sub_broadcast_float.example.cpp"
-};
-// Generated model constructor
-#include "generated/models/sub_broadcast_float.model.cpp"
-} // namespace sub_broadcast_float
-TEST_F(GeneratedTests, sub_broadcast_float) {
- execute(sub_broadcast_float::CreateModel,
- sub_broadcast_float::is_ignored,
- sub_broadcast_float::examples);
-}
-
namespace tensorflowmax_ex_2D_float {
std::vector<MixedTypedExample> examples = {
// Generated tensorflowmax_ex_2D_float test
--- /dev/null
+// Generated file (from: batch_to_space.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: batch_to_space_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: batch_to_space_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: div.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {2.0f, -4.0f, 8.0f, -16.0f}}, {1, {2.0f, -2.0f, -4.0f, 4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, -2.0f, -4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: div_broadcast_float.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2}}, {1, {1, 1, 2, 2}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 0.5f, 1}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: floor.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {-1.5f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 1.5f, 10.2f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {-2.0f, -1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 10}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: fully_connected_float_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 2, 1}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {11, 9}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, -8, 9, -10}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {24, 25, 26, 58, 59, 60}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.5f, 3.5f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {12.0f, 13.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {10.5f, 12.5f, 14.5f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {12, 13}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {10, 12, 14}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: pad.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 2.0f, 0.0f, 0.0f, 3.0f, 4.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: pad_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch_float_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7, 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch_float_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7, 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: space_to_batch_quant8_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: squeeze.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: squeeze_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: squeeze_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_float_11.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3, 4, 5, 6}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1, 2, 3}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_qaunt8_10.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {4, 5, 6}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_qaunt8_11.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {2, 3}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {2, 3}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_3.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_4.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {2}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_5.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_6.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {2, 3, 4}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_7.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {3, 2, 1}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_8.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {6, 5, 4}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: strided_slice_quant8_9.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 3, 4, 5, 6}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {1, 2, 4, 5}}}
+}
+}, // End of an example
//Input(s)
{ // See tools/test_generator/include/TestHarness.h:MixedTyped
// int -> FLOAT32 map
- {{0, {1, 2, 3, 4}}, {1, {2}}},
+ {{0, {1, 2}}, {1, {1, 2, 3, 4}}},
// int -> INT32 map
{},
// int -> QUANT8_ASYMM map
//Output(s)
{ // See tools/test_generator/include/TestHarness.h:MixedTyped
// int -> FLOAT32 map
- {{0, {-1, 0, 1, 2}}},
+ {{0, {0, 0, -2, -2}}},
// int -> INT32 map
{},
// int -> QUANT8_ASYMM map
--- /dev/null
+// Generated file (from: tanh.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {-1, 0, 1, 10}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {-0.761594156f, 0, 0.761594156f, 0.999999996f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: transpose.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 3.0f, 2.0f, 4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: transpose_float_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104, 5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49, 65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109, 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54, 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: transpose_quant8_1.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {{0, {0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44, 60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104, 5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49, 65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109, 10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54, 70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114, 15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119}}}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: batch_to_space.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto block_size = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t block_size_init[] = {2, 2};
+ model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+ model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: batch_to_space_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto block_size = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t block_size_init[] = {2, 2};
+ model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+ model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: batch_to_space_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::TENSOR_INT32, {2});
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 1.0, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 1.0, 0);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto block_size = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t block_size_init[] = {2, 2};
+ model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+ model->addOperation(ANEURALNETWORKS_BATCH_TO_SPACE_ND, {input, block_size}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: div.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ auto act = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type0);
+ // Phase 2, operations
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: div_broadcast_float.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
+ OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto act = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type1);
+ // Phase 2, operations
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: floor.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_FLOOR, {op1}, {op2});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op2});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: fully_connected_float_3.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type4(Type::INT32, {});
+ OperandType type1(Type::TENSOR_FLOAT32, {1, 2});
+ OperandType type2(Type::TENSOR_FLOAT32, {1});
+ OperandType type3(Type::TENSOR_FLOAT32, {2, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto b0 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type3);
+ auto act = model->addOperand(&type4);
+ // Phase 2, operations
+ static float op2_init[] = {2.0f, 4.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 2);
+ static float b0_init[] = {1.0f};
+ model->setOperandValue(b0, b0_init, sizeof(float) * 1);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: fully_connected_float_4d_simple.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type4(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {2, 3});
+ OperandType type1(Type::TENSOR_FLOAT32, {3, 10});
+ OperandType type2(Type::TENSOR_FLOAT32, {3});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 5, 1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto b0 = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type3);
+ auto act = model->addOperand(&type4);
+ // Phase 2, operations
+ static float op2_init[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f};
+ model->setOperandValue(op2, op2_init, sizeof(float) * 30);
+ static float b0_init[] = {1.0f, 2.0f, 3.0f};
+ model->setOperandValue(b0, b0_init, sizeof(float) * 3);
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {1, 2, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ OperandType type1(Type::TENSOR_INT32, {1});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+ static int32_t keepDims_init[] = {0};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {2});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 3, 2});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {1, 0, -3, -3};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+ static int32_t keepDims_init[] = {0};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {1, 3, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 3, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {0, 2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+ static int32_t keepDims_init[] = {1};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 0.8, 5);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.8, 5);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {1, 0, -3, -3};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+ static int32_t keepDims_init[] = {0};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean_quant8_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 3, 1}, 0.8, 5);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4, 3, 2}, 0.8, 5);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {0, 2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+ static int32_t keepDims_init[] = {1};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: pad.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+ OperandType type1(Type::TENSOR_INT32, {4, 2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t op2_init[] = {0, 0, 1, 1, 1, 1, 0, 0};
+ model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
+ model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: pad_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 3, 1});
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 4, 7, 1});
+ OperandType type1(Type::TENSOR_INT32, {4, 2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto op3 = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t op2_init[] = {0, 0, 0, 2, 1, 3, 0, 0};
+ model->setOperandValue(op2, op2_init, sizeof(int32_t) * 8);
+ model->addOperation(ANEURALNETWORKS_PAD, {op1, op2}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 2});
+ OperandType type3(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+ OperandType type2(Type::TENSOR_INT32, {2, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto block_size = model->addOperand(&type1);
+ auto paddings = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t block_size_init[] = {2, 2};
+ model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+ static int32_t paddings_init[] = {0, 0, 0, 0};
+ model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 4, 1});
+ OperandType type3(Type::TENSOR_FLOAT32, {4, 2, 2, 1});
+ OperandType type2(Type::TENSOR_INT32, {2, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto block_size = model->addOperand(&type1);
+ auto paddings = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t block_size_init[] = {2, 2};
+ model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+ static int32_t paddings_init[] = {0, 0, 0, 0};
+ model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch_float_2.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 5, 2, 1});
+ OperandType type3(Type::TENSOR_FLOAT32, {6, 2, 2, 1});
+ OperandType type2(Type::TENSOR_INT32, {2, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto block_size = model->addOperand(&type1);
+ auto paddings = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t block_size_init[] = {3, 2};
+ model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+ static int32_t paddings_init[] = {1, 0, 2, 0};
+ model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch_float_3.mod.py). Do not edit
+// Builds a SPACE_TO_BATCH_ND model: FLOAT32 input {1, 4, 2, 1}, constant
+// block size {3, 2} and paddings {{1, 1}, {2, 4}} -> output {6, 2, 4, 1}.
+// Only `input` is a runtime model input; the rest are constant operands.
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 2, 1});
+  OperandType type3(Type::TENSOR_FLOAT32, {6, 2, 4, 1});
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 1, 2, 4};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch_quant8_1.mod.py). Do not edit
+// Builds a SPACE_TO_BATCH_ND model: QUANT8_ASYMM (scale 1.0, zeroPoint 0)
+// input {1, 4, 4, 1}, block size {2, 2}, zero paddings -> output {4, 2, 2, 1}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 4, 1}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {4, 2, 2, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {2, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {0, 0, 0, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch_quant8_2.mod.py). Do not edit
+// Quantized variant of space_to_batch_float_2: QUANT8_ASYMM input {1, 5, 2, 1},
+// block size {3, 2}, paddings {{1, 0}, {2, 0}} -> output {6, 2, 2, 1}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 5, 2, 1}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {6, 2, 2, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 0, 2, 0};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: space_to_batch_quant8_3.mod.py). Do not edit
+// Quantized variant of space_to_batch_float_3: QUANT8_ASYMM input {1, 4, 2, 1},
+// block size {3, 2}, paddings {{1, 1}, {2, 4}} -> output {6, 2, 4, 1}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::TENSOR_INT32, {2, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 4, 2, 1}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {6, 2, 4, 1}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto block_size = model->addOperand(&type1);
+  auto paddings = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t block_size_init[] = {3, 2};
+  model->setOperandValue(block_size, block_size_init, sizeof(int32_t) * 2);
+  static int32_t paddings_init[] = {1, 1, 2, 4};
+  model->setOperandValue(paddings, paddings_init, sizeof(int32_t) * 4);
+  model->addOperation(ANEURALNETWORKS_SPACE_TO_BATCH_ND, {input, block_size, paddings}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: squeeze.mod.py). Do not edit
+// Builds a SQUEEZE model: FLOAT32 input {4, 1, 1, 2}; constant squeeze
+// dims {1, 2} remove the two size-1 axes -> output {4, 2}.
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {4, 1, 1, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {4, 2});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {1, 2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 2);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: squeeze_float_1.mod.py). Do not edit
+// Builds a SQUEEZE model: FLOAT32 input {1, 24, 1}; constant squeeze
+// dim {2} removes only the trailing size-1 axis -> output {1, 24}.
+void CreateModel(Model *model) {
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 24, 1});
+  OperandType type2(Type::TENSOR_FLOAT32, {1, 24});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: squeeze_quant8_1.mod.py). Do not edit
+// Quantized variant of squeeze_float_1: QUANT8_ASYMM input {1, 24, 1},
+// squeeze dim {2} -> output {1, 24}; scale 1.0, zeroPoint 0 on both ends.
+void CreateModel(Model *model) {
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {1, 24, 1}, 1.0, 0);
+  OperandType type2(Type::TENSOR_QUANT8_ASYMM, {1, 24}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto squeezeDims = model->addOperand(&type1);
+  auto output = model->addOperand(&type2);
+  // Phase 2, operations
+  static int32_t squeezeDims_init[] = {2};
+  model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {0, 0};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1, 0};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {2};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
--- /dev/null
+// Generated file (from: strided_slice_float_11.mod.py). Do not edit
+// Builds a STRIDED_SLICE model: FLOAT32 input {2, 3}, begin {0, 0},
+// end {1, 3}, stride {1, 1}; shrink_axis_mask = 1 collapses axis 0,
+// so the output is rank-1: {3} (the first row of the input).
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type0(Type::TENSOR_FLOAT32, {2, 3});
+  OperandType type3(Type::TENSOR_FLOAT32, {3});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {0, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {1, 3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {1};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {-3};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {-5};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {1};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type0);
// Phase 2, operations
static int32_t begins_init[] = {-1};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1, -1};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
auto strides = model->addOperand(&type1);
auto beginMask = model->addOperand(&type2);
auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
auto output = model->addOperand(&type3);
// Phase 2, operations
static int32_t begins_init[] = {1, 0};
model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
static int32_t endMask_init[] = {0};
model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
- model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask}, {output});
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{input},
--- /dev/null
+// Generated file (from: strided_slice_qaunt8_10.mod.py). Do not edit
+// NOTE(review): "qaunt8" is a typo, but it matches the actual generator
+// filename upstream, so it must stay as-is to track the source .mod.py.
+// STRIDED_SLICE on QUANT8_ASYMM input {2, 3}: begin {1, 0}, end {2, 2},
+// stride {1, 1}; end_mask = 2 (bit 1) makes axis 1 run to its full
+// extent, so the slice is row 1, all columns -> output {1, 3}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {2, 2};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {2};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_qaunt8_11.mod.py). Do not edit
+// NOTE(review): "qaunt8" is a typo but matches the upstream .mod.py filename.
+// Quantized variant of strided_slice_float_11: QUANT8_ASYMM input {2, 3},
+// begin {0, 0}, end {1, 3}, stride {1, 1}; shrink_axis_mask = 1 collapses
+// axis 0 -> rank-1 output {3} (the first row).
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {2});
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {0, 0};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+  static int32_t ends_init[] = {1, 3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+  static int32_t strides_init[] = {1, 1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {1};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_1.mod.py). Do not edit
+// STRIDED_SLICE on QUANT8_ASYMM 1-D input {4}: slice [1, 3) with stride 1,
+// no masks -> output {2} (elements 1 and 2).
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_2.mod.py). Do not edit
+// STRIDED_SLICE on QUANT8_ASYMM 1-D input {4}: negative begin -3 wraps to
+// index 1; slice [1, 3) with stride 1 -> output {2}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {-3};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_3.mod.py). Do not edit
+// STRIDED_SLICE on QUANT8_ASYMM 1-D input {4}: out-of-range begin -5 is
+// clamped to 0; slice [0, 3) with stride 1 -> output {3}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {-5};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_4.mod.py). Do not edit
+// STRIDED_SLICE on QUANT8_ASYMM 1-D input {4}: negative end -2 wraps to
+// index 2; slice [1, 2) with stride 1 -> output {1}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {-2};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_5.mod.py). Do not edit
+// STRIDED_SLICE on QUANT8_ASYMM 1-D input {4}: begin_mask = 1 overrides
+// the begin value (1), so the slice starts at 0; [0, 3) -> output {3}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {1};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {0};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_6.mod.py). Do not edit
+// STRIDED_SLICE on QUANT8_ASYMM 1-D input {4}: end_mask = 1 overrides the
+// end value (3), so the slice runs to the end; [1, 4) -> output {3}.
+void CreateModel(Model *model) {
+  OperandType type2(Type::INT32, {});
+  OperandType type1(Type::TENSOR_INT32, {1});
+  OperandType type3(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+  OperandType type0(Type::TENSOR_QUANT8_ASYMM, {4}, 1.0, 0);
+  // Phase 1, operands
+  auto input = model->addOperand(&type0);
+  auto begins = model->addOperand(&type1);
+  auto ends = model->addOperand(&type1);
+  auto strides = model->addOperand(&type1);
+  auto beginMask = model->addOperand(&type2);
+  auto endMask = model->addOperand(&type2);
+  auto shrinkAxisMask = model->addOperand(&type2);
+  auto output = model->addOperand(&type3);
+  // Phase 2, operations
+  static int32_t begins_init[] = {1};
+  model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+  static int32_t ends_init[] = {3};
+  model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+  static int32_t strides_init[] = {1};
+  model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+  static int32_t beginMask_init[] = {0};
+  model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+  static int32_t endMask_init[] = {1};
+  model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+  static int32_t shrinkAxisMask_init[] = {0};
+  model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {input},
+    {output});
+  assert(model->isValid());
+}
+
+// True if example output #i should be skipped during comparison (none here).
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_7.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {1});
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {3}, 1.0, 0);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto begins = model->addOperand(&type1);
+ auto ends = model->addOperand(&type1);
+ auto strides = model->addOperand(&type1);
+ auto beginMask = model->addOperand(&type2);
+ auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
+ auto output = model->addOperand(&type0);
+ // Phase 2, operations
+ static int32_t begins_init[] = {-1};
+ model->setOperandValue(begins, begins_init, sizeof(int32_t) * 1);
+ static int32_t ends_init[] = {-4};
+ model->setOperandValue(ends, ends_init, sizeof(int32_t) * 1);
+ static int32_t strides_init[] = {-1};
+ model->setOperandValue(strides, strides_init, sizeof(int32_t) * 1);
+ static int32_t beginMask_init[] = {0};
+ model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+ static int32_t endMask_init[] = {0};
+ model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_8.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {1, 3}, 1.0, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto begins = model->addOperand(&type1);
+ auto ends = model->addOperand(&type1);
+ auto strides = model->addOperand(&type1);
+ auto beginMask = model->addOperand(&type2);
+ auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t begins_init[] = {1, -1};
+ model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+ static int32_t ends_init[] = {2, -4};
+ model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+ static int32_t strides_init[] = {2, -1};
+ model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+ static int32_t beginMask_init[] = {0};
+ model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+ static int32_t endMask_init[] = {0};
+ model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: strided_slice_quant8_9.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ OperandType type3(Type::TENSOR_QUANT8_ASYMM, {2, 2}, 1.0, 0);
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3}, 1.0, 0);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto begins = model->addOperand(&type1);
+ auto ends = model->addOperand(&type1);
+ auto strides = model->addOperand(&type1);
+ auto beginMask = model->addOperand(&type2);
+ auto endMask = model->addOperand(&type2);
+ auto shrinkAxisMask = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t begins_init[] = {1, 0};
+ model->setOperandValue(begins, begins_init, sizeof(int32_t) * 2);
+ static int32_t ends_init[] = {2, 2};
+ model->setOperandValue(ends, ends_init, sizeof(int32_t) * 2);
+ static int32_t strides_init[] = {1, 1};
+ model->setOperandValue(strides, strides_init, sizeof(int32_t) * 2);
+ static int32_t beginMask_init[] = {1};
+ model->setOperandValue(beginMask, beginMask_init, sizeof(int32_t) * 1);
+ static int32_t endMask_init[] = {0};
+ model->setOperandValue(endMask, endMask_init, sizeof(int32_t) * 1);
+ static int32_t shrinkAxisMask_init[] = {0};
+ model->setOperandValue(shrinkAxisMask, shrinkAxisMask_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_STRIDED_SLICE, {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
// Generated file (from: sub_broadcast_float.mod.py). Do not edit
void CreateModel(Model *model) {
OperandType type2(Type::INT32, {});
- OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
- OperandType type1(Type::TENSOR_FLOAT32, {1});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2});
+ OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
// Phase 1, operands
auto op1 = model->addOperand(&type0);
auto op2 = model->addOperand(&type1);
auto act = model->addOperand(&type2);
- auto op3 = model->addOperand(&type0);
+ auto op3 = model->addOperand(&type1);
// Phase 2, operations
static int32_t act_init[] = {0};
model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
- model->addOperationEx(ANEURALNETWORKS_SUB_EX, {op1, op2, act}, {op3});
+ model->addOperation(ANEURALNETWORKS_SUB, {op1, op2, act}, {op3});
// Phase 3, inputs and outputs
model->identifyInputsAndOutputs(
{op1, op2},
--- /dev/null
+// Generated file (from: tanh.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type0);
+ // Phase 2, operations
+ model->addOperation(ANEURALNETWORKS_TANH, {op1}, {op2});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1},
+ {op2});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: transpose.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto perms = model->addOperand(&type1);
+ auto output = model->addOperand(&type0);
+ // Phase 2, operations
+ static int32_t perms_init[] = {0, 2, 1, 3};
+ model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: transpose_float_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3, 4, 5});
+ OperandType type2(Type::TENSOR_FLOAT32, {4, 2, 3, 5});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto perms = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t perms_init[] = {2, 0, 1, 3};
+ model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: transpose_quant8_1.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::TENSOR_INT32, {4});
+ OperandType type0(Type::TENSOR_QUANT8_ASYMM, {2, 3, 4, 5}, 1.0, 0);
+ OperandType type2(Type::TENSOR_QUANT8_ASYMM, {4, 2, 3, 5}, 1.0, 0);
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto perms = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t perms_init[] = {2, 0, 1, 3};
+ model->setOperandValue(perms, perms_init, sizeof(int32_t) * 4);
+ model->addOperation(ANEURALNETWORKS_TRANSPOSE, {input, perms}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
+++ /dev/null
-# model
-model = Model()
-i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-i2 = Input("op2", "TENSOR_FLOAT32", "{1}")
-act = Int32Scalar("act", 0)
-i3 = Output("op3", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
-model = model.Operation("SUB_EX", i1, i2, act).To(i3)
-
-# Example 1. Input in operand 0,
-input0 = {i1: # input 0
- [1, 2, 3, 4],
- i2: # input 1
- [2]}
-
-output0 = {i3: # output 0
- [-1, 0, 1, 2]}
-
-# Instantiate an example
-Example((input0, output0))
--- /dev/null
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{2, 2}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 2}", [2, 4])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", [1])
+out0 = Output("op3", "TENSOR_FLOAT32", "{2, 1}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [1, 2, 2, 1]}
+output0 = {out0: # output 0
+ [11, 9]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+ [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]}
+
+# Instantiate an example
+Example((input0, output0))
\ No newline at end of file
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 2, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 1.0, 0")
+
+model = model.Operation("BATCH_TO_SPACE_ND", i1, block).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+ [1, 5, 2, 6, 9, 13, 10, 14, 3, 7, 4, 8, 11, 15, 12, 16]}
+
+# Instantiate an example
+Example((input0, output0))
\ No newline at end of file
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT32", "{2, 2}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2],
+ i2: # input 1
+ [1, 1, 2, 2]}
+
+output0 = {i3: # output 0
+ [1, 2, 0.5, 1]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This test is for testing the input requirements of Fully Connected Op:
+# the input's first dimension doesn't have to be the batch size, the
+# input is reshaped as needed.
+
+model = Model()
+in0 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 5, 1}")
+weights = Parameter("op2", "TENSOR_FLOAT32", "{3, 10}", [
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 0
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 1
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 2
+])
+bias = Parameter("b0", "TENSOR_FLOAT32", "{3}", [1, 2, 3])
+out0 = Output("op3", "TENSOR_FLOAT32", "{2, 3}")
+act = Int32Scalar("act", 0)
+model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0)
+
+# Example 1. Input in operand 0,
+input0 = {in0: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
+ 1, 2, 3, 4, 5, 6, 7, -8, 9, -10]}
+output0 = {out0: # output 0
+ [24, 25, 26,
+ 58, 59, 60]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [2])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 1}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0,
+ 3.0, 4.0]}
+
+output0 = {output: # output 0
+ [1.5,
+ 3.5]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT32", "{2}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0]}
+
+output0 = {output: # output 0
+ [12.0, 13.0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_FLOAT32", "{1, 3, 1}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0]}
+
+output0 = {output: # output 0
+ [10.5, 12.5, 14.5]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 3, 2}, 0.8, 5")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2}, 0.8, 5")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+ [12, 13]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4, 3, 2}, 0.8, 5")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3, 1}, 0.8, 5")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+ [10, 12, 14]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 1, 1, 1, 1, 0, 0])
+i3 = Output("op3", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+model = model.Operation("PAD", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0,
+ 3.0, 4.0,]}
+
+output0 = {i3: # output 0
+ [0.0, 0.0, 0.0, 0.0,
+ 0.0, 1.0, 2.0, 0.0,
+ 0.0, 3.0, 4.0, 0.0,
+ 0.0, 0.0, 0.0, 0.0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 1}")
+i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 0, 2, 1, 3, 0, 0])
+i3 = Output("op3", "TENSOR_FLOAT32", "{1, 4, 7, 1}")
+model = model.Operation("PAD", i1, i2).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0,
+ 4.0, 5.0, 6.0]}
+
+output0 = {i3: # output 0
+ [0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 2}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_FLOAT32", "{4, 2, 2, 1}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+ [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 5, 2, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_FLOAT32", "{6, 2, 2, 1}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
+
+output0 = {output: # output 0
+ [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+ 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 2, 1}")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+output = Output("output", "TENSOR_FLOAT32", "{6, 2, 4, 1}")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8]}
+
+output0 = {output: # output 0
+ [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+ 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+ 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 4, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [2, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [0, 0, 0, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{4, 2, 2, 1}, 1.0, 0")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}
+
+output0 = {output: # output 0
+ [1, 3, 9, 11, 2, 4, 10, 12, 5, 7, 13, 15, 6, 8, 14, 16]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 5, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 0, 2, 0])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{6, 2, 2, 1}, 1.0, 0")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
+
+output0 = {output: # output 0
+ [0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
+ 0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 4, 2, 1}, 1.0, 0")
+block = Parameter("block_size", "TENSOR_INT32", "{2}", [3, 2])
+paddings = Parameter("paddings", "TENSOR_INT32", "{2, 2}", [1, 1, 2, 4])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{6, 2, 4, 1}, 1.0, 0")
+
+model = model.Operation("SPACE_TO_BATCH_ND", i1, block, paddings).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8]}
+
+output0 = {output: # output 0
+ [0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
+ 0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
+ 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 1, 1, 2}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{2}", [1, 2])
+output = Output("output", "TENSOR_FLOAT32", "{4, 2}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+output0 = {output: # output 0
+ [1.4, 2.3, 3.2, 4.1, 5.4, 6.3, 7.2, 8.1]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 24, 1}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 24}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{1, 24, 1}, 1.0, 0")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [2])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 24}, 1.0, 0")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+output0 = {output: # output 0
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
+
+# Instantiate an example
+Example((input0, output0))
strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, 2])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{1, 2}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{2}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 2)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{1, 3}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3}")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [1, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_FLOAT32", "{3}")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+ [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{2}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{3}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{1}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
beginMask = Int32Scalar("beginMask", 1)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{3}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 1)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{3}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{1}", [-1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{3}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, -1])
beginMask = Int32Scalar("beginMask", 0)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{1, 3}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
beginMask = Int32Scalar("beginMask", 1)
endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
output = Output("output", "TENSOR_FLOAT32", "{2, 2}")
-model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask).To(output)
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
--- /dev/null
+# STRIDED_SLICE, quant8 2x3 input: begins [1,0], ends [2,2], stride 1.
+# endMask has bit 1 set, so the end index for axis 1 is ignored and the slice
+# runs to the end of that dimension -> selects the full second row.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 2)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+# Row 1 of the 2x3 input, axis-1 end extended by endMask.
+output0 = {output: # output 0
+           [4, 5, 6]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 2x3 input: slice [0:1, 0:3] with shrinkAxisMask bit 0
+# set, so axis 0 (of size 1 after slicing) is removed from the output shape,
+# yielding a rank-1 {3} tensor instead of {1, 3}.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [0, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [1, 3])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 1)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+# First row of the input, with the leading axis squeezed away.
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 4 elements: plain slice [1:3] with
+# stride 1 and no masks -> elements at indices 1 and 2.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 4 elements: negative begin index -3
+# wraps to index 1 (4 + (-3)), so the slice is equivalent to [1:3].
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-3])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 4 elements: out-of-range negative begin
+# -5 is clamped to index 0 (expected output implies clamping), so the slice
+# behaves as [0:3].
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-5])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 4 elements: negative end index -2 wraps
+# to index 2 (4 + (-2)), so the slice is equivalent to [1:2] -> one element.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [-2])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 4 elements: beginMask bit 0 is set, so
+# the supplied begin of 1 is ignored and the slice starts at index 0,
+# behaving as [0:3].
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 1)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [1, 2, 3]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 4 elements: endMask bit 0 is set, so
+# the supplied end of 3 is ignored and the slice runs to the end of the
+# dimension, behaving as [1:4].
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{4}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [3])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 1)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4]}
+
+output0 = {output: # output 0
+           [2, 3, 4]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 1-D input of 3 elements: stride -1 walks backwards
+# from index -1 (last element) down to, but not including, -4, reversing the
+# whole tensor.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{1}", [-1])
+ends = Parameter("ends", "TENSOR_INT32", "{1}", [-4])
+strides = Parameter("strides", "TENSOR_INT32", "{1}", [-1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3]}
+
+output0 = {output: # output 0
+           [3, 2, 1]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 2x3 input: axis 0 takes row 1 (begin 1, stride 2);
+# axis 1 uses stride -1 from the last column down, so the selected row is
+# emitted in reverse order.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, -1])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, -4])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [2, -1])
+beginMask = Int32Scalar("beginMask", 0)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{1, 3}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+# Second row [4, 5, 6], reversed.
+output0 = {output: # output 0
+           [6, 5, 4]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# STRIDED_SLICE, quant8 2x3 input: beginMask bit 0 is set, so axis 0 starts
+# at index 0 despite begin=1 and both rows are kept; axis 1 is sliced [0:2],
+# taking the first two columns of each row.
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3}, 1.0, 0")
+begins = Parameter("begins", "TENSOR_INT32", "{2}", [1, 0])
+ends = Parameter("ends", "TENSOR_INT32", "{2}", [2, 2])
+strides = Parameter("strides", "TENSOR_INT32", "{2}", [1, 1])
+beginMask = Int32Scalar("beginMask", 1)
+endMask = Int32Scalar("endMask", 0)
+shrinkAxisMask = Int32Scalar("shrinkAxisMask", 0)
+
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{2, 2}, 1.0, 0")
+
+model = model.Operation("STRIDED_SLICE", i1, begins, ends, strides, beginMask, endMask, shrinkAxisMask).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2, 3, 4, 5, 6]}
+
+output0 = {output: # output 0
+           [1, 2, 4, 5]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# SUB with broadcasting: a {1, 2} operand is subtracted element-wise from a
+# {2, 2} operand (the smaller shape is broadcast across rows). Activation
+# scalar 0 applies no clamping, so negative results pass through.
+# model
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT32", "{2, 2}")
+model = model.Operation("SUB", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1, 2],
+          i2: # input 1
+          [1, 2, 3, 4]}
+
+# [1-1, 2-2, 1-3, 2-4]
+output0 = {i3: # output 0
+           [0, 0, -2, -2]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# TRANSPOSE, float 1x2x2x1 input with permutation [0, 2, 1, 3]: swaps the
+# two middle dimensions, which for this shape transposes the inner 2x2
+# matrix.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [0, 2, 1, 3])
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [1.0, 2.0,
+           3.0, 4.0]}
+
+output0 = {output: # output 0
+           [1.0, 3.0,
+            2.0, 4.0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# TRANSPOSE, float 2x3x4x5 input with permutation [2, 0, 1, 3]: output
+# dimension d takes input dimension perms[d], giving an output shape of
+# {4, 2, 3, 5}. Input data is the sequence 0..119 so each output element's
+# source index is easy to verify by hand.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{2, 3, 4, 5}")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [2, 0, 1, 3])
+output = Output("output", "TENSOR_FLOAT32", "{4, 2, 3, 5}")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+           12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+           24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+           36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+           48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+           60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+           72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+           84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+           96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+           108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}
+
+output0 = {output: # output 0
+           [0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
+            60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
+            5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
+            65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
+            10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
+            70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+            15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
+            75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# TRANSPOSE, quant8 variant of the 2x3x4x5 -> 4x2x3x5 test above: same
+# permutation [2, 0, 1, 3] and same 0..119 data, but on TENSOR_QUANT8_ASYMM
+# operands (scale 1.0, zero point 0).
+model = Model()
+i1 = Input("input", "TENSOR_QUANT8_ASYMM", "{2, 3, 4, 5}, 1.0, 0")
+perms = Parameter("perms", "TENSOR_INT32", "{4}", [2, 0, 1, 3])
+output = Output("output", "TENSOR_QUANT8_ASYMM", "{4, 2, 3, 5}, 1.0, 0")
+
+model = model.Operation("TRANSPOSE", i1, perms).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+          [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+           12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+           24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+           36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+           48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+           60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+           72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
+           84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+           96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+           108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}
+
+output0 = {output: # output 0
+           [0, 1, 2, 3, 4, 20, 21, 22, 23, 24, 40, 41, 42, 43, 44,
+            60, 61, 62, 63, 64, 80, 81, 82, 83, 84, 100, 101, 102, 103, 104,
+            5, 6, 7, 8, 9, 25, 26, 27, 28, 29, 45, 46, 47, 48, 49,
+            65, 66, 67, 68, 69, 85, 86, 87, 88, 89, 105, 106, 107, 108, 109,
+            10, 11, 12, 13, 14, 30, 31, 32, 33, 34, 50, 51, 52, 53, 54,
+            70, 71, 72, 73, 74, 90, 91, 92, 93, 94, 110, 111, 112, 113, 114,
+            15, 16, 17, 18, 19, 35, 36, 37, 38, 39, 55, 56, 57, 58, 59,
+            75, 76, 77, 78, 79, 95, 96, 97, 98, 99, 115, 116, 117, 118, 119]}
+
+# Instantiate an example
+Example((input0, output0))