The input tensor data is treated as a flat buffer.
Along the given axis, the output buffers are generated
according to the strides determined by axis and num_splits.
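
For reference, a minimal sketch of the strided copy this describes (plain C++;
the helper name unpackAlongAxis and its signature are illustrative, not the
actual kernel):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Split `input` (flat buffer with shape `dims`) into `num_splits`
    // output buffers along `axis`.
    std::vector<std::vector<float>> unpackAlongAxis(
        const std::vector<float> &input, const std::vector<size_t> &dims,
        size_t axis, size_t num_splits) {
      assert(dims[axis] == num_splits);
      size_t outer = 1, inner = 1;
      for (size_t i = 0; i < axis; ++i) outer *= dims[i];               // slices before the axis
      for (size_t i = axis + 1; i < dims.size(); ++i) inner *= dims[i]; // contiguous run per slice
      std::vector<std::vector<float>> outputs(num_splits);
      for (size_t s = 0; s < num_splits; ++s) {
        outputs[s].reserve(outer * inner);
        for (size_t o = 0; o < outer; ++o) {
          // Consecutive slices of output s are num_splits * inner apart in the input.
          size_t offset = (o * num_splits + s) * inner;
          outputs[s].insert(outputs[s].end(), input.begin() + offset,
                            input.begin() + offset + inner);
        }
      }
      return outputs;
    }

With the {3, 2, 3, 4} input below, axis = 0 yields contiguous blocks of 24
values per output, while axis = 2 gathers runs of 4 values with a stride of 12,
matching the expected data in the generated examples.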
Signed-off-by: Vishal Keshav <vishal.k1@samsung.com>
--- /dev/null
+// Generated file (from: unpack_ex_3D_float.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}}, {1, {24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47}}, {2, {48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: unpack_ex_3D_float2.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 12, 13, 14, 15, 24, 25, 26, 27, 36, 37, 38, 39, 48, 49, 50, 51, 60, 61, 62, 63}}, {1, {4, 5, 6, 7, 16, 17, 18, 19, 28, 29, 30, 31, 40, 41, 42, 43, 52, 53, 54, 55, 64, 65, 66, 67}}, {2, {8, 9, 10, 11, 20, 21, 22, 23, 32, 33, 34, 35, 44, 45, 46, 47, 56, 57, 58, 59, 68, 69, 70, 71}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: unpack_ex_3D_float.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type2(Type::TENSOR_FLOAT32, {2, 3, 4});
+ OperandType type0(Type::TENSOR_FLOAT32, {3, 2, 3, 4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto num_splits = model->addOperand(&type1);
+ auto output1 = model->addOperand(&type2);
+ auto output2 = model->addOperand(&type2);
+ auto output3 = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {0};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+ static int32_t num_splits_init[] = {3};
+ model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_UNPACK_EX, {input, axis, num_splits}, {output1, output2, output3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output1, output2, output3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: unpack_ex_3D_float2.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type1(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {3, 2, 3, 4});
+ OperandType type2(Type::TENSOR_FLOAT32, {3, 2, 4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto num_splits = model->addOperand(&type1);
+ auto output1 = model->addOperand(&type2);
+ auto output2 = model->addOperand(&type2);
+ auto output3 = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+ static int32_t num_splits_init[] = {3};
+ model->setOperandValue(num_splits, num_splits_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_UNPACK_EX, {input, axis, num_splits}, {output1, output2, output3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output1, output2, output3});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+# Sample UnPack model, axis = 0
+model = Model()
+input = Input("input", "TENSOR_FLOAT32", "{3, 2, 3, 4}")
+axis = Int32Scalar("axis", 0)
+num_splits = Int32Scalar("num_splits", 3)
+out1 = Output("output1", "TENSOR_FLOAT32", "{2, 3, 4}")
+out2 = Output("output2", "TENSOR_FLOAT32", "{2, 3, 4}")
+out3 = Output("output3", "TENSOR_FLOAT32", "{2, 3, 4}")
+model = model.Operation("UNPACK_EX", input, axis, num_splits).To([out1, out2, out3])
+
+input0 = {input: # input 0
+ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]}
+
+output0 = {out1: # output 0
+ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
+ out2: # output 1
+ [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
+ out3: # output 2
+ [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# Sample UnPack model, axis = 2
+model = Model()
+input = Input("input", "TENSOR_FLOAT32", "{3, 2, 3, 4}")
+axis = Int32Scalar("axis", 2)
+num_splits = Int32Scalar("num_splits", 3)
+out1 = Output("output1", "TENSOR_FLOAT32", "{3, 2, 4}")
+out2 = Output("output2", "TENSOR_FLOAT32", "{3, 2, 4}")
+out3 = Output("output3", "TENSOR_FLOAT32", "{3, 2, 4}")
+model = model.Operation("UNPACK_EX", input, axis, num_splits).To([out1, out2, out3])
+
+input0 = {input: # input 0
+ [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]}
+
+output0 = {out1: # output 0
+ [ 0, 1, 2, 3, 12, 13, 14, 15, 24, 25, 26, 27, 36, 37, 38, 39, 48, 49, 50, 51, 60, 61, 62, 63],
+ out2: # output 1
+ [ 4, 5, 6, 7, 16, 17, 18, 19, 28, 29, 30, 31, 40, 41, 42, 43, 52, 53, 54, 55, 64, 65, 66, 67],
+ out3: # output 2
+ [ 8, 9, 10, 11, 20, 21, 22, 23, 32, 33, 34, 35, 44, 45, 46, 47, 56, 57, 58, 59, 68, 69, 70, 71]}
+
+# Instantiate an example
+Example((input0, output0))