This commit adds GeneratedTests of ReduceMax (TensorflowMax) for 4-D tensors: one case reducing over the channel axis and one reducing over the height/width axes.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
tensorflowmax_ex_2D_int32::examples);
}
+// Registration for the 4-D ReduceMax test reducing over the channel (last) axis.
+// The example data and the model constructor are pulled in from generated files.
+namespace tensorflowmax_ex_4D_float_reducing_C {
+std::vector<MixedTypedExample> examples = {
+// Generated tensorflowmax_ex_4D_float_reducing_C test
+#include "generated/examples/tensorflowmax_ex_4D_float_reducing_C.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/tensorflowmax_ex_4D_float_reducing_C.model.cpp"
+} // namespace tensorflowmax_ex_4D_float_reducing_C
+// Runs the generated model against the generated example data.
+TEST_F(GeneratedTests, tensorflowmax_ex_4D_float_reducing_C) {
+ execute(tensorflowmax_ex_4D_float_reducing_C::CreateModel,
+ tensorflowmax_ex_4D_float_reducing_C::is_ignored,
+ tensorflowmax_ex_4D_float_reducing_C::examples);
+}
+
+// Registration for the 4-D ReduceMax test reducing over the height/width axes.
+// The example data and the model constructor are pulled in from generated files.
+namespace tensorflowmax_ex_4D_float_reducing_HW {
+std::vector<MixedTypedExample> examples = {
+// Generated tensorflowmax_ex_4D_float_reducing_HW test
+#include "generated/examples/tensorflowmax_ex_4D_float_reducing_HW.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/tensorflowmax_ex_4D_float_reducing_HW.model.cpp"
+} // namespace tensorflowmax_ex_4D_float_reducing_HW
+// Runs the generated model against the generated example data.
+TEST_F(GeneratedTests, tensorflowmax_ex_4D_float_reducing_HW) {
+ execute(tensorflowmax_ex_4D_float_reducing_HW::CreateModel,
+ tensorflowmax_ex_4D_float_reducing_HW::is_ignored,
+ tensorflowmax_ex_4D_float_reducing_HW::examples);
+}
+
namespace tensorflowmax_ex_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated tensorflowmax_ex_float_1 test
--- /dev/null
+// Generated file (from: tensorflowmax_ex_4D_float_reducing_C.mod.py). Do not edit
+// Begin of an example
+// Input: 120 sequential floats for shape {2, 3, 4, 5} (see the matching model
+// file). Expected output: the max over the last (depth=5) axis, i.e. the last
+// element of each consecutive group of 5.
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {4, 9, 14, 19, 24, 29, 34, 39, 44, 49, 54, 59, 64, 69, 74, 79, 84, 89, 94, 99, 104, 109, 114, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: tensorflowmax_ex_4D_float_reducing_HW.mod.py). Do not edit
+// Begin of an example
+// Input: 120 sequential floats for shape {2, 3, 4, 5} (see the matching model
+// file). Expected output: the max over the height/width axes; since values
+// increase monotonically, each (batch, depth) winner sits at the last row and
+// last column: i*60 + 55 + l.
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {55, 56, 57, 58, 59, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: tensorflowmax_ex_4D_float_reducing_C.mod.py). Do not edit
+// Builds a model computing TENSORFLOW_MAX_EX over a {2, 3, 4, 5} float input
+// with a constant axis operand {3, -1}. Note both entries name the same axis
+// (3 and -1 are both the last axis of a rank-4 tensor) -- presumably to
+// exercise duplicate-axis handling; confirm with the generator. Output is
+// {2, 3, 4}, i.e. the depth axis is reduced away.
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3, 4, 5});
+ OperandType type2(Type::TENSOR_FLOAT32, {2, 3, 4});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {3, -1};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+ model->addOperationEx(ANEURALNETWORKS_TENSORFLOW_MAX_EX, {input, axis}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns true when output index i should be skipped during comparison;
+// the generated ignore set is empty, so nothing is skipped for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: tensorflowmax_ex_4D_float_reducing_HW.mod.py). Do not edit
+// Builds a model computing TENSORFLOW_MAX_EX over a {2, 3, 4, 5} float input
+// with a constant axis operand {1, 2, -3, -2}. For a rank-4 tensor 1 == -3
+// and 2 == -2, so the height and width axes are each listed twice --
+// presumably to exercise duplicate-axis handling; confirm with the generator.
+// Output is {2, 5}, i.e. rows and cols are reduced away.
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3, 4, 5});
+ OperandType type2(Type::TENSOR_FLOAT32, {2, 5});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {1, 2, -3, -2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+ model->addOperationEx(ANEURALNETWORKS_TENSORFLOW_MAX_EX, {input, axis}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns true when output index i should be skipped during comparison;
+// the generated ignore set is empty, so nothing is skipped for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+batch = 2
+rows = 3
+cols = 4
+depth = 5
+
+input_table = [x for x in range(batch * rows * cols * depth)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ for l in range(depth):
+ input_table[i * rows * cols * depth + j * cols * depth + k * depth + l] = i * rows * cols * depth + j * cols * depth + k * depth + l;
+
+output_table = [x for x in range(batch * rows * cols)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ output_table[i * rows * cols + j * cols + k] = i * rows * cols * depth + j * cols * depth + k * depth + depth - 1;
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [3, -1])
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (batch, rows, cols))
+
+model = model.Operation("TENSORFLOW_MAX_EX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ input_table}
+
+output0 = {output: # output 0
+ output_table}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+batch = 2
+rows = 3
+cols = 4
+depth = 5
+
+input_table = [x for x in range(batch * rows * cols * depth)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ for l in range(depth):
+ input_table[i * rows * cols * depth + j * cols * depth + k * depth + l] = i * rows * cols * depth + j * cols * depth + k * depth + l;
+
+# Since the axises to be reduced are {rows, cols} and the value of the input always increases in here, the output's values are i * rows * cols * depth + (rows - 1) * cols * depth + (cols - 1) * depth + l.
+output_table = [x for x in range(batch * depth)]
+for i in range(batch):
+ for l in range(depth):
+ output_table[i * depth + l] = i * rows * cols * depth + (rows - 1) * cols * depth + (cols - 1) * depth + l;
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 2, -3, -2])
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batch, depth))
+
+model = model.Operation("TENSORFLOW_MAX_EX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ input_table}
+
+output0 = {output: # output 0
+ output_table}
+
+# Instantiate an example
+Example((input0, output0))