This commit adds generated tests of the Mean operation for 4-D inputs.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
fully_connected_float_4d_simple::examples);
}
+// MEAN reducing the channel (last) axis: input {2, 3, 4, 5} -> output {2, 3, 4}.
+// Example data and model come from generated files; see the .mod.py generator.
+namespace mean_4D_float_reducing_C_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_4D_float_reducing_C_nnfw test
+#include "generated/examples/mean_4D_float_reducing_C_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_4D_float_reducing_C_nnfw.model.cpp"
+} // namespace mean_4D_float_reducing_C_nnfw
+// Runs the generated model against the example inputs/expected outputs above.
+TEST_F(GeneratedTests, mean_4D_float_reducing_C_nnfw) {
+ execute(mean_4D_float_reducing_C_nnfw::CreateModel,
+ mean_4D_float_reducing_C_nnfw::is_ignored,
+ mean_4D_float_reducing_C_nnfw::examples);
+}
+
+// MEAN reducing the H and W axes: input {2, 3, 4, 5} -> output {2, 5}.
+// Example data and model come from generated files; see the .mod.py generator.
+namespace mean_4D_float_reducing_HW_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated mean_4D_float_reducing_HW_nnfw test
+#include "generated/examples/mean_4D_float_reducing_HW_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_4D_float_reducing_HW_nnfw.model.cpp"
+} // namespace mean_4D_float_reducing_HW_nnfw
+// Runs the generated model against the example inputs/expected outputs above.
+TEST_F(GeneratedTests, mean_4D_float_reducing_HW_nnfw) {
+ execute(mean_4D_float_reducing_HW_nnfw::CreateModel,
+ mean_4D_float_reducing_HW_nnfw::is_ignored,
+ mean_4D_float_reducing_HW_nnfw::examples);
+}
+
namespace mean_axis01_1_nnfw {
std::vector<MixedTypedExample> examples = {
// Generated mean_axis01_1_nnfw test
--- /dev/null
+// Generated file (from: mean_4D_float_reducing_C_nnfw.mod.py). Do not edit
+// Input 0: values 0..119, the flattened {2, 3, 4, 5} tensor from the generator.
+// Output 0: mean over the last (C) axis — one value per consecutive group of 5
+// inputs (e.g. (0+1+2+3+4)/5 = 2.0f).
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {2.0f, 7.0f, 12.0f, 17.0f, 22.0f, 27.0f, 32.0f, 37.0f, 42.0f, 47.0f, 52.0f, 57.0f, 62.0f, 67.0f, 72.0f, 77.0f, 82.0f, 87.0f, 92.0f, 97.0f, 102.0f, 107.0f, 112.0f, 117.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_4D_float_reducing_HW_nnfw.mod.py). Do not edit
+// Input 0: values 0..119, the flattened {2, 3, 4, 5} tensor from the generator.
+// Output 0: mean over the H and W axes — 12 elements averaged per (batch,
+// depth) pair (e.g. batch 0, depth 0 averages 0, 5, ..., 55 -> 27.5f).
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {27.5f, 28.5f, 29.5f, 30.5f, 31.5f, 87.5f, 88.5f, 89.5f, 90.5f, 91.5f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_4D_float_reducing_C_nnfw.mod.py). Do not edit
+// Builds: output{2, 3, 4} = MEAN(input{2, 3, 4, 5}, axis = {3, -1}, keepDims = 0).
+// Axis entries 3 and -1 address the same (last) axis; the duplicate appears to
+// exercise negative-axis handling — confirm against the generator script.
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3, 4, 5});
+ OperandType type3(Type::TENSOR_FLOAT32, {2, 3, 4});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {3, -1};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+ static int32_t keepDims_init[] = {0};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns whether output i should be skipped when comparing results; this
+// test ignores no outputs (the set is empty).
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean_4D_float_reducing_HW_nnfw.mod.py). Do not edit
+// Builds: output{2, 5} = MEAN(input{2, 3, 4, 5}, axis = {1, 2, -3, -2}, keepDims = 0).
+// Axis entries 1/-3 and 2/-2 are duplicate references to axes 1 and 2; the
+// duplicates appear to exercise negative-axis handling — confirm against the
+// generator script.
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {2, 3, 4, 5});
+ OperandType type3(Type::TENSOR_FLOAT32, {2, 5});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ static int32_t axis_init[] = {1, 2, -3, -2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+ static int32_t keepDims_init[] = {0};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns whether output i should be skipped when comparing results; this
+// test ignores no outputs (the set is empty).
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+batch = 2
+rows = 3
+cols = 4
+depth = 5
+
+input_table = [x for x in range(batch * rows * cols * depth)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ for l in range(depth):
+ input_table[i * rows * cols * depth + j * cols * depth + k * depth + l] = i * rows * cols * depth + j * cols * depth + k * depth + l;
+
+output_table = [0 for x in range(batch * rows * cols)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ for l in range(depth):
+ output_table[i * rows * cols + j * cols + k] += input_table[i * rows * cols * depth + j * cols * depth + k * depth + l];
+
+for i in range(batch * rows * cols):
+ output_table[i] /= float(depth);
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [3, -1])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (batch, rows, cols))
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ input_table}
+
+output0 = {output: # output 0
+ output_table}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+batch = 2
+rows = 3
+cols = 4
+depth = 5
+
+input_table = [x for x in range(batch * rows * cols * depth)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ for l in range(depth):
+ input_table[i * rows * cols * depth + j * cols * depth + k * depth + l] = i * rows * cols * depth + j * cols * depth + k * depth + l;
+
+output_table = [0 for x in range(batch * depth)]
+for i in range(batch):
+ for j in range(rows):
+ for k in range(cols):
+ for l in range(depth):
+ output_table[i * depth + l] += input_table[i * rows * cols * depth + j * cols * depth + k * depth + l];
+
+for i in range(batch * depth):
+ output_table[i] /= float(rows * cols);
+
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d, %d}" % (batch, rows, cols, depth))
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 2, -3, -2])
+keepDims = Int32Scalar("keepDims", 0)
+output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batch, depth))
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ input_table}
+
+output0 = {output: # output 0
+ output_table}
+
+# Instantiate an example
+Example((input0, output0))