This patch adds GTest test cases for the reduce_min_ex op.
Added test cases:
```
reduce_min_ex
reduce_min_ex_float_1
reduce_min_ex_float_2
```
Signed-off-by: prasannar <prasanna.r@samsung.com>
pack_ex_3D_float::examples);
}
+// REDUCE_MIN_EX over a {4, 3, 2} float tensor with axes {1, 0, -3, -3},
+// producing a {2} output (see reduce_min_ex_float_1.model.cpp).
+namespace reduce_min_ex_float_1 {
+std::vector<MixedTypedExample> examples = {
+// Generated reduce_min_ex_float_1 test
+#include "generated/examples/reduce_min_ex_float_1.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/reduce_min_ex_float_1.model.cpp"
+} // namespace reduce_min_ex_float_1
+// Runs each generated example through the model and compares outputs.
+TEST_F(GeneratedTests, reduce_min_ex_float_1) {
+ execute(reduce_min_ex_float_1::CreateModel,
+ reduce_min_ex_float_1::is_ignored,
+ reduce_min_ex_float_1::examples);
+}
+
+// REDUCE_MIN_EX over a {4, 3, 2} float tensor with axes {0, 2},
+// producing a {1, 3, 1} output (see reduce_min_ex_float_2.model.cpp).
+namespace reduce_min_ex_float_2 {
+std::vector<MixedTypedExample> examples = {
+// Generated reduce_min_ex_float_2 test
+#include "generated/examples/reduce_min_ex_float_2.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/reduce_min_ex_float_2.model.cpp"
+} // namespace reduce_min_ex_float_2
+// Runs each generated example through the model and compares outputs.
+TEST_F(GeneratedTests, reduce_min_ex_float_2) {
+ execute(reduce_min_ex_float_2::CreateModel,
+ reduce_min_ex_float_2::is_ignored,
+ reduce_min_ex_float_2::examples);
+}
+
+// REDUCE_MIN_EX over a {1, 2, 2, 1} float tensor with axis {2},
+// producing a {1, 2, 1} output (see reduce_min_ex.model.cpp).
+namespace reduce_min_ex {
+std::vector<MixedTypedExample> examples = {
+// Generated reduce_min_ex test
+#include "generated/examples/reduce_min_ex.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/reduce_min_ex.model.cpp"
+} // namespace reduce_min_ex
+// Runs each generated example through the model and compares outputs.
+TEST_F(GeneratedTests, reduce_min_ex) {
+ execute(reduce_min_ex::CreateModel,
+ reduce_min_ex::is_ignored,
+ reduce_min_ex::examples);
+}
+
namespace rsqrt_ex_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated rsqrt_ex_float_1 test
--- /dev/null
+// Generated file (from: reduce_min_ex.mod.py). Do not edit
+// Begin of an example
+// Input [2, 1, 3, 4] (shape {1, 2, 2, 1}) reduced with min over axis 2:
+// min(2, 1) = 1, min(3, 4) = 3 -> expected output [1, 3].
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {2.0f, 1.0f, 3.0f, 4.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 3.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: reduce_min_ex_float_1.mod.py). Do not edit
+// Begin of an example
+// Input shape {4, 3, 2}; reducing over the first two dimensions leaves the
+// per-column minima of the last dimension -> expected output [1, 2].
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {23.0f, 24.0f, 13.0f, 22.0f, 5.0f, 18.0f, 7.0f, 8.0f, 9.0f, 15.0f, 11.0f, 12.0f, 3.0f, 14.0f, 10.0f, 16.0f, 17.0f, 6.0f, 19.0f, 20.0f, 21.0f, 4.0f, 1.0f, 2.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 2.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: reduce_min_ex_float_2.mod.py). Do not edit
+// Begin of an example
+// Input shape {4, 3, 2}; reducing over axes 0 and 2 leaves one minimum per
+// middle-dimension index -> expected output [1, 3, 5] (shape {1, 3, 1}).
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {20.0f, 2.0f, 22.0f, 4.0f, 24.0f, 18.0f, 7.0f, 8.0f, 19.0f, 10.0f, 14.0f, 12.0f, 13.0f, 11.0f, 15.0f, 16.0f, 17.0f, 6.0f, 9.0f, 1.0f, 21.0f, 3.0f, 23.0f, 5.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 3.0f, 5.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: reduce_min_ex.mod.py). Do not edit
+// Builds a model computing REDUCE_MIN_EX over axis 2 of a {1, 2, 2, 1}
+// float input, producing a {1, 2, 1} output. The axis operand is a
+// compile-time constant; only the input tensor is a runtime model input.
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 2, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 2, 2, 1});
+ OperandType type1(Type::TENSOR_INT32, {1});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 1);
+ model->addOperationEx(ANEURALNETWORKS_REDUCE_MIN_EX, {input, axis}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns true when example index i should be skipped; no indices are
+// ignored for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: reduce_min_ex_float_1.mod.py). Do not edit
+// Builds a model computing REDUCE_MIN_EX over a {4, 3, 2} float input with
+// constant axes {1, 0, -3, -3}, producing a {2} output. The duplicate and
+// negative axes exercise axis normalization (-3 presumably wraps to axis 0
+// for the rank-3 input -- confirm against the op spec).
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_FLOAT32, {2});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 3, 2});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {1, 0, -3, -3};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+ model->addOperationEx(ANEURALNETWORKS_REDUCE_MIN_EX, {input, axis}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns true when example index i should be skipped; no indices are
+// ignored for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: reduce_min_ex_float_2.mod.py). Do not edit
+// Builds a model computing REDUCE_MIN_EX over axes {0, 2} of a {4, 3, 2}
+// float input, producing a {1, 3, 1} output. The output keeps the reduced
+// dimensions as size 1 (output rank equals input rank).
+void CreateModel(Model *model) {
+ OperandType type2(Type::TENSOR_FLOAT32, {1, 3, 1});
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 3, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t axis_init[] = {0, 2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+ model->addOperationEx(ANEURALNETWORKS_REDUCE_MIN_EX, {input, axis}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// Returns true when example index i should be skipped; no indices are
+// ignored for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
GeneratedTests.mean_quant8_1
GeneratedTests.mean_quant8_2
GeneratedTests.mobilenet_quantized
+GeneratedTests.reduce_min_ex*
GeneratedTests.svdf
GeneratedTests.svdf2
GeneratedTests.svdf_state
GeneratedTests.neg_ex_1D_float
GeneratedTests.neg_ex_2D_float
GeneratedTests.notequal*
+GeneratedTests.reduce_min*
GeneratedTests.relu1*
GeneratedTests.relu6*
GeneratedTests.relu*
GeneratedTests.notequal*
GeneratedTests.pad
GeneratedTests.pad_float_1
+GeneratedTests.reduce_min_ex*
GeneratedTests.rsqrt_ex_float_1
GeneratedTests.space_to_batch
GeneratedTests.space_to_batch_float_1
GeneratedTests.neg_ex_1D_float
GeneratedTests.neg_ex_2D_float
GeneratedTests.notequal*
+GeneratedTests.reduce_min_ex*
GeneratedTests.relu1_float_1
GeneratedTests.relu1_float_2
GeneratedTests.relu1_quant8_1
--- /dev/null
+# Test spec for REDUCE_MIN_EX: min over axis 2 of a {1, 2, 2, 1} float
+# tensor, producing a {1, 2, 1} output. Consumed by the test generator to
+# emit the .model.cpp and .example.cpp files.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 2, 2, 1}")
+axis = Parameter("axis", "TENSOR_INT32", "{1}", [2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 2, 1}")
+
+model = model.Operation("REDUCE_MIN_EX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [2.0, 1.0,
+ 3.0, 4.0]}
+
+# Expected: min(2, 1) = 1 and min(3, 4) = 3.
+output0 = {output: # output 0
+ [1.0,
+ 3.0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# Test spec for REDUCE_MIN_EX with duplicate and negative axes: reduces a
+# {4, 3, 2} float tensor over axes [1, 0, -3, -3] down to shape {2}
+# (-3 presumably wraps to axis 0 for the rank-3 input -- confirm against
+# the op spec). Consumed by the test generator.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 0, -3, -3])
+output = Output("output", "TENSOR_FLOAT32", "{2}")
+
+model = model.Operation("REDUCE_MIN_EX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [23.0, 24.0, 13.0, 22.0, 5.0, 18.0, 7.0, 8.0, 9.0, 15.0, 11.0, 12.0,
+ 3.0, 14.0, 10.0, 16.0, 17.0, 6.0, 19.0, 20.0, 21.0, 4.0, 1.0, 2.0]}
+
+# Expected: per-index minima of the last dimension across all other axes.
+output0 = {output: # output 0
+ [1.0, 2.0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# Test spec for REDUCE_MIN_EX over axes [0, 2] of a {4, 3, 2} float tensor;
+# the {1, 3, 1} output keeps the reduced dimensions as size 1. Consumed by
+# the test generator.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [0, 2])
+output = Output("output", "TENSOR_FLOAT32", "{1, 3, 1}")
+
+model = model.Operation("REDUCE_MIN_EX", i1, axis).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [20.0, 2.0, 22.0, 4.0, 24.0, 18.0, 7.0, 8.0, 19.0, 10.0, 14.0, 12.0,
+ 13.0, 11.0, 15.0, 16.0, 17.0, 6.0, 9.0, 1.0, 21.0, 3.0, 23.0, 5.0]}
+
+# Expected: one minimum per middle-dimension index.
+output0 = {output: # output 0
+ [1.0, 3.0, 5.0]}
+
+# Instantiate an example
+Example((input0, output0))