This patch adds a generated test for broadcast DIV.
It consists of testing broadcast DIV with tensors of shapes 4D and 2D.
Added GTest: div_broadcast_float_4D_2D_nnfw
Signed-off-by: prasannar <prasanna.r@samsung.com>
batch_to_space_quant8_1::examples);
}
+// Test fixture glue: pulls the generated example data and model
+// constructor into their own namespace so the symbols (CreateModel,
+// is_ignored, examples) don't collide with other generated tests.
+namespace div_broadcast_float_4D_2D_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated div_broadcast_float_4D_2D_nnfw test
+#include "generated/examples/div_broadcast_float_4D_2D_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/div_broadcast_float_4D_2D_nnfw.model.cpp"
+} // namespace div_broadcast_float_4D_2D_nnfw
+// Runs the generated DIV model (4D {3,2,2,2} input divided by a
+// broadcast 2D {2,2} input) against the generated example data.
+TEST_F(GeneratedTests, div_broadcast_float_4D_2D_nnfw) {
+ execute(div_broadcast_float_4D_2D_nnfw::CreateModel,
+ div_broadcast_float_4D_2D_nnfw::is_ignored,
+ div_broadcast_float_4D_2D_nnfw::examples);
+}
+
namespace div_broadcast_float {
std::vector<MixedTypedExample> examples = {
// Generated div_broadcast_float test
--- /dev/null
+// Generated file (from: div_broadcast_float_4D_2D_nnfw.mod.py). Do not edit
+// Begin of an example
+// One MixedTypedExample: input 0 is the flattened 4D {3,2,2,2} tensor
+// (24 values), input 1 the flattened 2D {2,2} tensor (4 values); the
+// output is the elementwise broadcast quotient input0 / input1
+// (e.g. 16/16 = 1.0f, 11/7 = 1.57142857f, ...).
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {16, 11, 23, 3, 9, 14, 9, 2, 0, 23, 13, 2, 13, 17, 16, 10, 15, 19, 12, 16, 15, 20, 9, 7}}, {1, {16, 7, 23, 16}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.0f, 1.57142857f, 1.0f, 0.1875f, 0.5625f, 2.0f, 0.39130435f, 0.125f, 0, 3.28571429f, 0.56521739f, 0.125f, 0.8125f, 2.42857143f, 0.69565217f, 0.625f, 0.9375f, 2.71428571f, 0.52173913f, 1.0f, 0.9375f, 2.85714286f, 0.39130435f, 0.4375f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: div_broadcast_float_4D_2D_nnfw.mod.py). Do not edit
+// Builds the test model: op3 = DIV(op1, op2, act), where op1 is a 4D
+// {3,2,2,2} float tensor, op2 a 2D {2,2} float tensor broadcast against
+// op1, and act a constant INT32 scalar 0 (fused activation "none" per
+// the NNAPI DIV spec).
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type1(Type::TENSOR_FLOAT32, {2, 2});
+ OperandType type0(Type::TENSOR_FLOAT32, {3, 2, 2, 2});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto act = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type0);
+ // Phase 2, operations
+ // Activation 0 is baked into the model as a constant operand.
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_DIV, {op1, op2, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+// Returns true when output index i should be skipped during result
+// comparison; the ignore set is empty, so every output is checked.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+# model
+# Generator input for div_broadcast_float_4D_2D_nnfw: a single DIV op
+# dividing a 4D {3,2,2,2} float tensor by a broadcast 2D {2,2} float
+# tensor, with activation constant 0 (none). The generated .example.cpp
+# and .model.cpp in this patch are produced from this file.
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", "{3, 2, 2, 2}")
+i2 = Input("op2", "TENSOR_FLOAT32", "{2, 2}")
+act = Int32Scalar("act", 0)
+i3 = Output("op3", "TENSOR_FLOAT32", "{3, 2, 2, 2}")
+model = model.Operation("DIV", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+# Each output value is input0[k] / input1[k % 4] under broadcasting,
+# e.g. 16/16 = 1.0, 11/7 = 1.57142857.
+input0 = {i1: # input 0
+          [16, 11, 23, 3, 9, 14, 9, 2, 0, 23, 13, 2, 13, 17, 16, 10, 15, 19, 12, 16, 15, 20, 9, 7],
+          i2: # input 1
+          [16, 7, 23, 16]}
+
+output0 = {i3: # output 0
+           [1.0, 1.57142857, 1.0, 0.1875, 0.5625, 2.0, 0.39130435, 0.125,
+           0, 3.28571429, 0.56521739, 0.125, 0.8125, 2.42857143, 0.69565217, 0.625,
+           0.9375, 2.71428571, 0.52173913, 1.0, 0.9375, 2.85714286, 0.39130435, 0.4375]
+          }
+
+# Instantiate an example
+Example((input0, output0))