mobilenet_quantized::examples);
}
+namespace mul_broadcast_3D_1D_1_nnfw {
+// Example data (inputs and expected outputs) for this test, spliced in from
+// the generated .example.cpp file; each entry is one MixedTypedExample.
+std::vector<MixedTypedExample> examples = {
+// Generated mul_broadcast_3D_1D_1_nnfw test
+#include "generated/examples/mul_broadcast_3D_1D_1_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mul_broadcast_3D_1D_1_nnfw.model.cpp"
+} // namespace mul_broadcast_3D_1D_1_nnfw
+// Runs the generated MUL model ({3, 1, 4} x {4} broadcast — see the matching
+// .model.cpp) against the example data above.
+TEST_F(GeneratedTests, mul_broadcast_3D_1D_1_nnfw) {
+ execute(mul_broadcast_3D_1D_1_nnfw::CreateModel,
+ mul_broadcast_3D_1D_1_nnfw::is_ignored,
+ mul_broadcast_3D_1D_1_nnfw::examples);
+}
+
+namespace mul_broadcast_3D_1D_2_nnfw {
+// Example data (inputs and expected outputs) for this test, spliced in from
+// the generated .example.cpp file; each entry is one MixedTypedExample.
+std::vector<MixedTypedExample> examples = {
+// Generated mul_broadcast_3D_1D_2_nnfw test
+#include "generated/examples/mul_broadcast_3D_1D_2_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mul_broadcast_3D_1D_2_nnfw.model.cpp"
+} // namespace mul_broadcast_3D_1D_2_nnfw
+// Runs the generated MUL model ({3, 2, 4} x {4} broadcast — see the matching
+// .model.cpp) against the example data above.
+TEST_F(GeneratedTests, mul_broadcast_3D_1D_2_nnfw) {
+ execute(mul_broadcast_3D_1D_2_nnfw::CreateModel,
+ mul_broadcast_3D_1D_2_nnfw::is_ignored,
+ mul_broadcast_3D_1D_2_nnfw::examples);
+}
+
namespace mul_broadcast_quant8 {
std::vector<MixedTypedExample> examples = {
// Generated mul_broadcast_quant8 test
--- /dev/null
+// Generated file (from: mul_broadcast_3D_1D_1_nnfw.mod.py). Do not edit
+// Begin of an example
+// One MixedTypedExample: input 0 is the {3, 1, 4} left operand, input 1 the
+// {4} operand that gets broadcast (shapes per the matching .model.cpp).
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {0.8364028931f, 0.6620308161f, 1.1811592579f, 0.4827561378f, -0.384627074f, -1.7236120701f, 3.5318591595f, 0.2959995866f, 1.6260499954f, -0.7885181308f, -0.8246002197f, -1.1367146969f}}, {1, {0.8364028931f, -0.384627074f, 1.6260499954f, 0.6620308161f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Expected element-wise product, with input 1 broadcast across the last
+ // dimension (e.g. 0.8364028931 * 0.8364028931 = 0.6995698214).
+ {{0, {0.6995698214f, -0.2546349764f, 1.9206240177f, 0.3195994496f, -0.3217031956f, 0.6629478931f, 5.7429795265f, 0.1959608495f, 1.3600329161f, 0.3032854199f, -1.3408411741f, -0.7525401711f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mul_broadcast_3D_1D_2_nnfw.mod.py). Do not edit
+// Begin of an example
+// One MixedTypedExample: input 0 is the {3, 2, 4} left operand, input 1 the
+// {4} operand that gets broadcast (shapes per the matching .model.cpp).
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {2.2774236202f, -2.4773113728f, -0.4044751823f, -0.8101355433f, -1.9691983461f, 2.2676842213f, -2.2757787704f, -0.8289190531f, 0.0121828541f, -1.7484937906f, -0.5269883871f, -0.6346995831f, 2.4886128902f, -1.5107979774f, -0.7372134924f, -0.5374289751f, -1.2039715052f, 1.527836442f, 0.8248311877f, -2.4172706604f, 0.6997106671f, -0.8929677606f, 0.3650484681f, 1.3652951717f}}, {1, {2.2774236202f, 0.0121828541f, -1.2039715052f, -1.9691983461f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Expected element-wise product, with input 1 broadcast across the last
+ // dimension (e.g. 2.2774236202 * 2.2774236202 = 5.1866583824).
+ {{0, {5.1866583824f, -0.0301807225f, 0.4869765937f, 1.5953176022f, -4.4846987724f, 0.0276268665f, 2.7399728298f, 1.6323059797f, 0.0277455188f, -0.0213016439f, 0.6344789863f, 1.2498493195f, 5.6676259041f, -0.0184058305f, 0.8875840306f, 1.0583041906f, -2.7419531345f, 0.0186134093f, -0.993073225f, 4.7600855827f, 1.593537569f, -0.0108788963f, -0.4395079613f, -2.6885368824f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mul_broadcast_3D_1D_1_nnfw.mod.py). Do not edit
+// Builds the one-operation model: op3 = MUL(op1, op2, act), where op1 is
+// TENSOR_FLOAT32 {3, 1, 4}, op2 is TENSOR_FLOAT32 {4} (broadcast operand),
+// and act is a constant INT32 scalar fused-activation code set to 0 (none).
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {3, 1, 4});
+ OperandType type1(Type::TENSOR_FLOAT32, {4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto act = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type0);
+ // Phase 2, operations
+ // Activation is baked in as a constant operand (0 = no fused activation).
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+// Returns true if output index i should be skipped when comparing results;
+// the ignore set is empty, so no outputs are ignored for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mul_broadcast_3D_1D_2_nnfw.mod.py). Do not edit
+// Builds the one-operation model: op3 = MUL(op1, op2, act), where op1 is
+// TENSOR_FLOAT32 {3, 2, 4}, op2 is TENSOR_FLOAT32 {4} (broadcast operand),
+// and act is a constant INT32 scalar fused-activation code set to 0 (none).
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type0(Type::TENSOR_FLOAT32, {3, 2, 4});
+ OperandType type1(Type::TENSOR_FLOAT32, {4});
+ // Phase 1, operands
+ auto op1 = model->addOperand(&type0);
+ auto op2 = model->addOperand(&type1);
+ auto act = model->addOperand(&type2);
+ auto op3 = model->addOperand(&type0);
+ // Phase 2, operations
+ // Activation is baked in as a constant operand (0 = no fused activation).
+ static int32_t act_init[] = {0};
+ model->setOperandValue(act, act_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MUL, {op1, op2, act}, {op3});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {op1, op2},
+ {op3});
+ assert(model->isValid());
+}
+
+// Returns true if output index i should be skipped when comparing results;
+// the ignore set is empty, so no outputs are ignored for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+# ------ file: report/tensor_mul_1.log ------
+# Test-generator script for MUL with broadcasting: a {3, 1, 4} tensor
+# multiplied element-wise by a {4} tensor. The tensor values below appear to
+# have been captured from a tflite interpreter run (see the
+# "tflite::interpreter.tensor(i)" comments and the .log header above) —
+# TODO confirm provenance against the report tooling.
+tensor_shape_gen = []
+tensor_value_gen = []
+
+# input tensors
+# tensor name: left
+# tflite::interpreter.tensor(1) -> tensor_value_gen[0]
+tensor_shape_gen.append('{3, 1, 4}')
+tensor_value_gen.append([0.8364028931, 0.6620308161, 1.1811592579, 0.4827561378, -0.3846270740, -1.7236120701, 3.5318591595, 0.2959995866, 1.6260499954, -0.7885181308, -0.8246002197, -1.1367146969, ])
+
+# input tensors
+# tensor name: right
+# tflite::interpreter.tensor(2) -> tensor_value_gen[1]
+tensor_shape_gen.append('{4}')
+tensor_value_gen.append([0.8364028931, -0.3846270740, 1.6260499954, 0.6620308161, ])
+
+# output tensors
+# tensor name: output
+# tflite::interpreter.tensor(0) -> tensor_value_gen[2]
+tensor_shape_gen.append('{3, 1, 4}')
+tensor_value_gen.append([0.6995698214, -0.2546349764, 1.9206240177, 0.3195994496, -0.3217031956, 0.6629478931, 5.7429795265, 0.1959608495, 1.3600329161, 0.3032854199, -1.3408411741, -0.7525401711, ])
+
+# --------- tensor shape and value defined above ---------
+# model
+# Single-op model: op3 = MUL(op1, op2, act) with no fused activation.
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", tensor_shape_gen[0])
+i2 = Input("op2", "TENSOR_FLOAT32", tensor_shape_gen[1])
+act = Int32Scalar("act", 0) # an int32_t scalar fuse_activation
+i3 = Output("op3", "TENSOR_FLOAT32", tensor_shape_gen[2])
+model = model.Operation("MUL", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ tensor_value_gen[0],
+ i2: # input 1
+ tensor_value_gen[1]}
+
+output0 = {i3: # output 0
+ tensor_value_gen[2]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# ------ broadcast test when dim is not 1 -------
+# Test-generator script for MUL with broadcasting where the left operand's
+# middle dimension is not 1 ({3, 2, 4} x {4}), so the {4} operand is
+# broadcast across a non-unit axis. The tensor values below appear to have
+# been captured from a tflite interpreter run (see the
+# "tflite::interpreter.tensor(i)" comments) — TODO confirm provenance.
+tensor_shape_gen = []
+tensor_value_gen = []
+
+# input tensors
+# tensor name: left
+# tflite::interpreter.tensor(1) -> tensor_value_gen[0]
+tensor_shape_gen.append('{3, 2, 4}')
+tensor_value_gen.append([2.2774236202, -2.4773113728, -0.4044751823, -0.8101355433, -1.9691983461, 2.2676842213, -2.2757787704, -0.8289190531, 0.0121828541, -1.7484937906, -0.5269883871, -0.6346995831, 2.4886128902, -1.5107979774, -0.7372134924, -0.5374289751, -1.2039715052, 1.5278364420, 0.8248311877, -2.4172706604, 0.6997106671, -0.8929677606, 0.3650484681, 1.3652951717, ])
+
+# input tensors
+# tensor name: right
+# tflite::interpreter.tensor(2) -> tensor_value_gen[1]
+tensor_shape_gen.append('{4}')
+tensor_value_gen.append([2.2774236202, 0.0121828541, -1.2039715052, -1.9691983461, ])
+
+# output tensors
+# tensor name: output
+# tflite::interpreter.tensor(0) -> tensor_value_gen[2]
+tensor_shape_gen.append('{3, 2, 4}')
+tensor_value_gen.append([5.1866583824, -0.0301807225, 0.4869765937, 1.5953176022, -4.4846987724, 0.0276268665, 2.7399728298, 1.6323059797, 0.0277455188, -0.0213016439, 0.6344789863, 1.2498493195, 5.6676259041, -0.0184058305, 0.8875840306, 1.0583041906, -2.7419531345, 0.0186134093, -0.9930732250, 4.7600855827, 1.5935375690, -0.0108788963, -0.4395079613, -2.6885368824, ])
+
+# --------- tensor shape and value defined above ---------
+
+# model
+# Single-op model: op3 = MUL(op1, op2, act) with no fused activation.
+model = Model()
+i1 = Input("op1", "TENSOR_FLOAT32", tensor_shape_gen[0])
+i2 = Input("op2", "TENSOR_FLOAT32", tensor_shape_gen[1])
+act = Int32Scalar("act", 0) # an int32_t scalar fuse_activation
+i3 = Output("op3", "TENSOR_FLOAT32", tensor_shape_gen[2])
+model = model.Operation("MUL", i1, i2, act).To(i3)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ tensor_value_gen[0],
+ i2: # input 1
+ tensor_value_gen[1]}
+
+output0 = {i3: # output 0
+ tensor_value_gen[2]}
+
+# Instantiate an example
+Example((input0, output0))