fully_connected_float_4d_simple::examples);
}
+namespace mean_axis01_1_nnfw {
+// Test data and model hook for the MEAN-over-axes-{1,2} case; the
+// included .cpp fragments are emitted by the test generator from
+// mean_axis01_1_nnfw.mod.py. NOTE(review): generated content — hand
+// edits here are lost on regeneration.
+std::vector<MixedTypedExample> examples = {
+// Generated mean_axis01_1_nnfw test
+#include "generated/examples/mean_axis01_1_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_axis01_1_nnfw.model.cpp"
+} // namespace mean_axis01_1_nnfw
+// Runs the generated model against the generated example vectors.
+TEST_F(GeneratedTests, mean_axis01_1_nnfw) {
+ execute(mean_axis01_1_nnfw::CreateModel,
+ mean_axis01_1_nnfw::is_ignored,
+ mean_axis01_1_nnfw::examples);
+}
+
+namespace mean_axis01_2_nnfw {
+// Test data and model hook for the MEAN case with duplicate/negative
+// axis entries; the included .cpp fragments are emitted by the test
+// generator from mean_axis01_2_nnfw.mod.py. NOTE(review): generated
+// content — hand edits here are lost on regeneration.
+std::vector<MixedTypedExample> examples = {
+// Generated mean_axis01_2_nnfw test
+#include "generated/examples/mean_axis01_2_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/mean_axis01_2_nnfw.model.cpp"
+} // namespace mean_axis01_2_nnfw
+// Runs the generated model against the generated example vectors.
+TEST_F(GeneratedTests, mean_axis01_2_nnfw) {
+ execute(mean_axis01_2_nnfw::CreateModel,
+ mean_axis01_2_nnfw::is_ignored,
+ mean_axis01_2_nnfw::examples);
+}
+
namespace mean_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated mean_float_1 test
--- /dev/null
+// Generated file (from: mean_axis01_1_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Operand 0: 24 ones (matches the {1, 4, 3, 2} input declared in the
+ // companion model file).
+ {{0, {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Mean of an all-ones tensor is 1 in every output position.
+ {{0, {1.0f, 1.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_axis01_2_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Operand 0: the sequence 1..24 (matches the {1, 4, 3, 2} input
+ // declared in the companion model file).
+ {{0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ // Reducing axes 1 and 2 averages the 12 odd values (1..23 -> 12) and
+ // the 12 even values (2..24 -> 13) separately per last-dim channel.
+ {{0, {12.0f, 13.0f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: mean_axis01_1_nnfw.mod.py). Do not edit
+// Builds a one-operation model: MEAN of a {1, 4, 3, 2} FLOAT32 tensor
+// over axes {1, 2} with keepDims = 1, so the reduced dimensions are
+// retained as size 1 and the output shape is {1, 1, 1, 2}.
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 3, 2});
+ OperandType type1(Type::TENSOR_INT32, {2});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ // axis and keepDims are compile-time constants, not runtime inputs.
+ static int32_t axis_init[] = {1, 2};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 2);
+ static int32_t keepDims_init[] = {1};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ // Only the data tensor is a model input; only the mean is an output.
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// No example indices are skipped for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+// Generated file (from: mean_axis01_2_nnfw.mod.py). Do not edit
+// Builds a one-operation model: MEAN of a {1, 4, 3, 2} FLOAT32 tensor
+// with axis list {1, 2, -3, -3} and keepDims = 1. On a rank-4 tensor
+// -3 refers to axis 1 again, so this exercises duplicate and negative
+// axis entries; the effective reduction is still over axes {1, 2} and
+// the output shape is {1, 1, 1, 2}.
+void CreateModel(Model *model) {
+ OperandType type2(Type::INT32, {});
+ OperandType type3(Type::TENSOR_FLOAT32, {1, 1, 1, 2});
+ OperandType type0(Type::TENSOR_FLOAT32, {1, 4, 3, 2});
+ OperandType type1(Type::TENSOR_INT32, {4});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto axis = model->addOperand(&type1);
+ auto keepDims = model->addOperand(&type2);
+ auto output = model->addOperand(&type3);
+ // Phase 2, operations
+ // axis and keepDims are compile-time constants, not runtime inputs.
+ static int32_t axis_init[] = {1, 2, -3, -3};
+ model->setOperandValue(axis, axis_init, sizeof(int32_t) * 4);
+ static int32_t keepDims_init[] = {1};
+ model->setOperandValue(keepDims, keepDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_MEAN, {input, axis, keepDims}, {output});
+ // Phase 3, inputs and outputs
+ // Only the data tensor is a model input; only the mean is an output.
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+// No example indices are skipped for this test.
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+# Generator spec for mean_axis01_1_nnfw: MEAN over axes {1, 2} of a
+# {1, 4, 3, 2} all-ones tensor with keepDims=1. The reduced dims are
+# kept as size 1, giving a {1, 1, 1, 2} output of ones.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{2}", [1, 2])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
+
+# Mean of all-ones is 1 in both output positions.
+output0 = {output: # output 0
+ [1.0, 1.0]}
+
+# Instantiate an example
+Example((input0, output0))
--- /dev/null
+# Generator spec for mean_axis01_2_nnfw: MEAN with the axis list
+# [1, 2, -3, -3] on a rank-4 {1, 4, 3, 2} input and keepDims=1. On a
+# rank-4 tensor -3 aliases axis 1, so this exercises duplicate and
+# negative axis entries; the effective reduction is over axes {1, 2}.
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{1, 4, 3, 2}")
+axis = Parameter("axis", "TENSOR_INT32", "{4}", [1, 2, -3, -3])
+keepDims = Int32Scalar("keepDims", 1)
+output = Output("output", "TENSOR_FLOAT32", "{1, 1, 1, 2}")
+model = model.Operation("MEAN", i1, axis, keepDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
+ 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0]}
+
+# Per last-dim channel: mean of the odd values (1..23) is 12 and of the
+# even values (2..24) is 13.
+output0 = {output: # output 0
+ [12.0, 13.0]}
+
+# Instantiate an example
+Example((input0, output0))