This commit adds a Squeeze test against 2D input.
C++ files are generated by `./runtimes/tests/neural_networks_test/specs/generate_test.sh`
How to run:
`$ LD_LIBRARY_PATH=Product/out/lib Product/out/unittest/runtime_run_android_nn_test --gtest_filter=GeneratedTests.squeeze_2D*`
Signed-off-by: Hyun Sik Yoon <hyunsik.yoon@samsung.com>
space_to_batch_quant8_3::examples);
}
+namespace squeeze_2D_float_1_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated squeeze_2D_float_1_nnfw test
+#include "generated/examples/squeeze_2D_float_1_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/squeeze_2D_float_1_nnfw.model.cpp"
+} // namespace squeeze_2D_float_1_nnfw
+TEST_F(GeneratedTests, squeeze_2D_float_1_nnfw) {
+ execute(squeeze_2D_float_1_nnfw::CreateModel,
+ squeeze_2D_float_1_nnfw::is_ignored,
+ squeeze_2D_float_1_nnfw::examples);
+}
+
namespace squeeze_float_1 {
std::vector<MixedTypedExample> examples = {
// Generated squeeze_float_1 test
--- /dev/null
+// Generated file (from: squeeze_2D_float_1_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+ // int -> FLOAT32 map
+ {{0, {1.4f, 2.3f, 3.2f, 4.1f}}},
+ // int -> INT32 map
+ {},
+ // int -> QUANT8_ASYMM map
+ {}
+}
+}, // End of an example
--- /dev/null
+// Generated file (from: squeeze_2D_float_1_nnfw.mod.py). Do not edit
+void CreateModel(Model *model) {
+ OperandType type0(Type::TENSOR_FLOAT32, {4, 1});
+ OperandType type2(Type::TENSOR_FLOAT32, {4});
+ OperandType type1(Type::TENSOR_INT32, {1});
+ // Phase 1, operands
+ auto input = model->addOperand(&type0);
+ auto squeezeDims = model->addOperand(&type1);
+ auto output = model->addOperand(&type2);
+ // Phase 2, operations
+ static int32_t squeezeDims_init[] = {1};
+ model->setOperandValue(squeezeDims, squeezeDims_init, sizeof(int32_t) * 1);
+ model->addOperation(ANEURALNETWORKS_SQUEEZE, {input, squeezeDims}, {output});
+ // Phase 3, inputs and outputs
+ model->identifyInputsAndOutputs(
+ {input},
+ {output});
+ assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+ static std::set<int> ignore = {};
+ return ignore.find(i) != ignore.end();
+}
--- /dev/null
+model = Model()
+i1 = Input("input", "TENSOR_FLOAT32", "{4, 1}")
+squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [1])
+output = Output("output", "TENSOR_FLOAT32", "{4}")
+
+model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
+
+# Example 1. Input in operand 0,
+input0 = {i1: # input 0
+ [1.4, 2.3, 3.2, 4.1]}
+
+output0 = {output: # output 0
+ [1.4, 2.3, 3.2, 4.1]}
+
+# Instantiate an example
+Example((input0, output0))