From a0664f718b044de6781d15d29429934376e63222 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=9C=A4=ED=98=84=EC=8B=9D/=EB=8F=99=EC=9E=91=EC=A0=9C?=
 =?utf8?q?=EC=96=B4Lab=28SR=29/Principal=20Engineer/=EC=82=BC=EC=84=B1?=
 =?utf8?q?=EC=A0=84=EC=9E=90?=
Date: Tue, 21 Aug 2018 10:31:54 +0900
Subject: [PATCH] [Generated Test] adding test for Fully_Connected (#2370)

Currently, all of the generated tests for FC (Fully_Connected) fail.
This commit adds a test case for FC which uses the same data as
nnapi_quickcheck_fully_connected_1.

The purpose of adding this test is to check whether any modification to
the current FC code works correctly.
(More specifically, to see whether the NEON FC code works for #2334.)

Signed-off-by: Hyun Sik Yoon
---
 .../generated/all_generated_tests.cpp              | 14 ++++++++
 .../fully_connected_float_1_nnfw.example.cpp       | 22 ++++++++++++++
 .../models/fully_connected_float_1_nnfw.model.cpp  | 32 ++++++++++++++++++++
 .../specs/V1_0/fully_connected_float_1_nnfw.mod.py | 34 ++++++++++++++++++++
 4 files changed, 102 insertions(+)
 create mode 100644 runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_1_nnfw.example.cpp
 create mode 100644 runtimes/tests/neural_networks_test/generated/models/fully_connected_float_1_nnfw.model.cpp
 create mode 100644 runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_1_nnfw.mod.py

diff --git a/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp b/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
index 5b2013d..388828d 100644
--- a/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
+++ b/runtimes/tests/neural_networks_test/generated/all_generated_tests.cpp
@@ -855,6 +855,20 @@ TEST_F(GeneratedTests, floor_) {
             floor_::examples);
 }
 
+namespace fully_connected_float_1_nnfw {
+std::vector<MixedTypedExample> examples = {
+// Generated fully_connected_float_1_nnfw test
+#include "generated/examples/fully_connected_float_1_nnfw.example.cpp"
+};
+// Generated model constructor
+#include "generated/models/fully_connected_float_1_nnfw.model.cpp"
+} // namespace fully_connected_float_1_nnfw
+TEST_F(GeneratedTests, fully_connected_float_1_nnfw) {
+    execute(fully_connected_float_1_nnfw::CreateModel,
+            fully_connected_float_1_nnfw::is_ignored,
+            fully_connected_float_1_nnfw::examples);
+}
+
 namespace fully_connected_float_2 {
 std::vector<MixedTypedExample> examples = {
 // Generated fully_connected_float_2 test
diff --git a/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_1_nnfw.example.cpp b/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_1_nnfw.example.cpp
new file mode 100644
index 0000000..1a46ccd
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/generated/examples/fully_connected_float_1_nnfw.example.cpp
@@ -0,0 +1,22 @@
+// Generated file (from: fully_connected_float_1_nnfw.mod.py). Do not edit
+// Begin of an example
+{
+//Input(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {1.4910057783f, 3.4019672871f, -0.0598693565f, -0.0065411143f, -0.646147728f, 1.9235717058f, 1.0784962177f, 0.176592201f, -2.2495496273f, -1.6010370255f, -2.4747757912f, -0.3825767934f, 2.305898428f, 0.7288306952f, -0.8964791894f, -2.7584488392f, -0.287591964f, 0.1335377693f, 1.8338065147f, -2.6112849712f, 0.9390821457f, 1.984385252f, -1.2190681696f, 1.0274435282f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+},
+//Output(s)
+{ // See tools/test_generator/include/TestHarness.h:MixedTyped
+  // int -> FLOAT32 map
+  {{0, {2.0375289917f}}},
+  // int -> INT32 map
+  {},
+  // int -> QUANT8_ASYMM map
+  {}
+}
+}, // End of an example
diff --git a/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_1_nnfw.model.cpp b/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_1_nnfw.model.cpp
new file mode 100644
index 0000000..04c4efe
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/generated/models/fully_connected_float_1_nnfw.model.cpp
@@ -0,0 +1,32 @@
+// Generated file (from: fully_connected_float_1_nnfw.mod.py). Do not edit
+void CreateModel(Model *model) {
+  OperandType type4(Type::INT32, {});
+  OperandType type3(Type::TENSOR_FLOAT32, {1, 1});
+  OperandType type1(Type::TENSOR_FLOAT32, {1, 24});
+  OperandType type0(Type::TENSOR_FLOAT32, {1, 3, 4, 2});
+  OperandType type2(Type::TENSOR_FLOAT32, {1});
+  // Phase 1, operands
+  auto op1 = model->addOperand(&type0);
+  auto op2 = model->addOperand(&type1);
+  auto b0 = model->addOperand(&type2);
+  auto op3 = model->addOperand(&type3);
+  auto act_relu = model->addOperand(&type4);
+  // Phase 2, operations
+  static float op2_init[] = {-0.25449711f, 0.0f, -2.1247749f, 0.0f, -1.143796f, 0.0f, -1.0299346f, 0.0f, -2.2373879f, 0.0f, -0.083096743f, 0.0f, -1.3230739f, 0.0f, 0.15294921f, 0.0f, -0.53045893f, 0.0f, -0.46075189f, 0.0f, -1.4482396f, 0.0f, -1.609534f, 0.0f};
+  model->setOperandValue(op2, op2_init, sizeof(float) * 24);
+  static float b0_init[] = {0.70098364f};
+  model->setOperandValue(b0, b0_init, sizeof(float) * 1);
+  static int32_t act_relu_init[] = {0};
+  model->setOperandValue(act_relu, act_relu_init, sizeof(int32_t) * 1);
+  model->addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {op1, op2, b0, act_relu}, {op3});
+  // Phase 3, inputs and outputs
+  model->identifyInputsAndOutputs(
+    {op1},
+    {op3});
+  assert(model->isValid());
+}
+
+bool is_ignored(int i) {
+  static std::set<int> ignore = {};
+  return ignore.find(i) != ignore.end();
+}
diff --git a/runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_1_nnfw.mod.py b/runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_1_nnfw.mod.py
new file mode 100644
index 0000000..6030efe
--- /dev/null
+++ b/runtimes/tests/neural_networks_test/specs/V1_0/fully_connected_float_1_nnfw.mod.py
@@ -0,0 +1,34 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +model = Model() +in0 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 4, 2}") +weights = Parameter("op2", "TENSOR_FLOAT32", "{1, 24}", + [-0.25449711, 0, -2.1247749, 0, -1.143796, 0, -1.0299346, 0, -2.2373879, 0, -0.083096743, 0, -1.3230739, 0, 0.15294921, 0, -0.53045893, 0, -0.46075189, 0, -1.4482396, 0, -1.609534, 0]) +bias = Parameter("b0", "TENSOR_FLOAT32", "{1}", + [0.70098364,]) +out0 = Output("op3", "TENSOR_FLOAT32", "{1, 1}") +act_relu = Int32Scalar("act_relu", 0) +model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act_relu).To(out0) + +# Example 1. Input in operand 0, +input0 = {in0: # input 0 + [1.4910057783, 3.4019672871, -0.0598693565, -0.0065411143, -0.6461477280, 1.9235717058, 1.0784962177, 0.1765922010, -2.2495496273, -1.6010370255, -2.4747757912, -0.3825767934, 2.3058984280, 0.7288306952, -0.8964791894, -2.7584488392, -0.2875919640, 0.1335377693, 1.8338065147, -2.6112849712, 0.9390821457, 1.9843852520, -1.2190681696, 1.0274435282, ]} +output0 = {out0: # output 0 + [2.0375289917]} + +# Instantiate an example +Example((input0, output0)) -- 2.7.4
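---

Note on the test data (not part of the patch itself): the expected output above can be cross-checked offline with a short NumPy sketch. This is only an illustrative verification under the usual FULLY_CONNECTED semantics, i.e. the {1, 3, 4, 2} input is flattened to {1, 24}, multiplied by the transposed weights, and the bias is added; the activation scalar is 0, which selects no fused activation (despite the operand being named act_relu). The array literals are copied from the spec file.

import numpy as np

# Input tensor {1, 3, 4, 2}, flattened to {1, 24}; values copied from the spec.
x = np.array([1.4910057783, 3.4019672871, -0.0598693565, -0.0065411143,
              -0.6461477280, 1.9235717058, 1.0784962177, 0.1765922010,
              -2.2495496273, -1.6010370255, -2.4747757912, -0.3825767934,
              2.3058984280, 0.7288306952, -0.8964791894, -2.7584488392,
              -0.2875919640, 0.1335377693, 1.8338065147, -2.6112849712,
              0.9390821457, 1.9843852520, -1.2190681696, 1.0274435282]).reshape(1, 24)

# Weights {1, 24} and bias {1}, also copied from the spec.
w = np.array([-0.25449711, 0, -2.1247749, 0, -1.143796, 0, -1.0299346, 0,
              -2.2373879, 0, -0.083096743, 0, -1.3230739, 0, 0.15294921, 0,
              -0.53045893, 0, -0.46075189, 0, -1.4482396, 0, -1.609534, 0]).reshape(1, 24)
b = np.array([0.70098364])

# FULLY_CONNECTED: out = x . w^T + b; activation scalar 0 means no fused activation.
out = x @ w.T + b
print(out)  # approximately [[2.03753]]
assert np.allclose(out, [[2.0375289917]], atol=1e-4)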