From e0b1ab77d97965628f52d1fa6c3d623e78f4050d Mon Sep 17 00:00:00 2001
From: Hyun Sik Yoon/On-Device Lab(SR)/Principal Engineer/Samsung Electronics
Date: Tue, 15 Oct 2019 16:49:27 +0900
Subject: [PATCH] [exo-tflite] test cases for FuseConv2DAddSubPass (#8101)

* [exo-tflite] test cases for FuseConv2DAddSubPass

This adds four test cases for FuseConv2DAddSubPass.

Signed-off-by: Hyun Sik Yoon

* make const value 1.5, 3
---
 .../exo/src/Pass/FuseConv2DAddSubPass.test.cpp | 260 +++++++++++++++++++++
 compiler/exo/src/TestGraph.h                   |  37 +++
 2 files changed, 297 insertions(+)
 create mode 100644 compiler/exo/src/Pass/FuseConv2DAddSubPass.test.cpp

diff --git a/compiler/exo/src/Pass/FuseConv2DAddSubPass.test.cpp b/compiler/exo/src/Pass/FuseConv2DAddSubPass.test.cpp
new file mode 100644
index 0000000..972d108
--- /dev/null
+++ b/compiler/exo/src/Pass/FuseConv2DAddSubPass.test.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FuseConv2DAddSubPass.h"
+
+#include "Dialect/IR/TFLNodes.h"
+#include "TestGraph.h"
+#include "TestHelper.h"
+
+#include <loco.h>
+
+#include <gtest/gtest.h>
+
+namespace
+{
+
+void init(loco::Pull *pull)
+{
+  pull->dtype(loco::DataType::FLOAT32);
+  pull->shape({2, 3, 3, 2});
+}
+
+/// @brief Initializes TFLConv2D and its filter and bias
+void init(locoex::TFLConv2D *conv2d, locoex::TFLConst *filter, locoex::TFLConst *bias)
+{
+  // set conv2d
+  {
+    conv2d->fusedActivationFunction(locoex::FusedActFunc::NONE);
+    conv2d->padding(locoex::Padding::VALID);
+  }
+
+  // set filter
+  {
+    filter->dtype(loco::DataType::FLOAT32);
+    filter->shape({2, 3, 3, 2});
+    filter->size<loco::DataType::FLOAT32>(2 * 3 * 3 * 2);
+
+    for (uint32_t x = 0; x < 2 * 3 * 3 * 2; x++)
+      filter->at<loco::DataType::FLOAT32>(x) = 0.0;
+  }
+
+  // set bias
+  {
+    bias->dtype(loco::DataType::FLOAT32);
+    bias->shape({2});
+    bias->size<loco::DataType::FLOAT32>(2);
+
+    for (uint32_t x = 0; x < 2; x++)
+      bias->at<loco::DataType::FLOAT32>(x) = 0.0;
+  }
+}
+
+/// @brief Initializes the constant operand of TFLAdd or TFLSub
+void init(locoex::TFLConst *addsub_param)
+{
+  // set addsub_param : y() value of TFLAdd or TFLSub
+  addsub_param->dtype(loco::DataType::FLOAT32);
+  addsub_param->shape({2});
+  addsub_param->size<loco::DataType::FLOAT32>(2);
+
+  for (uint32_t x = 0; x < 2; x++)
+    addsub_param->at<loco::DataType::FLOAT32>(x) = (x + 1) * 1.5; // 1.5, 3
+}
+
+} // namespace
+
+// A case where
+// - TFLConv2D has bias (0, 0)
+// - TFLAdd has TFLConv2D as its x() or y()
+// - The other operand of TFLAdd is a TFLConst, (1.5, 3)
+//
+// After fusion, the bias should be (1.5, 3)
+TEST(FuseConv2DBiasAddPassTest, Conv2D_Add_01_basic)
+{
+  exo::test::TestGraph g;
+  auto filter = g.append<locoex::TFLConst>();
+  auto bias = g.append<locoex::TFLConst>();
+  auto conv2d = g.append<locoex::TFLConv2D>(g.pull, filter, bias);
+
+  auto add_y = g.append<locoex::TFLConst>();
+  auto add = g.append<locoex::TFLAdd>(conv2d, add_y);
+
+  g.complete(add);
+
+  init(g.pull);
+  init(conv2d, filter, bias);
+  init(add_y);
+
+  // let's run fusion
+  {
+    exo::test::TypeShapeReadyPhase test_phase;
+
+    test_phase.add_pass<exo::FuseConv2DAddSubPass>();
+    test_phase.run(g.graph());
+  }
+
+  auto a_conv2d = exo::test::find_first_node_bytype<locoex::TFLConv2D>(g.graph());
+  ASSERT_TRUE(a_conv2d != nullptr);
+
+  auto a_bias = dynamic_cast<locoex::TFLConst *>(a_conv2d->bias());
+  ASSERT_TRUE(a_bias != nullptr);
+
+  ASSERT_TRUE(a_bias->dim(0) == 2);
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(0),
+                  bias->at<loco::DataType::FLOAT32>(0) + add_y->at<loco::DataType::FLOAT32>(0));
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(1),
+                  bias->at<loco::DataType::FLOAT32>(1) + add_y->at<loco::DataType::FLOAT32>(1));
+}
+
+// A case where
+// - TFLConv2D has bias (0, 0)
+// - TFLAdd has TFLConv2D as its x() or y()
+// - The other operand of TFLAdd is a TFLConst, (1.5) <-- scalar
+//
+// After fusion, the bias should be (1.5, 1.5)
+TEST(FuseConv2DBiasAddPassTest, Conv2D_Add_02_TFLAdd_y_is_scalar)
+{
+  exo::test::TestGraph g;
+  auto filter = g.append<locoex::TFLConst>();
+  auto bias = g.append<locoex::TFLConst>();
+  auto conv2d = g.append<locoex::TFLConv2D>(g.pull, filter, bias);
+
+  auto add_y = g.append<locoex::TFLConst>();
+  auto add = g.append<locoex::TFLAdd>(conv2d, add_y);
+
+  g.complete(add);
+
+  init(g.pull);
+  init(conv2d, filter, bias); // channel count of conv2d is 2
+
+  {
+    // The size of this TFLConst is 1.
+    // Note that it should later be broadcast to [channel of Conv2D], which is [2]
+    add_y->dtype(loco::DataType::FLOAT32);
+    add_y->shape({1});
+    add_y->size<loco::DataType::FLOAT32>(1);
+    add_y->at<loco::DataType::FLOAT32>(0) = 1.5;
+  }
+
+  // let's run fusion
+  {
+    exo::test::TypeShapeReadyPhase test_phase;
+
+    test_phase.add_pass<exo::FuseConv2DAddSubPass>();
+    test_phase.run(g.graph());
+  }
+
+  auto a_conv2d = exo::test::find_first_node_bytype<locoex::TFLConv2D>(g.graph());
+  ASSERT_TRUE(a_conv2d != nullptr);
+
+  auto a_bias = dynamic_cast<locoex::TFLConst *>(a_conv2d->bias());
+  ASSERT_TRUE(a_bias != nullptr);
+
+  ASSERT_TRUE(a_bias->dim(0) == 2);
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(0),
+                  bias->at<loco::DataType::FLOAT32>(0) + 1.5);
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(1),
+                  bias->at<loco::DataType::FLOAT32>(1) + 1.5);
+}
+
+// A case where
+// - TFLConv2D has bias (0, 0)
+// - TFLSub.x() == TFLConv2D
+// - TFLSub.y() == TFLConst, (1.5, 3)
+//
+// After fusion, the bias should be (-1.5, -3)
+TEST(FuseConv2DBiasAddPassTest, Conv2D_Sub_01_basic)
+{
+  exo::test::TestGraph g;
+  auto filter = g.append<locoex::TFLConst>();
+  auto bias = g.append<locoex::TFLConst>();
+  auto conv2d = g.append<locoex::TFLConv2D>(g.pull, filter, bias);
+
+  auto sub_y = g.append<locoex::TFLConst>();
+  auto sub = g.append<locoex::TFLSub>(conv2d, sub_y);
+
+  g.complete(sub);
+
+  init(g.pull);
+  init(conv2d, filter, bias);
+  init(sub_y);
+
+  // let's run fusion
+  {
+    exo::test::TypeShapeReadyPhase test_phase;
+
+    test_phase.add_pass<exo::FuseConv2DAddSubPass>();
+    test_phase.run(g.graph());
+  }
+
+  auto a_conv2d = exo::test::find_first_node_bytype<locoex::TFLConv2D>(g.graph());
+  ASSERT_TRUE(a_conv2d != nullptr);
+
+  auto a_bias = dynamic_cast<locoex::TFLConst *>(a_conv2d->bias());
+  ASSERT_TRUE(a_bias != nullptr);
+
+  ASSERT_TRUE(a_bias->dim(0) == 2);
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(0),
+                  bias->at<loco::DataType::FLOAT32>(0) - sub_y->at<loco::DataType::FLOAT32>(0));
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(1),
+                  bias->at<loco::DataType::FLOAT32>(1) - sub_y->at<loco::DataType::FLOAT32>(1));
+}
+
+// A case where TFLConv2D is an input of TFLSub but fusion cannot be performed.
+// - TFLSub.x() == TFLConst
+// - TFLSub.y() == TFLConv2D
+//
+// Here, TFLSub cannot be fused into TFLConv2D. To be fused, TFLSub.x() should be TFLConv2D and
+// TFLSub.y() should be TFLConst. So fusion will NOT happen.
+TEST(FuseConv2DBiasAddPassTest, Conv2D_Sub_02_fusing_will_not_performed)
+{
+  exo::test::TestGraph g;
+  auto filter = g.append<locoex::TFLConst>();
+  auto bias = g.append<locoex::TFLConst>();
+  auto conv2d = g.append<locoex::TFLConv2D>(g.pull, filter, bias);
+
+  auto sub_y = g.append<locoex::TFLConst>();
+  auto sub = g.append<locoex::TFLSub>(sub_y, conv2d); // This WON'T be fused
+
+  g.complete(sub);
+
+  init(g.pull);
+  init(conv2d, filter, bias);
+  init(sub_y);
+
+  // let's run fusion
+  {
+    exo::test::TypeShapeReadyPhase test_phase;
+
+    test_phase.add_pass<exo::FuseConv2DAddSubPass>();
+    test_phase.run(g.graph());
+  }
+
+  auto a_conv2d = exo::test::find_first_node_bytype<locoex::TFLConv2D>(g.graph());
+  ASSERT_TRUE(a_conv2d != nullptr);
+
+  auto a_bias = dynamic_cast<locoex::TFLConst *>(a_conv2d->bias());
+  ASSERT_TRUE(a_bias != nullptr);
+
+  ASSERT_TRUE(a_bias->dim(0) == 2);
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(0), 0);
+  ASSERT_FLOAT_EQ(a_bias->at<loco::DataType::FLOAT32>(1), 0);
+
+  auto a_sub = exo::test::find_first_node_bytype<locoex::TFLSub>(g.graph());
+  ASSERT_TRUE(a_sub != nullptr);
+  ASSERT_TRUE(a_sub->y() == a_conv2d); // Checking 'not-fused' state
+}
diff --git a/compiler/exo/src/TestGraph.h b/compiler/exo/src/TestGraph.h
index 9b5a2a2..d9122b1 100644
--- a/compiler/exo/src/TestGraph.h
+++ b/compiler/exo/src/TestGraph.h
@@ -61,6 +61,8 @@ public:
     _next_input = pull;
   }
 
+  loco::Graph *graph() { return g.get(); }
+
   /// @brief Creates node with NO arg and appends it to graph
   template <typename T> T *append()
   {
@@ -90,6 +92,16 @@ public:
     return node;
   }
 
+  /// @brief Creates op T (arity=3) with arg1, arg2, arg3 as inputs and appends it to graph
+  template <typename T> T *append(loco::Node *arg1, loco::Node *arg2, loco::Node *arg3)
+  {
+    auto node = g->nodes()->create<T>();
+    setInput(node, arg1, arg2, arg3);
+    _next_input = node;
+
+    return node;
+  }
+
   // push will get the last appended node
   void complete() { push->from(_next_input); }
 
@@ -118,6 +130,12 @@ private:
   // arity 2
   void setInput(loco::Node *node, loco::Node *, loco::Node *) { assert(false && "NYI"); };
 
+  void setInput(loco::Conv2D *node, loco::Node *input, loco::Node *filter)
+  {
+    node->ifm(input);
+    node->ker(filter);
+  }
+
   void setInput(loco::EltwiseAdd *node, loco::Node *arg1, loco::Node *arg2)
   {
     node->lhs(arg1);
@@ -142,12 +160,31 @@ private:
     node->y(arg2);
   };
 
+  void setInput(locoex::TFLSub *node, loco::Node *arg1, loco::Node *arg2)
+  {
+    node->x(arg1);
+    node->y(arg2);
+  };
+
   void setInput(locoex::TFLTranspose *node, loco::Node *arg1, loco::Node *arg2)
   {
     node->a(arg1);
     node->perm(arg2);
   };
 
+  // arity 3
+  void setInput(loco::Node *node, loco::Node *, loco::Node *, loco::Node *)
+  {
+    assert(false && "NYI");
+  };
+
+  void setInput(locoex::TFLConv2D *node, loco::Node *input, loco::Node *filter, loco::Node *bias)
+  {
+    node->input(input);
+    node->filter(filter);
+    node->bias(bias);
+  }
+
 private:
   loco::Node *_next_input;
 };
-- 
2.7.4
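Note (not part of the patch): the following is a minimal, standalone sketch of the bias-folding
arithmetic that the four tests above pin down. It assumes nothing about exo internals;
fold_into_bias is a hypothetical helper used only for illustration, not the actual
FuseConv2DAddSubPass implementation. A rank-1, size-1 constant is broadcast over all output
channels, which is what the scalar Add case expects.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Fold an element-wise constant into a Conv2D bias:
//   Conv2D(x; bias) + c  ==>  Conv2D(x; bias + c)
//   Conv2D(x; bias) - c  ==>  Conv2D(x; bias - c)
// A size-1 constant is broadcast over every output channel.
std::vector<float> fold_into_bias(const std::vector<float> &bias,
                                  const std::vector<float> &addsub_const, bool is_sub)
{
  assert(addsub_const.size() == 1 || addsub_const.size() == bias.size());

  std::vector<float> fused(bias.size());
  for (std::size_t c = 0; c < bias.size(); ++c)
  {
    // Broadcast a size-1 constant across channels; otherwise use the per-channel value
    float v = (addsub_const.size() == 1) ? addsub_const[0] : addsub_const[c];
    fused[c] = is_sub ? bias[c] - v : bias[c] + v;
  }
  return fused;
}

int main()
{
  const std::vector<float> bias{0.0f, 0.0f};

  // Conv2D_Add_01_basic: (0, 0) + (1.5, 3) -> (1.5, 3)
  auto add_case = fold_into_bias(bias, {1.5f, 3.0f}, false);
  assert(add_case[0] == 1.5f && add_case[1] == 3.0f);

  // Conv2D_Add_02_TFLAdd_y_is_scalar: (0, 0) + broadcast(1.5) -> (1.5, 1.5)
  auto scalar_case = fold_into_bias(bias, {1.5f}, false);
  assert(scalar_case[0] == 1.5f && scalar_case[1] == 1.5f);

  // Conv2D_Sub_01_basic: (0, 0) - (1.5, 3) -> (-1.5, -3)
  auto sub_case = fold_into_bias(bias, {1.5f, 3.0f}, true);
  assert(sub_case[0] == -1.5f && sub_case[1] == -3.0f);

  std::cout << "bias folding sketch: all expectations hold" << std::endl;
  return 0;
}

The Sub(const, conv2d) arrangement from the last test has no equivalent bias update, since
const - Conv2D(x) is not Conv2D(x) plus a per-channel constant, so that case is intentionally
left out of the sketch, just as the pass leaves that graph untouched.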