From 30886b2442bf0ef7794b560942dff83f1966540d Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EA=B9=80=EC=88=98=EC=A7=84/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Wed, 13 Mar 2019 14:08:45 +0900
Subject: [PATCH] [neurun] Enable ResizeBilinear, ReLU1, ReLU6 ops (#4667)

Related: #4259

This commit enables the `ResizeBilinear`, `ReLU1`, and `ReLU6` operations
for the `acl_cl` backend.

Signed-off-by: sjsujinkim
---
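Note for review (a sketch, not part of the applied diff): the lowering introduced
below reduces to the following arm_compute configuration. The include path and the
`AF` alias are illustrative only; the values are taken from the hunks that follow.

    #include <arm_compute/core/Types.h>

    using AF = arm_compute::ActivationLayerInfo::ActivationFunction;
    // ReLU1: LU_BOUNDED_RELU clamps the input to [-1, 1] (a = 1.0f, b = -1.0f)
    const arm_compute::ActivationLayerInfo relu1_info{AF::LU_BOUNDED_RELU, 1.0f, -1.0f};
    // ReLU6: BOUNDED_RELU computes min(6, max(0, x)) (a = 6.0f)
    const arm_compute::ActivationLayerInfo relu6_info{AF::BOUNDED_RELU, 6.0f};
    // ResizeBilinear: lowered to CLScale with BILINEAR interpolation, REPLICATE border
    // and TOP_LEFT sampling; the target size comes from the output tensor's shape.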
 .../neurun/src/backend/acl_cl/StageGenerator.cc    | 124 +++++++++++++++++++++
 .../neurun/src/backend/acl_cl/StageGenerator.h     |   3 +
 .../src/frontend/wrapper/OperationFactory.cc       |  45 ++++++++
 runtimes/neurun/src/model/operation/Node.Include.h |   3 +
 runtimes/neurun/src/model/operation/Op.lst         |   3 +
 runtimes/neurun/src/model/operation/ReLU1Node.cc   |  39 +++++++
 runtimes/neurun/src/model/operation/ReLU1Node.h    |  49 ++++++++
 runtimes/neurun/src/model/operation/ReLU6Node.cc   |  39 +++++++
 runtimes/neurun/src/model/operation/ReLU6Node.h    |  49 ++++++++
 .../src/model/operation/ResizeBilinearNode.cc      |  40 +++++++
 .../src/model/operation/ResizeBilinearNode.h       |  64 +++++++++++
 tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun   |   1 +
 .../neurun_frameworktest_list.armv7l.acl_cl.txt    |   5 +-
 13 files changed, 463 insertions(+), 1 deletion(-)
 create mode 100644 runtimes/neurun/src/model/operation/ReLU1Node.cc
 create mode 100644 runtimes/neurun/src/model/operation/ReLU1Node.h
 create mode 100644 runtimes/neurun/src/model/operation/ReLU6Node.cc
 create mode 100644 runtimes/neurun/src/model/operation/ReLU6Node.h
 create mode 100644 runtimes/neurun/src/model/operation/ResizeBilinearNode.cc
 create mode 100644 runtimes/neurun/src/model/operation/ResizeBilinearNode.h

diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index e8bcbd3..f292299 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -1736,6 +1736,130 @@ void StageGenerator::visit(const model::operation::ReLUNode &node)
   });
 }
 
+void StageGenerator::visit(const model::operation::ResizeBilinearNode &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+
+  const auto ifm_index{node.getInputs().at(model::operation::ResizeBilinearNode::Input::INPUT)};
+  const auto height_index{node.param().height_index};
+  const auto width_index{node.param().width_index};
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+
+    int32_t new_height;
+    int32_t new_width;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+  param.new_height = _ctx.at(height_index).asScalar<int32_t>();
+  param.new_width = _ctx.at(width_index).asScalar<int32_t>();
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLScale>();
+
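+    // CLScale derives the scaling ratio from the ifm/ofm tensor shapes, so the requested
+    // new_height/new_width are expressed through the ofm shape rather than passed here.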
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(),
+                 ::arm_compute::InterpolationPolicy::BILINEAR,
+                 ::arm_compute::BorderMode::REPLICATE, ::arm_compute::PixelValue(0.f),
+                 ::arm_compute::SamplingPolicy::TOP_LEFT);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
+void StageGenerator::visit(const model::operation::ReLU1Node &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto ifm_index{node.getInputs().at(model::operation::ReLU1Node::Input::INPUT)};
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
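+    // LU_BOUNDED_RELU computes min(a, max(b, x)); with a = 1.0 and b = -1.0 this is ReLU1.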
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f};
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLActivationLayer>();
+
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
+void StageGenerator::visit(const model::operation::ReLU6Node &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto ifm_index{node.getInputs().at(model::operation::ReLU6Node::Input::INPUT)};
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+
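+    // BOUNDED_RELU computes min(a, max(0, x)); with a = 6.0 this is ReLU6.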
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0f};
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLActivationLayer>();
+
+    l->configure(ifm_alloc->handle(), ofm_alloc->handle(), act_info);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
index 686293c..0e289a5 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
@@ -62,6 +62,9 @@ public:
   virtual void visit(const model::operation::LogicalAndNode &) override;
   virtual void visit(const model::operation::RSQRTNode &) override;
   virtual void visit(const model::operation::ReLUNode &) override;
+  virtual void visit(const model::operation::ResizeBilinearNode &) override;
+  virtual void visit(const model::operation::ReLU1Node &) override;
+  virtual void visit(const model::operation::ReLU6Node &) override;
 
 private:
   const neurun::model::operand::Set &_ctx;
diff --git a/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc b/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
index 8275e07..48e8f16 100644
--- a/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
+++ b/runtimes/neurun/src/frontend/wrapper/OperationFactory.cc
@@ -655,6 +655,51 @@ OperationFactory::OperationFactory()
 
     return new operation::ReLUNode{inputs, outputs};
   };
+
+  _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> IFM Index
+    //  1 -> Height Index
+    //  2 -> Width Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
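+    // Only the operand indices for height/width are recorded here; the backend reads the
+    // scalar values at compile time.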
+    operation::ResizeBilinearNode::Param param;
+    param.height_index = operand::Index{init_param.inputs[1]};
+    param.width_index = operand::Index{init_param.inputs[2]};
+
+    return new operation::ResizeBilinearNode{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> input Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
+    return new operation::ReLU1Node{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> input Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0]};
+
+    return new operation::ReLU6Node{inputs, outputs};
+  };
 }
 
 neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
diff --git a/runtimes/neurun/src/model/operation/Node.Include.h b/runtimes/neurun/src/model/operation/Node.Include.h
index 7289c9d..6104f27 100644
--- a/runtimes/neurun/src/model/operation/Node.Include.h
+++ b/runtimes/neurun/src/model/operation/Node.Include.h
@@ -42,3 +42,6 @@
 #include "LogicalAndNode.h"
 #include "RSQRTNode.h"
 #include "ReLUNode.h"
+#include "ResizeBilinearNode.h"
+#include "ReLU1Node.h"
+#include "ReLU6Node.h"
diff --git a/runtimes/neurun/src/model/operation/Op.lst b/runtimes/neurun/src/model/operation/Op.lst
index a3ea626..ed2d6db 100644
--- a/runtimes/neurun/src/model/operation/Op.lst
+++ b/runtimes/neurun/src/model/operation/Op.lst
@@ -46,4 +46,7 @@ OP(NotEqualNode          , true  , NOT_EQUAL_EX)
 OP(LogicalAndNode        , true  , LOGICAL_AND_EX)
 OP(RSQRTNode             , true  , RSQRT_EX)
 OP(ReLUNode              , true  , RELU)
+OP(ResizeBilinearNode    , true  , RESIZE_BILINEAR)
+OP(ReLU1Node             , true  , RELU1)
+OP(ReLU6Node             , true  , RELU6)
 OP(PermuteNode           , false , NOT_AVAILABLE)
diff --git a/runtimes/neurun/src/model/operation/ReLU1Node.cc b/runtimes/neurun/src/model/operation/ReLU1Node.cc
new file mode 100644
index 0000000..576f83c
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/ReLU1Node.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU1Node.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLU1Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+ReLU1Node::ReLU1Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/ReLU1Node.h b/runtimes/neurun/src/model/operation/ReLU1Node.h
new file mode 100644
index 0000000..5697c9f
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/ReLU1Node.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ReLU1_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ReLU1_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLU1Node : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+public:
+  ReLU1Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "ReLU1"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ReLU1_NODE_H__
diff --git a/runtimes/neurun/src/model/operation/ReLU6Node.cc b/runtimes/neurun/src/model/operation/ReLU6Node.cc
new file mode 100644
index 0000000..37d58e8
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/ReLU6Node.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ReLU6Node.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ReLU6Node::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+ReLU6Node::ReLU6Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/ReLU6Node.h b/runtimes/neurun/src/model/operation/ReLU6Node.h
new file mode 100644
index 0000000..69ebff3
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/ReLU6Node.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_ReLU6_NODE_H__
+#define __NEURUN_MODEL_OPERATION_ReLU6_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ReLU6Node : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+public:
+  ReLU6Node(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "ReLU6"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_ReLU6_NODE_H__
diff --git a/runtimes/neurun/src/model/operation/ResizeBilinearNode.cc b/runtimes/neurun/src/model/operation/ResizeBilinearNode.cc
new file mode 100644
index 0000000..df9b93a
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/ResizeBilinearNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResizeBilinearNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void ResizeBilinearNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+ResizeBilinearNode::ResizeBilinearNode(const operand::IndexSet &inputs,
+                                       const operand::IndexSet &outputs, const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/ResizeBilinearNode.h b/runtimes/neurun/src/model/operation/ResizeBilinearNode.h
new file mode 100644
index 0000000..f29adb2
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/ResizeBilinearNode.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
+#define __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
+
+#include <memory>
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class ResizeBilinearNode : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+  struct Param
+  {
+    operand::Index height_index;
+    operand::Index width_index;
+  };
+
+public:
+  ResizeBilinearNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                     const Param &param);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "ResizeBilinear"; }
+
+public:
+  const Param &param() const { return _param; }
+
+private:
+  Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_RESIZE_BILINEAR_NODE_H__
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
index 7d91c12..9674b2f 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
@@ -33,6 +33,7 @@ GeneratedTests.reduce_min*
 GeneratedTests.relu1*
 GeneratedTests.relu6*
 GeneratedTests.resize_bilinear*
+GeneratedTests.relu*
 GeneratedTests.rnn*
 GeneratedTests.mean*
 GeneratedTests.pad*
diff --git a/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt b/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt
index ec63189..2a89033 100644
--- a/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt
+++ b/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt
@@ -17,9 +17,12 @@ reduce_max
 relu
 reshape
 rsqrt
+relu6
+reshape
+resize_bilinear
 strided_slice
 sub/broadcast
 tanh
 transpose
 MODELS/inception_module
-squeeze
\ No newline at end of file
+squeeze
-- 
2.7.4
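For context (a sketch, not part of the patch; dimensions are arbitrary and error
handling is omitted): a minimal NNAPI model that reaches the new
ANEURALNETWORKS_RELU6 handler in OperationFactory could look roughly like this.

    #include <NeuralNetworks.h>

    ANeuralNetworksModel *model = nullptr;
    ANeuralNetworksModel_create(&model);

    uint32_t dims[4] = {1, 8, 8, 3};
    ANeuralNetworksOperandType tensor{ANEURALNETWORKS_TENSOR_FLOAT32, 4, dims, 0.0f, 0};
    ANeuralNetworksModel_addOperand(model, &tensor); // operand 0: input
    ANeuralNetworksModel_addOperand(model, &tensor); // operand 1: output

    uint32_t in[1] = {0}, out[1] = {1};
    // One input, one output, matching the assert in the RELU6 factory lambda above.
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_RELU6, 1, in, 1, out);
    ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, in, 1, out);
    ANeuralNetworksModel_finish(model);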