From 8eb3790461e4a70175566fe3794aea8d2de98014 Mon Sep 17 00:00:00 2001
From: Seok NamKoong/동작제어Lab(SR)/Engineer/Samsung Electronics
Date: Fri, 20 Jul 2018 14:36:08 +0900
Subject: [PATCH] Add functions for Tanh in pure acl runtime (#1993)

Add appendTanh function in ActivationBuilder
Add visit function for Tanh in Planner
Add Tanh case in addOperation function

Signed-off-by: Seok NamKoong
---
 runtimes/pure_arm_compute/src/compilation.cc | 49 +++++++++++++++++++++++++++-
 runtimes/pure_arm_compute/src/model.cc       | 12 +++++++
 2 files changed, 60 insertions(+), 1 deletion(-)

diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index 4b28821..e17ddb8 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -235,6 +235,7 @@ private:
   void appendReLU(::arm_compute::ICLTensor *tensor);
   void appendReLU6(::arm_compute::ICLTensor *tensor);
   void appendReLU1(::arm_compute::ICLTensor *tensor);
+  void appendTanh(::arm_compute::ICLTensor *tensor);
 
 public:
   void append(FuseCode code, ::arm_compute::ICLTensor *tensor);
@@ -279,6 +280,18 @@ void ActivationBuilder::appendReLU6(::arm_compute::ICLTensor *ifm_alloc)
   _builder.append("ReLU6", std::move(fn));
 }
 
+void ActivationBuilder::appendTanh(::arm_compute::ICLTensor *ifm_alloc)
+{
+  const ::arm_compute::ActivationLayerInfo act_info{
+      ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
+
+  auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+
+  fn->configure(ifm_alloc, nullptr, act_info);
+
+  _builder.append("Tanh", std::move(fn));
+}
+
 void ActivationBuilder::append(FuseCode code, ::arm_compute::ICLTensor *ifm_alloc)
 {
   switch (code)
@@ -2525,7 +2538,41 @@ void Planner::visit(const ::internal::tflite::op::Tanh::Node &node)
 {
   VERBOSE(Tanh) << "Configure Tanh operation" << std::endl;
 
-  throw std::runtime_error{"Not supported operation"};
+  const ::internal::tflite::operand::Index ofm_index{node.param().ofm_index};
+  const ::internal::tflite::operand::Index ifm_index{node.param().ifm_index};
+
+  // Set shape constraints
+  _builder.addShapeConstr(ofm_index,
+                          asTensorInfo(_ctx.at(ofm_index).shape(), _ctx.at(ofm_index).type()));
+  _builder.addShapeConstr(ifm_index,
+                          asTensorInfo(_ctx.at(ifm_index).shape(), _ctx.at(ifm_index).type()));
+
+  struct Param
+  {
+    int ofm_index;
+    int ifm_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index.asInt();
+  param.ifm_index = ifm_index.asInt();
+
+  auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
+    auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
+    auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
+
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0f, 1.0f};
+
+    auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+
+    fn->configure(ifm_alloc, ofm_alloc, act_info);
+
+    builder.append("Tanh", std::move(fn));
+  };
+
+  _builder.addStage(stage);
 }
 
 class AllocationContext final : public IAllocationContext
diff --git a/runtimes/pure_arm_compute/src/model.cc b/runtimes/pure_arm_compute/src/model.cc
index b5959cd..ee308a5 100644
--- a/runtimes/pure_arm_compute/src/model.cc
+++ b/runtimes/pure_arm_compute/src/model.cc
@@ -368,6 +368,18 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
 
       break;
     }
+    case ANEURALNETWORKS_TANH:
+    {
+      using internal::tflite::op::Tanh::Param;
+      using internal::tflite::op::Tanh::Node;
+
+      // Add 'operations'
+      auto &operations = model->deref().operations();
+
+      operations.emplace_back<Node>(Param{inputCount, inputs, outputCount, outputs});
+
+      break;
+    }
     default:
       throw std::runtime_error{"Not supported operation"};
   };
-- 
2.7.4
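
A minimal caller-side sketch of how the new path is exercised, added for illustration only and not part of the patch: a one-operation NNAPI model using ANEURALNETWORKS_TANH reaches the new case in ANeuralNetworksModel_addOperation, and compiling that model runs the new Planner::visit for Tanh, which stages a CLActivationLayer with ActivationFunction::TANH and alpha = beta = 1.0f (ACL computes a * tanh(b * x), so this is plain tanh). The function name build_tanh_model, the include path, and the tensor shape below are illustrative assumptions.

// Hypothetical usage sketch (not part of this patch): build a single-op Tanh model.
#include <NeuralNetworks.h> // NNAPI C header; exact include path is an assumption

int build_tanh_model(ANeuralNetworksModel **out_model)
{
  ANeuralNetworksModel *model = nullptr;
  if (ANeuralNetworksModel_create(&model) != ANEURALNETWORKS_NO_ERROR)
    return -1;

  // One float32 tensor type shared by the input (ifm) and output (ofm);
  // the shape is arbitrary and chosen only for illustration.
  const uint32_t dims[4] = {1, 8, 8, 3};
  ANeuralNetworksOperandType tensor_type;
  tensor_type.type = ANEURALNETWORKS_TENSOR_FLOAT32;
  tensor_type.dimensionCount = 4;
  tensor_type.dimensions = dims;
  tensor_type.scale = 0.0f;
  tensor_type.zeroPoint = 0;

  ANeuralNetworksModel_addOperand(model, &tensor_type); // operand 0: ifm
  ANeuralNetworksModel_addOperand(model, &tensor_type); // operand 1: ofm

  const uint32_t inputs[1] = {0};
  const uint32_t outputs[1] = {1};

  // Hits the new ANEURALNETWORKS_TANH case in ANeuralNetworksModel_addOperation
  ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_TANH, 1, inputs, 1, outputs);

  ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, inputs, 1, outputs);
  ANeuralNetworksModel_finish(model);

  *out_model = model;
  return 0;
}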