From e150f99b8a67d9a4ae8a4181ed49a87ff2cf4bc6 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EA=B9=80=EC=88=98=EC=A7=84/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Mon, 18 Mar 2019 15:42:37 +0900
Subject: [PATCH] [neurun] Enable PReLU, TransposeConv, SQRT ops (#4733)

This commit enables the `PReLU`, `TransposeConv`, and `SQRT` ops for the `acl_cl` backend.

Signed-off-by: sjsujinkim
---
 .../frontend/nnapi/wrapper/OperationFactory.cc     |  50 ++++++
 .../neurun/src/backend/acl_cl/StageGenerator.cc    | 166 +++++++++++++++++++++
 .../neurun/src/backend/acl_cl/StageGenerator.h     |   3 +
 runtimes/neurun/src/compiler/OperationValidator.cc |  47 ++++++
 runtimes/neurun/src/compiler/OperationValidator.h  |   1 +
 runtimes/neurun/src/model/operation/Node.Include.h |   3 +
 runtimes/neurun/src/model/operation/Op.lst         |   3 +
 runtimes/neurun/src/model/operation/PReLUNode.cc   |  39 +++++
 runtimes/neurun/src/model/operation/PReLUNode.h    |  50 ++++++
 runtimes/neurun/src/model/operation/SQRTNode.cc    |  39 +++++
 runtimes/neurun/src/model/operation/SQRTNode.h     |  49 ++++++
 .../src/model/operation/TransposeConvNode.cc       |  40 +++++
 .../neurun/src/model/operation/TransposeConvNode.h |  67 +++++++++
 tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun   |   3 -
 .../neurun_frameworktest_list.armv7l.acl_cl.txt    |   2 +
 15 files changed, 559 insertions(+), 3 deletions(-)
 create mode 100644 runtimes/neurun/src/model/operation/PReLUNode.cc
 create mode 100644 runtimes/neurun/src/model/operation/PReLUNode.h
 create mode 100644 runtimes/neurun/src/model/operation/SQRTNode.cc
 create mode 100644 runtimes/neurun/src/model/operation/SQRTNode.h
 create mode 100644 runtimes/neurun/src/model/operation/TransposeConvNode.cc
 create mode 100644 runtimes/neurun/src/model/operation/TransposeConvNode.h

diff --git a/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc b/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
index c900665..59de2d5 100644
--- a/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
+++ b/runtimes/neurun/frontend/nnapi/wrapper/OperationFactory.cc
@@ -832,6 +832,56 @@ OperationFactory::OperationFactory()
     return new operation::HashtableLookupNode{inputs, outputs};
   };

+
+  _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> input Tensor Index
+    //  1 -> alpha Tensor Index
+    operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1]};
+
+    return new operation::PReLUNode{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 6 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Output Shape Index
+    //  1 -> Weights Index
+    //  2 -> Input Tensor Index
+    //  3 -> Padding Type
+    //  4 -> Stride width
+    //  5 -> Stride height
+
+    operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+
+    operation::TransposeConvNode::Param param;
+    param.padding_index = operand::Index{init_param.inputs[3]};
+    param.hstride_index = operand::Index{init_param.inputs[4]};
+    param.vstride_index = operand::Index{init_param.inputs[5]};
+
+    return new operation::TransposeConvNode{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    operand::IndexSet outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //  0 -> input Tensor Index
+
+    operand::IndexSet inputs{init_param.inputs[0]};
+    return new operation::SQRTNode{inputs, outputs};
+  };
 }

 neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index f04e056..d98040b 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -2204,6 +2204,172 @@ void StageGenerator::visit(const model::operation::HashtableLookupNode &node)
   });
 }

+void StageGenerator::visit(const model::operation::PReLUNode &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto ifm_index{node.getInputs().at(model::operation::PReLUNode::Input::INPUT)};
+  const auto alpha_index{node.getInputs().at(model::operation::PReLUNode::Input::ALPHA)};
+
+  if (!(_ctx.at(ifm_index).shape() == _ctx.at(alpha_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank());
+    const_cast<::neurun::model::operand::Shape &>(_ctx.at(ifm_index).shape())
+        .extendRank(broadcast_rank);
+    const_cast<::neurun::model::operand::Shape &>(_ctx.at(alpha_index).shape())
+        .extendRank(broadcast_rank);
+  }
+
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+    model::operand::Index alpha_index;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+  param.alpha_index = alpha_index;
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+    auto alpha_alloc = tensors->at(param.alpha_index).get();
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLPReLU>();
+
+    l->configure(ifm_alloc->handle(), alpha_alloc->handle(), ofm_alloc->handle());
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
+void StageGenerator::visit(const model::operation::TransposeConvNode &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto output_shape_index{
+      node.getInputs().at(model::operation::TransposeConvNode::Input::OUTPUT_SHAPE)};
+  const auto ker_index{node.getInputs().at(model::operation::TransposeConvNode::Input::KERNEL)};
+  const auto ifm_index{node.getInputs().at(model::operation::TransposeConvNode::Input::INPUT)};
+
+  const auto padding_index{node.param().padding_index};
+  const auto hstride_index{node.param().hstride_index};
+  const auto vstride_index{node.param().vstride_index};
+
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ker_shape = _ctx.at(ker_index).shape().asFeature();
+
+  const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+  const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  const PaddingCode padding_type =
+      static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+  // Construct operation parameters
+  struct Param
+  {
+    model::operand::Index ofm_index;
+    model::operand::Index ifm_index;
+    model::operand::Index ker_index;
+
+    neurun::util::Padding padding;
+    neurun::util::Stride stride;
+  };
+
+  Param param;
+
+  param.ofm_index = ofm_index;
+  param.ifm_index = ifm_index;
+  param.ker_index = ker_index;
+
+  param.stride.horizontal = hstride;
+  param.stride.vertical = vstride;
+
+  param.padding =
+      (padding_type == ANEURALNETWORKS_PADDING_SAME)
+          ? neurun::util::same_padding(ifm_shape, ofm_shape, param.stride, ker_shape.W, ker_shape.H)
+          : neurun::util::valid_padding();
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto ofm_alloc = tensors->at(param.ofm_index).get();
+    auto ifm_alloc = tensors->at(param.ifm_index).get();
+    auto ker_alloc = tensors->at(param.ker_index).get();
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLDeconvolutionLayerEx>();
+
+    auto padding = param.padding;
+    auto inner_border_right = padding.right - padding.left;
+    auto inner_border_top = padding.bottom - padding.top;
+
+    padding.left = padding.right;
+    padding.top = padding.bottom;
+    auto symmetric_tconv_info = asPadStrideInfo(padding, param.stride);
+
+    l->configure(ifm_alloc->handle(), ker_alloc->handle(), nullptr, ofm_alloc->handle(),
+                 symmetric_tconv_info, inner_border_right, inner_border_top);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
+void StageGenerator::visit(const model::operation::SQRTNode &node)
+{
+  const auto output_index{node.getOutputs().at(0)};
+  const auto input_index{node.getInputs().at(model::operation::SQRTNode::Input::INPUT)};
+
+  struct Param
+  {
+    model::operand::Index output_index;
+    model::operand::Index input_index;
+  };
+
+  Param param;
+
+  param.output_index = output_index;
+  param.input_index = input_index;
+
+  auto tensors = _tensor_builder;
+
+  returnStage([tensors, param](IExecutionBuilder &builder) {
+    auto output_alloc = tensors->at(param.output_index).get();
+    auto input_alloc = tensors->at(param.input_index).get();
+
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::SQRT};
+
+    std::unique_ptr<::arm_compute::IFunction> fn;
+
+    auto l = make_layer<::arm_compute::CLActivationLayer>();
+
+    l->configure(input_alloc->handle(), output_alloc->handle(), act_info);
+
+    fn = std::move(l);
+
+    auto acl_fn = make_cl_function(std::move(fn));
+
+    builder.append(std::move(acl_fn));
+  });
+}
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
index 7b468ca..ab6e036 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.h
@@ -71,6 +71,9 @@ public:
   virtual void visit(const model::operation::EmbeddingLookupNode &) override;
   virtual void visit(const model::operation::L2NormalizationNode &) override;
   virtual void visit(const model::operation::HashtableLookupNode &) override;
+  virtual void visit(const model::operation::PReLUNode &) override;
+  virtual void visit(const model::operation::TransposeConvNode &) override;
+  virtual void visit(const model::operation::SQRTNode &) override;

 private:
   const neurun::model::operand::Set &_ctx;
diff --git a/runtimes/neurun/src/compiler/OperationValidator.cc b/runtimes/neurun/src/compiler/OperationValidator.cc
index 6986101..0058b4d 100644
--- a/runtimes/neurun/src/compiler/OperationValidator.cc
+++ b/runtimes/neurun/src/compiler/OperationValidator.cc
@@ -310,5 +310,52 @@ void OperationValidator::visit(const model::operation::HashtableLookupNode &node
   assert(lookups_shape.dim(0) == output_shape.dim(0));
 }

+void OperationValidator::visit(const model::operation::TransposeConvNode &node)
+{
+  const auto ofm_index{node.getOutputs().at(0)};
+  const auto out_shape_index{
+      node.getInputs().at(model::operation::TransposeConvNode::Input::OUTPUT_SHAPE)};
+  const auto ifm_index{node.getInputs().at(model::operation::TransposeConvNode::Input::INPUT)};
+  const auto ker_index{node.getInputs().at(model::operation::TransposeConvNode::Input::KERNEL)};
+
+  const auto padding_index{node.param().padding_index};
+  const auto hstride_index{node.param().hstride_index};
+  const auto vstride_index{node.param().vstride_index};
+
+  // Only 4D tensors are supported
+  assert(_ctx.at(ofm_index).shape().rank() == 4);
+  assert(_ctx.at(ofm_index).shape().rank() == _ctx.at(ifm_index).shape().rank());
+  assert(_ctx.at(ofm_index).shape().rank() == _ctx.at(ker_index).shape().rank());
+
+  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+  const auto ker_shape = _ctx.at(ker_index).shape().asFeature();
+
+  UNUSED_RELEASE(ofm_shape);
+  UNUSED_RELEASE(ifm_shape);
+  UNUSED_RELEASE(ker_shape);
+
+  assert(_ctx.at(padding_index).hasData() == true);
+
+  const int32_t vstride = _ctx.at(vstride_index).asScalar<int32_t>();
+  const int32_t hstride = _ctx.at(hstride_index).asScalar<int32_t>();
+
+  UNUSED_RELEASE(vstride);
+  UNUSED_RELEASE(hstride);
+
+  const PaddingCode padding_type =
+      static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+  UNUSED_RELEASE(padding_type);
+
+  assert(vstride > 0);
+  assert(hstride > 0);
+  assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+         (ANEURALNETWORKS_PADDING_VALID == padding_type));
+  assert(ifm_shape.N == ofm_shape.N);
+  assert(ifm_shape.C == ker_shape.C);
+  assert(ker_shape.N == ofm_shape.C);
+}
+
 } // namespace compiler
 } // namespace neurun
diff --git a/runtimes/neurun/src/compiler/OperationValidator.h b/runtimes/neurun/src/compiler/OperationValidator.h
index f8384e6..20f3ba8 100644
--- a/runtimes/neurun/src/compiler/OperationValidator.h
+++ b/runtimes/neurun/src/compiler/OperationValidator.h
@@ -50,6 +50,7 @@ public:
   virtual void visit(const model::operation::SpaceToDepthNode &node) override;
   virtual void visit(const model::operation::EmbeddingLookupNode &node) override;
   virtual void visit(const model::operation::HashtableLookupNode &node) override;
+  virtual void visit(const model::operation::TransposeConvNode &node) override;

 private:
   const neurun::model::operand::Set &_ctx;
diff --git a/runtimes/neurun/src/model/operation/Node.Include.h b/runtimes/neurun/src/model/operation/Node.Include.h
index 428ebdc..5e2a266 100644
--- a/runtimes/neurun/src/model/operation/Node.Include.h
+++ b/runtimes/neurun/src/model/operation/Node.Include.h
@@ -51,3 +51,6 @@
 #include "EmbeddingLookupNode.h"
 #include "L2NormalizationNode.h"
 #include "HashtableLookupNode.h"
+#include "PReLUNode.h"
+#include "TransposeConvNode.h"
+#include "SQRTNode.h"
diff --git a/runtimes/neurun/src/model/operation/Op.lst b/runtimes/neurun/src/model/operation/Op.lst
index 1e48219..c470eb8 100644
--- a/runtimes/neurun/src/model/operation/Op.lst
+++ b/runtimes/neurun/src/model/operation/Op.lst
@@ -55,4 +55,7 @@ OP(L2Pool2DNode            , true)
 OP(EmbeddingLookupNode     , true)
 OP(L2NormalizationNode     , true)
 OP(HashtableLookupNode     , true)
+OP(PReLUNode               , true)
+OP(TransposeConvNode       , true)
+OP(SQRTNode                , true)
 OP(PermuteNode             , false)
diff --git a/runtimes/neurun/src/model/operation/PReLUNode.cc b/runtimes/neurun/src/model/operation/PReLUNode.cc
new file mode 100644
index 0000000..3f40de6
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/PReLUNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PReLUNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void PReLUNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+PReLUNode::PReLUNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+    : model::operation::Node{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/PReLUNode.h b/runtimes/neurun/src/model/operation/PReLUNode.h
new file mode 100644
index 0000000..c07c45b
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/PReLUNode.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_PRELU_NODE_H__
+#define __NEURUN_MODEL_OPERATION_PRELU_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class PReLUNode : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0,
+    ALPHA = 1
+  };
+
+public:
+  PReLUNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "PReLU"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_PRELU_NODE_H__
diff --git a/runtimes/neurun/src/model/operation/SQRTNode.cc b/runtimes/neurun/src/model/operation/SQRTNode.cc
new file mode 100644
index 0000000..1643500
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/SQRTNode.cc
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SQRTNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void SQRTNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+SQRTNode::SQRTNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+    : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/SQRTNode.h b/runtimes/neurun/src/model/operation/SQRTNode.h
new file mode 100644
index 0000000..a52e50d
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/SQRTNode.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_SQRT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_SQRT_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class SQRTNode : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    INPUT = 0
+  };
+
+public:
+  SQRTNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "SQRT"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_SQRT_NODE_H__
diff --git a/runtimes/neurun/src/model/operation/TransposeConvNode.cc b/runtimes/neurun/src/model/operation/TransposeConvNode.cc
new file mode 100644
index 0000000..40a8731
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/TransposeConvNode.cc
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TransposeConvNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void TransposeConvNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+TransposeConvNode::TransposeConvNode(const operand::IndexSet &inputs,
+                                     const operand::IndexSet &outputs, const Param &param)
+    : model::operation::Node{OperandConstraint::createExact(3u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
diff --git a/runtimes/neurun/src/model/operation/TransposeConvNode.h b/runtimes/neurun/src/model/operation/TransposeConvNode.h
new file mode 100644
index 0000000..efb6bbc
--- /dev/null
+++ b/runtimes/neurun/src/model/operation/TransposeConvNode.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_TRANSPOSE_CONV_NODE_H__
+#define __NEURUN_MODEL_OPERATION_TRANSPOSE_CONV_NODE_H__
+
+#include <memory>
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class TransposeConvNode : public model::operation::Node
+{
+public:
+  enum Input
+  {
+    OUTPUT_SHAPE = 0,
+    KERNEL,
+    INPUT
+  };
+
+  struct Param
+  {
+    operand::Index padding_index;
+    operand::Index hstride_index;
+    operand::Index vstride_index;
+  };
+
+public:
+  TransposeConvNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+                    const Param &param);
+
+public:
+  virtual void accept(NodeVisitor &&) const override;
+  virtual std::string getName() const override { return "TransposeConv"; }
+
+public:
+  const Param &param() const { return _param; }
+
+private:
+  Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_TRANSPOSE_CONV_NODE_H__
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
index 5667517..ba096a2 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
@@ -21,12 +21,10 @@ GeneratedTests.lsh_projection*
 GeneratedTests.lstm*
 GeneratedTests.mobilenet*
 GeneratedTests.neg*
-GeneratedTests.prelu_ex*
 GeneratedTests.reduce_min*
 GeneratedTests.rnn*
 GeneratedTests.mean*
 GeneratedTests.pad*
-GeneratedTests.sqrt_ex*
 GeneratedTests.squared_difference_ex*
 GeneratedTests.svdf*
 GeneratedTests.batch_to_space*
@@ -35,7 +33,6 @@ GeneratedTests.gather_ex*
 GeneratedTests.topk_v2*
 # Unexpected result
 GeneratedTests.split*
-GeneratedTests.transpose_conv*
 GeneratedTests.pack*
 GeneratedTests.unpack*
 GeneratedTests.logical_not_ex*
diff --git a/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt b/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt
index dcd8426..76ebeb9 100644
--- a/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt
+++ b/tests/scripts/neurun_frameworktest_list.armv7l.acl_cl.txt
@@ -17,6 +17,7 @@ max_pool_2d
 MODELS/mobilenet
 mul/broadcast
 not_equal
+prelu
 softmax
 reduce_max
 relu
@@ -30,5 +31,6 @@ strided_slice
 sub/broadcast
 tanh
 transpose
+transpose_conv
 MODELS/inception_module
 squeeze
-- 
2.7.4
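
Note on the padding handling in the TransposeConv stage above: before configuring CLDeconvolutionLayerEx, the stage re-expresses a possibly asymmetric SAME padding as symmetric padding plus an "inner border". The standalone C++ sketch below only mirrors that arithmetic with example numbers; the Pad struct and its field names are illustrative stand-ins, not the real neurun::util::Padding type.

// Standalone sketch of the padding symmetrization used by the TransposeConv stage above.
// "Pad" is an illustrative stand-in for neurun::util::Padding; only the arithmetic matches.
#include <cstdio>

struct Pad
{
  int left, right, top, bottom;
};

int main()
{
  // Example: SAME padding that came out asymmetric (the extra pixel lands on right/bottom).
  Pad p{/*left=*/0, /*right=*/1, /*top=*/0, /*bottom=*/1};

  // Fold the left/right (top/bottom) difference into an inner border, then make the
  // remaining padding symmetric -- the same steps the stage performs before configure().
  const int inner_border_right = p.right - p.left; // 1
  const int inner_border_top = p.bottom - p.top;   // 1
  p.left = p.right;                                // now left == right
  p.top = p.bottom;                                // now top == bottom

  std::printf("pad L/R=%d/%d T/B=%d/%d inner_border=(%d,%d)\n", p.left, p.right, p.top, p.bottom,
              inner_border_right, inner_border_top);
  return 0;
}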