#include "Config.h"
#include "StageGenerator.h"
+#include "ShapeFixer.h"
#include "MemoryManager.h"
namespace neurun
class Backend : public ::neurun::backend::Backend
{
public:
- Backend(const neurun::model::Operands &operand_ctx)
- : ::neurun::backend::Backend{
- std::make_shared<Config>(),
- std::make_shared<StageGenerator>(
- operand_ctx, std::make_shared<TensorBuilder>(createMemoryManager()))}
+ Backend(const neurun::model::Operands &operand_ctx,
+ std::shared_ptr<TensorBuilder> tensor_builder =
+ std::make_shared<TensorBuilder>(createMemoryManager()))
+ : ::neurun::backend::Backend{std::make_shared<Config>(),
+ std::make_shared<StageGenerator>(operand_ctx, tensor_builder),
+ std::make_shared<ShapeFixer>(operand_ctx, tensor_builder)}
{
// DO NOTHING
}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeFixer.h"
+
+#include <arm_compute/runtime/CL/CLFunctions.h> // Include all ARM Compute CL functions
+#include <arm_compute/runtime/CL/CLFunctionsEx.h> // Include all ARM Compute EX CL functions
+#include <arm_compute/runtime/misc/functions/GenericGather.h>
+#include <arm_compute/runtime/misc/functions/GenericReshapeLayer.h>
+
+#include "kernel/ConcatLayer.h"
+#include "model/Index.h"
+#include "model/DataType.h"
+#include "model/InternalType.h"
+#include "compiler/IExecutionBuilder.h"
+#include "exec/NopFunction.h"
+#include "util/logging.h"
+#include "util/Utils.h"
+#include "util/Padding.h"
+#include "acl_common/AclFunction.h"
+#include "acl_common/Convert.h"
+#include "acl_common/Swizzle.h"
+
+using ::neurun::compiler::IExecutionBuilder;
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+using ::neurun::backend::acl_common::asAclFunction;
+
+// Keeps references to the operand context and the tensor builder; the
+// visit() overloads below use them to adjust operand shape metadata
+// (rank extension, dimension-correction flags) before tensor allocation.
+ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+    : _ctx(ctx), _tensor_builder(tensor_builder)
+{
+ // A null tensor builder would break every dimCorrection() call below
+ assert(tensor_builder);
+}
+
+void ShapeFixer::visit(const model::operation::CastNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::Conv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::DepthwiseConv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::MaxPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AvgPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ConcatNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ // Disable dimension correction on the output and on every input operand
+ _tensor_builder->dimCorrection(ofm_index, false);
+ for (const auto &input : node.getInputs())
+ _tensor_builder->dimCorrection(input, false);
+}
+
+void ShapeFixer::visit(const model::operation::FullyConnectedNode &node)
+{
+ using model::operation::FullyConnectedNode;
+ const auto input_index{node.getInputs().at(FullyConnectedNode::Input::INPUT)};
+ const auto input_rank = _ctx.at(input_index).shape().rank();
+ // A rank-4 input gets reshaped into rank-2 (see the acl_neon counterpart),
+ // so keep its raw dimensions by disabling dimension correction
+ if (input_rank == 4)
+ _tensor_builder->dimCorrection(input_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::MulNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::MulNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::MulNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::ReduceSumNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReshapeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::ReshapeNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::SqueezeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::SqueezeNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::TanhNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SoftmaxNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::StridedSliceNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::TransposeNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AddNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::AddNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::AddNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::SubNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::SubNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::SubNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::DivNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::DivNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::DivNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::ExpNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LogisticNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LogicalAndNode &node)
+{
+ const auto input0_index{node.getInputs().at(model::operation::LogicalAndNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::LogicalAndNode::Input::INPUT1)};
+
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::LSTMNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReduceMaxNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ComparisonNode &node)
+{
+ const auto input0_index{node.getInputs().at(model::operation::ComparisonNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::ComparisonNode::Input::INPUT1)};
+
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::RSQRTNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReLUNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ResizeBilinearNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReLU1Node &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReLU6Node &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::RNNNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::FloorNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SpaceToDepthNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::SpaceToDepthNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(ofm_index, false);
+ _tensor_builder->dimCorrection(ifm_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::L2Pool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::EmbeddingLookupNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto values_index{
+ node.getInputs().at(model::operation::EmbeddingLookupNode::Input::VALUES)};
+ _tensor_builder->dimCorrection(values_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::L2NormalizationNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::HashtableLookupNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::PReLUNode &node)
+{
+ const auto ifm_index{node.getInputs().at(model::operation::PReLUNode::Input::INPUT)};
+ const auto alpha_index{node.getInputs().at(model::operation::PReLUNode::Input::ALPHA)};
+
+ if (!(_ctx.at(ifm_index).shape() == _ctx.at(alpha_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank());
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::TransposeConvNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SQRTNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LogicalOrNode &node)
+{
+ const auto input0_index{node.getInputs().at(model::operation::LogicalOrNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::LogicalOrNode::Input::INPUT1)};
+
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+void ShapeFixer::visit(const model::operation::LogicalNotNode &) { /* DO NOTHING */}
+
+// Extends the lower-ranked operand to the common broadcast rank, exactly
+// like the other binary-op handlers (Add/Sub/Mul/Div) in this file.
+void ShapeFixer::visit(const model::operation::SquaredDifferenceNode &node)
+{
+  const auto lhs_index{node.getInputs().at(model::operation::SquaredDifferenceNode::Input::LHS)};
+  const auto rhs_index{node.getInputs().at(model::operation::SquaredDifferenceNode::Input::RHS)};
+
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+    // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+    // a node to extend shape may be inserted in front of this operation
+    const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+    const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+  }
+}
+
+void ShapeFixer::visit(const model::operation::TopKV2Node &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::GatherNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::GatherNode::Input::INPUT)};
+ const auto indices_index{node.getInputs().at(model::operation::GatherNode::Input::INDICES)};
+ _tensor_builder->dimCorrection(ofm_index, false);
+ _tensor_builder->dimCorrection(ifm_index, false);
+ _tensor_builder->dimCorrection(indices_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::NegNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AbsNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ArgMaxNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::ArgMaxNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(ofm_index, false);
+ _tensor_builder->dimCorrection(ifm_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::DequantizeNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::MeanNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::LocalResponseNormalizationNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::DepthToSpaceNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ReduceMinNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SplitNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::SplitNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ for (const auto &output : node.getOutputs())
+ _tensor_builder->dimCorrection(output, false);
+}
+
+void ShapeFixer::visit(const model::operation::UnpackNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::UnpackNode::Input::INPUT)};
+ _tensor_builder->dimCorrection(input_index, false);
+ for (const auto &output_index : node.getOutputs())
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::PadNode &node)
+{
+ const auto input_index{node.getInputs().at(model::operation::PadNode::Input::INPUT)};
+ const auto output_index{node.getOutputs().at(0)};
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_CL_SHAPE_FIXER_H__
+#define __NEURUN_BACKEND_ACL_CL_SHAPE_FIXER_H__
+
+#include <backend/IShapeFixer.h>
+
+#include "model/Operands.h"
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
+{
+
+// Visitor run before tensor allocation for the acl_cl backend. For each
+// supported operation it adjusts operand shape metadata: extending ranks so
+// broadcast operands agree, and disabling the tensor builder's dimension
+// correction where operations need raw dimensions. Operations that need no
+// adjustment have empty (DO NOTHING) visit bodies in the .cc file.
+class ShapeFixer : public IShapeFixer
+{
+public:
+ ShapeFixer(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+ std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+
+ void visit(const model::operation::Conv2DNode &) override;
+ void visit(const model::operation::DepthwiseConv2DNode &) override;
+ void visit(const model::operation::MaxPool2DNode &) override;
+ void visit(const model::operation::AvgPool2DNode &) override;
+ void visit(const model::operation::ConcatNode &) override;
+ void visit(const model::operation::FullyConnectedNode &) override;
+ void visit(const model::operation::MulNode &) override;
+ void visit(const model::operation::ReduceSumNode &) override;
+ void visit(const model::operation::ReshapeNode &) override;
+ void visit(const model::operation::SqueezeNode &) override;
+ void visit(const model::operation::TanhNode &) override;
+ void visit(const model::operation::SoftmaxNode &) override;
+ void visit(const model::operation::StridedSliceNode &) override;
+ void visit(const model::operation::TransposeNode &) override;
+ void visit(const model::operation::AddNode &) override;
+ void visit(const model::operation::SubNode &) override;
+ void visit(const model::operation::CastNode &) override;
+ void visit(const model::operation::DivNode &) override;
+ void visit(const model::operation::ExpNode &) override;
+ void visit(const model::operation::LogisticNode &) override;
+ void visit(const model::operation::ReduceMaxNode &) override;
+ void visit(const model::operation::ComparisonNode &) override;
+ void visit(const model::operation::LogicalAndNode &) override;
+ void visit(const model::operation::LSTMNode &) override;
+ void visit(const model::operation::RSQRTNode &) override;
+ void visit(const model::operation::ReLUNode &) override;
+ void visit(const model::operation::ResizeBilinearNode &) override;
+ void visit(const model::operation::ReLU1Node &) override;
+ void visit(const model::operation::ReLU6Node &) override;
+ void visit(const model::operation::RNNNode &) override;
+ void visit(const model::operation::FloorNode &) override;
+ void visit(const model::operation::SpaceToDepthNode &) override;
+ void visit(const model::operation::L2Pool2DNode &) override;
+ void visit(const model::operation::EmbeddingLookupNode &) override;
+ void visit(const model::operation::L2NormalizationNode &) override;
+ void visit(const model::operation::HashtableLookupNode &) override;
+ void visit(const model::operation::PReLUNode &) override;
+ void visit(const model::operation::TransposeConvNode &) override;
+ void visit(const model::operation::SQRTNode &) override;
+ void visit(const model::operation::LogicalOrNode &) override;
+ void visit(const model::operation::LogicalNotNode &) override;
+ void visit(const model::operation::SquaredDifferenceNode &) override;
+ void visit(const model::operation::TopKV2Node &) override;
+ void visit(const model::operation::GatherNode &) override;
+ void visit(const model::operation::NegNode &) override;
+ void visit(const model::operation::AbsNode &) override;
+ void visit(const model::operation::ArgMaxNode &) override;
+ void visit(const model::operation::DequantizeNode &) override;
+ void visit(const model::operation::MeanNode &) override;
+ void visit(const model::operation::LocalResponseNormalizationNode &) override;
+ void visit(const model::operation::DepthToSpaceNode &) override;
+ void visit(const model::operation::ReduceMinNode &) override;
+ void visit(const model::operation::SplitNode &) override;
+ void visit(const model::operation::UnpackNode &) override;
+ void visit(const model::operation::PadNode &) override;
+
+private:
+ // Operand context: source of shapes; some visits mutate shapes via const_cast
+ const neurun::model::Operands &_ctx;
+ // Used to toggle dimension correction on individual operands
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_CL_SHAPE_FIXER_H__
#include "Config.h"
#include "StageGenerator.h"
+#include "ShapeFixer.h"
#include "MemoryManager.h"
namespace neurun
class Backend : public ::neurun::backend::Backend
{
public:
- Backend(const neurun::model::Operands &operand_ctx)
- : ::neurun::backend::Backend{
- std::make_shared<Config>(),
- std::make_shared<StageGenerator>(
- operand_ctx, std::make_shared<TensorBuilder>(createMemoryManager()))}
+ Backend(const neurun::model::Operands &operand_ctx,
+ std::shared_ptr<TensorBuilder> tensor_builder =
+ std::make_shared<TensorBuilder>(createMemoryManager()))
+ : ::neurun::backend::Backend{std::make_shared<Config>(),
+ std::make_shared<StageGenerator>(operand_ctx, tensor_builder),
+ std::make_shared<ShapeFixer>(operand_ctx, tensor_builder)}
{
// DO NOTHING
}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeFixer.h"
+
+#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
+#include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
+#include <arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h>
+#include <arm_compute/runtime/NEON/functions/NEPoolingLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEActivationLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
+#include <arm_compute/runtime/NEON/functions/NEFullyConnectedReshapingLayer.h>
+
+#include "kernel/ConcatLayer.h"
+#include "util/Padding.h"
+#include "model/Index.h"
+#include "model/DataType.h"
+#include "model/InternalType.h"
+#include "compiler/IExecutionBuilder.h"
+#include "exec/NopFunction.h"
+#include "util/logging.h"
+#include "util/Utils.h"
+#include "acl_common/Convert.h"
+#include "acl_common/Swizzle.h"
+
+using ::neurun::compiler::IExecutionBuilder;
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_neon
+{
+
+using ::neurun::backend::acl_common::asAclFunction;
+
+// Keeps references to the operand context and the tensor builder used by
+// the visit() overloads below to fix up shapes before tensor allocation.
+ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+    : _ctx(ctx), _tensor_builder(tensor_builder)
+{
+ // A null tensor builder would break every dimCorrection() call below
+ assert(tensor_builder);
+}
+
+void ShapeFixer::visit(const model::operation::Conv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::DepthwiseConv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::MaxPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AvgPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ConcatNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ _tensor_builder->dimCorrection(ofm_index, false);
+ for (const auto &inputs : node.getInputs())
+ _tensor_builder->dimCorrection(inputs, false);
+}
+
+void ShapeFixer::visit(const model::operation::FullyConnectedNode &node)
+{
+ using model::operation::FullyConnectedNode;
+ const auto input_index{node.getInputs().at(FullyConnectedNode::Input::INPUT)};
+ const auto input_rank = _ctx.at(input_index).shape().rank();
+ // TODO Currently we are not handling where the case is that the input's rank is 3.
+ // The handling should be added in the future.
+ assert(input_rank != 3);
+ // Check for reshaping input's shape into rank-2
+ if (input_rank == 4)
+ _tensor_builder->dimCorrection(input_index, false);
+}
+
+// Extends the lower-ranked operand to the common broadcast rank. Nontrivial
+// broadcasting (shapes still unequal after rank extension) is rejected.
+void ShapeFixer::visit(const model::operation::MulNode &node)
+{
+  const auto lhs_index{node.getInputs().at(model::operation::MulNode::Input::LHS)};
+  const auto rhs_index{node.getInputs().at(model::operation::MulNode::Input::RHS)};
+
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+  {
+    const auto broadcast_rank =
+        std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+    // Extend the *actual* operand shapes (the previous code extended local
+    // copies, which had no effect and made the check below always throw).
+    // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+    // a node to extend shape may be inserted in front of this operation
+    const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+    const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+  }
+
+  // Nontrivial broadcasting isn't supported yet
+  if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+  {
+    throw std::runtime_error("NYI");
+  }
+}
+
+void ShapeFixer::visit(const model::operation::ReshapeNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::ReshapeNode::Input::INPUT)};
+
+ _tensor_builder->dimCorrection(input_index, false);
+ _tensor_builder->dimCorrection(output_index, false);
+}
+
+void ShapeFixer::visit(const model::operation::TanhNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SoftmaxNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AddNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::AddNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::AddNode::Input::RHS)};
+
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+ // TODO remove const_cast later. For example, _ctx may need to be a non const variable or
+ // a node to extend shape may be inserted in front of this operation
+ const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank);
+ const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank);
+ }
+}
+
+} // namespace acl_neon
+} // namespace backend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_NEON_SHAPE_FIXER_H__
+#define __NEURUN_BACKEND_ACL_NEON_SHAPE_FIXER_H__
+
+#include <backend/IShapeFixer.h>
+
+#include "model/Operands.h"
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_neon
+{
+
+// Visitor run before tensor allocation for the acl_neon backend. Adjusts
+// operand shape metadata (broadcast rank extension, dimension-correction
+// flags) for the subset of operations this backend supports; operations
+// needing no adjustment have empty (DO NOTHING) visit bodies in the .cc file.
+class ShapeFixer : public IShapeFixer
+{
+public:
+ ShapeFixer(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+ std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+
+ void visit(const model::operation::Conv2DNode &) override;
+ void visit(const model::operation::DepthwiseConv2DNode &) override;
+ void visit(const model::operation::MaxPool2DNode &) override;
+ void visit(const model::operation::AvgPool2DNode &) override;
+ void visit(const model::operation::ConcatNode &) override;
+ void visit(const model::operation::FullyConnectedNode &) override;
+ void visit(const model::operation::MulNode &) override;
+ void visit(const model::operation::ReshapeNode &) override;
+ void visit(const model::operation::TanhNode &) override;
+ void visit(const model::operation::SoftmaxNode &) override;
+ void visit(const model::operation::AddNode &) override;
+
+private:
+ // Operand context: source of shapes; some visits mutate shapes via const_cast
+ const neurun::model::Operands &_ctx;
+ // Used to toggle dimension correction on individual operands
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace acl_neon
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_NEON_SHAPE_FIXER_H__
#include "Config.h"
#include "StageGenerator.h"
+#include "ShapeFixer.h"
namespace neurun
{
class Backend : public ::neurun::backend::Backend
{
public:
- Backend(const neurun::model::Operands &operand_ctx)
- : ::neurun::backend::Backend{
- std::make_shared<Config>(),
- std::make_shared<StageGenerator>(operand_ctx, std::make_shared<TensorBuilder>())}
+ // A single TensorBuilder is now shared by StageGenerator and ShapeFixer so
+ // both see the same backend tensors; the defaulted parameter keeps the old
+ // one-argument construction working for existing callers.
+ Backend(const neurun::model::Operands &operand_ctx,
+ std::shared_ptr<TensorBuilder> tensor_builder = std::make_shared<TensorBuilder>())
+ : ::neurun::backend::Backend{std::make_shared<Config>(),
+ std::make_shared<StageGenerator>(operand_ctx, tensor_builder),
+ std::make_shared<ShapeFixer>(operand_ctx, tensor_builder)}
{
// DO NOTHING
}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ShapeFixer.h"
+
+#include <stdexcept>
+
+#include "cpp14/memory.h"
+#include "util/Padding.h"
+#include "kernel/OperationUtils.h"
+#include "kernel/ConvolutionLayer.h"
+#include "kernel/AvgPoolLayer.h"
+#include "kernel/MaxPoolLayer.h"
+#include "kernel/ConcatLayer.h"
+#include "kernel/FullyConnectedLayer.h"
+#include "kernel/ReshapeLayer.h"
+#include "kernel/SoftMaxLayer.h"
+#include "kernel/PermuteLayer.h"
+#include "kernel/DepthwiseConvolutionLayer.h"
+#include "kernel/AddLayer.h"
+
+#include <backend/Backend.h>
+#include <backend/IConfig.h>
+#include "compiler/IExecutionBuilder.h"
+
+#include "util/logging.h"
+
+#include "util/Utils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+// Binds the shape fixer to the model's operand table and the backend's
+// tensor builder; both are shared with the backend's StageGenerator
+// (see the Backend constructor).
+ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _ctx(operand_ctx), _tensor_builder(tensor_builder)
+{
+ // NOTE(review): assert() is used but <cassert> is not included directly in
+ // this file -- confirm it arrives transitively via an included header.
+ assert(tensor_builder);
+}
+
+// The operations below require no shape adjustment on the CPU backend, so
+// their visitors are deliberately empty.
+void ShapeFixer::visit(const model::operation::Conv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::DepthwiseConv2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::MaxPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::AvgPool2DNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::ConcatNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::FullyConnectedNode &) { /* DO NOTHING */}
+
+// Mul shape fixing is not implemented for this backend yet.
+void ShapeFixer::visit(const model::operation::MulNode &) { throw std::runtime_error("NYI"); }
+
+void ShapeFixer::visit(const model::operation::ReshapeNode &) { /* DO NOTHING */}
+
+void ShapeFixer::visit(const model::operation::SoftmaxNode &) { /* DO NOTHING */}
+
+// Add is only accepted when both inputs already have identical shapes and
+// are not QUANT8_ASYMM; anything else is rejected as not-yet-implemented.
+void ShapeFixer::visit(const model::operation::AddNode &node)
+{
+ const auto lhs_index{node.getInputs().at(model::operation::AddNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::AddNode::Input::RHS)};
+
+ // Shapes that differ (i.e. would require broadcasting) or quantized
+ // (QUANT8_ASYMM) inputs are unsupported here.
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()) ||
+ _ctx.at(lhs_index).typeInfo().type() == model::DataType::QUANT8_ASYMM)
+ {
+ throw std::runtime_error{"NYI"};
+ }
+}
+
+void ShapeFixer::visit(const model::operation::PermuteNode &) { /* DO NOTHING */}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_SHAPE_FIXER_H__
+#define __NEURUN_BACKEND_CPU_SHAPE_FIXER_H__
+
+#include <backend/IShapeFixer.h>
+
+#include "model/Operands.h"
+#include "operand/Tensor.h"
+#include "TensorBuilder.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+// Shape-fixing visitor for the CPU backend: walks supported operations
+// before tensor allocation and rejects configurations this backend cannot
+// handle yet. Operations without an override here fall back to
+// IShapeFixer's default visit, which throws "NYI".
+class ShapeFixer : public IShapeFixer
+{
+public:
+ // ctx: model operand table; tensor_builder: builder shared with the
+ // backend's StageGenerator (must be non-null).
+ ShapeFixer(const neurun::model::Operands &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
+
+ // Exposes the concrete builder through the ITensorBuilder interface.
+ std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+
+ // Operations this backend provides shape fixing for.
+ void visit(const model::operation::Conv2DNode &) override;
+ void visit(const model::operation::DepthwiseConv2DNode &) override;
+ void visit(const model::operation::MaxPool2DNode &) override;
+ void visit(const model::operation::AvgPool2DNode &) override;
+ void visit(const model::operation::ConcatNode &) override;
+ void visit(const model::operation::FullyConnectedNode &) override;
+ void visit(const model::operation::MulNode &) override;
+ void visit(const model::operation::ReshapeNode &) override;
+ void visit(const model::operation::SoftmaxNode &) override;
+ void visit(const model::operation::AddNode &) override;
+ void visit(const model::operation::PermuteNode &) override;
+
+private:
+ // Set once at construction; _ctx is a borrowed reference.
+ const neurun::model::Operands &_ctx;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_SHAPE_FIXER_H__
struct IConfig;
class IStageGenerator;
+class IShapeFixer;
struct ITensorBuilder;
class Backend
{
public:
+ // A backend now also carries a shape fixer alongside its config and stage
+ // generator; the main constructor requires all three.
Backend(const std::shared_ptr<neurun::backend::IConfig> &backend_config,
- const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen);
+ const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen,
+ const std::shared_ptr<neurun::backend::IShapeFixer> &shape_fixer);
- Backend(void) : _config(nullptr), _stage_gen(nullptr)
+ // Default-constructed backends hold no components at all.
+ Backend(void) : _config(nullptr), _stage_gen(nullptr), _shape_fixer(nullptr)
{
// DO NOTHING
}
public:
const std::shared_ptr<neurun::backend::IConfig> config() const;
const std::shared_ptr<neurun::backend::IStageGenerator> stage_gen() const;
+ const std::shared_ptr<neurun::backend::IShapeFixer> shape_fixer() const;
const std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() const;
private:
std::shared_ptr<neurun::backend::IConfig> _config;
std::shared_ptr<neurun::backend::IStageGenerator> _stage_gen;
+ std::shared_ptr<neurun::backend::IShapeFixer> _shape_fixer;
};
} // namespace backend
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ISHAPE_FIXER_H__
+#define __NEURUN_BACKEND_ISHAPE_FIXER_H__
+
+#include <memory>
+#include <functional>
+#include <stdexcept>
+
+#include "ITensorBuilder.h"
+#include "model/OperationVisitor.h"
+#include "model/Subgraph.h"
+#include "cpp14/memory.h"
+
+namespace neurun
+{
+namespace backend
+{
+
+// Base class for per-backend shape-fixing passes. OperationVisitor is a
+// private base on purpose: external code calls fix() rather than visiting
+// nodes directly.
+class IShapeFixer : model::OperationVisitor
+{
+public:
+ virtual ~IShapeFixer() = default;
+
+ // Concrete fixers expose their backend's tensor builder.
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
+
+protected:
+// Generates a defaulted visit() for every operation listed in
+// model/Operations.lst; the default throws, so a backend must explicitly
+// override each operation it supports.
+#define OP(InternalName, IsNnApi) \
+ virtual void visit(const model::operation::InternalName &) override \
+ { \
+ throw std::runtime_error("NYI"); \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+public:
+ // Dispatches to the matching visit() overload for this node.
+ void fix(const model::Operation &node) { node.accept(*this); }
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ISHAPE_FIXER_H__
#include "backend/IConfig.h"
#include "backend/ITensorBuilder.h"
#include "backend/IStageGenerator.h"
+#include "backend/IShapeFixer.h"
namespace neurun
{
{
Backend::Backend(const std::shared_ptr<neurun::backend::IConfig> &backend_config,
- const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen)
- : _config(backend_config), _stage_gen(stage_gen)
+ const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen,
+ const std::shared_ptr<neurun::backend::IShapeFixer> &shape_fixer)
+ // Store all three components; the config is also initialized eagerly here.
+ : _config(backend_config), _stage_gen(stage_gen), _shape_fixer(shape_fixer)
{
backend_config->initialize();
}
return _stage_gen;
}
+// Read-only access to the shape fixer supplied at construction (may be
+// null for a default-constructed Backend).
+const std::shared_ptr<neurun::backend::IShapeFixer> Backend::shape_fixer() const
+{
+ return _shape_fixer;
+}
+
const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder() const
{
return _stage_gen->tensor_builder();
#include "backend/ExecTime.h"
#include "backend/IConfig.h"
#include "backend/IStageGenerator.h"
+#include "backend/IShapeFixer.h"
#include "backend/Backend.h"
#include <gtest/gtest.h>
#include <fstream>
std::shared_ptr<ITensorBuilder> tensor_builder() final { return nullptr; }
};
+// Minimal IShapeFixer stub for tests: inherits the default throwing
+// visitors and reports no tensor builder.
+struct MockShapeFixer : IShapeFixer
+{
+ std::shared_ptr<ITensorBuilder> tensor_builder() final { return nullptr; }
+};
+
struct MockBackend : public ::neurun::backend::Backend
{
/** @brief Stub backend for testing.
* Required because we use pointers to backends instead of string identifiers.
*/
MockBackend()
- : Backend{std::make_shared<MockConfig>(), std::make_shared<MockStageGenerator>()} {};
+ // Backend now requires a shape fixer, so the mock supplies one as well.
+ : Backend{std::make_shared<MockConfig>(), std::make_shared<MockStageGenerator>(),
+ std::make_shared<MockShapeFixer>()} {};
};
TEST(ExecTime, roundtrip_ok)