--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ITENSOR_REGISTER_H__
+#define __NEURUN_BACKEND_ITENSOR_REGISTER_H__
+
+#include "compiler/SubTensorInfo.h"
+#include "cpp14/memory.h"
+#include "graph/LowerInfoMap.h"
+#include "graph/operand/ParentInfo.h"
+#include "ITensorBuilder.h"
+#include "model/Layout.h"
+#include "model/OperandIndexSequence.h"
+#include "model/OperandInfo.h"
+#include "model/Operands.h"
+#include "model/OperationVisitor.h"
+
+namespace
+{
+
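+// Permute a tensor shape from the frontend layout to the backend layout.
+// Only rank-4 shapes are reordered, e.g. NHWC (N, H, W, C) <-> NCHW (N, C, H, W);
+// shapes of lower rank are returned as-is.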
+neurun::model::Shape permuteTensorShape(const neurun::model::Shape &shape,
+ neurun::model::Layout frontend_layout,
+ neurun::model::Layout backend_layout)
+{
+ assert(shape.rank() <= 4);
+ neurun::model::Shape backend_shape{shape};
+ if (shape.rank() == 4 && frontend_layout == neurun::model::Layout::NHWC &&
+ backend_layout == neurun::model::Layout::NCHW)
+ {
+ backend_shape.dim(1) = shape.dim(3);
+ backend_shape.dim(2) = shape.dim(1);
+ backend_shape.dim(3) = shape.dim(2);
+ }
+ else if (shape.rank() == 4 && frontend_layout == neurun::model::Layout::NCHW &&
+ backend_layout == neurun::model::Layout::NHWC)
+ {
+ backend_shape.dim(1) = shape.dim(2);
+ backend_shape.dim(2) = shape.dim(3);
+ backend_shape.dim(3) = shape.dim(1);
+ }
+ return backend_shape;
+}
+} // namespace
+
+namespace neurun
+{
+namespace backend
+{
+
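+// Visitor base class that registers tensor info of every operand appearing in a
+// subgraph to the backend's ITensorBuilder, permuting shapes (and sub-tensor
+// offsets) from the frontend layout to the layout the backend expects.
+//
+// A concrete backend provides the three pure virtual accessors below; a minimal
+// sketch (the derived class name is illustrative only):
+//
+//   class TensorRegister final : public ITensorRegister
+//   {
+//     const model::Operands &operands() const override;                // operands of the graph
+//     std::shared_ptr<ITensorBuilder> tensor_builder() const override; // backend's tensor builder
+//     bool supportSubTensor() const override { return false; }         // no sub-tensor support
+//   };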
+class ITensorRegister : public model::OperationVisitor
+{
+public:
+ virtual ~ITensorRegister() = default;
+
+public:
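+  // Visit every node in the subgraph and register tensor info for all of its
+  // input/output operands to the backend's tensor builder.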
+ virtual void registerTensors(const model::Subgraph &subg,
+ const graph::LowerInfoMap *lower_info_map) final
+ {
+ _current_subg_layout = subg.getLayout();
+ _lower_info_map = lower_info_map;
+ assert(_lower_info_map != nullptr);
+ assert(tensor_builder().get() != nullptr);
+ subg.accept(*this);
+ }
+
+protected:
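+  // Accessors each backend must provide: the operands of the model being lowered,
+  // the backend's tensor builder, and whether the backend supports sub-tensors
+  // that share memory with a parent tensor.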
+ virtual const model::Operands &operands() const = 0;
+ virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;
+ virtual bool supportSubTensor() const = 0;
+
+protected:
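+// Default visitor generated for every operation: register plain tensor info for
+// all of the node's input and output operands.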
+#define OP(InternalName) \
+ virtual void visit(const model::operation::InternalName &node) override \
+ { \
+ model::OperandIndexSequence indices{node.getInputs()}; \
+ indices.append(node.getOutputs()); \
+ for (const auto &index : indices) \
+ { \
+ defaultRegisterTensorInfo(index); \
+ } \
+ }
+#include "model/Operations.lst"
+#undef OP
+
+protected:
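+  // Register tensor info for one operand unless it is already registered. If the
+  // backend supports sub-tensors and the operand has a parent, it is registered as
+  // a sub-tensor; otherwise it is registered as a plain tensor with its shape
+  // permuted to the backend layout.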
+ void defaultRegisterTensorInfo(const model::OperandIndex &index) const
+ {
+ if (tensor_builder()->isRegistered(index))
+ {
+ return;
+ }
+
+ const auto &obj = operands().at(index);
+ const auto frontend_layout = frontendLayout();
+ const auto backend_layout = backendLayout(index);
+ if (supportSubTensor() && obj.parent_info() != nullptr)
+ {
+ tensor_builder()->registerSubTensorInfo(
+ index, generateSubTensorInfo(obj, frontend_layout, backend_layout));
+ }
+ else
+ {
+ model::OperandInfo backend_info{
+ permuteTensorShape(obj.shape(), frontend_layout, backend_layout), obj.typeInfo()};
+ tensor_builder()->registerTensorInfo(index, backend_info, frontend_layout, backend_layout,
+ obj.isConstant());
+ }
+ }
+
+protected:
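+  // Layout of the subgraph currently being visited (frontend) and the layout
+  // required by the backend that defines the given operand (backend).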
+ virtual model::Layout frontendLayout() const final { return _current_subg_layout; }
+ virtual model::Layout backendLayout(const model::OperandIndex &index) const final
+ {
+ assert(_lower_info_map != nullptr);
+ const auto lower_info = _lower_info_map->operand.at(index).get();
+ return lower_info->def_factors().getOnlyElement().layout();
+ }
+
+private:
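+  // Build SubTensorInfo for an operand that lives inside its parent tensor,
+  // permuting both its shape and its offset within the parent from the frontend
+  // layout to the backend layout.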
+ compiler::SubTensorInfo generateSubTensorInfo(const model::Operand &obj,
+ model::Layout frontend_layout,
+ model::Layout backend_layout) const
+ {
+ assert(obj.shape().rank() <= 4);
+ const auto parent_index = obj.parent_info()->parent();
+ auto shape = obj.shape();
+ auto offset = obj.parent_info()->offset();
+    // Reorder the offset into the parent tensor to match the backend layout.
+    if (operands().at(parent_index).shape().rank() == 4 && frontend_layout == model::Layout::NHWC &&
+        backend_layout == model::Layout::NCHW)
+    {
+      // NHWC -> NCHW: (N, H, W, C) -> (N, C, H, W)
+      shape.extendRank(4);
+      offset = {offset[0], offset[3], offset[1], offset[2]};
+    }
+    else if (operands().at(parent_index).shape().rank() == 4 &&
+             frontend_layout == model::Layout::NCHW && backend_layout == model::Layout::NHWC)
+    {
+      // NCHW -> NHWC: (N, C, H, W) -> (N, H, W, C)
+      shape.extendRank(4);
+      offset = {offset[0], offset[2], offset[3], offset[1]};
+    }
+ model::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout),
+ obj.typeInfo()};
+ subtensor_obj.parent_info(
+ nnfw::cpp14::make_unique<graph::operand::ParentInfo>(parent_index, offset));
+ return compiler::SubTensorInfo{subtensor_obj};
+ }
+
+private:
+ model::Layout _current_subg_layout;
+ const graph::LowerInfoMap *_lower_info_map{nullptr};
+};
+
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ITENSOR_REGISTER_H__