From 165a89f24a3b97f52fb6afccedb90fe236228a57 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics Date: Thu, 5 Dec 2019 14:32:06 +0300 Subject: [PATCH] [neurun] Move Layout.h into ir directory (#9391) * Move `Layout.h` in `ir` directory. * Move `Layout` to `neurun::ir` namespace, fixing uses where possible. Signed-off-by: Sergei Barannikov --- runtime/neurun/api/src/nnfw_api_internal.cc | 8 ++-- runtime/neurun/backend/acl_cl/KernelGenerator.cc | 2 +- runtime/neurun/backend/acl_cl/KernelGenerator.h | 2 +- runtime/neurun/backend/acl_common/Convert.cc | 23 +++++------ runtime/neurun/backend/acl_common/Convert.h | 14 +++---- runtime/neurun/backend/acl_common/IACLTensor.cc | 5 +-- runtime/neurun/backend/acl_common/IACLTensor.h | 2 +- runtime/neurun/backend/acl_common/Swizzle.h | 23 +++++------ .../neurun/backend/acl_common/TemplTensorBuilder.h | 12 +++--- runtime/neurun/backend/acl_neon/KernelGenerator.cc | 2 +- runtime/neurun/backend/acl_neon/KernelGenerator.h | 2 +- runtime/neurun/backend/cpu/KernelGenerator.cc | 33 ++++++++------- runtime/neurun/backend/cpu/KernelGenerator.h | 2 +- runtime/neurun/backend/cpu/TensorBuilder.cc | 2 +- runtime/neurun/backend/cpu/TensorBuilder.h | 2 +- .../neurun/backend/cpu/kernel/OperationUtils.cc | 5 +-- runtime/neurun/backend/cpu/kernel/OperationUtils.h | 7 ++-- runtime/neurun/backend/cpu/operand/Tensor.h | 2 +- runtime/neurun/backend/srcn/ConstantInitializer.cc | 48 ++++++++-------------- runtime/neurun/backend/srcn/Convert.cc | 6 +-- runtime/neurun/backend/srcn/Convert.h | 8 ++-- runtime/neurun/backend/srcn/KernelGenerator.cc | 2 +- runtime/neurun/backend/srcn/KernelGenerator.h | 2 +- runtime/neurun/backend/srcn/MemoryManager.cc | 2 +- runtime/neurun/backend/srcn/MemoryManager.h | 2 +- runtime/neurun/backend/srcn/Swizzle.h | 10 ++--- runtime/neurun/backend/srcn/TensorBuilder.cc | 2 +- runtime/neurun/backend/srcn/TensorBuilder.h | 4 +- runtime/neurun/backend/srcn/TensorManager.cc | 2 +- 
runtime/neurun/backend/srcn/TensorManager.h | 2 +- runtime/neurun/backend/srcn/TensorRegister.cc | 42 ++++++++----------- runtime/neurun/backend/srcn/kernel/AddLayer.cc | 2 +- runtime/neurun/backend/srcn/kernel/AddLayer.h | 4 +- .../neurun/backend/srcn/kernel/ConvolutionLayer.cc | 2 +- .../neurun/backend/srcn/kernel/ConvolutionLayer.h | 2 +- .../srcn/kernel/DepthwiseConvolutionLayer.cc | 10 ++--- .../srcn/kernel/DepthwiseConvolutionLayer.h | 2 +- .../backend/srcn/kernel/InstanceNormLayer.cc | 8 ++-- .../neurun/backend/srcn/kernel/InstanceNormLayer.h | 4 +- .../neurun/backend/srcn/kernel/OperationUtils.cc | 21 +++++----- .../neurun/backend/srcn/kernel/OperationUtils.h | 7 ++-- .../backend/srcn/kernel/TransposeConvLayer.cc | 2 +- .../backend/srcn/kernel/TransposeConvLayer.h | 2 +- runtime/neurun/backend/srcn/operand/Tensor.h | 8 ++-- .../core/include/backend/IConstantInitializer.h | 8 ++-- .../neurun/core/include/backend/ITensorBuilder.h | 4 +- .../neurun/core/include/backend/ITensorRegister.h | 30 +++++++------- .../neurun/core/include/backend/operand/ITensor.h | 4 +- runtime/neurun/core/include/exec/Execution.h | 14 +++---- runtime/neurun/core/include/exec/IODescription.h | 8 ++-- runtime/neurun/core/include/ir/Graph.h | 5 +-- runtime/neurun/core/include/{model => ir}/Layout.h | 26 +++++++----- .../neurun/core/include/ir/operand/PermuteFactor.h | 10 ++--- .../neurun/core/include/ir/operation/LowerInfo.h | 4 +- runtime/neurun/core/include/model/OperandInfo.h | 2 +- runtime/neurun/core/include/model/Shape.h | 2 +- runtime/neurun/core/include/model/Subgraph.h | 4 +- runtime/neurun/core/include/util/ShapeInference.h | 10 ++--- runtime/neurun/core/include/util/Utils.h | 6 +-- .../neurun/core/include/util/feature/nchw/Reader.h | 2 +- .../neurun/core/include/util/feature/nchw/View.h | 2 +- .../neurun/core/include/util/feature/nhwc/Reader.h | 2 +- .../neurun/core/include/util/feature/nhwc/View.h | 2 +- .../neurun/core/src/compiler/OperationValidator.cc | 2 +- 
.../neurun/core/src/compiler/OperationValidator.h | 6 +-- runtime/neurun/core/src/exec/Execution.cc | 13 +++--- runtime/neurun/core/src/exec/ExecutorBase.cc | 4 +- runtime/neurun/core/src/exec/ExecutorBase.h | 24 +++++------ runtime/neurun/core/src/exec/Sink.h | 16 ++++---- runtime/neurun/core/src/exec/Source.h | 18 ++++---- runtime/neurun/core/src/exec/interp/Tensor.cc | 8 ++-- runtime/neurun/core/src/exec/interp/Tensor.h | 6 +-- .../core/src/exec/interp/operations/AvgPool2D.cc | 4 +- .../core/src/exec/interp/operations/Conv2D.cc | 4 +- .../src/exec/interp/operations/DepthwiseConv.cc | 4 +- .../core/src/exec/interp/operations/MaxPool2D.cc | 4 +- runtime/neurun/core/src/ir/Graph.cc | 24 +++++------ runtime/neurun/core/src/ir/operation/LowerInfo.cc | 2 +- .../core/src/ir/pass/PermutationEliminationPass.cc | 8 ++-- .../core/src/ir/pass/PermutationInsertionPass.cc | 6 +-- .../core/src/ir/pass/PermutationOperationPass.cc | 2 +- runtime/neurun/core/src/model/LayoutSet.h | 2 +- runtime/neurun/core/src/util/ShapeInference.cc | 18 ++++---- runtime/neurun/core/src/util/Utils.cc | 8 ++-- .../nnapi/wrapper/ANeuralNetworksExecution.cc | 6 +-- runtime/neurun/test/graph/operand/LayoutSet.cc | 2 +- runtime/neurun/test/util/ShapeInference.cc | 2 +- 87 files changed, 330 insertions(+), 362 deletions(-) rename runtime/neurun/core/include/{model => ir}/Layout.h (69%) diff --git a/runtime/neurun/api/src/nnfw_api_internal.cc b/runtime/neurun/api/src/nnfw_api_internal.cc index 480a2c0..574f844 100644 --- a/runtime/neurun/api/src/nnfw_api_internal.cc +++ b/runtime/neurun/api/src/nnfw_api_internal.cc @@ -46,17 +46,17 @@ static bool null_terminating(const char *str, uint32_t length) return false; } -static neurun::model::Layout convertLayout(NNFW_LAYOUT layout) +static neurun::ir::Layout convertLayout(NNFW_LAYOUT layout) { if (layout == NNFW_LAYOUT_CHANNELS_LAST) { - return neurun::model::Layout::NHWC; + return neurun::ir::Layout::NHWC; } else if (layout == NNFW_LAYOUT_CHANNELS_FIRST) { - 
return neurun::model::Layout::NCHW; + return neurun::ir::Layout::NCHW; } - return neurun::model::Layout::UNKNOWN; + return neurun::ir::Layout::UNKNOWN; } nnfw_session::nnfw_session() diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc index 032f7d6..08d77c8 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc @@ -145,7 +145,7 @@ void ActivationBuilder::append(model::Activation code, ::arm_compute::ICLTensor // KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx, const std::shared_ptr &tensor_builder) - : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(model::Layout::UNKNOWN) + : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN) { // DO NOTHING } diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.h b/runtime/neurun/backend/acl_cl/KernelGenerator.h index 2f8d90b..df178a8 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.h +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.h @@ -100,7 +100,7 @@ public: private: const neurun::model::Operands &_ctx; std::shared_ptr _tensor_builder; - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; }; } // namespace acl_cl diff --git a/runtime/neurun/backend/acl_common/Convert.cc b/runtime/neurun/backend/acl_common/Convert.cc index b814587..6d1bc72 100644 --- a/runtime/neurun/backend/acl_common/Convert.cc +++ b/runtime/neurun/backend/acl_common/Convert.cc @@ -23,13 +23,13 @@ namespace { -::arm_compute::DataLayout asDataLayout(::neurun::model::Layout layout) +::arm_compute::DataLayout asDataLayout(neurun::ir::Layout layout) { switch (layout) { - case ::neurun::model::Layout::NHWC: + case neurun::ir::Layout::NHWC: return ::arm_compute::DataLayout::NHWC; - case ::neurun::model::Layout::NCHW: + case neurun::ir::Layout::NCHW: return ::arm_compute::DataLayout::NCHW; default: return 
::arm_compute::DataLayout::UNKNOWN; @@ -46,8 +46,7 @@ namespace acl_common { ::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout, + ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction) { const uint32_t rank = shape.rank(); @@ -73,8 +72,7 @@ namespace acl_common } ::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout) + ir::Layout frontend_layout, ir::Layout backend_layout) { const uint32_t rank = coord.size(); @@ -117,8 +115,7 @@ namespace acl_common ::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, const ::neurun::model::TypeInfo &typeInfo, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout, + ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction) { ::arm_compute::TensorInfo info( @@ -178,16 +175,16 @@ std::unique_ptr asAclFunction(std::unique_ptr<::arm_compute::IFunct return nnfw::cpp14::make_unique(std::move(layer)); } -::neurun::model::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout) +ir::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout) { switch (data_layout) { case ::arm_compute::DataLayout::NHWC: - return ::neurun::model::Layout::NHWC; + return ir::Layout::NHWC; case ::arm_compute::DataLayout::NCHW: - return ::neurun::model::Layout::NCHW; + return ir::Layout::NCHW; default: - return ::neurun::model::Layout::UNKNOWN; + return ir::Layout::UNKNOWN; } } diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index 37bb296..81bfc6e 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -21,7 +21,7 @@ #include #include -#include "model/Layout.h" +#include "ir/Layout.h" #include "model/InternalType.h" 
#include "model/Operand.h" #include "model/Shape.h" @@ -42,17 +42,15 @@ namespace acl_common { ::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout, + ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction = true); ::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout); + ir::Layout frontend_layout, + ir::Layout backend_layout); ::arm_compute::DataType asDataType(const ::neurun::model::DataType &type); ::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, const ::neurun::model::TypeInfo &typeInfo, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout, + ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction = true); ::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding, @@ -63,7 +61,7 @@ asActivationLayerInfo(const ::neurun::model::Activation &act_code); std::unique_ptr asAclFunction(std::unique_ptr<::arm_compute::IFunction> &&layer); -::neurun::model::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout); +ir::Layout asRuntimeLayout(::arm_compute::DataLayout data_layout); } // namespace acl_common } // namespace backend diff --git a/runtime/neurun/backend/acl_common/IACLTensor.cc b/runtime/neurun/backend/acl_common/IACLTensor.cc index 0f7cce7..70ffbdc 100644 --- a/runtime/neurun/backend/acl_common/IACLTensor.cc +++ b/runtime/neurun/backend/acl_common/IACLTensor.cc @@ -56,10 +56,7 @@ size_t IACLTensor::calcOffset(const neurun::util::Coordinates &coords) const return info()->offset_element_in_bytes(acl_coords); } -model::Layout IACLTensor::layout() const -{ - return acl_common::asRuntimeLayout(info()->data_layout()); -} +ir::Layout IACLTensor::layout() const { return 
acl_common::asRuntimeLayout(info()->data_layout()); } } // namespace acl_common } // namespace backend diff --git a/runtime/neurun/backend/acl_common/IACLTensor.h b/runtime/neurun/backend/acl_common/IACLTensor.h index 1dc79f4..b0dcbb4 100644 --- a/runtime/neurun/backend/acl_common/IACLTensor.h +++ b/runtime/neurun/backend/acl_common/IACLTensor.h @@ -42,7 +42,7 @@ public: size_t dimension(size_t index) const final; size_t num_dimensions() const override; size_t calcOffset(const neurun::util::Coordinates &coords) const final; - model::Layout layout() const final; + ir::Layout layout() const final; bool has_padding() const override { return info()->has_padding(); } public: diff --git a/runtime/neurun/backend/acl_common/Swizzle.h b/runtime/neurun/backend/acl_common/Swizzle.h index dfa5dab..11874b5 100644 --- a/runtime/neurun/backend/acl_common/Swizzle.h +++ b/runtime/neurun/backend/acl_common/Swizzle.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_ACL_COMMON_SWIZZLE_H__ #include -#include +#include namespace neurun { @@ -47,14 +47,14 @@ private: // Convert axis in acl order inline ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis, - const model::Layout org_layout = model::Layout::UNKNOWN, - const model::Layout acl_layout = model::Layout::UNKNOWN) + const ir::Layout org_layout = ir::Layout::UNKNOWN, + const ir::Layout acl_layout = ir::Layout::UNKNOWN) { assert(rank > axis); const ARMComputeAxis reversed{(rank - axis) - 1}; - if (rank >= 4 && org_layout == model::Layout::NHWC && acl_layout == model::Layout::NCHW) + if (rank >= 4 && org_layout == ir::Layout::NHWC && acl_layout == ir::Layout::NCHW) { // NHWC -> WHCN // DEPTH @@ -73,7 +73,7 @@ inline ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis, return ARMComputeAxis{1}; } } - if (rank >= 4 && org_layout == model::Layout::NCHW && acl_layout == model::Layout::NHWC) + if (rank >= 4 && org_layout == ir::Layout::NCHW && acl_layout == ir::Layout::NHWC) { // NCHW -> CWHN // WIDTH @@ -97,8 +97,8 @@ inline 
ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis, } inline ::arm_compute::Coordinates -getARMComputeAxises(uint32_t rank, const model::Layout org_layout = model::Layout::UNKNOWN, - const model::Layout acl_layout = model::Layout::UNKNOWN) +getARMComputeAxises(uint32_t rank, const ir::Layout org_layout = ir::Layout::UNKNOWN, + const ir::Layout acl_layout = ir::Layout::UNKNOWN) { ::arm_compute::Coordinates res{}; @@ -115,8 +115,8 @@ getARMComputeAxises(uint32_t rank, const model::Layout org_layout = model::Layou // Restructure runtime_permutationVector to ACL_permutationVector inline ::arm_compute::PermutationVector getARMComputePermutationVector(uint32_t rank, const std::vector runtime_pv, - const model::Layout org_layout = model::Layout::UNKNOWN, - const model::Layout acl_layout = model::Layout::UNKNOWN) + const ir::Layout org_layout = ir::Layout::UNKNOWN, + const ir::Layout acl_layout = ir::Layout::UNKNOWN) { // rank upto 4 is supported assert(rank <= 4); @@ -138,9 +138,8 @@ getARMComputePermutationVector(uint32_t rank, const std::vector runtime } template -inline T ReorderBits(T in, size_t numOfBits, - const model::Layout org_layout = model::Layout::UNKNOWN, - const model::Layout acl_layout = model::Layout::UNKNOWN) +inline T ReorderBits(T in, size_t numOfBits, const ir::Layout org_layout = ir::Layout::UNKNOWN, + const ir::Layout acl_layout = ir::Layout::UNKNOWN) { assert(numOfBits > 0); T out = 0; diff --git a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h index 2b9102a..41d70a4 100644 --- a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h +++ b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h @@ -55,7 +55,7 @@ public: * @param[in] layout Tensor data layout */ void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, - model::Layout backend_layout, bool as_const) override; + ir::Layout backend_layout, bool as_const) override; /** * @brief 
Register subtensor information to allocate on ACL-CL backend * @param[in] ind Operand index @@ -114,7 +114,7 @@ private: model::OperandIndexMap _tensor_info_map; model::OperandIndexMap _subtensor_info_map; model::OperandIndexMap _apply_dim_correction_map; - model::OperandIndexMap _tensor_layout_map; + model::OperandIndexMap _tensor_layout_map; model::OperandIndexMap _uses_count_map; std::unique_ptr _tensor_mgr; @@ -162,7 +162,7 @@ TemplTensorBuilder::TemplTensorBuilder( template void TemplTensorBuilder::registerTensorInfo( - const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout backend_layout, + const model::OperandIndex &ind, const model::OperandInfo &info, ir::Layout backend_layout, bool as_const) { assert(_tensor_mgr->constTensors().size() == 0); @@ -329,7 +329,7 @@ void TemplTensorBuilder::buildTensors(void) // NOTE SubTensor's layout must be the same with layout of parent tensor const auto &root_parent = findRootParent(ind); const auto &backend_layout = _tensor_layout_map[root_parent]; - auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), model::Layout::UNKNOWN, + auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), ir::Layout::UNKNOWN, backend_layout, _apply_dim_correction_map[ind]); _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), _constants.contains(ind), _uses_count_map[ind]); @@ -394,10 +394,10 @@ void TemplTensorBuilder::buildSubtensors(void) const auto &root_parent = findRootParent(parent); const auto &backend_layout = _tensor_layout_map[root_parent]; - auto shape = asTensorShape(info.shape(), model::Layout::UNKNOWN, backend_layout, + auto shape = asTensorShape(info.shape(), ir::Layout::UNKNOWN, backend_layout, _apply_dim_correction_map[current]); ::arm_compute::Coordinates coordinates = - asTensorCoordinate(info.offset(), model::Layout::UNKNOWN, backend_layout); + asTensorCoordinate(info.offset(), ir::Layout::UNKNOWN, backend_layout); _tensor_mgr->buildSubtensor(parent, current, shape, 
coordinates, info.shape().rank(), true); stack.pop(); } diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc index 33711cd..2e3b51c 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc @@ -144,7 +144,7 @@ void ActivationBuilder::append(model::Activation act, ::arm_compute::ITensor *if // KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx, const std::shared_ptr &tensor_builder) - : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(model::Layout::UNKNOWN) + : _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN) { // DO NOTHING } diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.h b/runtime/neurun/backend/acl_neon/KernelGenerator.h index 0f26eff..4bc1d2d 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.h +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.h @@ -99,7 +99,7 @@ public: private: const neurun::model::Operands &_ctx; std::shared_ptr _tensor_builder; - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; }; } // namespace acl_neon diff --git a/runtime/neurun/backend/cpu/KernelGenerator.cc b/runtime/neurun/backend/cpu/KernelGenerator.cc index 580f550..c7a1045 100644 --- a/runtime/neurun/backend/cpu/KernelGenerator.cc +++ b/runtime/neurun/backend/cpu/KernelGenerator.cc @@ -56,7 +56,7 @@ KernelGenerator::KernelGenerator( const std::shared_ptr &tensor_builder, const std::shared_ptr &kernel_builer) : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kernel_builer), - _current_subg_layout(model::Layout::UNKNOWN) + _current_subg_layout(ir::Layout::UNKNOWN) { // DO NOTHING } @@ -97,10 +97,10 @@ void KernelGenerator::visit(const model::operation::Conv2D &node) ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(ofm_index), _current_subg_layout); const auto ifm_backend_descr = 
::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(ifm_index), _current_subg_layout); - const auto ker_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( - _ctx.at(ker_index), model::Layout::UNKNOWN); - const auto bias_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( - _ctx.at(bias_index), model::Layout::UNKNOWN); + const auto ker_backend_descr = + ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(ker_index), ir::Layout::UNKNOWN); + const auto bias_backend_descr = + ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(bias_index), ir::Layout::UNKNOWN); auto ofm_alloc = _tensor_builder->at(ofm_index); auto ifm_alloc = _tensor_builder->at(ifm_index); @@ -140,10 +140,10 @@ void KernelGenerator::visit(const model::operation::DepthwiseConv2D &node) ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(ofm_index), _current_subg_layout); const auto ifm_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(ifm_index), _current_subg_layout); - const auto ker_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( - _ctx.at(ker_index), model::Layout::UNKNOWN); - const auto bias_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( - _ctx.at(bias_index), model::Layout::UNKNOWN); + const auto ker_backend_descr = + ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(ker_index), ir::Layout::UNKNOWN); + const auto bias_backend_descr = + ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(bias_index), ir::Layout::UNKNOWN); const auto multiplier = node.param().multiplier; const auto activation = node.param().activation; @@ -268,9 +268,9 @@ void KernelGenerator::visit(const model::operation::FullyConnected &node) const auto ifm_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( _ctx.at(input_index), _current_subg_layout); const auto weight_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( - _ctx.at(weight_index), 
model::Layout::UNKNOWN); - const auto bias_backend_descr = ::neurun::backend::cpu::kernel::getTensorDescriptor( - _ctx.at(bias_index), model::Layout::UNKNOWN); + _ctx.at(weight_index), ir::Layout::UNKNOWN); + const auto bias_backend_descr = + ::neurun::backend::cpu::kernel::getTensorDescriptor(_ctx.at(bias_index), ir::Layout::UNKNOWN); const auto activation = node.param().activation; @@ -499,7 +499,7 @@ void KernelGenerator::visit(const model::operation::Permute &node) // TODO Support NCHW frontend auto out_shape = shape; - if (shape.rank() == 4 && output_object->layout() == model::Layout::NCHW) + if (shape.rank() == 4 && output_object->layout() == ir::Layout::NCHW) { out_shape.dim(1) = shape.dim(3); out_shape.dim(2) = shape.dim(1); @@ -509,13 +509,12 @@ void KernelGenerator::visit(const model::operation::Permute &node) const auto permute_type = node.getPermuteType(); // Check Permutation Type const auto inferPermuteType = [&]() { - if (input_object->layout() == model::Layout::NHWC && - output_object->layout() == model::Layout::NCHW) + if (input_object->layout() == ir::Layout::NHWC && output_object->layout() == ir::Layout::NCHW) { return model::operation::Permute::Type::NHWC_TO_NCHW; } - else if (input_object->layout() == model::Layout::NCHW && - output_object->layout() == model::Layout::NHWC) + else if (input_object->layout() == ir::Layout::NCHW && + output_object->layout() == ir::Layout::NHWC) { return model::operation::Permute::Type::NCHW_TO_NHWC; } diff --git a/runtime/neurun/backend/cpu/KernelGenerator.h b/runtime/neurun/backend/cpu/KernelGenerator.h index 3a84fda..711ebc5 100644 --- a/runtime/neurun/backend/cpu/KernelGenerator.h +++ b/runtime/neurun/backend/cpu/KernelGenerator.h @@ -62,7 +62,7 @@ private: const neurun::model::Operands &_ctx; std::shared_ptr _tensor_builder; std::shared_ptr _kernel_builder; - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; }; } // namespace cpu diff --git a/runtime/neurun/backend/cpu/TensorBuilder.cc 
b/runtime/neurun/backend/cpu/TensorBuilder.cc index 8ff3ce7..5484cb3 100644 --- a/runtime/neurun/backend/cpu/TensorBuilder.cc +++ b/runtime/neurun/backend/cpu/TensorBuilder.cc @@ -33,7 +33,7 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} } void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, - const model::OperandInfo &info, model::Layout, bool as_const) + const model::OperandInfo &info, ir::Layout, bool as_const) { _tensor_info_map.emplace(ind, info); diff --git a/runtime/neurun/backend/cpu/TensorBuilder.h b/runtime/neurun/backend/cpu/TensorBuilder.h index a377307..0ec1eab 100644 --- a/runtime/neurun/backend/cpu/TensorBuilder.h +++ b/runtime/neurun/backend/cpu/TensorBuilder.h @@ -43,7 +43,7 @@ public: * @param[in] layout Operand data layout */ void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, - model::Layout backend_layout, bool as_const) override; + ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on CPU backend * @param[in] ind Operand index diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc index 33ee66e..e9f1140 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.cc @@ -192,13 +192,12 @@ int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift) return static_cast(std::floor(max_input_rescaled)); } -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, - ::neurun::model::Layout frontend_layout) +TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout) { TensorDescriptor descriptor; auto dims = o.shape().dims(); - if (frontend_layout == ::neurun::model::Layout::NCHW && o.shape().rank() == 4) + if (frontend_layout == ir::Layout::NCHW && o.shape().rank() == 4) { // NCHW -> NHWC uint32_t permutation[4] = {0, 2, 3, 1}; 
diff --git a/runtime/neurun/backend/cpu/kernel/OperationUtils.h b/runtime/neurun/backend/cpu/kernel/OperationUtils.h index e6bb208..b8765a5 100644 --- a/runtime/neurun/backend/cpu/kernel/OperationUtils.h +++ b/runtime/neurun/backend/cpu/kernel/OperationUtils.h @@ -101,7 +101,7 @@ inline nnfw::cker::Shape convertTensorDescriptorToCkerShape(const TensorDescript return nnfw::cker::GetShape(raw_shape); } -inline int32_t getAxis(uint32_t rank, int32_t axis, ::neurun::model::Layout frontend_layout) +inline int32_t getAxis(uint32_t rank, int32_t axis, ir::Layout frontend_layout) { auto ret = axis; @@ -111,7 +111,7 @@ inline int32_t getAxis(uint32_t rank, int32_t axis, ::neurun::model::Layout fron } // NCHW -> NHWC - if (frontend_layout == ::neurun::model::Layout::NCHW) + if (frontend_layout == ir::Layout::NCHW) { int32_t permutation[4] = {0, 3, 1, 2}; ret = permutation[ret]; @@ -139,8 +139,7 @@ void CalculateActivationRangeUint8(model::Activation activation, int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift); -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, - ::neurun::model::Layout frontend_layout); +TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout); uint32_t sizeOfData(OperandType type, const std::vector &dimensions); diff --git a/runtime/neurun/backend/cpu/operand/Tensor.h b/runtime/neurun/backend/cpu/operand/Tensor.h index 69f7237..7d83cda 100644 --- a/runtime/neurun/backend/cpu/operand/Tensor.h +++ b/runtime/neurun/backend/cpu/operand/Tensor.h @@ -60,7 +60,7 @@ public: size_t num_dimensions() const override { return _info.shape().rank(); } size_t total_size() const override { return _info.total_size(); } size_t calcOffset(const neurun::util::Coordinates &coords) const override; - model::Layout layout() const override { return model::Layout::NHWC; } + ir::Layout layout() const override { return ir::Layout::NHWC; } bool has_padding() const override { return false; } void 
access(const std::function &fn) final; diff --git a/runtime/neurun/backend/srcn/ConstantInitializer.cc b/runtime/neurun/backend/srcn/ConstantInitializer.cc index 592e0df..549248b 100644 --- a/runtime/neurun/backend/srcn/ConstantInitializer.cc +++ b/runtime/neurun/backend/srcn/ConstantInitializer.cc @@ -130,16 +130,12 @@ void ConstantInitializer::visit(const model::operation::Conv2D &node) util::Coordinates permutation{0, 1, 2, 3}; const auto frontend_layout = _current_subg_layout; const auto backend_layout = _tensor_builder->tensorAt(kernel_index)->layout(); - assert(frontend_layout == neurun::model::Layout::NHWC || - frontend_layout == neurun::model::Layout::NCHW); - assert(backend_layout == neurun::model::Layout::NHWC || - backend_layout == neurun::model::Layout::NCHW); - const auto frontend_filter_layout = frontend_layout == neurun::model::Layout::NHWC - ? kernel::FilterLayout::OHWI - : kernel::FilterLayout::OIHW; - const auto backend_filter_layout = backend_layout == neurun::model::Layout::NHWC - ? kernel::FilterLayout::HWIO - : kernel::FilterLayout::OIHW; + assert(frontend_layout == ir::Layout::NHWC || frontend_layout == ir::Layout::NCHW); + assert(backend_layout == ir::Layout::NHWC || backend_layout == ir::Layout::NCHW); + const auto frontend_filter_layout = + frontend_layout == ir::Layout::NHWC ? kernel::FilterLayout::OHWI : kernel::FilterLayout::OIHW; + const auto backend_filter_layout = + backend_layout == ir::Layout::NHWC ? 
kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; registerPermuteKernelInitializer( kernel_index, kernel_obj, kernel::getFilterPermutation(frontend_filter_layout, backend_filter_layout)); @@ -152,16 +148,12 @@ void ConstantInitializer::visit(const model::operation::DepthwiseConv2D &node) util::Coordinates permutation{0, 1, 2, 3}; const auto frontend_layout = _current_subg_layout; const auto backend_layout = _tensor_builder->tensorAt(kernel_index)->layout(); - assert(frontend_layout == neurun::model::Layout::NHWC || - frontend_layout == neurun::model::Layout::NCHW); - assert(backend_layout == neurun::model::Layout::NHWC || - backend_layout == neurun::model::Layout::NCHW); - const auto frontend_filter_layout = frontend_layout == neurun::model::Layout::NHWC - ? kernel::FilterLayout::OHWI - : kernel::FilterLayout::OIHW; - const auto backend_filter_layout = backend_layout == neurun::model::Layout::NHWC - ? kernel::FilterLayout::HWIO - : kernel::FilterLayout::OIHW; + assert(frontend_layout == ir::Layout::NHWC || frontend_layout == ir::Layout::NCHW); + assert(backend_layout == ir::Layout::NHWC || backend_layout == ir::Layout::NCHW); + const auto frontend_filter_layout = + frontend_layout == ir::Layout::NHWC ? kernel::FilterLayout::OHWI : kernel::FilterLayout::OIHW; + const auto backend_filter_layout = + backend_layout == ir::Layout::NHWC ? 
kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; registerPermuteKernelInitializer( kernel_index, kernel_obj, kernel::getFilterPermutation(frontend_filter_layout, backend_filter_layout)); @@ -180,16 +172,12 @@ void ConstantInitializer::visit(const model::operation::TransposeConv &node) const auto &kernel_obj = _operands.at(kernel_index); const auto frontend_layout = _current_subg_layout; const auto backend_layout = _tensor_builder->tensorAt(kernel_index)->layout(); - assert(frontend_layout == neurun::model::Layout::NHWC || - frontend_layout == neurun::model::Layout::NCHW); - assert(backend_layout == neurun::model::Layout::NHWC || - backend_layout == neurun::model::Layout::NCHW); - const auto frontend_filter_layout = frontend_layout == neurun::model::Layout::NHWC - ? kernel::FilterLayout::OHWI - : kernel::FilterLayout::OIHW; - const auto backend_filter_layout = backend_layout == neurun::model::Layout::NHWC - ? kernel::FilterLayout::HWOI - : kernel::FilterLayout::IOHW; + assert(frontend_layout == ir::Layout::NHWC || frontend_layout == ir::Layout::NCHW); + assert(backend_layout == ir::Layout::NHWC || backend_layout == ir::Layout::NCHW); + const auto frontend_filter_layout = + frontend_layout == ir::Layout::NHWC ? kernel::FilterLayout::OHWI : kernel::FilterLayout::OIHW; + const auto backend_filter_layout = + backend_layout == ir::Layout::NHWC ? 
kernel::FilterLayout::HWOI : kernel::FilterLayout::IOHW; registerPermuteKernelInitializer( kernel_index, kernel_obj, kernel::getFilterPermutation(frontend_filter_layout, backend_filter_layout)); diff --git a/runtime/neurun/backend/srcn/Convert.cc b/runtime/neurun/backend/srcn/Convert.cc index 8f3f82a..0e347a1 100644 --- a/runtime/neurun/backend/srcn/Convert.cc +++ b/runtime/neurun/backend/srcn/Convert.cc @@ -47,8 +47,8 @@ model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout front shape.dim(permutation[2]), shape.dim(permutation[3])}; } -model::Shape asTensorShape(const model::Shape &shape, model::Layout frontend_layout, - model::Layout backend_layout) +model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout) { const uint32_t rank = shape.rank(); @@ -63,7 +63,7 @@ model::Shape asTensorShape(const model::Shape &shape, model::Layout frontend_lay } model::OperandInfo asTensorInfo(const model::Shape &shape, const model::TypeInfo &typeInfo, - model::Layout frontend_layout, model::Layout backend_layout) + ir::Layout frontend_layout, ir::Layout backend_layout) { model::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo); diff --git a/runtime/neurun/backend/srcn/Convert.h b/runtime/neurun/backend/srcn/Convert.h index 67c2d9b..6f4c4e3 100644 --- a/runtime/neurun/backend/srcn/Convert.h +++ b/runtime/neurun/backend/srcn/Convert.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_SRCN_CONVERT_H__ #include "kernel/OperationUtils.h" -#include +#include #include #include #include @@ -33,11 +33,11 @@ namespace srcn model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout frontend_layout, kernel::FilterLayout backend_layout); -model::Shape asTensorShape(const model::Shape &shape, model::Layout frontend_layout, - model::Layout backend_layout); +model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout); 
model::OperandInfo asTensorInfo(const model::Shape &shape, const model::TypeInfo &typeInfo, - model::Layout frontend_layout, model::Layout backend_layout); + ir::Layout frontend_layout, ir::Layout backend_layout); } // namespace srcn } // namespace backend diff --git a/runtime/neurun/backend/srcn/KernelGenerator.cc b/runtime/neurun/backend/srcn/KernelGenerator.cc index 2733b91..c37109f 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.cc +++ b/runtime/neurun/backend/srcn/KernelGenerator.cc @@ -44,7 +44,7 @@ KernelGenerator::KernelGenerator(const neurun::model::Operands &operand_ctx, const std::shared_ptr &tensor_builder, const std::shared_ptr &kb) : _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kb), - _current_subg_layout(model::Layout::UNKNOWN) + _current_subg_layout(ir::Layout::UNKNOWN) { // DO NOTHING } diff --git a/runtime/neurun/backend/srcn/KernelGenerator.h b/runtime/neurun/backend/srcn/KernelGenerator.h index 85f023b..34b44a5 100644 --- a/runtime/neurun/backend/srcn/KernelGenerator.h +++ b/runtime/neurun/backend/srcn/KernelGenerator.h @@ -50,7 +50,7 @@ private: const neurun::model::Operands &_ctx; std::shared_ptr _tensor_builder; std::shared_ptr _kernel_builder; - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; }; } // namespace srcn diff --git a/runtime/neurun/backend/srcn/MemoryManager.cc b/runtime/neurun/backend/srcn/MemoryManager.cc index 5043735..7871f29 100644 --- a/runtime/neurun/backend/srcn/MemoryManager.cc +++ b/runtime/neurun/backend/srcn/MemoryManager.cc @@ -41,7 +41,7 @@ IMemoryPlanner *MemoryManager::createMemoryPlanner() } void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info, - model::Layout layout) + ir::Layout layout) { auto tensor = std::make_shared(info, layout); _tensors[ind] = tensor; diff --git a/runtime/neurun/backend/srcn/MemoryManager.h b/runtime/neurun/backend/srcn/MemoryManager.h index c59d14b..a6bd7e0 100644 --- 
a/runtime/neurun/backend/srcn/MemoryManager.h +++ b/runtime/neurun/backend/srcn/MemoryManager.h @@ -39,7 +39,7 @@ public: void deallocate(void) override { _mem_alloc->release(); } void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info, - model::Layout layout); + ir::Layout layout); void claimPlan(const model::OperandIndex &ind, uint32_t size); void releasePlan(const model::OperandIndex &ind); diff --git a/runtime/neurun/backend/srcn/Swizzle.h b/runtime/neurun/backend/srcn/Swizzle.h index d7404f0..d1f9223 100644 --- a/runtime/neurun/backend/srcn/Swizzle.h +++ b/runtime/neurun/backend/srcn/Swizzle.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_SRCN_SWIZZLE_H__ #include -#include +#include namespace neurun { @@ -29,12 +29,12 @@ namespace srcn // Convert axis in ncnn order inline uint32_t ToNCNNAxis(uint32_t rank, uint32_t axis, - const model::Layout org_layout = model::Layout::UNKNOWN, - const model::Layout ncnn_layout = model::Layout::UNKNOWN) + const ir::Layout org_layout = ir::Layout::UNKNOWN, + const ir::Layout ncnn_layout = ir::Layout::UNKNOWN) { assert(rank > axis); - if (rank >= 4 && org_layout == model::Layout::NHWC && ncnn_layout == model::Layout::NCHW) + if (rank >= 4 && org_layout == ir::Layout::NHWC && ncnn_layout == ir::Layout::NCHW) { // NHWC -> NCHW // DEPTH @@ -54,7 +54,7 @@ inline uint32_t ToNCNNAxis(uint32_t rank, uint32_t axis, } } - if (rank >= 4 && org_layout == model::Layout::NCHW && ncnn_layout == model::Layout::NHWC) + if (rank >= 4 && org_layout == ir::Layout::NCHW && ncnn_layout == ir::Layout::NHWC) { // NCHW -> NHWC // WIDTH diff --git a/runtime/neurun/backend/srcn/TensorBuilder.cc b/runtime/neurun/backend/srcn/TensorBuilder.cc index dd440e3..bbf59ed 100644 --- a/runtime/neurun/backend/srcn/TensorBuilder.cc +++ b/runtime/neurun/backend/srcn/TensorBuilder.cc @@ -34,7 +34,7 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()} void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind, const 
model::OperandInfo &tensor_info, - model::Layout backend_layout, bool as_const) + ir::Layout backend_layout, bool as_const) { _tensor_info_map.emplace(ind, tensor_info); _tensor_layout_map.emplace(ind, backend_layout); diff --git a/runtime/neurun/backend/srcn/TensorBuilder.h b/runtime/neurun/backend/srcn/TensorBuilder.h index db1db49..53a8123 100644 --- a/runtime/neurun/backend/srcn/TensorBuilder.h +++ b/runtime/neurun/backend/srcn/TensorBuilder.h @@ -43,7 +43,7 @@ public: * @param[in] layout Operand data layout */ void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info, - model::Layout backend_layout, bool as_const) override; + ir::Layout backend_layout, bool as_const) override; /** * @brief Register subtensor information to allocate on CPU backend * @param[in] ind Operand index @@ -78,7 +78,7 @@ public: private: std::unique_ptr _tensor_mgr; model::OperandIndexMap _tensor_info_map; - model::OperandIndexMap _tensor_layout_map; + model::OperandIndexMap _tensor_layout_map; model::OperandIndexSequence _constants; }; diff --git a/runtime/neurun/backend/srcn/TensorManager.cc b/runtime/neurun/backend/srcn/TensorManager.cc index b32b030..e6462db 100644 --- a/runtime/neurun/backend/srcn/TensorManager.cc +++ b/runtime/neurun/backend/srcn/TensorManager.cc @@ -37,7 +37,7 @@ void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); } void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); } void TensorManager::buildTensor(const model::OperandIndex &ind, - const model::OperandInfo &tensor_info, model::Layout layout, + const model::OperandInfo &tensor_info, ir::Layout layout, bool as_const) { assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end()); diff --git a/runtime/neurun/backend/srcn/TensorManager.h b/runtime/neurun/backend/srcn/TensorManager.h index 5bdeaff..393828f 100644 --- a/runtime/neurun/backend/srcn/TensorManager.h +++ b/runtime/neurun/backend/srcn/TensorManager.h @@ -40,7 +40,7 @@ public: void 
deallocateNonconsts(void) override; void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info, - model::Layout layout, bool as_const); + ir::Layout layout, bool as_const); void claimPlan(const model::OperandIndex &ind, uint32_t size); void releasePlan(const model::OperandIndex &ind); diff --git a/runtime/neurun/backend/srcn/TensorRegister.cc b/runtime/neurun/backend/srcn/TensorRegister.cc index 6a893ef..d35b15f 100644 --- a/runtime/neurun/backend/srcn/TensorRegister.cc +++ b/runtime/neurun/backend/srcn/TensorRegister.cc @@ -45,15 +45,13 @@ void TensorRegister::visit(const model::operation::Conv2D &node) const auto &kernel_obj = _operands.at(kernel_index); const auto frontend_layout = frontendLayout(); - assert(frontend_layout == model::Layout::NCHW || frontend_layout == model::Layout::NHWC); - const auto frontend_filter_layout = frontend_layout == model::Layout::NHWC - ? kernel::FilterLayout::OHWI - : kernel::FilterLayout::OIHW; + assert(frontend_layout == ir::Layout::NCHW || frontend_layout == ir::Layout::NHWC); + const auto frontend_filter_layout = + frontend_layout == ir::Layout::NHWC ? kernel::FilterLayout::OHWI : kernel::FilterLayout::OIHW; const auto backend_layout = backendLayout(kernel_index); - assert(backend_layout == model::Layout::NCHW || backend_layout == model::Layout::NHWC); - const auto backend_filter_layout = backend_layout == model::Layout::NHWC - ? kernel::FilterLayout::HWIO - : kernel::FilterLayout::OIHW; + assert(backend_layout == ir::Layout::NCHW || backend_layout == ir::Layout::NHWC); + const auto backend_filter_layout = + backend_layout == ir::Layout::NHWC ? 
kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; model::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), @@ -74,15 +72,13 @@ void TensorRegister::visit(const model::operation::DepthwiseConv2D &node) const auto &kernel_obj = _operands.at(kernel_index); const auto frontend_layout = frontendLayout(); - assert(frontend_layout == model::Layout::NCHW || frontend_layout == model::Layout::NHWC); - const auto frontend_filter_layout = frontend_layout == model::Layout::NHWC - ? kernel::FilterLayout::OHWI - : kernel::FilterLayout::OIHW; + assert(frontend_layout == ir::Layout::NCHW || frontend_layout == ir::Layout::NHWC); + const auto frontend_filter_layout = + frontend_layout == ir::Layout::NHWC ? kernel::FilterLayout::OHWI : kernel::FilterLayout::OIHW; const auto backend_layout = backendLayout(kernel_index); - assert(backend_layout == model::Layout::NCHW || backend_layout == model::Layout::NHWC); - const auto backend_filter_layout = backend_layout == model::Layout::NHWC - ? kernel::FilterLayout::HWIO - : kernel::FilterLayout::OIHW; + assert(backend_layout == ir::Layout::NCHW || backend_layout == ir::Layout::NHWC); + const auto backend_filter_layout = + backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW; model::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), @@ -102,15 +98,13 @@ void TensorRegister::visit(const model::operation::TransposeConv &node) const auto &kernel_obj = _operands.at(kernel_index); const auto frontend_layout = frontendLayout(); - assert(frontend_layout == model::Layout::NCHW || frontend_layout == model::Layout::NHWC); - const auto frontend_filter_layout = frontend_layout == model::Layout::NHWC - ? 
kernel::FilterLayout::OHWI - : kernel::FilterLayout::OIHW; + assert(frontend_layout == ir::Layout::NCHW || frontend_layout == ir::Layout::NHWC); + const auto frontend_filter_layout = + frontend_layout == ir::Layout::NHWC ? kernel::FilterLayout::OHWI : kernel::FilterLayout::OIHW; const auto backend_layout = backendLayout(kernel_index); - assert(backend_layout == model::Layout::NCHW || backend_layout == model::Layout::NHWC); - const auto backend_filter_layout = backend_layout == model::Layout::NHWC - ? kernel::FilterLayout::HWOI - : kernel::FilterLayout::IOHW; + assert(backend_layout == ir::Layout::NCHW || backend_layout == ir::Layout::NHWC); + const auto backend_filter_layout = + backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWOI : kernel::FilterLayout::IOHW; model::OperandInfo backend_info{ asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout), diff --git a/runtime/neurun/backend/srcn/kernel/AddLayer.cc b/runtime/neurun/backend/srcn/kernel/AddLayer.cc index 6b51507..80b7a34 100644 --- a/runtime/neurun/backend/srcn/kernel/AddLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/AddLayer.cc @@ -92,7 +92,7 @@ void AddLayer::addQuant8() void AddLayer::configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, const TensorDescriptor &rhsDescr, const model::Activation activation, uint8_t *outputData, const TensorDescriptor &outputDescr, - const model::Layout backendLayout) + const ir::Layout backendLayout) { _lhsData.u8 = lhsData; _lhsDescr = lhsDescr; diff --git a/runtime/neurun/backend/srcn/kernel/AddLayer.h b/runtime/neurun/backend/srcn/kernel/AddLayer.h index 9d6edb9..9995754 100644 --- a/runtime/neurun/backend/srcn/kernel/AddLayer.h +++ b/runtime/neurun/backend/srcn/kernel/AddLayer.h @@ -46,7 +46,7 @@ public: void configure(uint8_t *lhsData, const TensorDescriptor &lhsDescr, uint8_t *rhsData, const TensorDescriptor &rhsDescr, const model::Activation activation, uint8_t *outputData, const TensorDescriptor 
&outputDescr, - const model::Layout backendLayout); + const ir::Layout backendLayout); void run(); void runSync() @@ -69,7 +69,7 @@ private: OperandType _inputType{OperandType::FLOAT32}; - model::Layout _backendLayout{model::Layout::UNKNOWN}; + ir::Layout _backendLayout{ir::Layout::UNKNOWN}; }; } // namespace kernel diff --git a/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.cc b/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.cc index 761f10c..4e70f63 100644 --- a/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.cc @@ -188,7 +188,7 @@ void ConvolutionLayer::configure(uint8_t *inputData, const TensorDescriptor inpu const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, uint8_t *outputData, - const TensorDescriptor outputDescr, model::Layout layout) + const TensorDescriptor outputDescr, ir::Layout layout) { assert(inputDescr.dimensions.size() == 4); assert(kernelDescr.dimensions.size() == 4); diff --git a/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.h b/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.h index a90a4bd..4edafaa 100644 --- a/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.h +++ b/runtime/neurun/backend/srcn/kernel/ConvolutionLayer.h @@ -44,7 +44,7 @@ public: const TensorDescriptor biasDescr, const uint32_t paddingType, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH, - uint8_t *outputData, const TensorDescriptor outputDescr, model::Layout layout); + uint8_t *outputData, const TensorDescriptor outputDescr, ir::Layout layout); void run(); void runSync() diff --git a/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.cc b/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.cc index 400744c..6857947 100644 --- 
a/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.cc @@ -152,16 +152,16 @@ void DepthwiseConvolutionLayer::configure(uint8_t *inputData, const TensorDescri const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, uint8_t *outputData, - const TensorDescriptor outputDescr, model::Layout layout) + const TensorDescriptor outputDescr, ir::Layout layout) { - if (layout == model::Layout::NHWC) + if (layout == ir::Layout::NHWC) { throw std::runtime_error("DepthwiseConv of ncnn does not support layout in NHWC"); } - assert(layout == model::Layout::NHWC || layout == model::Layout::NCHW); - const auto height_index = layout == model::Layout::NHWC ? 1 : 2; - const auto width_index = layout == model::Layout::NHWC ? 2 : 3; + assert(layout == ir::Layout::NHWC || layout == ir::Layout::NCHW); + const auto height_index = layout == ir::Layout::NHWC ? 1 : 2; + const auto width_index = layout == ir::Layout::NHWC ? 
2 : 3; if (kernelDescr.dimensions[height_index] != 3 || kernelDescr.dimensions[width_index] != 3) { throw std::runtime_error("DepthwiseConv of ncnn supports only 3x3 kernel"); diff --git a/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.h b/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.h index bd2b1e1..e94acff 100644 --- a/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.h +++ b/runtime/neurun/backend/srcn/kernel/DepthwiseConvolutionLayer.h @@ -43,7 +43,7 @@ public: const TensorDescriptor biasDescr, const uint32_t paddingType, const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH, - uint8_t *outputData, const TensorDescriptor outputDescr, model::Layout layout); + uint8_t *outputData, const TensorDescriptor outputDescr, ir::Layout layout); void run(); void runSync() diff --git a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc index cc386d8..d7627bc 100644 --- a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.cc @@ -31,7 +31,7 @@ namespace kernel InstanceNormLayer::InstanceNormLayer() : _inputData(), _gammaData(), _betaData(), _outputData(), _inputDescr(), _gammaDescr(), _betaDescr(), _outputDescr(), _epsilon(1e-5), _activation(model::Activation::NONE), - _inputType(OperandType::FLOAT32), _backendLayout(model::Layout::UNKNOWN) + _inputType(OperandType::FLOAT32), _backendLayout(ir::Layout::UNKNOWN) { // DO NOTHING } @@ -39,7 +39,7 @@ InstanceNormLayer::InstanceNormLayer() void InstanceNormLayer::instanceNormFloat32() { // Call kernel for NCHW data layout - if (_backendLayout == model::Layout::NCHW) + if (_backendLayout == ir::Layout::NCHW) { // Supports single batch only assert(_inputDescr.dimensions[0] == 1); @@ -76,7 +76,7 @@ void InstanceNormLayer::instanceNormFloat32() } } // Call kernel 
for NHWC data layout - else if (_backendLayout == model::Layout::NHWC) + else if (_backendLayout == ir::Layout::NHWC) { // Supports single batch only assert(_inputDescr.dimensions[0] == 1); @@ -122,7 +122,7 @@ void InstanceNormLayer::configure(uint8_t *inputData, const TensorDescriptor inp uint8_t *betaData, const TensorDescriptor betaDescr, uint8_t *outputData, const TensorDescriptor outputDescr, float epsilon, model::Activation activation, - model::Layout backendLayout) + ir::Layout backendLayout) { _inputData.u8 = inputData; _inputDescr = inputDescr; diff --git a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h index cbb5d7e..2cad9e0 100644 --- a/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h +++ b/runtime/neurun/backend/srcn/kernel/InstanceNormLayer.h @@ -41,7 +41,7 @@ public: const TensorDescriptor gammaDescr, uint8_t *betaData, const TensorDescriptor betaDescr, uint8_t *outputData, const TensorDescriptor outputDescr, float epsilon, model::Activation activation, - model::Layout backendLayout); + ir::Layout backendLayout); void run(); void runSync() @@ -66,7 +66,7 @@ private: model::Activation _activation; OperandType _inputType; - model::Layout _backendLayout; + ir::Layout _backendLayout; }; } // namespace kernel diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.cc b/runtime/neurun/backend/srcn/kernel/OperationUtils.cc index aeb5515..4795c38 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.cc +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.cc @@ -80,14 +80,14 @@ Coordinates convertCoordinates(const Coordinates &coordinates, FilterLayout from coordinates[permutation[2]], coordinates[permutation[3]]}; } -nnfw::srcn::convType_t convertLayout(model::Layout layout) +nnfw::srcn::convType_t convertLayout(ir::Layout layout) { - assert(layout == model::Layout::NHWC || layout == model::Layout::NCHW); - if (layout == model::Layout::NHWC) + assert(layout == 
ir::Layout::NHWC || layout == ir::Layout::NCHW); + if (layout == ir::Layout::NHWC) { return nnfw::srcn::col_major; } - else if (layout == model::Layout::NCHW) + else if (layout == ir::Layout::NCHW) { return nnfw::srcn::row_major; } @@ -97,15 +97,14 @@ nnfw::srcn::convType_t convertLayout(model::Layout layout) } } -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout) +TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout, + ir::Layout backend_layout) { TensorDescriptor descriptor; auto dims = o.shape().dims(); - if (frontend_layout == ::neurun::model::Layout::NHWC && - backend_layout == ::neurun::model::Layout::NCHW && o.shape().rank() == 4) + if (frontend_layout == ir::Layout::NHWC && backend_layout == ir::Layout::NCHW && + o.shape().rank() == 4) { // NHWC -> NCHW uint32_t permutation[4] = {0, 3, 1, 2}; @@ -114,8 +113,8 @@ TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, dims.at(i) = o.shape().dim(permutation[i]); } } - else if (frontend_layout == ::neurun::model::Layout::NCHW && - backend_layout == ::neurun::model::Layout::NHWC && o.shape().rank() == 4) + else if (frontend_layout == ir::Layout::NCHW && backend_layout == ir::Layout::NHWC && + o.shape().rank() == 4) { // NCHW -> NHWC uint32_t permutation[4] = {0, 2, 3, 1}; diff --git a/runtime/neurun/backend/srcn/kernel/OperationUtils.h b/runtime/neurun/backend/srcn/kernel/OperationUtils.h index 75b081f..4b6f12d 100644 --- a/runtime/neurun/backend/srcn/kernel/OperationUtils.h +++ b/runtime/neurun/backend/srcn/kernel/OperationUtils.h @@ -71,11 +71,10 @@ std::vector getFilterPermutation(FilterLayout from_layout, FilterLayout Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout from_layout, FilterLayout to_layout); -nnfw::srcn::convType_t convertLayout(model::Layout layout); +nnfw::srcn::convType_t convertLayout(ir::Layout 
layout); -TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, - ::neurun::model::Layout frontend_layout, - ::neurun::model::Layout backend_layout); +TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout, + ir::Layout backend_layout); } // namespace kernel } // namespace srcn diff --git a/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.cc b/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.cc index 5065633..26469f7 100644 --- a/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.cc +++ b/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.cc @@ -99,7 +99,7 @@ void TransposeConvLayer::configure(uint8_t *inputData, const TensorDescriptor in const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideWidth, const uint32_t strideHeight, uint8_t *outputData, - const TensorDescriptor outputDescr, model::Layout layout) + const TensorDescriptor outputDescr, ir::Layout layout) { _layout = convertLayout(layout); _inputData.u8 = inputData; diff --git a/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.h b/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.h index 6eac9b4..cd88d41 100644 --- a/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.h +++ b/runtime/neurun/backend/srcn/kernel/TransposeConvLayer.h @@ -43,7 +43,7 @@ public: const uint32_t paddingLeft, const uint32_t paddingRight, const uint32_t paddingTop, const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH, uint8_t *outputData, const TensorDescriptor outputDescr, - model::Layout backend_layout); + ir::Layout backend_layout); void run(); void runSync() diff --git a/runtime/neurun/backend/srcn/operand/Tensor.h b/runtime/neurun/backend/srcn/operand/Tensor.h index dd58a29..5cfb34d 100644 --- a/runtime/neurun/backend/srcn/operand/Tensor.h +++ b/runtime/neurun/backend/srcn/operand/Tensor.h @@ -18,7 +18,7 @@ #define __NEURUN_BACKEND_SRCN_OPERAND_TENSOR_H__ #include 
-#include +#include #include "model/OperandInfo.h" namespace neurun @@ -36,7 +36,7 @@ public: Tensor() = delete; public: - Tensor(const model::OperandInfo &info, model::Layout layout) : _info(info), _layout(layout) + Tensor(const model::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout) { // DO NOTHING } @@ -61,14 +61,14 @@ public: size_t num_dimensions() const override { return _info.shape().rank(); } size_t total_size() const override { return _info.total_size(); } size_t calcOffset(const neurun::util::Coordinates &coords) const override; - model::Layout layout() const override { return _layout; } + ir::Layout layout() const override { return _layout; } bool has_padding() const override { return false; } void access(const std::function &fn) final; private: model::OperandInfo _info; uint8_t *_buffer = nullptr; - model::Layout _layout; + ir::Layout _layout; }; } // namespace operand diff --git a/runtime/neurun/core/include/backend/IConstantInitializer.h b/runtime/neurun/core/include/backend/IConstantInitializer.h index 736ce6e..0927d48 100644 --- a/runtime/neurun/core/include/backend/IConstantInitializer.h +++ b/runtime/neurun/core/include/backend/IConstantInitializer.h @@ -21,7 +21,7 @@ #include #include "ITensorBuilder.h" -#include "model/Layout.h" +#include "ir/Layout.h" #include "model/Operand.h" #include "model/Operands.h" #include "model/OperationVisitor.h" @@ -34,7 +34,7 @@ namespace template static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj, const bool copy, - const neurun::model::Layout frontend_layout = neurun::model::Layout::UNKNOWN) + const neurun::ir::Layout frontend_layout = neurun::ir::Layout::UNKNOWN) { const auto shape = model_obj.shape(); auto base = reinterpret_cast(model_obj.data().base()); @@ -141,7 +141,7 @@ void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand: template void permuteInit(const neurun::model::Operand &model_obj, 
neurun::backend::operand::ITensor &obj, - const neurun::model::Layout frontend_layout) + const neurun::ir::Layout frontend_layout) { const bool copy = frontend_layout == obj.layout(); Init(model_obj, obj, copy, frontend_layout); @@ -276,7 +276,7 @@ private: protected: std::unordered_map _init_map; - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; }; } // namespace backend diff --git a/runtime/neurun/core/include/backend/ITensorBuilder.h b/runtime/neurun/core/include/backend/ITensorBuilder.h index 04a4e31..8fdc2ef 100644 --- a/runtime/neurun/core/include/backend/ITensorBuilder.h +++ b/runtime/neurun/core/include/backend/ITensorBuilder.h @@ -22,7 +22,7 @@ #include "model/Index.h" #include "model/OperandInfo.h" #include "model/Operation.h" -#include "model/Layout.h" +#include "ir/Layout.h" #include "operand/ITensor.h" #include "compiler/SubTensorInfo.h" #include "ITensorManager.h" @@ -43,7 +43,7 @@ struct ITensorBuilder * @brief Register tensor information to allocate on backend */ virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &, - model::Layout backend_layout, bool as_const) = 0; + ir::Layout backend_layout, bool as_const) = 0; /** * @brief Register subtensor information to allocate on backend */ diff --git a/runtime/neurun/core/include/backend/ITensorRegister.h b/runtime/neurun/core/include/backend/ITensorRegister.h index 851006b..286823a 100644 --- a/runtime/neurun/core/include/backend/ITensorRegister.h +++ b/runtime/neurun/core/include/backend/ITensorRegister.h @@ -21,7 +21,7 @@ #include "ir/LowerInfoMap.h" #include "ir/operand/ParentInfo.h" #include "ITensorBuilder.h" -#include "model/Layout.h" +#include "ir/Layout.h" #include "model/OperandIndexSequence.h" #include "model/OperandInfo.h" #include "model/Operands.h" @@ -31,20 +31,20 @@ namespace { neurun::model::Shape permuteTensorShape(const neurun::model::Shape &shape, - neurun::model::Layout frontend_layout, - neurun::model::Layout backend_layout) + 
neurun::ir::Layout frontend_layout, + neurun::ir::Layout backend_layout) { assert(shape.rank() <= 4); neurun::model::Shape backend_shape{shape}; - if (shape.rank() == 4 && frontend_layout == neurun::model::Layout::NHWC && - backend_layout == neurun::model::Layout::NCHW) + if (shape.rank() == 4 && frontend_layout == neurun::ir::Layout::NHWC && + backend_layout == neurun::ir::Layout::NCHW) { backend_shape.dim(1) = shape.dim(3); backend_shape.dim(2) = shape.dim(1); backend_shape.dim(3) = shape.dim(2); } - else if (shape.rank() == 4 && frontend_layout == neurun::model::Layout::NCHW && - backend_layout == neurun::model::Layout::NHWC) + else if (shape.rank() == 4 && frontend_layout == neurun::ir::Layout::NCHW && + backend_layout == neurun::ir::Layout::NHWC) { backend_shape.dim(1) = shape.dim(2); backend_shape.dim(2) = shape.dim(3); @@ -119,8 +119,8 @@ protected: } protected: - virtual model::Layout frontendLayout() const final { return _current_subg_layout; } - virtual model::Layout backendLayout(const model::OperandIndex &index) const final + virtual ir::Layout frontendLayout() const final { return _current_subg_layout; } + virtual ir::Layout backendLayout(const model::OperandIndex &index) const final { assert(_lower_info_map != nullptr); const auto lower_info = _lower_info_map->operand.at(index).get(); @@ -129,21 +129,21 @@ protected: private: compiler::SubTensorInfo generateSubTensorInfo(const model::Operand &obj, - model::Layout frontend_layout, - model::Layout backend_layout) const + ir::Layout frontend_layout, + ir::Layout backend_layout) const { assert(obj.shape().rank() <= 4); const auto parent_index = obj.parent_info()->parent(); auto shape = obj.shape(); auto offset = obj.parent_info()->offset(); - if (operands().at(parent_index).shape().rank() == 4 && frontend_layout == model::Layout::NHWC && - backend_layout == model::Layout::NCHW) + if (operands().at(parent_index).shape().rank() == 4 && frontend_layout == ir::Layout::NHWC && + backend_layout == 
ir::Layout::NCHW) { shape.extendRank(4); offset = {offset[0], offset[3], offset[1], offset[2]}; } else if (operands().at(parent_index).shape().rank() == 4 && - frontend_layout == model::Layout::NHWC && backend_layout == model::Layout::NCHW) + frontend_layout == ir::Layout::NHWC && backend_layout == ir::Layout::NCHW) { shape.extendRank(4); offset = {offset[0], offset[2], offset[3], offset[1]}; @@ -156,7 +156,7 @@ private: } private: - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; const graph::LowerInfoMap *_lower_info_map{nullptr}; }; diff --git a/runtime/neurun/core/include/backend/operand/ITensor.h b/runtime/neurun/core/include/backend/operand/ITensor.h index f7a79cf..c278b01 100644 --- a/runtime/neurun/core/include/backend/operand/ITensor.h +++ b/runtime/neurun/core/include/backend/operand/ITensor.h @@ -21,7 +21,7 @@ #include #include -#include "model/Layout.h" +#include "ir/Layout.h" #include "util/Coordinates.h" namespace neurun @@ -42,7 +42,7 @@ public: virtual size_t dimension(size_t index) const = 0; virtual size_t num_dimensions() const = 0; virtual size_t calcOffset(const neurun::util::Coordinates &coords) const = 0; - virtual model::Layout layout() const = 0; + virtual ir::Layout layout() const = 0; virtual bool has_padding() const = 0; virtual void access(const std::function &fn) = 0; }; diff --git a/runtime/neurun/core/include/exec/Execution.h b/runtime/neurun/core/include/exec/Execution.h index d5c6c3f..c23ac09 100644 --- a/runtime/neurun/core/include/exec/Execution.h +++ b/runtime/neurun/core/include/exec/Execution.h @@ -21,7 +21,7 @@ #ifndef __NEURUN_EXEC_EXECUTION_H__ #define __NEURUN_EXEC_EXECUTION_H__ -#include "model/Layout.h" +#include "ir/Layout.h" #include "exec/IExecutor.h" #include "IODescription.h" @@ -60,7 +60,7 @@ public: * @param[in] layout Input data's data format */ void setInput(const model::IOIndex &index, const void *buffer, size_t length, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = 
ir::Layout::NHWC); /** * @brief Set input data's information, especially to specify unknown dimensions on model * build time. @@ -72,7 +72,7 @@ public: * @param[in] layout Input data's data format */ void setInput(const model::IOIndex &index, const model::TypeInfo &type, const model::Shape &shape, - const void *buffer, size_t length, model::Layout layout = model::Layout::NHWC); + const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information * @param[in] index Output index @@ -81,7 +81,7 @@ public: * @param[in] layout Output data's data format */ void setOutput(const model::IOIndex &index, void *buffer, size_t length, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information, especially to specify unknown dimensions on model * build time. @@ -94,19 +94,19 @@ public: */ void setOutput(const model::IOIndex &index, const model::TypeInfo &type, const model::Shape &shape, void *buffer, size_t length, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's data format * @param[in] index Input index * @param[in] layout Input data's data format */ - void setInputLayout(const model::IOIndex &index, model::Layout layout); + void setInputLayout(const model::IOIndex &index, ir::Layout layout); /** * @brief Set output data's data format * @param[in] index Output index * @param[in] layout Output data's data format */ - void setOutputLayout(const model::IOIndex &index, model::Layout layout); + void setOutputLayout(const model::IOIndex &index, ir::Layout layout); /** * @brief Execution * @note It should be called after setting input and output buffer diff --git a/runtime/neurun/core/include/exec/IODescription.h b/runtime/neurun/core/include/exec/IODescription.h index 692411b..fc766c6 100644 --- a/runtime/neurun/core/include/exec/IODescription.h +++ 
b/runtime/neurun/core/include/exec/IODescription.h @@ -31,11 +31,11 @@ struct InputDesc const model::OperandInfo info; const void *buffer; const size_t size; - const model::Layout layout; + const ir::Layout layout; InputDesc(void) = delete; InputDesc(const model::OperandInfo &info, const void *buffer, const size_t size, - model::Layout layout) + ir::Layout layout) : info(info), buffer(buffer), size(size), layout(layout) { } @@ -46,10 +46,10 @@ struct OutputDesc const model::OperandInfo info; void *buffer; const size_t size; - const model::Layout layout; + const ir::Layout layout; OutputDesc(void) = delete; - OutputDesc(const model::OperandInfo &info, void *buffer, const size_t size, model::Layout layout) + OutputDesc(const model::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout) : info(info), buffer(buffer), size(size), layout(layout) { } diff --git a/runtime/neurun/core/include/ir/Graph.h b/runtime/neurun/core/include/ir/Graph.h index 6895b66..be78f3a 100644 --- a/runtime/neurun/core/include/ir/Graph.h +++ b/runtime/neurun/core/include/ir/Graph.h @@ -199,10 +199,9 @@ private: model::OperandIndexMap> &operands_lower_info); void dumpLowerInfo(); bool mergeable(const model::SubgraphIndex &subg_index, const model::OperationIndex &node_index, - model::Layout layout); + ir::Layout layout); model::SubgraphIndex appendFreshSingleOpSubgraph(const model::OperationIndex &node_index, - const model::Operation &node, - model::Layout layout); + const model::Operation &node, ir::Layout layout); private: std::unique_ptr _backend_resolver; diff --git a/runtime/neurun/core/include/model/Layout.h b/runtime/neurun/core/include/ir/Layout.h similarity index 69% rename from runtime/neurun/core/include/model/Layout.h rename to runtime/neurun/core/include/ir/Layout.h index db46f42..fd4f7bc 100644 --- a/runtime/neurun/core/include/model/Layout.h +++ b/runtime/neurun/core/include/ir/Layout.h @@ -14,15 +14,15 @@ * limitations under the License. 
*/ -#ifndef __NEURUN_MODEL_LAYOUT_H__ -#define __NEURUN_MODEL_LAYOUT_H__ +#ifndef __NEURUN_IR_LAYOUT_H__ +#define __NEURUN_IR_LAYOUT_H__ #include #include namespace neurun { -namespace model +namespace ir { enum class Layout @@ -32,36 +32,42 @@ enum class Layout NCHW }; -inline std::string to_string(model::Layout layout) +inline std::string to_string(Layout layout) { switch (layout) { case Layout::NHWC: return std::string{"NHWC"}; - case model::Layout::NCHW: + case Layout::NCHW: return std::string{"NCHW"}; - case model::Layout::UNKNOWN: + case Layout::UNKNOWN: return std::string{"UNKNOWN"}; default: throw std::runtime_error("WRONG LAYOUT"); } } +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. +namespace model +{ +using Layout = ir::Layout; } // namespace model } // namespace neurun namespace std { -template <> struct hash<::neurun::model::Layout> +template <> struct hash { - size_t operator()(::neurun::model::Layout value) const noexcept + size_t operator()(neurun::ir::Layout value) const noexcept { - using type = typename std::underlying_type<::neurun::model::Layout>::type; + using type = typename std::underlying_type::type; return hash()(static_cast(value)); } }; } // namespace std -#endif // __NEURUN_MODEL_LAYOUT_H__ +#endif // __NEURUN_IR_LAYOUT_H__ diff --git a/runtime/neurun/core/include/ir/operand/PermuteFactor.h b/runtime/neurun/core/include/ir/operand/PermuteFactor.h index 480e95c..240c420 100644 --- a/runtime/neurun/core/include/ir/operand/PermuteFactor.h +++ b/runtime/neurun/core/include/ir/operand/PermuteFactor.h @@ -25,7 +25,7 @@ #include -#include "model/Layout.h" +#include "ir/Layout.h" namespace neurun { @@ -53,7 +53,7 @@ public: * @param backend The backend factor * @param backend The layout factor */ - PermuteFactor(const backend::Backend *backend, model::Layout layout) + PermuteFactor(const backend::Backend *backend, ir::Layout layout) : _backend{backend}, _layout{layout} { // DO NOTHING @@ -82,7 +82,7 @@ public: 
* * @return Layout factor */ - model::Layout layout() const { return _layout; } + ir::Layout layout() const { return _layout; } public: /** @@ -103,7 +103,7 @@ public: private: const backend::Backend *_backend{nullptr}; - model::Layout _layout{model::Layout::UNKNOWN}; + ir::Layout _layout{ir::Layout::UNKNOWN}; }; } // namespace operand @@ -123,7 +123,7 @@ template <> struct hash size_t operator()(const PermuteFactor &factor) const noexcept { hash b_hash{}; - hash<::neurun::model::Layout> l_hash{}; + hash l_hash{}; return b_hash(factor.backend()) ^ (l_hash(factor.layout()) << 1); } }; diff --git a/runtime/neurun/core/include/ir/operation/LowerInfo.h b/runtime/neurun/core/include/ir/operation/LowerInfo.h index 83f67d5..6f02686 100644 --- a/runtime/neurun/core/include/ir/operation/LowerInfo.h +++ b/runtime/neurun/core/include/ir/operation/LowerInfo.h @@ -39,9 +39,9 @@ namespace operation class LowerInfo { public: - LowerInfo(const backend::Backend *backend, model::Layout layout); + LowerInfo(const backend::Backend *backend, ir::Layout layout); const backend::Backend *backend() const { return _permute_factor.backend(); } - model::Layout layout() const { return _permute_factor.layout(); } + ir::Layout layout() const { return _permute_factor.layout(); } private: graph::operand::PermuteFactor _permute_factor; diff --git a/runtime/neurun/core/include/model/OperandInfo.h b/runtime/neurun/core/include/model/OperandInfo.h index a821f3f..7b0fdb0 100644 --- a/runtime/neurun/core/include/model/OperandInfo.h +++ b/runtime/neurun/core/include/model/OperandInfo.h @@ -23,7 +23,7 @@ #include "Shape.h" #include "TypeInfo.h" -#include "Layout.h" +#include "ir/Layout.h" namespace neurun { diff --git a/runtime/neurun/core/include/model/Shape.h b/runtime/neurun/core/include/model/Shape.h index c8d9866..e7d2553 100644 --- a/runtime/neurun/core/include/model/Shape.h +++ b/runtime/neurun/core/include/model/Shape.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_MODEL_SHAPE_H__ #define 
__NEURUN_MODEL_SHAPE_H__ -#include "Layout.h" +#include "ir/Layout.h" #include "misc/feature/Shape.h" #include diff --git a/runtime/neurun/core/include/model/Subgraph.h b/runtime/neurun/core/include/model/Subgraph.h index d751cb7..a59db64 100644 --- a/runtime/neurun/core/include/model/Subgraph.h +++ b/runtime/neurun/core/include/model/Subgraph.h @@ -21,7 +21,7 @@ #include #include -#include "Layout.h" +#include "ir/Layout.h" #include "Index.h" #include "Operation.h" @@ -47,7 +47,7 @@ struct Element class Subgraph : public Operation { public: - explicit Subgraph(model::Layout layout); + explicit Subgraph(Layout layout); Subgraph(const Subgraph &) = delete; public: diff --git a/runtime/neurun/core/include/util/ShapeInference.h b/runtime/neurun/core/include/util/ShapeInference.h index c58586a..fce8bf2 100644 --- a/runtime/neurun/core/include/util/ShapeInference.h +++ b/runtime/neurun/core/include/util/ShapeInference.h @@ -24,7 +24,7 @@ #include "model/operation/DepthwiseConv2D.h" #include "model/Operands.h" #include "model/Index.h" -#include "model/Layout.h" +#include "ir/Layout.h" namespace neurun { @@ -37,21 +37,21 @@ Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_ Shapes inferAvgPoolShape(const model::Shape &in_shape, const model::operation::AvgPool2D::Param ¶m, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = ir::Layout::NHWC); Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat::Param ¶m); Shapes inferMaxPoolShape(const model::Shape &in_shape, const model::operation::MaxPool2D::Param ¶m, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = ir::Layout::NHWC); Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, const model::operation::Conv2D::Param ¶m, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = ir::Layout::NHWC); Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, const 
model::operation::DepthwiseConv2D::Param ¶m, - model::Layout layout = model::Layout::NHWC); + ir::Layout layout = ir::Layout::NHWC); Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape); diff --git a/runtime/neurun/core/include/util/Utils.h b/runtime/neurun/core/include/util/Utils.h index 36edd9e..06cd638 100644 --- a/runtime/neurun/core/include/util/Utils.h +++ b/runtime/neurun/core/include/util/Utils.h @@ -24,7 +24,7 @@ #define __NEURUN_UTIL_UTILS_H__ #include "model/InternalType.h" -#include "model/Layout.h" +#include "ir/Layout.h" #include "model/Operand.h" #include "util/Coordinates.h" @@ -42,8 +42,8 @@ namespace util */ const char *to_string(const model::PaddingType &type); -Coordinates convertCoordinates(const Coordinates &from_coordinates, model::Layout from_layout, - model::Layout to_layout); +Coordinates convertCoordinates(const Coordinates &from_coordinates, ir::Layout from_layout, + ir::Layout to_layout); } // namespace util } // namespace neurun diff --git a/runtime/neurun/core/include/util/feature/nchw/Reader.h b/runtime/neurun/core/include/util/feature/nchw/Reader.h index 755625c..0305bdf 100644 --- a/runtime/neurun/core/include/util/feature/nchw/Reader.h +++ b/runtime/neurun/core/include/util/feature/nchw/Reader.h @@ -54,7 +54,7 @@ public: Reader(backend::operand::ITensor *tensor) : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()} { - assert(tensor->layout() == model::Layout::NCHW); + assert(tensor->layout() == ir::Layout::NCHW); const auto start_offset = tensor->calcOffset({0, 0, 0, 0}); _strides.W = tensor->dimension(3) == 1 ? 
0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset; diff --git a/runtime/neurun/core/include/util/feature/nchw/View.h b/runtime/neurun/core/include/util/feature/nchw/View.h index 581ebb0..d747937 100644 --- a/runtime/neurun/core/include/util/feature/nchw/View.h +++ b/runtime/neurun/core/include/util/feature/nchw/View.h @@ -54,7 +54,7 @@ public: View(::neurun::backend::operand::ITensor *tensor) : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()} { - assert(tensor->layout() == model::Layout::NCHW); + assert(tensor->layout() == ir::Layout::NCHW); const auto start_offset = tensor->calcOffset({0, 0, 0, 0}); _strides.W = tensor->dimension(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset; diff --git a/runtime/neurun/core/include/util/feature/nhwc/Reader.h b/runtime/neurun/core/include/util/feature/nhwc/Reader.h index 0e9e076..0df7be4 100644 --- a/runtime/neurun/core/include/util/feature/nhwc/Reader.h +++ b/runtime/neurun/core/include/util/feature/nhwc/Reader.h @@ -55,7 +55,7 @@ public: Reader(const backend::operand::ITensor *tensor) : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()} { - assert(tensor->layout() == model::Layout::NHWC); + assert(tensor->layout() == ir::Layout::NHWC); const auto start_offset = tensor->calcOffset({0, 0, 0, 0}); _strides.C = tensor->dimension(3) == 1 ? 
0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset; diff --git a/runtime/neurun/core/include/util/feature/nhwc/View.h b/runtime/neurun/core/include/util/feature/nhwc/View.h index 924a441..b9d98e9 100644 --- a/runtime/neurun/core/include/util/feature/nhwc/View.h +++ b/runtime/neurun/core/include/util/feature/nhwc/View.h @@ -56,7 +56,7 @@ public: View(backend::operand::ITensor *tensor) : _ptr{tensor->buffer() + tensor->calcOffset({0, 0, 0, 0})}, _len{tensor->total_size()} { - assert(tensor->layout() == model::Layout::NHWC); + assert(tensor->layout() == ir::Layout::NHWC); const auto start_offset = tensor->calcOffset({0, 0, 0, 0}); _strides.C = tensor->dimension(3) == 1 ? 0 : tensor->calcOffset({0, 0, 0, 1}) - start_offset; diff --git a/runtime/neurun/core/src/compiler/OperationValidator.cc b/runtime/neurun/core/src/compiler/OperationValidator.cc index 9558fff..8cdda66 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.cc +++ b/runtime/neurun/core/src/compiler/OperationValidator.cc @@ -487,7 +487,7 @@ void OperationValidator::visit(const model::operation::TransposeConv &node) // H -> H // W -> W // O -> C - const auto ker_shape = _ctx.at(ker_index).shape().asFeature(model::Layout::NHWC); + const auto ker_shape = _ctx.at(ker_index).shape().asFeature(ir::Layout::NHWC); UNUSED_RELEASE(ofm_shape); UNUSED_RELEASE(ifm_shape); diff --git a/runtime/neurun/core/src/compiler/OperationValidator.h b/runtime/neurun/core/src/compiler/OperationValidator.h index fd8b406..4ea62ea 100644 --- a/runtime/neurun/core/src/compiler/OperationValidator.h +++ b/runtime/neurun/core/src/compiler/OperationValidator.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_COMPILER_OPERATION_VALIDATOR_H__ #define __NEURUN_COMPILER_OPERATION_VALIDATOR_H__ -#include "model/Layout.h" +#include "ir/Layout.h" #include "model/OperationVisitor.h" namespace neurun @@ -37,7 +37,7 @@ class OperationValidator : public model::OperationVisitor { public: OperationValidator(const neurun::model::Operands &ctx) - : 
_ctx{ctx}, _current_subg_layout{model::Layout::UNKNOWN} + : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN} { } @@ -72,7 +72,7 @@ public: private: const neurun::model::Operands &_ctx; - model::Layout _current_subg_layout; + ir::Layout _current_subg_layout; }; } // namespace compiler diff --git a/runtime/neurun/core/src/exec/Execution.cc b/runtime/neurun/core/src/exec/Execution.cc index 2921599..e135617 100644 --- a/runtime/neurun/core/src/exec/Execution.cc +++ b/runtime/neurun/core/src/exec/Execution.cc @@ -31,7 +31,7 @@ Execution::Execution(const std::shared_ptr &executor) : _executor{exe // TODO Remove default parameter void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t length, - model::Layout layout) + ir::Layout layout) { const auto input_index = graph().getInputs().at(index); const auto info = graph().operands().at(input_index).info(); @@ -48,7 +48,7 @@ void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t // TODO Remove default parameter void Execution::setInput(const model::IOIndex &index, const model::TypeInfo &type, const model::Shape &shape, const void *buffer, size_t length, - model::Layout layout) + ir::Layout layout) { const model::OperandInfo info{shape, type}; @@ -63,7 +63,7 @@ void Execution::setInput(const model::IOIndex &index, const model::TypeInfo &typ // TODO Remove default parameter void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t length, - model::Layout layout) + ir::Layout layout) { const auto output_index = graph().getOutputs().at(index); const auto info = graph().operands().at(output_index).info(); @@ -79,8 +79,7 @@ void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t leng // TODO Remove default parameter void Execution::setOutput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, void *buffer, size_t length, - model::Layout layout) + const model::Shape &shape, void *buffer, size_t length, 
ir::Layout layout) { const model::OperandInfo info{shape, type}; @@ -93,14 +92,14 @@ void Execution::setOutput(const model::IOIndex &index, const model::TypeInfo &ty nnfw::cpp14::make_unique(info, buffer, length, layout); } -void Execution::setInputLayout(const model::IOIndex &index, model::Layout layout) +void Execution::setInputLayout(const model::IOIndex &index, ir::Layout layout) { const auto &input_desc = _io_desc.inputs.at(index.value()); _io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique( input_desc->info, input_desc->buffer, input_desc->size, layout); } -void Execution::setOutputLayout(const model::IOIndex &index, model::Layout layout) +void Execution::setOutputLayout(const model::IOIndex &index, ir::Layout layout) { const auto &output_desc = _io_desc.outputs.at(index.value()); _io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique( diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc index d9a9378..006c02f 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.cc +++ b/runtime/neurun/core/src/exec/ExecutorBase.cc @@ -32,7 +32,7 @@ ExecutorBase::ExecutorBase(const graph::Graph &graph, std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const model::TypeInfo &type, const void *buffer, - size_t length, model::Layout io_layout) + size_t length, ir::Layout io_layout) { using ::neurun::model::DataType; switch (type.type()) @@ -52,7 +52,7 @@ std::unique_ptr ExecutorBase::source(const model::IOIndex &index, } std::unique_ptr ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type, - void *buffer, size_t length, model::Layout io_layout) + void *buffer, size_t length, ir::Layout io_layout) { using ::neurun::model::DataType; switch (type.type()) diff --git a/runtime/neurun/core/src/exec/ExecutorBase.h b/runtime/neurun/core/src/exec/ExecutorBase.h index 2b9f996..5abec6b 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.h +++ 
b/runtime/neurun/core/src/exec/ExecutorBase.h @@ -64,13 +64,13 @@ public: private: std::unique_ptr source(const model::IOIndex &index, const model::TypeInfo &type, - const void *buffer, size_t length, model::Layout io_layout); + const void *buffer, size_t length, ir::Layout io_layout); std::unique_ptr sink(const model::IOIndex &index, const model::TypeInfo &type, - void *buffer, size_t length, model::Layout io_layout); + void *buffer, size_t length, ir::Layout io_layout); template std::unique_ptr source(const model::IOIndex &index, const void *buffer, size_t length, - model::Layout io_layout) + ir::Layout io_layout) { const auto operand_index = _graph.getInputs().at(index); const auto &operand = _graph.operands().at(operand_index); @@ -78,36 +78,36 @@ private: const auto tensor = _operand_context->at(operand_index); const auto tensor_layout = tensor->layout(); - if (((io_layout == model::Layout::NHWC) && (tensor_layout == model::Layout::NCHW)) || - ((io_layout == model::Layout::NCHW) && (tensor_layout == model::Layout::NHWC))) + if (((io_layout == ir::Layout::NHWC) && (tensor_layout == ir::Layout::NCHW)) || + ((io_layout == ir::Layout::NCHW) && (tensor_layout == ir::Layout::NHWC))) { return nnfw::cpp14::make_unique>(buffer, length, operand.shape(), io_layout); } // TODO Change this to return error - assert(io_layout != model::Layout::UNKNOWN || - (tensor_layout != model::Layout::NCHW && tensor_layout != model::Layout::NCHW)); + assert(io_layout != ir::Layout::UNKNOWN || + (tensor_layout != ir::Layout::NCHW && tensor_layout != ir::Layout::NCHW)); return nnfw::cpp14::make_unique>(buffer, length, operand.shape()); } template std::unique_ptr sink(const model::IOIndex &index, void *buffer, size_t length, - model::Layout io_layout) + ir::Layout io_layout) { const auto operand_index = _graph.getOutputs().at(index); const auto &operand = _graph.operands().at(operand_index); const auto tensor = _operand_context->at(operand_index); const auto tensor_layout = 
tensor->layout(); - if (((tensor_layout == model::Layout::NCHW) && (io_layout == model::Layout::NHWC)) || - ((tensor_layout == model::Layout::NHWC) && (io_layout == model::Layout::NCHW))) + if (((tensor_layout == ir::Layout::NCHW) && (io_layout == ir::Layout::NHWC)) || + ((tensor_layout == ir::Layout::NHWC) && (io_layout == ir::Layout::NCHW))) { return nnfw::cpp14::make_unique>(buffer, length, operand.shape(), io_layout); } // TODO Change this to return error - assert(io_layout != model::Layout::UNKNOWN || - (tensor_layout != model::Layout::NCHW && tensor_layout != model::Layout::NCHW)); + assert(io_layout != ir::Layout::UNKNOWN || + (tensor_layout != ir::Layout::NCHW && tensor_layout != ir::Layout::NCHW)); return nnfw::cpp14::make_unique>(buffer, length, operand.shape()); } diff --git a/runtime/neurun/core/src/exec/Sink.h b/runtime/neurun/core/src/exec/Sink.h index af5ca58..07b72aa 100644 --- a/runtime/neurun/core/src/exec/Sink.h +++ b/runtime/neurun/core/src/exec/Sink.h @@ -43,7 +43,7 @@ template class ITemplSink : public ISink { public: ITemplSink(void *output_buffer, const size_t &output_size, const model::Shape &shape, - const bool copy, model::Layout io_layout) + const bool copy, ir::Layout io_layout) : _output_buffer{reinterpret_cast(output_buffer)}, _output_size{output_size}, _shape{shape}, _copy{copy}, _io_layout{io_layout} { @@ -52,8 +52,8 @@ public: protected: void pullUnif(neurun::backend::operand::ITensor &tensor) const { - assert(((_io_layout == model::Layout::NHWC && tensor.layout() == model::Layout::NCHW) || - (_io_layout == model::Layout::NCHW && tensor.layout() == model::Layout::NHWC)) || + assert(((_io_layout == ir::Layout::NHWC && tensor.layout() == ir::Layout::NCHW) || + (_io_layout == ir::Layout::NCHW && tensor.layout() == ir::Layout::NHWC)) || _copy); auto input_buffer = tensor.buffer(); auto rank = _shape.rank(); @@ -125,7 +125,7 @@ protected: { const auto shape = _shape.asFeature(_io_layout); - if (_io_layout == model::Layout::NHWC) + if 
(_io_layout == ir::Layout::NHWC) { const util::feature::nchw::Reader from(&tensor); util::feature::nhwc::View into(shape, _output_buffer, _output_size); @@ -135,7 +135,7 @@ protected: into.at(batch, row, col, ch) = value; }; } - else if (_io_layout == model::Layout::NCHW) + else if (_io_layout == ir::Layout::NCHW) { const util::feature::nhwc::Reader from(&tensor); util::feature::nchw::View into(shape, _output_buffer, _output_size); @@ -163,14 +163,14 @@ private: const size_t _output_size; const model::Shape _shape; const bool _copy; - const model::Layout _io_layout; + const ir::Layout _io_layout; }; template class PermutateSink final : public ITemplSink { public: PermutateSink(void *output_buffer, const size_t &output_size, const model::Shape &shape, - model::Layout io_layout) + ir::Layout io_layout) : ITemplSink(output_buffer, output_size, shape, false, io_layout) { } @@ -187,7 +187,7 @@ template class CopySink final : public ITemplSink { public: CopySink(void *output_buffer, const size_t &output_size, const model::Shape &shape, - model::Layout io_layout = model::Layout::UNKNOWN) + ir::Layout io_layout = ir::Layout::UNKNOWN) : ITemplSink(output_buffer, output_size, shape, true, io_layout) { } diff --git a/runtime/neurun/core/src/exec/Source.h b/runtime/neurun/core/src/exec/Source.h index c4b2ce8..3272d07 100644 --- a/runtime/neurun/core/src/exec/Source.h +++ b/runtime/neurun/core/src/exec/Source.h @@ -26,7 +26,7 @@ #include "util/feature/nhwc/View.h" #include "util/Utils.h" #include -#include +#include #include "model/Shape.h" namespace neurun @@ -46,7 +46,7 @@ template class ITemplSource : public ISource { public: ITemplSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, - const bool copy, model::Layout io_layout) + const bool copy, ir::Layout io_layout) : _input_buffer{reinterpret_cast(input_buffer)}, _input_size{input_size}, _shape{shape}, _copy(copy), _io_layout{io_layout} { @@ -57,8 +57,8 @@ public: protected: void 
pushUnif(neurun::backend::operand::ITensor &tensor) const { - assert(((_io_layout == model::Layout::NHWC && tensor.layout() == model::Layout::NCHW) || - (_io_layout == model::Layout::NCHW && tensor.layout() == model::Layout::NHWC)) || + assert(((_io_layout == ir::Layout::NHWC && tensor.layout() == ir::Layout::NCHW) || + (_io_layout == ir::Layout::NCHW && tensor.layout() == ir::Layout::NHWC)) || _copy); auto output_buffer = tensor.buffer(); auto rank = _shape.rank(); @@ -130,7 +130,7 @@ protected: { const auto shape = _shape.asFeature(_io_layout); - if (_io_layout == model::Layout::NCHW) + if (_io_layout == ir::Layout::NCHW) { const util::feature::nchw::Reader from(shape, _input_buffer, _input_size); util::feature::nhwc::View into(&tensor); @@ -140,7 +140,7 @@ protected: into.at(batch, row, col, ch) = value; }; } - else if (_io_layout == model::Layout::NHWC) + else if (_io_layout == ir::Layout::NHWC) { const util::feature::nhwc::Reader from(shape, _input_buffer, _input_size); util::feature::nchw::View into(&tensor); @@ -169,14 +169,14 @@ private: const size_t _input_size; const model::Shape _shape; const bool _copy; - const model::Layout _io_layout; + const ir::Layout _io_layout; }; template class PermutateSource final : public ITemplSource { public: PermutateSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, - model::Layout io_layout) + ir::Layout io_layout) : ITemplSource(input_buffer, input_size, shape, false, io_layout) { } @@ -193,7 +193,7 @@ template class CopySource final : public ITemplSource { public: CopySource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, - model::Layout io_layout = model::Layout::UNKNOWN) + ir::Layout io_layout = ir::Layout::UNKNOWN) : ITemplSource(input_buffer, input_size, shape, true, io_layout) { } diff --git a/runtime/neurun/core/src/exec/interp/Tensor.cc b/runtime/neurun/core/src/exec/interp/Tensor.cc index af752dd..5c1da35 100644 --- 
a/runtime/neurun/core/src/exec/interp/Tensor.cc +++ b/runtime/neurun/core/src/exec/interp/Tensor.cc @@ -42,16 +42,16 @@ size_t Tensor::calcOffset(const neurun::util::Coordinates &coords) const throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now."); } -model::Layout ROTensor::layout() const +ir::Layout ROTensor::layout() const { // TODO Changes to return frontend layout - return model::Layout::NHWC; + return ir::Layout::NHWC; } -model::Layout Tensor::layout() const +ir::Layout Tensor::layout() const { // TODO Changes to return frontend layout - return model::Layout::NHWC; + return ir::Layout::NHWC; } } // namespace interp diff --git a/runtime/neurun/core/src/exec/interp/Tensor.h b/runtime/neurun/core/src/exec/interp/Tensor.h index a48160b..4617f7e 100644 --- a/runtime/neurun/core/src/exec/interp/Tensor.h +++ b/runtime/neurun/core/src/exec/interp/Tensor.h @@ -25,7 +25,7 @@ #include "model/OperandInfo.h" #include "backend/operand/ITensor.h" -#include "model/Layout.h" +#include "ir/Layout.h" namespace neurun { @@ -123,7 +123,7 @@ public: size_t dimension(size_t index) const override { return _info.shape().dim(index); } size_t num_dimensions() const override { return _info.shape().rank(); } size_t calcOffset(const util::Coordinates &coords) const override; - model::Layout layout() const override; + ir::Layout layout() const override; bool has_padding() const override { return false; } model::DataType data_type() const override { return _info.typeInfo().type(); } const model::OperandInfo &tensorInfo() const override { return _info; } @@ -162,7 +162,7 @@ public: size_t dimension(size_t index) const override { return _info.shape().dim(index); } size_t num_dimensions() const override { return _info.shape().rank(); } size_t calcOffset(const util::Coordinates &coords) const override; - model::Layout layout() const override; + ir::Layout layout() const override; bool has_padding() const override { return false; } model::DataType data_type() 
const override { return _info.typeInfo().type(); } const model::OperandInfo &tensorInfo() const override { return _info; } diff --git a/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc b/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc index 8c9f544..660514b 100644 --- a/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc +++ b/runtime/neurun/core/src/exec/interp/operations/AvgPool2D.cc @@ -71,8 +71,8 @@ void invoke(const ITensor *in_tensor, const ITensor *out_tensor, const model::operation::AvgPool2D::Param ¶m) { // TODO Support NCHW frontend - const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); - const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); + const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); + const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh); // Calculate diff --git a/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc b/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc index a30508c..d3b046f 100644 --- a/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc +++ b/runtime/neurun/core/src/exec/interp/operations/Conv2D.cc @@ -80,8 +80,8 @@ void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor const ITensor *ofm_tensor, const model::operation::Conv2D::Param ¶m) { // TODO Support NCHW frontned - const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); - const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); + const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); + const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); // Kernel format is [depth_out, kernel_height, kernel_width, depth_in]. 
const auto &ker_shape = ker_tensor->tensorInfo().shape(); const auto ker_height = ker_shape.dim(1); diff --git a/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc b/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc index 4506429..66dab69 100644 --- a/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc +++ b/runtime/neurun/core/src/exec/interp/operations/DepthwiseConv.cc @@ -84,8 +84,8 @@ void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor const ITensor *ofm_tensor, const model::operation::DepthwiseConv2D::Param ¶m) { // TODO Support NCHW frontend - const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); - const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); + const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); + const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); // Kernel format is [1, kernel_height, kernel_width, depth_out]. 
const auto &ker_shape = ker_tensor->tensorInfo().shape(); const auto ker_height = ker_shape.dim(1); diff --git a/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc b/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc index ac10c42..cd4e721 100644 --- a/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc +++ b/runtime/neurun/core/src/exec/interp/operations/MaxPool2D.cc @@ -71,8 +71,8 @@ void invoke(const ITensor *in_tensor, const ITensor *out_tensor, const model::operation::MaxPool2D::Param &param) { // TODO support NCHW frontend - const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); - const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(model::Layout::NHWC); + const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); + const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC); const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh); // Calculate diff --git a/runtime/neurun/core/src/ir/Graph.cc b/runtime/neurun/core/src/ir/Graph.cc index a1fad2c..afd0a7c 100644 --- a/runtime/neurun/core/src/ir/Graph.cc +++ b/runtime/neurun/core/src/ir/Graph.cc @@ -247,7 +247,7 @@ void Graph::makeSubgraphs( auto backend = _backend_resolver->getBackend(node_index); // TODO How to get frontend layout of this node from IR - auto frontend_layout = model::Layout::NHWC; + auto frontend_layout = ir::Layout::NHWC; auto backend_layout = frontend_layout; // The layout of each backend should be set at another place @@ -258,11 +258,11 @@ void Graph::makeSubgraphs( const std::string acl_layout_str = util::getConfigString(util::config::ACL_LAYOUT); if (acl_layout_str == "NHWC") { - backend_layout = model::Layout::NHWC; + backend_layout = ir::Layout::NHWC; } else if (acl_layout_str == "NCHW") { - backend_layout = model::Layout::NCHW; + backend_layout = ir::Layout::NCHW; } } else if (backend->config()->id() == 
"srcn") @@ -270,16 +270,16 @@ void Graph::makeSubgraphs( const std::string ncnn_layout_str = util::getConfigString(util::config::NCNN_LAYOUT); if (ncnn_layout_str == "NHWC") { - backend_layout = model::Layout::NHWC; + backend_layout = ir::Layout::NHWC; } else if (ncnn_layout_str == "NCHW") { - backend_layout = model::Layout::NCHW; + backend_layout = ir::Layout::NCHW; } } else if (backend->config()->id() == "cpu") { - backend_layout = model::Layout::NHWC; + backend_layout = ir::Layout::NHWC; } for (auto operand : node.getInputs()) @@ -398,7 +398,7 @@ void Graph::manipulateLowerInfo( { lower_info->addDefPermuteFactor(operand::PermuteFactor{ default_backend, - model::Layout::NHWC // TODO Get frontend layout of this node from IR + ir::Layout::NHWC // TODO Get frontend layout of this node from IR }); } } @@ -425,7 +425,7 @@ void Graph::dumpLowerInfo() for (auto factor : factors) { str += factor.backend()->config()->id(); - str += "(" + model::to_string(factor.layout()) + ")"; + str += "(" + ir::to_string(factor.layout()) + ")"; str += " "; } return "{ " + str + "}"; @@ -476,7 +476,7 @@ void Graph::dumpLowerInfo() } bool Graph::mergeable(const model::SubgraphIndex &subg_index, - const model::OperationIndex &node_index, model::Layout layout) + const model::OperationIndex &node_index, ir::Layout layout) { // Are they mergeable? // 1. the same backend id and layout? 
@@ -490,9 +490,9 @@ bool Graph::mergeable(const model::SubgraphIndex &subg_index, const auto &subg_backend_id = getLowerInfo(subg_index)->backend()->config()->id(); const auto &node_backend_id = _backend_resolver->getBackend(node_index)->config()->id(); VERBOSE(Lower) << "SUBG#" << subg_index.value() << " { " << subg_backend_id << "(" - << model::to_string(subg_backend_layout) << ") } " + << ir::to_string(subg_backend_layout) << ") } " << " NODE#" << node_index.value() << " (" << node.name() << ") { " - << node_backend_id << "(" << model::to_string(layout) << ") } " << std::endl; + << node_backend_id << "(" << ir::to_string(layout) << ") } " << std::endl; if (subg_backend_id != node_backend_id || subg_backend_layout != layout) return false; } @@ -549,7 +549,7 @@ bool Graph::mergeable(const model::SubgraphIndex &subg_index, model::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const model::OperationIndex &node_index, const model::Operation &node, - model::Layout layout) + ir::Layout layout) { // Create a fresh subgraph with one operation, and append it to subgraphs // Create a fresh subgraph diff --git a/runtime/neurun/core/src/ir/operation/LowerInfo.cc b/runtime/neurun/core/src/ir/operation/LowerInfo.cc index e33eed0..93bfc4e 100644 --- a/runtime/neurun/core/src/ir/operation/LowerInfo.cc +++ b/runtime/neurun/core/src/ir/operation/LowerInfo.cc @@ -23,7 +23,7 @@ namespace graph namespace operation { -LowerInfo::LowerInfo(const backend::Backend *backend, model::Layout layout) +LowerInfo::LowerInfo(const backend::Backend *backend, ir::Layout layout) : _permute_factor{backend, layout} { // DO NOTHING diff --git a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc index 52d7b33..e848c88 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationEliminationPass.cc @@ -185,13 +185,13 @@ bool 
PermutationEliminationPass::isPermuteLayerToEliminate( if (is_for_model_input) { // check if this is NHWC_TO_NCHW permutation: must have single input, which is model's input - return (inp_indexes.size() == 1 && input_layout == model::Layout::NHWC && - output_layout == model::Layout::NCHW); + return (inp_indexes.size() == 1 && input_layout == ir::Layout::NHWC && + output_layout == ir::Layout::NCHW); } // check if this is NCHW_TO_NHWC permutation: must have single output, which is model's output - return (out_indexes.size() == 1 && input_layout == model::Layout::NCHW && - output_layout == model::Layout::NHWC); + return (out_indexes.size() == 1 && input_layout == ir::Layout::NCHW && + output_layout == ir::Layout::NHWC); } } // namespace pass diff --git a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc index ad823cb..13a751b 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationInsertionPass.cc @@ -140,7 +140,7 @@ PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index auto output_backend = factor.backend(); // NOTE Permute may not have specific layout because the layout of input and output may be // different. 
- const auto permute_node_layout = model::Layout::UNKNOWN; + const auto permute_node_layout = ir::Layout::UNKNOWN; const auto permute_node_backend = backend::BackendManager::get().getDefault(); const operand::PermuteFactor permute_node_factor{permute_node_backend, permute_node_layout}; @@ -169,11 +169,11 @@ PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index const auto output_layout = factor.layout(); using Permute = model::operation::Permute; const auto permute_type = [&]() { - if (input_layout == model::Layout::NHWC && output_layout == model::Layout::NCHW) + if (input_layout == ir::Layout::NHWC && output_layout == ir::Layout::NCHW) { return Permute::Type::NHWC_TO_NCHW; } - else if (input_layout == model::Layout::NCHW && output_layout == model::Layout::NHWC) + else if (input_layout == ir::Layout::NCHW && output_layout == ir::Layout::NHWC) { return Permute::Type::NCHW_TO_NHWC; } diff --git a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc index 0b5929b..9faeb45 100644 --- a/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc +++ b/runtime/neurun/core/src/ir/pass/PermutationOperationPass.cc @@ -53,7 +53,7 @@ void PermutationOperationPass::changeToKeepLayout(const model::Operation &node) if (_graph.getLowerInfo(subg_index)->backend()->config()->id() != "cpu") { // TODO Change backend of this node - assert(frontend_layout == model::Layout::NHWC || backend_layout == model::Layout::UNKNOWN); + assert(frontend_layout == ir::Layout::NHWC || backend_layout == ir::Layout::UNKNOWN); } // Divide subgraph based on target operation diff --git a/runtime/neurun/core/src/model/LayoutSet.h b/runtime/neurun/core/src/model/LayoutSet.h index be75c8e..445961a 100644 --- a/runtime/neurun/core/src/model/LayoutSet.h +++ b/runtime/neurun/core/src/model/LayoutSet.h @@ -20,7 +20,7 @@ #include #include -#include "model/Layout.h" +#include "ir/Layout.h" namespace neurun { diff --git 
a/runtime/neurun/core/src/util/ShapeInference.cc b/runtime/neurun/core/src/util/ShapeInference.cc index 06597ec..ffb8dab 100644 --- a/runtime/neurun/core/src/util/ShapeInference.cc +++ b/runtime/neurun/core/src/util/ShapeInference.cc @@ -106,10 +106,9 @@ Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_ } Shapes inferAvgPoolShape(const model::Shape &in_shape, - const model::operation::AvgPool2D::Param &param, - const model::Layout layout) + const model::operation::AvgPool2D::Param &param, const ir::Layout layout) { - assert(layout == model::Layout::NHWC); + assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw, param.padding, param.stride); @@ -139,10 +138,9 @@ Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat: } Shapes inferMaxPoolShape(const model::Shape &in_shape, - const model::operation::MaxPool2D::Param &param, - const model::Layout layout) + const model::operation::MaxPool2D::Param &param, const ir::Layout layout) { - assert(layout == model::Layout::NHWC); + assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw, param.padding, param.stride); @@ -151,9 +149,9 @@ Shapes inferMaxPoolShape(const model::Shape &in_shape, } Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, - const model::operation::Conv2D::Param &param, model::Layout layout) + const model::operation::Conv2D::Param &param, ir::Layout layout) { - assert(layout == model::Layout::NHWC); + assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); // Kernel format is [depth_out, kernel_height, kernel_width, depth_in] @@ -168,9 +166,9 @@ Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_sh Shapes inferDepthwiseConv2DShape(const model::Shape 
&in_shape, const model::Shape &ker_shape, const model::operation::DepthwiseConv2D::Param &param, - model::Layout layout) + ir::Layout layout) { - assert(layout == model::Layout::NHWC); + assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); // Kernel format is [1, kernel_height, kernel_width, depth_out] diff --git a/runtime/neurun/core/src/util/Utils.cc b/runtime/neurun/core/src/util/Utils.cc index cd912a8..f8daa02 100644 --- a/runtime/neurun/core/src/util/Utils.cc +++ b/runtime/neurun/core/src/util/Utils.cc @@ -41,19 +41,19 @@ const char *to_string(const model::PaddingType &type) return nullptr; } -Coordinates convertCoordinates(const Coordinates &from_coordinates, model::Layout from_layout, - model::Layout to_layout) +Coordinates convertCoordinates(const Coordinates &from_coordinates, ir::Layout from_layout, + ir::Layout to_layout) { assert(from_coordinates.size() == 4); Coordinates to{from_coordinates}; - if (from_layout == model::Layout::NHWC && to_layout == model::Layout::NCHW) + if (from_layout == ir::Layout::NHWC && to_layout == ir::Layout::NCHW) { to.set(0, from_coordinates[0]); to.set(1, from_coordinates[3]); to.set(2, from_coordinates[1]); to.set(3, from_coordinates[2]); } - else if (from_layout == model::Layout::NCHW && to_layout == model::Layout::NHWC) + else if (from_layout == ir::Layout::NCHW && to_layout == ir::Layout::NHWC) { to.set(0, from_coordinates[0]); to.set(1, from_coordinates[2]); diff --git a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc index 7b580f1..883d325 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc +++ b/runtime/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc @@ -136,8 +136,7 @@ bool ANeuralNetworksExecution::setInput(uint32_t index, const ANeuralNetworksOpe // words, we can assume that io_layout from nnapi always is the same as layout of the used // model. 
// TODO Set layout of model - _execution->setInput(input_index, type_info, shape, buffer, length, - neurun::model::Layout::NHWC); + _execution->setInput(input_index, type_info, shape, buffer, length, neurun::ir::Layout::NHWC); } catch (const std::exception &e) { @@ -165,8 +164,7 @@ bool ANeuralNetworksExecution::setOutput(uint32_t index, const ANeuralNetworksOp // words, we can assume that io_layout from nnapi always is the same as layout of the used // model. // TODO Set layout of model - _execution->setOutput(output_index, type_info, shape, buffer, length, - neurun::model::Layout::NHWC); + _execution->setOutput(output_index, type_info, shape, buffer, length, neurun::ir::Layout::NHWC); } catch (const std::exception &e) { diff --git a/runtime/neurun/test/graph/operand/LayoutSet.cc b/runtime/neurun/test/graph/operand/LayoutSet.cc index b79fd13..0111c8b 100644 --- a/runtime/neurun/test/graph/operand/LayoutSet.cc +++ b/runtime/neurun/test/graph/operand/LayoutSet.cc @@ -18,7 +18,7 @@ #include "model/LayoutSet.h" -using neurun::model::Layout; +using neurun::ir::Layout; using neurun::model::LayoutSet; TEST(graph_operand_LayoutSet, layout_set_operators) diff --git a/runtime/neurun/test/util/ShapeInference.cc b/runtime/neurun/test/util/ShapeInference.cc index 0e2928f..87934e0 100644 --- a/runtime/neurun/test/util/ShapeInference.cc +++ b/runtime/neurun/test/util/ShapeInference.cc @@ -16,7 +16,7 @@ #include -#include "model/Layout.h" +#include "ir/Layout.h" #include "util/ShapeInference.h" using namespace neurun::model; -- 2.7.4