From b703003a69b61923049c2f35d67c074de9460062 Mon Sep 17 00:00:00 2001 From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics Date: Fri, 6 Dec 2019 14:39:36 +0300 Subject: [PATCH] [neurun] Move TypeInfo.h and Shape.h into ir directory (#9427) * Move `TypeInfo.h` and `Shape.h` in `ir` directory. * Move `TypeInfo`, `Shape`, `FeatureShape` to `neurun::ir` namespace, fixing uses where possible. Signed-off-by: Sergei Barannikov --- runtime/neurun/backend/acl_cl/KernelGenerator.cc | 2 +- runtime/neurun/backend/acl_cl/ShapeFixer.cc | 38 +++++++++++----------- runtime/neurun/backend/acl_common/Convert.cc | 8 ++--- runtime/neurun/backend/acl_common/Convert.h | 11 +++---- runtime/neurun/backend/acl_neon/KernelGenerator.cc | 4 +-- runtime/neurun/backend/acl_neon/ShapeFixer.cc | 36 ++++++++++---------- runtime/neurun/backend/cpu/ShapeFixer.cc | 12 +++---- runtime/neurun/backend/cpu/kernel/PermuteLayer.cc | 2 +- runtime/neurun/backend/cpu/kernel/PermuteLayer.h | 7 ++-- runtime/neurun/backend/srcn/Convert.cc | 18 +++++----- runtime/neurun/backend/srcn/Convert.h | 14 ++++---- .../neurun/core/include/backend/ITensorRegister.h | 8 ++--- .../neurun/core/include/compiler/SubTensorInfo.h | 8 ++--- runtime/neurun/core/include/exec/Execution.h | 7 ++-- runtime/neurun/core/include/ir/Graph.h | 2 +- runtime/neurun/core/include/{model => ir}/Shape.h | 15 ++++++--- .../neurun/core/include/{model => ir}/TypeInfo.h | 15 ++++++--- runtime/neurun/core/include/model/OperandInfo.h | 4 +-- runtime/neurun/core/include/util/Padding.h | 11 +++---- runtime/neurun/core/include/util/ShapeInference.h | 16 ++++----- runtime/neurun/core/src/exec/Execution.cc | 8 ++--- runtime/neurun/core/src/exec/ExecutorBase.cc | 8 ++--- runtime/neurun/core/src/exec/ExecutorBase.h | 6 ++-- runtime/neurun/core/src/exec/Sink.h | 8 ++--- runtime/neurun/core/src/exec/Source.h | 10 +++--- .../src/exec/interp/operations/FullyConnected.cc | 2 +- .../src/exec/interp/operations/OperationUtil.h | 6 ++-- 
runtime/neurun/core/src/ir/Graph.cc | 2 +- runtime/neurun/core/src/{model => ir}/Shape.cc | 6 ++-- runtime/neurun/core/src/{model => ir}/TypeInfo.cc | 6 ++-- .../neurun/core/src/ir/operand/Shape4DConvert.h | 2 +- runtime/neurun/core/src/util/Padding.cc | 13 ++++---- runtime/neurun/core/src/util/ShapeInference.cc | 34 +++++++++---------- runtime/neurun/frontend/base_loader/base_loader.h | 8 ++--- .../neurun/frontend/nnapi/wrapper/NNAPIConvert.h | 8 ++--- runtime/neurun/test/core/compiler/Scheduler.cc | 4 +-- runtime/neurun/test/graph/operand/Set.cc | 6 ++-- runtime/neurun/test/graph/operand/UseDef.cc | 4 +-- runtime/neurun/test/graph/operation/SetIO.cc | 8 ++--- runtime/neurun/test/graph/verifier/Verifier.cc | 4 +-- 40 files changed, 196 insertions(+), 195 deletions(-) rename runtime/neurun/core/include/{model => ir}/Shape.h (89%) rename runtime/neurun/core/include/{model => ir}/TypeInfo.h (85%) rename runtime/neurun/core/src/{model => ir}/Shape.cc (97%) rename runtime/neurun/core/src/{model => ir}/TypeInfo.cc (94%) diff --git a/runtime/neurun/backend/acl_cl/KernelGenerator.cc b/runtime/neurun/backend/acl_cl/KernelGenerator.cc index 0be03c5..436366a 100644 --- a/runtime/neurun/backend/acl_cl/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_cl/KernelGenerator.cc @@ -454,7 +454,7 @@ void KernelGenerator::visit(const model::operation::FullyConnected &node) // Check for reshaping input's shape into rank-2 bool needs_reshape = false; - neurun::model::Shape reshape(2); + ir::Shape reshape(2); if (input_rank == 4) { const auto feature_size = _ctx.at(input_index).shape().num_elements(); diff --git a/runtime/neurun/backend/acl_cl/ShapeFixer.cc b/runtime/neurun/backend/acl_cl/ShapeFixer.cc index 58efe0d..674b3d6 100644 --- a/runtime/neurun/backend/acl_cl/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_cl/ShapeFixer.cc @@ -96,8 +96,8 @@ void ShapeFixer::visit(const model::operation::Mul &node) // TODO remove const_cast later. 
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -115,7 +115,7 @@ void ShapeFixer::visit(const model::operation::Squeeze &node) { const auto output_index{node.getOutputs().at(0)}; if (_ctx.at(output_index).shape().rank() == 0) - const_cast<::neurun::model::Shape &>(_ctx.at(output_index).shape()).extendRank(1); + const_cast<ir::Shape &>(_ctx.at(output_index).shape()).extendRank(1); const auto input_index{node.getInputs().at(model::operation::Squeeze::Input::INPUT)}; _tensor_builder->dimCorrection(input_index, false); _tensor_builder->dimCorrection(output_index, false); @@ -138,8 +138,8 @@ void ShapeFixer::visit(const model::operation::Add &node) { const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -155,8 +155,8 @@ void ShapeFixer::visit(const model::operation::Sub &node) // TODO remove const_cast later.
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -172,8 +172,8 @@ void ShapeFixer::visit(const model::operation::Div &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -195,8 +195,8 @@ void ShapeFixer::visit(const model::operation::LogicalAnd &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast<ir::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -216,8 +216,8 @@ void ShapeFixer::visit(const model::operation::Comparison &node) // TODO remove const_cast later.
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -278,8 +278,8 @@ void ShapeFixer::visit(const model::operation::PReLU &node) { const auto broadcast_rank = std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); } } @@ -296,8 +296,8 @@ void ShapeFixer::visit(const model::operation::LogicalOr &node) { const auto broadcast_rank = std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -312,8 +312,8 @@ void ShapeFixer::visit(const model::operation::SquaredDifference &node) { const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + 
const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } diff --git a/runtime/neurun/backend/acl_common/Convert.cc b/runtime/neurun/backend/acl_common/Convert.cc index b3e22e6..c39fa66 100644 --- a/runtime/neurun/backend/acl_common/Convert.cc +++ b/runtime/neurun/backend/acl_common/Convert.cc @@ -45,9 +45,8 @@ namespace backend namespace acl_common { -::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape, - ir::Layout frontend_layout, ir::Layout backend_layout, - bool apply_dim_correction) +::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout, bool apply_dim_correction) { const uint32_t rank = shape.rank(); @@ -113,8 +112,7 @@ namespace acl_common return ::arm_compute::QuantizationInfo(scale, offset); } -::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, - const ::neurun::model::TypeInfo &typeInfo, +::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction) { diff --git a/runtime/neurun/backend/acl_common/Convert.h b/runtime/neurun/backend/acl_common/Convert.h index f8564b7..33e6815 100644 --- a/runtime/neurun/backend/acl_common/Convert.h +++ b/runtime/neurun/backend/acl_common/Convert.h @@ -24,8 +24,8 @@ #include "ir/Layout.h" #include "ir/InternalType.h" #include "model/Operand.h" -#include "model/Shape.h" -#include "model/TypeInfo.h" +#include "ir/Shape.h" +#include "ir/TypeInfo.h" #include "misc/feature/Shape.h" #include "misc/kernel/Shape.h" @@ -41,15 +41,14 @@ namespace backend namespace acl_common { -::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape, - ir::Layout frontend_layout, ir::Layout backend_layout, +::arm_compute::TensorShape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout, bool apply_dim_correction = true); ::arm_compute::Coordinates 
asTensorCoordinate(const ::neurun::util::Coordinates &coord, ir::Layout frontend_layout, ir::Layout backend_layout); ::arm_compute::DataType asDataType(ir::DataType type); -::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape, - const ::neurun::model::TypeInfo &typeInfo, +::arm_compute::TensorInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout frontend_layout, ir::Layout backend_layout, bool apply_dim_correction = true); diff --git a/runtime/neurun/backend/acl_neon/KernelGenerator.cc b/runtime/neurun/backend/acl_neon/KernelGenerator.cc index 84e9177..080a38e 100644 --- a/runtime/neurun/backend/acl_neon/KernelGenerator.cc +++ b/runtime/neurun/backend/acl_neon/KernelGenerator.cc @@ -622,10 +622,10 @@ void KernelGenerator::visit(const model::operation::FullyConnected &node) // Check for reshaping input's shape into rank-2 bool needs_reshape = false; - neurun::model::Shape reshape(2); + ir::Shape reshape(2); if (input_rank == 4) { - model::FeatureShape ifm_shape_feature = + ir::FeatureShape ifm_shape_feature = _ctx.at(input_index).shape().asFeature(_current_subg_layout); auto feature_size = ifm_shape_feature.N * ifm_shape_feature.C * ifm_shape_feature.H * ifm_shape_feature.W; diff --git a/runtime/neurun/backend/acl_neon/ShapeFixer.cc b/runtime/neurun/backend/acl_neon/ShapeFixer.cc index 80f539a..54c95a7 100644 --- a/runtime/neurun/backend/acl_neon/ShapeFixer.cc +++ b/runtime/neurun/backend/acl_neon/ShapeFixer.cc @@ -156,8 +156,8 @@ void ShapeFixer::visit(const model::operation::LogicalAnd &node) // TODO remove const_cast later. 
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -175,8 +175,8 @@ void ShapeFixer::visit(const model::operation::LogicalOr &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -206,8 +206,8 @@ void ShapeFixer::visit(const model::operation::Mul &node) // TODO remove const_cast later. 
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -224,8 +224,8 @@ void ShapeFixer::visit(const model::operation::PReLU &node) { const auto broadcast_rank = std::max(_ctx.at(ifm_index).shape().rank(), _ctx.at(alpha_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(ifm_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(alpha_index).shape()).extendRank(broadcast_rank); } } @@ -266,8 +266,8 @@ void ShapeFixer::visit(const model::operation::Comparison &node) // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input0_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(input1_index).shape()).extendRank(broadcast_rank); } } @@ -319,8 +319,8 @@ void ShapeFixer::visit(const model::operation::SquaredDifference &node) // TODO remove const_cast later. 
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -335,8 +335,8 @@ void ShapeFixer::visit(const model::operation::Sub &node) std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); // TODO remove const_cast later. For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -361,8 +361,8 @@ void ShapeFixer::visit(const model::operation::Add &node) { const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -378,8 +378,8 @@ void ShapeFixer::visit(const model::operation::Div &node) // TODO remove const_cast later. 
For example, _ctx may need to be a non const variable or // a node to extend shape may be inserted in front of this operation - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } diff --git a/runtime/neurun/backend/cpu/ShapeFixer.cc b/runtime/neurun/backend/cpu/ShapeFixer.cc index 679d2cd..5ec7ebd 100644 --- a/runtime/neurun/backend/cpu/ShapeFixer.cc +++ b/runtime/neurun/backend/cpu/ShapeFixer.cc @@ -94,8 +94,8 @@ void ShapeFixer::visit(const model::operation::Add &node) throw std::runtime_error{"ShapeFixer: NYI for broadcast Add"}; const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } @@ -118,8 +118,8 @@ void ShapeFixer::visit(const model::operation::Sub &node) throw std::runtime_error{"ShapeFixer: NYI for broadcast Sub"}; const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } void ShapeFixer::visit(const model::operation::Mul &node) @@ -140,8 +140,8 @@ void ShapeFixer::visit(const model::operation::Mul &node) throw 
std::runtime_error{"ShapeFixer: NYI for broadcast Mul"}; const auto broadcast_rank = std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank()); - const_cast<::neurun::model::Shape &>(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); - const_cast<::neurun::model::Shape &>(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(lhs_index).shape()).extendRank(broadcast_rank); + const_cast(_ctx.at(rhs_index).shape()).extendRank(broadcast_rank); } } diff --git a/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc b/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc index fc758cf..9d596cb 100644 --- a/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc +++ b/runtime/neurun/backend/cpu/kernel/PermuteLayer.cc @@ -29,7 +29,7 @@ using Type = model::operation::Permute::Type; void PermuteLayer::configure(std::shared_ptr input, std::shared_ptr output, - const model::Shape &output_shape, Type type, ir::DataType dataType) + const ir::Shape &output_shape, Type type, ir::DataType dataType) { _input = input; _output = output; diff --git a/runtime/neurun/backend/cpu/kernel/PermuteLayer.h b/runtime/neurun/backend/cpu/kernel/PermuteLayer.h index 3fec953..cf89168 100644 --- a/runtime/neurun/backend/cpu/kernel/PermuteLayer.h +++ b/runtime/neurun/backend/cpu/kernel/PermuteLayer.h @@ -43,9 +43,8 @@ public: public: void configure(std::shared_ptr input, - std::shared_ptr output, - const model::Shape &output_shape, model::operation::Permute::Type type, - ir::DataType dataType); + std::shared_ptr output, const ir::Shape &output_shape, + model::operation::Permute::Type type, ir::DataType dataType); void run(); void runSync() { @@ -197,7 +196,7 @@ private: private: std::shared_ptr _input{nullptr}; std::shared_ptr _output{nullptr}; - model::Shape _output_shape{}; + ir::Shape _output_shape{}; model::operation::Permute::Type _type{model::operation::Permute::Type::COPY}; ir::DataType _dataType{ir::DataType::FLOAT32}; }; diff --git 
a/runtime/neurun/backend/srcn/Convert.cc b/runtime/neurun/backend/srcn/Convert.cc index 267f62b..46b11ca 100644 --- a/runtime/neurun/backend/srcn/Convert.cc +++ b/runtime/neurun/backend/srcn/Convert.cc @@ -29,13 +29,13 @@ namespace backend namespace srcn { -model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout frontend_layout, - kernel::FilterLayout backend_layout) +ir::Shape asKernelShape(const ir::Shape &shape, kernel::FilterLayout frontend_layout, + kernel::FilterLayout backend_layout) { assert(shape.rank() == 4); if (frontend_layout == backend_layout) { - return model::Shape{shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3)}; + return ir::Shape{shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3)}; } const auto permutation = getFilterPermutation(frontend_layout, backend_layout); @@ -43,16 +43,16 @@ model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout front { throw std::runtime_error("Not supported FilterLayout"); } - return model::Shape{shape.dim(permutation[0]), shape.dim(permutation[1]), - shape.dim(permutation[2]), shape.dim(permutation[3])}; + return ir::Shape{shape.dim(permutation[0]), shape.dim(permutation[1]), shape.dim(permutation[2]), + shape.dim(permutation[3])}; } -model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout, - ir::Layout backend_layout) +ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout) { const uint32_t rank = shape.rank(); - model::Shape ret(rank); + ir::Shape ret(rank); for (uint32_t axis = 0; axis < rank; ++axis) { const auto ncnn_axis = ToNCNNAxis(rank, axis, frontend_layout, backend_layout); @@ -62,7 +62,7 @@ model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout return ret; } -model::OperandInfo asTensorInfo(const model::Shape &shape, const model::TypeInfo &typeInfo, +model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout 
frontend_layout, ir::Layout backend_layout) { model::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo); diff --git a/runtime/neurun/backend/srcn/Convert.h b/runtime/neurun/backend/srcn/Convert.h index 6f4c4e3..3268da2 100644 --- a/runtime/neurun/backend/srcn/Convert.h +++ b/runtime/neurun/backend/srcn/Convert.h @@ -19,8 +19,8 @@ #include "kernel/OperationUtils.h" #include -#include -#include +#include +#include #include namespace neurun @@ -30,13 +30,13 @@ namespace backend namespace srcn { -model::Shape asKernelShape(const model::Shape &shape, kernel::FilterLayout frontend_layout, - kernel::FilterLayout backend_layout); +ir::Shape asKernelShape(const ir::Shape &shape, kernel::FilterLayout frontend_layout, + kernel::FilterLayout backend_layout); -model::Shape asTensorShape(const model::Shape &shape, ir::Layout frontend_layout, - ir::Layout backend_layout); +ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout, + ir::Layout backend_layout); -model::OperandInfo asTensorInfo(const model::Shape &shape, const model::TypeInfo &typeInfo, +model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo, ir::Layout frontend_layout, ir::Layout backend_layout); } // namespace srcn diff --git a/runtime/neurun/core/include/backend/ITensorRegister.h b/runtime/neurun/core/include/backend/ITensorRegister.h index 286823a..d5be3a2 100644 --- a/runtime/neurun/core/include/backend/ITensorRegister.h +++ b/runtime/neurun/core/include/backend/ITensorRegister.h @@ -30,12 +30,12 @@ namespace { -neurun::model::Shape permuteTensorShape(const neurun::model::Shape &shape, - neurun::ir::Layout frontend_layout, - neurun::ir::Layout backend_layout) +neurun::ir::Shape permuteTensorShape(const neurun::ir::Shape &shape, + neurun::ir::Layout frontend_layout, + neurun::ir::Layout backend_layout) { assert(shape.rank() <= 4); - neurun::model::Shape backend_shape{shape}; + neurun::ir::Shape backend_shape{shape}; if (shape.rank() 
== 4 && frontend_layout == neurun::ir::Layout::NHWC && backend_layout == neurun::ir::Layout::NCHW) { diff --git a/runtime/neurun/core/include/compiler/SubTensorInfo.h b/runtime/neurun/core/include/compiler/SubTensorInfo.h index 92b2759..60405af 100644 --- a/runtime/neurun/core/include/compiler/SubTensorInfo.h +++ b/runtime/neurun/core/include/compiler/SubTensorInfo.h @@ -58,12 +58,12 @@ public: * @brief Return tensor shape * @return Tensor shape */ - const model::Shape shape(void) const { return _shape; } + const ir::Shape &shape(void) const { return _shape; } /** * @brief Return tensor type * @return Tensor type */ - const model::TypeInfo type(void) const { return _type; } + const ir::TypeInfo &type(void) const { return _type; } /** * @brief Return tensor's offset in parent tensor * @return Tensor offset @@ -72,8 +72,8 @@ public: private: const model::OperandIndex _parent; - const model::Shape _shape; - const model::TypeInfo _type; + const ir::Shape _shape; + const ir::TypeInfo _type; const neurun::util::Coordinates _offset; }; diff --git a/runtime/neurun/core/include/exec/Execution.h b/runtime/neurun/core/include/exec/Execution.h index c23ac09..7a00741 100644 --- a/runtime/neurun/core/include/exec/Execution.h +++ b/runtime/neurun/core/include/exec/Execution.h @@ -71,7 +71,7 @@ public: * @param[in] length Input data's length * @param[in] layout Input data's data format */ - void setInput(const model::IOIndex &index, const model::TypeInfo &type, const model::Shape &shape, + void setInput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set output data's information @@ -92,9 +92,8 @@ public: * @param[in] length Output data's length * @param[in] layout Output data's data format */ - void setOutput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, void *buffer, size_t length, - ir::Layout layout = ir::Layout::NHWC); 
+ void setOutput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape, + void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC); /** * @brief Set input data's data format * @param[in] index Input index diff --git a/runtime/neurun/core/include/ir/Graph.h b/runtime/neurun/core/include/ir/Graph.h index be78f3a..0ad5135 100644 --- a/runtime/neurun/core/include/ir/Graph.h +++ b/runtime/neurun/core/include/ir/Graph.h @@ -124,7 +124,7 @@ public: // Graph Building public: - model::OperandIndex addOperand(const model::Shape &shape, const model::TypeInfo &type); + model::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type); model::OperationIndex addOperation(std::unique_ptr &&node); void setOperandValue(const model::OperandIndex &ind, std::unique_ptr &&data); void addInput(const model::OperandIndex &ind); diff --git a/runtime/neurun/core/include/model/Shape.h b/runtime/neurun/core/include/ir/Shape.h similarity index 89% rename from runtime/neurun/core/include/model/Shape.h rename to runtime/neurun/core/include/ir/Shape.h index e7d2553..fed2ae8 100644 --- a/runtime/neurun/core/include/model/Shape.h +++ b/runtime/neurun/core/include/ir/Shape.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_SHAPE_H__ -#define __NEURUN_MODEL_SHAPE_H__ +#ifndef __NEURUN_IR_SHAPE_H__ +#define __NEURUN_IR_SHAPE_H__ #include "ir/Layout.h" #include "misc/feature/Shape.h" @@ -25,7 +25,7 @@ namespace neurun { -namespace model +namespace ir { // TODO Remove this dependency. @@ -77,7 +77,14 @@ private: inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); } +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces. 
+namespace model
+{
+using FeatureShape = ir::FeatureShape;
+using Shape = ir::Shape;
} // namespace model } // namespace neurun -#endif // __NEURUN_MODEL_SHAPE_H__ +#endif // __NEURUN_IR_SHAPE_H__ diff --git a/runtime/neurun/core/include/model/TypeInfo.h b/runtime/neurun/core/include/ir/TypeInfo.h similarity index 85% rename from runtime/neurun/core/include/model/TypeInfo.h rename to runtime/neurun/core/include/ir/TypeInfo.h index 7b29085..77f8b4e 100644 --- a/runtime/neurun/core/include/model/TypeInfo.h +++ b/runtime/neurun/core/include/ir/TypeInfo.h @@ -14,8 +14,8 @@ * limitations under the License. */ -#ifndef __NEURUN_MODEL_TYPEINFO_H__ -#define __NEURUN_MODEL_TYPEINFO_H__ +#ifndef __NEURUN_IR_TYPEINFO_H__ +#define __NEURUN_IR_TYPEINFO_H__ #include <cstdint> @@ -23,7 +23,7 @@ namespace neurun { -namespace model +namespace ir { class TypeInfo @@ -53,7 +53,14 @@ private: bool operator==(const TypeInfo &lhs, const TypeInfo &rhs); bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs); +} // namespace ir + +// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model +{ +using TypeInfo = ir::TypeInfo; } // namespace model + } // namespace neurun -#endif // __NEURUN_MODEL_TYPEINFO_H__ +#endif // __NEURUN_IR_TYPEINFO_H__ diff --git a/runtime/neurun/core/include/model/OperandInfo.h b/runtime/neurun/core/include/model/OperandInfo.h index 66272c7..7a97d06 100644 --- a/runtime/neurun/core/include/model/OperandInfo.h +++ b/runtime/neurun/core/include/model/OperandInfo.h @@ -21,8 +21,8 @@ #ifndef __NEURUN_MODEL_OPERAND_INFO_H__ #define __NEURUN_MODEL_OPERAND_INFO_H__ -#include "Shape.h" -#include "TypeInfo.h" +#include "ir/Shape.h" +#include "ir/TypeInfo.h" #include "ir/Layout.h" namespace neurun diff --git a/runtime/neurun/core/include/util/Padding.h b/runtime/neurun/core/include/util/Padding.h index 7bc8b65..3c707b8 100644 --- a/runtime/neurun/core/include/util/Padding.h +++ b/runtime/neurun/core/include/util/Padding.h @@ -19,7 +19,7 @@ #include -#include "model/Shape.h" +#include "ir/Shape.h" #include "ir/InternalType.h" namespace neurun @@ -28,12 +28,11 @@ namespace util { ir::ExplicitPadding validPadding(void); -ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding samePadding(const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh); -ir::ExplicitPadding calculatePadding(const ir::Padding &padding, - const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding calculatePadding(const ir::Padding &padding, const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh); } // namespace util diff --git a/runtime/neurun/core/include/util/ShapeInference.h b/runtime/neurun/core/include/util/ShapeInference.h index fce8bf2..1a6e5ec 100644 --- a/runtime/neurun/core/include/util/ShapeInference.h +++ 
b/runtime/neurun/core/include/util/ShapeInference.h @@ -31,29 +31,27 @@ namespace neurun namespace shape_inference { -using Shapes = std::vector; +using Shapes = std::vector; -Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape); +Shapes inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape); -Shapes inferAvgPoolShape(const model::Shape &in_shape, - const model::operation::AvgPool2D::Param ¶m, +Shapes inferAvgPoolShape(const ir::Shape &in_shape, const model::operation::AvgPool2D::Param ¶m, ir::Layout layout = ir::Layout::NHWC); Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat::Param ¶m); -Shapes inferMaxPoolShape(const model::Shape &in_shape, - const model::operation::MaxPool2D::Param ¶m, +Shapes inferMaxPoolShape(const ir::Shape &in_shape, const model::operation::MaxPool2D::Param ¶m, ir::Layout layout = ir::Layout::NHWC); -Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::Conv2D::Param ¶m, ir::Layout layout = ir::Layout::NHWC); -Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::DepthwiseConv2D::Param ¶m, ir::Layout layout = ir::Layout::NHWC); -Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape); +Shapes inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape); } // namespace shape_inference } // namespace neurun diff --git a/runtime/neurun/core/src/exec/Execution.cc b/runtime/neurun/core/src/exec/Execution.cc index e135617..bbbbba2 100644 --- a/runtime/neurun/core/src/exec/Execution.cc +++ b/runtime/neurun/core/src/exec/Execution.cc @@ -46,8 +46,8 @@ void Execution::setInput(const model::IOIndex &index, const void *buffer, 
size_t } // TODO Remove default parameter -void Execution::setInput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, const void *buffer, size_t length, +void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type, + const ir::Shape &shape, const void *buffer, size_t length, ir::Layout layout) { const model::OperandInfo info{shape, type}; @@ -78,8 +78,8 @@ void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t leng } // TODO Remove default parameter -void Execution::setOutput(const model::IOIndex &index, const model::TypeInfo &type, - const model::Shape &shape, void *buffer, size_t length, ir::Layout layout) +void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type, + const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout) { const model::OperandInfo info{shape, type}; diff --git a/runtime/neurun/core/src/exec/ExecutorBase.cc b/runtime/neurun/core/src/exec/ExecutorBase.cc index ba316ed..2a2bf5e 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.cc +++ b/runtime/neurun/core/src/exec/ExecutorBase.cc @@ -30,9 +30,9 @@ ExecutorBase::ExecutorBase(const graph::Graph &graph, // DO NOTHING } -std::unique_ptr ExecutorBase::source(const model::IOIndex &index, - const model::TypeInfo &type, const void *buffer, - size_t length, ir::Layout io_layout) +std::unique_ptr ExecutorBase::source(const model::IOIndex &index, const ir::TypeInfo &type, + const void *buffer, size_t length, + ir::Layout io_layout) { using ir::DataType; switch (type.type()) @@ -51,7 +51,7 @@ std::unique_ptr ExecutorBase::source(const model::IOIndex &index, } } -std::unique_ptr ExecutorBase::sink(const model::IOIndex &index, const model::TypeInfo &type, +std::unique_ptr ExecutorBase::sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer, size_t length, ir::Layout io_layout) { using ir::DataType; diff --git a/runtime/neurun/core/src/exec/ExecutorBase.h 
b/runtime/neurun/core/src/exec/ExecutorBase.h index 5abec6b..618d14b 100644 --- a/runtime/neurun/core/src/exec/ExecutorBase.h +++ b/runtime/neurun/core/src/exec/ExecutorBase.h @@ -63,10 +63,10 @@ public: void addObserver(std::unique_ptr ref) { _subject.add(std::move(ref)); }; private: - std::unique_ptr source(const model::IOIndex &index, const model::TypeInfo &type, + std::unique_ptr source(const model::IOIndex &index, const ir::TypeInfo &type, const void *buffer, size_t length, ir::Layout io_layout); - std::unique_ptr sink(const model::IOIndex &index, const model::TypeInfo &type, - void *buffer, size_t length, ir::Layout io_layout); + std::unique_ptr sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer, + size_t length, ir::Layout io_layout); template std::unique_ptr source(const model::IOIndex &index, const void *buffer, size_t length, diff --git a/runtime/neurun/core/src/exec/Sink.h b/runtime/neurun/core/src/exec/Sink.h index 07b72aa..bb2a6c5 100644 --- a/runtime/neurun/core/src/exec/Sink.h +++ b/runtime/neurun/core/src/exec/Sink.h @@ -42,7 +42,7 @@ struct ISink template class ITemplSink : public ISink { public: - ITemplSink(void *output_buffer, const size_t &output_size, const model::Shape &shape, + ITemplSink(void *output_buffer, const size_t &output_size, const ir::Shape &shape, const bool copy, ir::Layout io_layout) : _output_buffer{reinterpret_cast(output_buffer)}, _output_size{output_size}, _shape{shape}, _copy{copy}, _io_layout{io_layout} @@ -161,7 +161,7 @@ protected: private: T *_output_buffer; const size_t _output_size; - const model::Shape _shape; + const ir::Shape _shape; const bool _copy; const ir::Layout _io_layout; }; @@ -169,7 +169,7 @@ private: template class PermutateSink final : public ITemplSink { public: - PermutateSink(void *output_buffer, const size_t &output_size, const model::Shape &shape, + PermutateSink(void *output_buffer, const size_t &output_size, const ir::Shape &shape, ir::Layout io_layout) : 
ITemplSink(output_buffer, output_size, shape, false, io_layout) { @@ -186,7 +186,7 @@ public: template class CopySink final : public ITemplSink { public: - CopySink(void *output_buffer, const size_t &output_size, const model::Shape &shape, + CopySink(void *output_buffer, const size_t &output_size, const ir::Shape &shape, ir::Layout io_layout = ir::Layout::UNKNOWN) : ITemplSink(output_buffer, output_size, shape, true, io_layout) { diff --git a/runtime/neurun/core/src/exec/Source.h b/runtime/neurun/core/src/exec/Source.h index 3272d07..fd52dd5 100644 --- a/runtime/neurun/core/src/exec/Source.h +++ b/runtime/neurun/core/src/exec/Source.h @@ -27,7 +27,7 @@ #include "util/Utils.h" #include #include -#include "model/Shape.h" +#include "ir/Shape.h" namespace neurun { @@ -45,7 +45,7 @@ struct ISource template class ITemplSource : public ISource { public: - ITemplSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, + ITemplSource(const void *input_buffer, const size_t &input_size, const ir::Shape &shape, const bool copy, ir::Layout io_layout) : _input_buffer{reinterpret_cast(input_buffer)}, _input_size{input_size}, _shape{shape}, _copy(copy), _io_layout{io_layout} @@ -167,7 +167,7 @@ protected: private: const T *_input_buffer; const size_t _input_size; - const model::Shape _shape; + const ir::Shape _shape; const bool _copy; const ir::Layout _io_layout; }; @@ -175,7 +175,7 @@ private: template class PermutateSource final : public ITemplSource { public: - PermutateSource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, + PermutateSource(const void *input_buffer, const size_t &input_size, const ir::Shape &shape, ir::Layout io_layout) : ITemplSource(input_buffer, input_size, shape, false, io_layout) { @@ -192,7 +192,7 @@ public: template class CopySource final : public ITemplSource { public: - CopySource(const void *input_buffer, const size_t &input_size, const model::Shape &shape, + CopySource(const void 
*input_buffer, const size_t &input_size, const ir::Shape &shape, ir::Layout io_layout = ir::Layout::UNKNOWN) : ITemplSource(input_buffer, input_size, shape, true, io_layout) { diff --git a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc index 0f1c0b9..f12f2fe 100644 --- a/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc +++ b/runtime/neurun/core/src/exec/interp/operations/FullyConnected.cc @@ -58,7 +58,7 @@ void prepareFC(ExecEnv *env, const model::Operation &node) assert(num_units == bias_tensor->dimension(0)); // Make output tensor info - model::Shape output_shape(2); + ir::Shape output_shape(2); output_shape.dim(0) = batch_size; output_shape.dim(1) = num_units; const model::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()}; diff --git a/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h b/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h index 8df4d41..8124a38 100644 --- a/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h +++ b/runtime/neurun/core/src/exec/interp/operations/OperationUtil.h @@ -17,7 +17,7 @@ #ifndef __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_ #define __NEURUN_EXEC_INTERP_OPERATIONS_OPERATION_UTILS_H_ -#include "model/Shape.h" +#include "ir/Shape.h" #include "ir/InternalType.h" #include @@ -29,7 +29,7 @@ namespace exec namespace interp { -inline nnfw::cker::Shape convertShape(const model::Shape &shape) +inline nnfw::cker::Shape convertShape(const ir::Shape &shape) { auto dimensions = std::vector(shape.dims().begin(), shape.dims().end()); @@ -51,7 +51,7 @@ inline nnfw::cker::Shape convertShape(const model::Shape &shape) return nnfw::cker::GetShape(raw_shape); } -inline nnfw::cker::Shape convertExtendShape(const model::Shape &shape) +inline nnfw::cker::Shape convertExtendShape(const ir::Shape &shape) { auto dimensions = std::vector(shape.dims().begin(), shape.dims().end()); diff --git 
a/runtime/neurun/core/src/ir/Graph.cc b/runtime/neurun/core/src/ir/Graph.cc index afd0a7c..cd2e291 100644 --- a/runtime/neurun/core/src/ir/Graph.cc +++ b/runtime/neurun/core/src/ir/Graph.cc @@ -43,7 +43,7 @@ Graph::Graph() = default; Graph::~Graph(void) = default; -model::OperandIndex Graph::addOperand(const model::Shape &shape, const model::TypeInfo &type) +model::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type) { return _operands.emplace(shape, type); } diff --git a/runtime/neurun/core/src/model/Shape.cc b/runtime/neurun/core/src/ir/Shape.cc similarity index 97% rename from runtime/neurun/core/src/model/Shape.cc rename to runtime/neurun/core/src/ir/Shape.cc index b7f7bff..64e9aa9 100644 --- a/runtime/neurun/core/src/model/Shape.cc +++ b/runtime/neurun/core/src/ir/Shape.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "model/Shape.h" +#include "ir/Shape.h" #include "util/Utils.h" #include @@ -23,7 +23,7 @@ namespace neurun { -namespace model +namespace ir { FeatureShape Shape::asFeature(Layout layout) const @@ -82,5 +82,5 @@ uint64_t Shape::num_elements() const std::multiplies()); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/model/TypeInfo.cc b/runtime/neurun/core/src/ir/TypeInfo.cc similarity index 94% rename from runtime/neurun/core/src/model/TypeInfo.cc rename to runtime/neurun/core/src/ir/TypeInfo.cc index 46ac2d4..280146b 100644 --- a/runtime/neurun/core/src/model/TypeInfo.cc +++ b/runtime/neurun/core/src/ir/TypeInfo.cc @@ -14,11 +14,11 @@ * limitations under the License. 
*/ -#include "model/TypeInfo.h" +#include "ir/TypeInfo.h" namespace neurun { -namespace model +namespace ir { bool operator==(const TypeInfo &lhs, const TypeInfo &rhs) @@ -43,5 +43,5 @@ bool operator==(const TypeInfo &lhs, const TypeInfo &rhs) bool operator!=(const TypeInfo &lhs, const TypeInfo &rhs) { return !(lhs == rhs); } -} // namespace model +} // namespace ir } // namespace neurun diff --git a/runtime/neurun/core/src/ir/operand/Shape4DConvert.h b/runtime/neurun/core/src/ir/operand/Shape4DConvert.h index 36058fd..60e0555 100644 --- a/runtime/neurun/core/src/ir/operand/Shape4DConvert.h +++ b/runtime/neurun/core/src/ir/operand/Shape4DConvert.h @@ -26,7 +26,7 @@ namespace graph namespace operand { -inline LowerInfo::Shape4D asShape4D(const model::Shape &shape) +inline LowerInfo::Shape4D asShape4D(const ir::Shape &shape) { switch (shape.rank()) { diff --git a/runtime/neurun/core/src/util/Padding.cc b/runtime/neurun/core/src/util/Padding.cc index 89e4577..2e2202b 100644 --- a/runtime/neurun/core/src/util/Padding.cc +++ b/runtime/neurun/core/src/util/Padding.cc @@ -46,8 +46,8 @@ ir::ExplicitPadding validPadding(void) return padding; } -ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, - const ir::Stride &stride, uint32_t kw, uint32_t kh) +ir::ExplicitPadding samePaddingUsingIFM(const ir::FeatureShape &ifm_shape, const ir::Stride &stride, + uint32_t kw, uint32_t kh) { ir::ExplicitPadding padding; @@ -76,8 +76,8 @@ ir::ExplicitPadding samePaddingUsingIFM(const model::FeatureShape &ifm_shape, return padding; } -ir::ExplicitPadding samePadding(const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding samePadding(const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh) { const int32_t vertical_expected_output = (ifm_shape.H + stride.vertical - 1) / stride.vertical; @@ -93,9 +93,8 @@ ir::ExplicitPadding 
samePadding(const model::FeatureShape &ifm_shape, return samePaddingUsingIFM(ifm_shape, stride, kw, kh); } -ir::ExplicitPadding calculatePadding(const ir::Padding &padding, - const model::FeatureShape &ifm_shape, - const model::FeatureShape &ofm_shape, const ir::Stride &stride, +ir::ExplicitPadding calculatePadding(const ir::Padding &padding, const ir::FeatureShape &ifm_shape, + const ir::FeatureShape &ofm_shape, const ir::Stride &stride, uint32_t kw, uint32_t kh) { if (padding.type == ir::PaddingType::EXPLICIT) diff --git a/runtime/neurun/core/src/util/ShapeInference.cc b/runtime/neurun/core/src/util/ShapeInference.cc index de30b9b..44857ef 100644 --- a/runtime/neurun/core/src/util/ShapeInference.cc +++ b/runtime/neurun/core/src/util/ShapeInference.cc @@ -16,7 +16,7 @@ #include "util/Utils.h" #include "ir/InternalType.h" -#include "model/Shape.h" +#include "ir/Shape.h" #include "model/operation/AvgPool2D.h" #include "model/operation/MaxPool2D.h" #include "util/ShapeInference.h" @@ -43,9 +43,9 @@ ceil_div(T dividend, U divisor) } // Calculate the result of broadcast of two shapes -model::Shape broadcastShapes(const model::Shape &lhs_shape, const model::Shape &rhs_shape) +ir::Shape broadcastShapes(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape) { - model::Shape out_shape; + ir::Shape out_shape; auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank()); for (int idx = 0; idx < max_rank; ++idx) @@ -100,20 +100,20 @@ std::pair calcConvLikeHeightAndWidth(const int in_h, const int in_w, c // Shape inference // -Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape) +Shapes inferEltwiseShape(const ir::Shape &lhs_shape, const ir::Shape &rhs_shape) { return {broadcastShapes(lhs_shape, rhs_shape)}; } -Shapes inferAvgPoolShape(const model::Shape &in_shape, - const model::operation::AvgPool2D::Param ¶m, const ir::Layout layout) +Shapes inferAvgPoolShape(const ir::Shape &in_shape, const model::operation::AvgPool2D::Param ¶m, + const 
ir::Layout layout) { assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw, param.padding, param.stride); // Pooling don't change number of channels and batch size - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; } Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat::Param ¶m) @@ -130,25 +130,25 @@ Shapes inferConcatShape(const Shapes &in_shapes, const model::operation::Concat: } // Calculate output shape - model::Shape out_shape(first_in_shape); + ir::Shape out_shape(first_in_shape); out_shape.dim(concat_axis) = 0; for (const auto &in_shape : in_shapes) out_shape.dim(concat_axis) += in_shape.dim(concat_axis); return {out_shape}; } -Shapes inferMaxPoolShape(const model::Shape &in_shape, - const model::operation::MaxPool2D::Param ¶m, const ir::Layout layout) +Shapes inferMaxPoolShape(const ir::Shape &in_shape, const model::operation::MaxPool2D::Param ¶m, + const ir::Layout layout) { assert(layout == ir::Layout::NHWC); auto ifm_shape = in_shape.asFeature(layout); const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw, param.padding, param.stride); // Pooling don't change number of channels and batch size - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}; } -Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::Conv2D::Param ¶m, ir::Layout layout) { assert(layout == ir::Layout::NHWC); @@ -161,10 +161,10 @@ Shapes inferConv2DShape(const model::Shape &in_shape, const model::Shape &ker_sh const auto out_h_w = 
calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W, param.padding, param.stride); - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.N}}; } -Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shape &ker_shape, +Shapes inferDepthwiseConv2DShape(const ir::Shape &in_shape, const ir::Shape &ker_shape, const model::operation::DepthwiseConv2D::Param ¶m, ir::Layout layout) { @@ -179,10 +179,10 @@ Shapes inferDepthwiseConv2DShape(const model::Shape &in_shape, const model::Shap const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, kf_shape.H, kf_shape.W, param.padding, param.stride); - return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C}}; + return {ir::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, kf_shape.C}}; } -Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape &ker_shape) +Shapes inferFullyConnectedShape(const ir::Shape &in_shape, const ir::Shape &ker_shape) { assert(in_shape.rank() >= 2); assert(ker_shape.rank() == 2); @@ -193,7 +193,7 @@ Shapes inferFullyConnectedShape(const model::Shape &in_shape, const model::Shape const auto batch_size = input_size_with_batch / input_size; assert(input_size_with_batch % input_size == 0); - return {{model::Shape({static_cast(batch_size), num_units})}}; + return {{ir::Shape({static_cast(batch_size), num_units})}}; } } // namespace shape_inference diff --git a/runtime/neurun/frontend/base_loader/base_loader.h b/runtime/neurun/frontend/base_loader/base_loader.h index 153e07a..2578fee 100644 --- a/runtime/neurun/frontend/base_loader/base_loader.h +++ b/runtime/neurun/frontend/base_loader/base_loader.h @@ -196,7 +196,7 @@ BaseLoader::BaseLoader::tensorTypeToDataType(const template model::OperandIndex BaseLoader::loadOperand(const Tensor *tensor) { - model::Shape shape; + ir::Shape shape; // Shape const auto 
*tensor_shape = tensor->shape(); for (const auto &dim : *tensor_shape) @@ -236,7 +236,7 @@ model::OperandIndex BaseLoader::loadOperand(const throw std::runtime_error("Custom Quantization is not supported"); } // Create TypeInfo - model::TypeInfo type_info(data_type, scale, zero_point); + ir::TypeInfo type_info(data_type, scale, zero_point); // Create operand const auto operand_index = _graph.addOperand(shape, type_info); @@ -278,7 +278,6 @@ template void BaseLoader::loadStridesAndPaddings(Param ¶m, const OptionsType *options) { - model::Shape shape; // Strides param.stride.vertical = options->stride_w(); param.stride.horizontal = options->stride_h(); @@ -298,7 +297,6 @@ void BaseLoader::loadPool2D(Param ¶m, // Strides and Paddings loadStridesAndPaddings(param, options); // Filter width and height - model::Shape shape; // Strides param.kw = options->filter_width(); param.kh = options->filter_height(); @@ -336,7 +334,6 @@ void BaseLoader::loadDepthwiseConv2D(const Operato param.activation = convertActivation(options->fused_activation_function()); loadStridesAndPaddings(param, options); // Multiplier - model::Shape shape; param.multiplier = options->depth_multiplier(); // Dilation h/w factor unused std::unique_ptr new_op( @@ -437,7 +434,6 @@ void BaseLoader::loadConcatenation(const Operator model::operation::Concat::Param param; const auto *options = op->builtin_options_as_ConcatenationOptions(); // Axis - model::Shape shape; param.axis = options->axis(); // activation unused diff --git a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h index 093c66f..91f84b9 100644 --- a/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h +++ b/runtime/neurun/frontend/nnapi/wrapper/NNAPIConvert.h @@ -24,8 +24,8 @@ #include -#include -#include +#include +#include #include class NNAPIConvert @@ -44,14 +44,14 @@ public: * @param[in] type NNAPI's operand type * @return neurun's internal operand type info */ - static 
::neurun::model::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type); + static neurun::ir::TypeInfo getTypeInfo(const ANeuralNetworksOperandType *type); /** * @brief Convert operand shape info from NNAPI to internal operand shape * @param[in] type NNAPI's operand type * @return neurun's internal operand shape */ - static ::neurun::model::Shape getShape(const ANeuralNetworksOperandType *type); + static neurun::ir::Shape getShape(const ANeuralNetworksOperandType *type); /** * @brief Calcaulate operand size from NNAPI type diff --git a/runtime/neurun/test/core/compiler/Scheduler.cc b/runtime/neurun/test/core/compiler/Scheduler.cc index 3623608..72350f4 100644 --- a/runtime/neurun/test/core/compiler/Scheduler.cc +++ b/runtime/neurun/test/core/compiler/Scheduler.cc @@ -18,9 +18,9 @@ #include #include -#include +#include #include -#include +#include #include #include diff --git a/runtime/neurun/test/graph/operand/Set.cc b/runtime/neurun/test/graph/operand/Set.cc index 44ede8c..ee365684 100644 --- a/runtime/neurun/test/graph/operand/Set.cc +++ b/runtime/neurun/test/graph/operand/Set.cc @@ -22,15 +22,15 @@ TEST(graph_operand_Set, set_test) { neurun::model::Operands set; - ::neurun::model::Shape shape0{1, 2, 3}; + neurun::ir::Shape shape0{1, 2, 3}; - ::neurun::model::Shape shape1(4); + neurun::ir::Shape shape1(4); shape1.dim(0) = 10; shape1.dim(1) = 20; shape1.dim(2) = 30; shape1.dim(3) = 40; - ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; set.emplace(shape0, type); set.emplace(shape1, type); diff --git a/runtime/neurun/test/graph/operand/UseDef.cc b/runtime/neurun/test/graph/operand/UseDef.cc index 9e945ab..b049b8e 100644 --- a/runtime/neurun/test/graph/operand/UseDef.cc +++ b/runtime/neurun/test/graph/operand/UseDef.cc @@ -36,8 +36,8 @@ TEST(graph_operand_usedef, usedef_test) neurun::graph::Graph graph; neurun::graph::verifier::DAGChecker verifier; - neurun::model::Shape shape(3); - 
neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::Shape shape(3); + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; // Model Input/Output auto input_operand = graph.addOperand(shape, type); diff --git a/runtime/neurun/test/graph/operation/SetIO.cc b/runtime/neurun/test/graph/operation/SetIO.cc index 95f1f13..31950b0 100644 --- a/runtime/neurun/test/graph/operation/SetIO.cc +++ b/runtime/neurun/test/graph/operation/SetIO.cc @@ -33,8 +33,8 @@ TEST(graph_operation_setIO, operation_setIO_conv) { neurun::graph::Graph graph; - neurun::model::Shape shape{3}; - neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::Shape shape{3}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; // Add Conv using Graph = neurun::model::operation::Conv2D; @@ -66,9 +66,9 @@ TEST(graph_operation_setIO, operation_setIO_concat) { neurun::graph::Graph graph; - neurun::model::Shape shape{3}; + neurun::ir::Shape shape{3}; - neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; using Graph = neurun::model::operation::Concat; diff --git a/runtime/neurun/test/graph/verifier/Verifier.cc b/runtime/neurun/test/graph/verifier/Verifier.cc index b430261..ced5bda 100644 --- a/runtime/neurun/test/graph/verifier/Verifier.cc +++ b/runtime/neurun/test/graph/verifier/Verifier.cc @@ -30,8 +30,8 @@ TEST(Verifier, dag_checker) { neurun::graph::Graph graph; - ::neurun::model::Shape shape{3}; - ::neurun::model::TypeInfo type{neurun::ir::DataType::INT32}; + neurun::ir::Shape shape{3}; + neurun::ir::TypeInfo type{neurun::ir::DataType::INT32}; auto operand1 = graph.addOperand(shape, type); auto operand2 = graph.addOperand(shape, type); -- 2.7.4