From 5414861798ab5a5e3a65f595c21105e7711daf2b Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Mon, 15 Jul 2019 11:52:26 +0300
Subject: [PATCH] Make dedicated class for neurun's tensor shape (#5621)

Related issue: #5472

Remove dependency of model::Shape on misc::tensor::Shape.

Signed-off-by: Sergei Barannikov
---
 runtimes/neurun/backend/acl_cl/KernelGenerator.cc  |  2 +-
 runtimes/neurun/backend/cpu/operand/Tensor.h       |  2 +-
 runtimes/neurun/core/include/model/Shape.h         | 54 ++++++++++++++++------
 .../neurun/core/src/compiler/OperationValidator.cc |  4 +-
 .../neurun/core/src/compiler/SubTensorAnalyzer.cc  |  4 +-
 runtimes/neurun/core/src/exec/interp/Tensor.h      |  4 +-
 runtimes/neurun/core/src/model/Shape.cc            | 29 ++++++------
 runtimes/neurun/core/src/util/ShapeInference.cc    | 10 ++--
 .../nnapi/wrapper/ANeuralNetworksExecution.cc      | 28 ++---------
 9 files changed, 71 insertions(+), 66 deletions(-)

diff --git a/runtimes/neurun/backend/acl_cl/KernelGenerator.cc b/runtimes/neurun/backend/acl_cl/KernelGenerator.cc
index 83bd02e..2834ae5 100644
--- a/runtimes/neurun/backend/acl_cl/KernelGenerator.cc
+++ b/runtimes/neurun/backend/acl_cl/KernelGenerator.cc
@@ -719,7 +719,7 @@ void KernelGenerator::visit(const model::operation::TransposeNode &node)
   for (int32_t n = 0; n < perm_size; ++n)
   {
     int32_t perm_value = *(reinterpret_cast<const int32_t *>(perm_base) + n);
-    assert(static_cast<size_t>(perm_value) < rank);
+    assert(perm_value < rank);
 
     pv.emplace_back(perm_value);
   }
diff --git a/runtimes/neurun/backend/cpu/operand/Tensor.h b/runtimes/neurun/backend/cpu/operand/Tensor.h
index a397603..07ce11f 100644
--- a/runtimes/neurun/backend/cpu/operand/Tensor.h
+++ b/runtimes/neurun/backend/cpu/operand/Tensor.h
@@ -57,7 +57,7 @@ public:
    * C : dimension(3)
    */
   size_t dimension(size_t index) const override { return _info.shape().dim(index); }
-  size_t num_dimensions() const override { return _info.shape().dims().size(); }
+  size_t num_dimensions() const override { return _info.shape().rank(); }
   size_t total_size() const override { return _info.total_size(); }
   size_t calcOffset(const neurun::util::Coordinates &coords) override;
   model::Layout layout() const override { return model::Layout::NHWC; }
diff --git a/runtimes/neurun/core/include/model/Shape.h b/runtimes/neurun/core/include/model/Shape.h
index dd34074..92f9b1f 100644
--- a/runtimes/neurun/core/include/model/Shape.h
+++ b/runtimes/neurun/core/include/model/Shape.h
@@ -17,40 +17,66 @@
 #ifndef __NEURUN_MODEL_SHAPE_H__
 #define __NEURUN_MODEL_SHAPE_H__
 
-#include <vector>
-#include <cstdint>
-
-#include "misc/feature/Shape.h"
-#include "misc/matrix/Shape.h"
-#include "misc/tensor/Shape.h"
 #include "Layout.h"
+#include "misc/feature/Shape.h"
+
+#include <cstdint>
+#include <vector>
 
 namespace neurun
 {
 namespace model
 {
 
+// TODO Remove this dependency.
 using FeatureShape = nnfw::misc::feature::Shape;
 
-struct Shape : public nnfw::misc::tensor::Shape
+struct Shape
 {
 public:
-  Shape() : Shape(0) {}
-  explicit Shape(uint32_t rank);
-  Shape(const std::initializer_list<int32_t> &dims) : nnfw::misc::tensor::Shape(dims) {}
+  Shape() = default;
+
+  explicit Shape(int rank) : _dimensions(rank) {}
+
+  Shape(std::initializer_list<int32_t> dimensions) : _dimensions(dimensions) {}
+
+  int rank() const { return _dimensions.size(); }
+
+  const std::vector<int32_t> &dims() const { return _dimensions; }
+
+  int32_t dim(int i) const { return _dimensions.at(i); }
+
+  int32_t &dim(int i) { return _dimensions.at(i); }
+
+  int64_t num_elements() const;
 
 public:
   FeatureShape asFeature(Layout layout = Layout::NHWC) const;
 
-public:
+  /**
+   * @brief Add dimension to the beginning
+   * @param[in] d dimension to add to the beginning
+   */
+  void prepend(int32_t d) { _dimensions.insert(_dimensions.cbegin(), d); }
+
+  /**
+   * @brief Add dimension to the end
+   * @param[in] d dimension to add to the end
+   */
+  void append(int32_t d) { _dimensions.emplace_back(d); }
+
   /**
    * @brief Extend rank of Shape object for operand with param.
-   * @param [in] to_rank The rank value to be extended to
-   * @return N/A
+   * @param[in] to_rank The rank value to be extended to
    */
-  void extendRank(size_t);
+  void extendRank(int to_rank);
+
+private:
+  std::vector<int32_t> _dimensions;
 };
 
+inline bool operator==(const Shape &lhs, const Shape &rhs) { return lhs.dims() == rhs.dims(); }
+
 } // namespace model
 } // namespace neurun
diff --git a/runtimes/neurun/core/src/compiler/OperationValidator.cc b/runtimes/neurun/core/src/compiler/OperationValidator.cc
index e018b3d..a2b4a39 100644
--- a/runtimes/neurun/core/src/compiler/OperationValidator.cc
+++ b/runtimes/neurun/core/src/compiler/OperationValidator.cc
@@ -154,7 +154,7 @@ void OperationValidator::visit(const model::operation::TransposeNode &node)
 
   UNUSED_RELEASE(perm_shape);
   assert(perm_shape.rank() == 1);
-  assert(input_shape.rank() == static_cast<size_t>(perm_shape.dim(0)));
+  assert(input_shape.rank() == perm_shape.dim(0));
   assert(input_shape.rank() == output_shape.rank());
 }
 
@@ -327,7 +327,7 @@ void OperationValidator::visit(const model::operation::EmbeddingLookupNode &node)
   // the first dimension which has the same size as lookups' only dimension.
   assert(output_shape.rank() == values_shape.rank());
   assert(output_shape.dim(0) == lookups_shape.dim(0));
-  for (size_t n = 1; n < output_shape.rank(); ++n)
+  for (int n = 1; n < output_shape.rank(); ++n)
   {
     assert(output_shape.dim(n) == values_shape.dim(n));
   }
diff --git a/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc b/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc
index 9cfd0bc..c7e5bd1 100644
--- a/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc
+++ b/runtimes/neurun/core/src/compiler/SubTensorAnalyzer.cc
@@ -50,7 +50,7 @@ void SubTensorAnalyzer::visit(const model::operation::ConcatNode &node)
 
   int32_t axis_point = 0;
   const auto rank = _ctx.at(output_index).shape().rank();
-  uint32_t axis = axis_raw < 0 ? (axis_raw + rank) : axis_raw;
+  int32_t axis = axis_raw < 0 ? (axis_raw + rank) : axis_raw;
   assert(rank > axis);
 
   // NOTE Not support multiple parent tensor yet
@@ -68,7 +68,7 @@ void SubTensorAnalyzer::visit(const model::operation::ConcatNode &node)
     assert(rank == input_shape.rank());
 
     neurun::util::Coordinates coordinate_info{};
-    for (uint32_t i = 0; i < rank; i++)
+    for (int i = 0; i < rank; i++)
     {
       coordinate_info.set(i, 0);
     }
diff --git a/runtimes/neurun/core/src/exec/interp/Tensor.h b/runtimes/neurun/core/src/exec/interp/Tensor.h
index 6571c7b..90642ac 100644
--- a/runtimes/neurun/core/src/exec/interp/Tensor.h
+++ b/runtimes/neurun/core/src/exec/interp/Tensor.h
@@ -120,7 +120,7 @@ public:
 
   size_t total_size() const override { return _info.total_size(); }
   size_t dimension(size_t index) const override { return _info.shape().dim(index); }
-  size_t num_dimensions() const override { return _info.shape().dims().size(); }
+  size_t num_dimensions() const override { return _info.shape().rank(); }
   size_t calcOffset(const util::Coordinates &coords) override;
   model::Layout layout() const override;
   bool has_padding() const override { return false; }
@@ -159,7 +159,7 @@ public:
 
   size_t total_size() const override { return _info.total_size(); }
   size_t dimension(size_t index) const override { return _info.shape().dim(index); }
-  size_t num_dimensions() const override { return _info.shape().dims().size(); }
+  size_t num_dimensions() const override { return _info.shape().rank(); }
   size_t calcOffset(const util::Coordinates &coords) override;
   model::Layout layout() const override;
   bool has_padding() const override { return false; }
diff --git a/runtimes/neurun/core/src/model/Shape.cc b/runtimes/neurun/core/src/model/Shape.cc
index fd30882..98283a7 100644
--- a/runtimes/neurun/core/src/model/Shape.cc
+++ b/runtimes/neurun/core/src/model/Shape.cc
@@ -14,22 +14,19 @@
  * limitations under the License.
  */
 
-#include <cassert>
-
 #include "model/Shape.h"
 #include "util/Utils.h"
 
+#include <cassert>
+#include <functional>
+#include <numeric>
+
 namespace neurun
 {
 namespace model
 {
 
-Shape::Shape(uint32_t rank) : nnfw::misc::tensor::Shape(rank)
-{
-  // DO NOTHING
-}
-
-FeatureShape Shape::asFeature(const Layout layout) const
+FeatureShape Shape::asFeature(Layout layout) const
 {
   assert(rank() == 4);
 
   // Handle NHWC only
 
   const auto batch = dim(0);
   const auto depth = dim(3);
   const auto height = dim(1);
   const auto width = dim(2);
 
-  return FeatureShape(batch, depth, height, width);
+  return {batch, depth, height, width};
 }
 
 // Extended dimension is filled with 1.
-void Shape::extendRank(size_t to_rank)
+void Shape::extendRank(int to_rank)
+{
+  assert(to_rank - rank() >= 0);
+  _dimensions.insert(_dimensions.cbegin(), to_rank - rank(), 1);
+}
+
+int64_t Shape::num_elements() const
 {
-  for (size_t i = rank() + 1; i <= to_rank; ++i)
-  {
-    prepend(1);
-  }
+  return std::accumulate(_dimensions.cbegin(), _dimensions.cend(), INT64_C(1),
+                         std::multiplies<int64_t>());
 }
 
 } // namespace model
diff --git a/runtimes/neurun/core/src/util/ShapeInference.cc b/runtimes/neurun/core/src/util/ShapeInference.cc
index 249fc8a..93e48db 100644
--- a/runtimes/neurun/core/src/util/ShapeInference.cc
+++ b/runtimes/neurun/core/src/util/ShapeInference.cc
@@ -46,14 +46,14 @@ model::Shape calcEltwiseOutShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape)
 
   model::Shape out_shape;
   auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank());
 
-  for (uint32_t idx = 0; idx < max_rank; ++idx)
+  for (int idx = 0; idx < max_rank; ++idx)
   {
     // Go over operands dimensions from right to left
-    uint32_t lhs_idx = lhs_shape.rank() - idx - 1;
-    uint32_t rhs_idx = rhs_shape.rank() - idx - 1;
+    int lhs_idx = lhs_shape.rank() - idx - 1;
+    int rhs_idx = rhs_shape.rank() - idx - 1;
 
-    int32_t lhs_dim = lhs_shape.rank() > lhs_idx ? lhs_shape.dim(lhs_idx) : 1;
-    int32_t rhs_dim = rhs_shape.rank() > rhs_idx ? rhs_shape.dim(rhs_idx) : 1;
+    int32_t lhs_dim = lhs_idx >= 0 ? lhs_shape.dim(lhs_idx) : 1;
+    int32_t rhs_dim = rhs_idx >= 0 ? rhs_shape.dim(rhs_idx) : 1;
 
     if (lhs_dim != 1 && rhs_dim != 1 && lhs_dim != rhs_dim)
       throw std::runtime_error("Incompatible shapes for broadcast");
diff --git a/runtimes/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc b/runtimes/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
index 03378f4..754385a 100644
--- a/runtimes/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
+++ b/runtimes/neurun/frontend/nnapi/wrapper/ANeuralNetworksExecution.cc
@@ -78,38 +78,16 @@ bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type,
 bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type,
                                             const neurun::model::OperandIndex index) noexcept
 {
-  const auto operand_shape = _executor->model().operands.at(index).shape();
-  const auto shape_from_type = NNAPIConvert::getShape(type);
-
   // Passed shape should be specified
   if (haveUnspecifiedDims(index))
   {
     return false;
   }
 
-  // Same shape
-  if (operand_shape == shape_from_type)
-  {
-    return true;
-  }
+  const auto &operand_shape = _executor->model().operands.at(index).shape();
+  const auto &shape_from_type = NNAPIConvert::getShape(type);
 
-  // If shape in model is not specified (specify shape at execution)
-  if (operand_shape.rank() != type->dimensionCount)
-  {
-    // Rank mismatch
-    return false;
-  }
-  for (uint axis = 0; axis < operand_shape.rank(); axis++)
-  {
-    uint32_t dim = operand_shape.dim(axis);
-    if ((dim != 0) && (dim != type->dimensions[axis]))
-    {
-      // Specified dimension on model is mismatched
-      return false;
-    }
-  }
-
-  return true;
+  return operand_shape == shape_from_type;
 }
 
 bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept
-- 
2.7.4
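
For readers who want to try the new Shape semantics outside the neurun tree, the class introduced in model/Shape.h above condenses into a minimal standalone sketch. Names mirror the patch, but this is an illustration, not the shipped header; main() is only a hypothetical driver.

#include <cassert>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <numeric>
#include <vector>

// Condensed restatement of neurun::model::Shape as introduced by this patch.
struct Shape
{
  Shape() = default;
  explicit Shape(int rank) : _dimensions(rank) {}
  Shape(std::initializer_list<int32_t> dimensions) : _dimensions(dimensions) {}

  int rank() const { return _dimensions.size(); }
  int32_t dim(int i) const { return _dimensions.at(i); }

  // extendRank prepends 1s, so {3, 4} extended to rank 4 becomes {1, 1, 3, 4}.
  void extendRank(int to_rank)
  {
    assert(to_rank - rank() >= 0);
    _dimensions.insert(_dimensions.cbegin(), to_rank - rank(), 1);
  }

  // Product of all dimensions, accumulated in int64_t to avoid int32 overflow.
  int64_t num_elements() const
  {
    return std::accumulate(_dimensions.cbegin(), _dimensions.cend(), INT64_C(1),
                           std::multiplies<int64_t>());
  }

private:
  std::vector<int32_t> _dimensions;
};

int main()
{
  Shape s{3, 4};   // picks the initializer_list constructor
  s.extendRank(4); // now {1, 1, 3, 4}
  assert(s.rank() == 4 && s.dim(0) == 1 && s.dim(3) == 4);
  assert(s.num_elements() == 12);
  return 0;
}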
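
The ShapeInference.cc hunk is the one place where the switch from unsigned to signed indices changes behavior rather than just types: once idx walks past the smaller operand's rank, lhs_shape.rank() - idx - 1 goes negative, and the patched code tests that directly with lhs_idx >= 0, where the old uint32_t version relied on wrap-around producing an index no smaller than the rank. Below is a condensed sketch of that broadcast walk, using a plain std::vector<int32_t> in place of model::Shape; broadcastShape is a hypothetical stand-in for calcEltwiseOutShape.

#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

std::vector<int32_t> broadcastShape(const std::vector<int32_t> &lhs,
                                    const std::vector<int32_t> &rhs)
{
  const int max_rank = static_cast<int>(std::max(lhs.size(), rhs.size()));
  std::vector<int32_t> out(max_rank);
  for (int idx = 0; idx < max_rank; ++idx)
  {
    // Walk dimensions from right to left; a negative index means the operand
    // has no dimension at this position and broadcasts as 1.
    const int lhs_idx = static_cast<int>(lhs.size()) - idx - 1;
    const int rhs_idx = static_cast<int>(rhs.size()) - idx - 1;
    const int32_t lhs_dim = lhs_idx >= 0 ? lhs[lhs_idx] : 1;
    const int32_t rhs_dim = rhs_idx >= 0 ? rhs[rhs_idx] : 1;
    if (lhs_dim != 1 && rhs_dim != 1 && lhs_dim != rhs_dim)
      throw std::runtime_error("Incompatible shapes for broadcast");
    out[max_rank - idx - 1] = std::max(lhs_dim, rhs_dim);
  }
  return out; // e.g. {3, 4} and {2, 1, 4} broadcast to {2, 3, 4}
}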