From: Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics
Date: Tue, 2 Jul 2019 04:56:48 +0000 (+0300)
Subject: Remove `model::Shape::asVector` replacing calls to it with `num_elements()` or `dim...
X-Git-Tag: submit/tizen/20190809.050447~613
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e468408755a1e310b2586bfb62a6c6640fd9875c;p=platform%2Fcore%2Fml%2Fnnfw.git

Remove `model::Shape::asVector`, replacing calls to it with `num_elements()` or `dim(0)`. (#5498)

Also remove the unused `model::Shape::asMatrix` and `model::Shape::asTensor`.
This is a preparation step before removing the dependency of `model::Shape`
on `misc::tensor::Shape`.

Signed-off-by: Sergei Barannikov
---

diff --git a/runtimes/neurun/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/backend/acl_cl/StageGenerator.cc
index 23bf4d9..9ba4c1a 100644
--- a/runtimes/neurun/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/backend/acl_cl/StageGenerator.cc
@@ -908,9 +908,9 @@ void StageGenerator::visit(const model::operation::StridedSliceNode &node)
   auto startData_base = _ctx.at(startData_index).data().base();
   auto endData_base = _ctx.at(endData_index).data().base();
   auto stridesData_base = _ctx.at(stridesData_index).data().base();
-  const auto startData_size = _ctx.at(startData_index).shape().asVector();
-  const auto endData_size = _ctx.at(endData_index).shape().asVector();
-  const auto stridesData_size = _ctx.at(stridesData_index).shape().asVector();
+  const int startData_size = _ctx.at(startData_index).shape().num_elements();
+  const int endData_size = _ctx.at(endData_index).shape().num_elements();
+  const int stridesData_size = _ctx.at(stridesData_index).shape().num_elements();

   using neurun::model::DataType;

@@ -1012,7 +1012,7 @@ void StageGenerator::visit(const model::operation::TransposeNode &node)
   const auto rank = _ctx.at(ifm_idx).shape().rank();
   std::vector<int32_t> pv;
   const auto perm_base = _ctx.at(perm).data().base();
-  const auto perm_size = _ctx.at(perm).shape().asVector();
+  const int perm_size = _ctx.at(perm).shape().num_elements();
   assert(perm_base != nullptr);

   for (int32_t n = 0; n < perm_size; ++n)
@@ -1645,7 +1645,7 @@ void StageGenerator::visit(const model::operation::ReduceMaxNode &node)
     case 1: // vector
     {
       const auto axis_base = _ctx.at(axis_index).data().base();
-      const auto axis_size = _ctx.at(axis_index).shape().asVector();
+      const int axis_size = axis_shape.num_elements();

       // If axis's data does not exist as constant values and can be gotten as input data, we have
       // to find a way to infer output shape when sinking output.
@@ -2849,7 +2849,7 @@ void StageGenerator::visit(const model::operation::ArgMaxNode &node)
   _tensor_builder->dimCorrection(ifm_index, false);

   std::vector<uint32_t> l_axis;
-  const auto axis_size = _ctx.at(axis_index).shape().asVector();
+  const int axis_size = axis_shape.num_elements();
   auto axis_base = _ctx.at(axis_index).data().base();
   // TODO Should support axis size > 1.
   assert(axis_size == 1);
@@ -2984,7 +2984,7 @@ void StageGenerator::visit(const model::operation::MeanNode &node)
     case 1: // vector
     {
       const auto axis_base = _ctx.at(axis_index).data().base();
-      const auto axis_size = _ctx.at(axis_index).shape().asVector();
+      const int axis_size = axis_shape.num_elements();

       // If axis's data does not exist as constant values and can be gotten as input data, we have
       // to find a way to infer output shape when sinking output.
@@ -3192,7 +3192,7 @@ void StageGenerator::visit(const model::operation::ReduceMinNode &node)
     case 1: // vector
     {
       const auto axis_base = _ctx.at(axis_index).data().base();
-      const auto axis_size = _ctx.at(axis_index).shape().asVector();
+      const int axis_size = axis_shape.num_elements();

       // If axis's data does not exist as constant values and can be gotten as input data, we have
       // to find a way to infer output shape when sinking output.
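Note on the pattern applied throughout the hunks above: the axis and perm operands are rank-1, and for a rank-1 shape `num_elements()` (the product of all dimensions) is exactly the value the removed `asVector()` asserted and returned, namely `dim(0)`. A minimal sketch of that equivalence; the `Shape` class below is an illustrative stand-in written for this note, not the real `nnfw::misc::tensor::Shape`:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>
    #include <vector>

    // Stand-in shape class; only the members this sketch needs.
    class Shape
    {
    public:
      Shape(std::initializer_list<int32_t> dims) : _dims(dims) {}
      uint32_t rank() const { return static_cast<uint32_t>(_dims.size()); }
      int32_t dim(uint32_t i) const { return _dims[i]; }
      uint64_t num_elements() const
      {
        uint64_t n = 1;
        for (int32_t d : _dims)
          n *= static_cast<uint64_t>(d);
        return n;
      }

    private:
      std::vector<int32_t> _dims;
    };

    int main()
    {
      // Axis/perm operands in the hunks above are rank-1, so the old
      // asVector() value (dim(0)) and num_elements() coincide.
      Shape axis_shape{4};
      assert(axis_shape.rank() == 1);
      assert(axis_shape.num_elements() == static_cast<uint64_t>(axis_shape.dim(0)));
      return 0;
    }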
diff --git a/runtimes/neurun/core/include/model/Shape.h b/runtimes/neurun/core/include/model/Shape.h
index 354f777..dd34074 100644
--- a/runtimes/neurun/core/include/model/Shape.h
+++ b/runtimes/neurun/core/include/model/Shape.h
@@ -40,10 +40,7 @@ public:
   Shape(const std::initializer_list<int32_t> &dims) : nnfw::misc::tensor::Shape(dims) {}

 public:
-  int32_t asVector(void) const;
-  nnfw::misc::matrix::Shape asMatrix(void) const;
   FeatureShape asFeature(Layout layout = Layout::NHWC) const;
-  nnfw::misc::tensor::Shape asTensor(void) const;

 public:
   /**
diff --git a/runtimes/neurun/core/include/util/Utils.h b/runtimes/neurun/core/include/util/Utils.h
index 2fb6893..debd950 100644
--- a/runtimes/neurun/core/include/util/Utils.h
+++ b/runtimes/neurun/core/include/util/Utils.h
@@ -57,7 +57,7 @@ void initConstant(neurun::backend::operand::IObject &obj, const neurun::model::O
   {
     case 1:
     {
-      auto vec_size = shape.asVector();
+      auto vec_size = shape.dim(0);
       for (int32_t n = 0; n < vec_size; ++n)
       {
         const T *from = reinterpret_cast<const T *>(base) + n;
diff --git a/runtimes/neurun/core/src/compiler/ConstantInitializer.cc b/runtimes/neurun/core/src/compiler/ConstantInitializer.cc
index 85967db..12fd143 100644
--- a/runtimes/neurun/core/src/compiler/ConstantInitializer.cc
+++ b/runtimes/neurun/core/src/compiler/ConstantInitializer.cc
@@ -48,7 +48,7 @@ void ConstantInitializer::run(neurun::backend::operand::IObject &obj,
   {
     case 1:
     {
-      auto vec_size = shape.asVector();
+      auto vec_size = shape.dim(0);
       for (int32_t n = 0; n < vec_size; ++n)
       {
         const T *from = reinterpret_cast<const T *>(base) + n;
diff --git a/runtimes/neurun/core/src/compiler/OperationValidator.cc b/runtimes/neurun/core/src/compiler/OperationValidator.cc
index 37672a5..e018b3d 100644
--- a/runtimes/neurun/core/src/compiler/OperationValidator.cc
+++ b/runtimes/neurun/core/src/compiler/OperationValidator.cc
@@ -141,16 +141,21 @@ void OperationValidator::visit(const model::operation::ReduceSumNode &node)

 void OperationValidator::visit(const model::operation::TransposeNode &node)
 {
-  const auto ofm_idx{node.getOutputs().at(0)};
-  const auto ifm_idx{node.getInputs().at(model::operation::TransposeNode::Input::INPUT)};
-  const auto perm{node.param().perm};
+  const auto output_index{node.getOutputs().at(0)};
+  const auto input_index{node.getInputs().at(model::operation::TransposeNode::Input::INPUT)};
+  const auto perm_idx{node.param().perm};
+
+  const auto &output_shape = _ctx.at(output_index).shape();
+  const auto &input_shape = _ctx.at(input_index).shape();
+  const auto &perm_shape = _ctx.at(perm_idx).shape();

-  UNUSED_RELEASE(ofm_idx);
-  UNUSED_RELEASE(ifm_idx);
-  UNUSED_RELEASE(perm);
+  UNUSED_RELEASE(output_shape);
+  UNUSED_RELEASE(input_shape);
+  UNUSED_RELEASE(perm_shape);

-  assert(_ctx.at(ifm_idx).shape().rank() == static_cast<uint32_t>(_ctx.at(perm).shape().dim(0)));
-  assert(_ctx.at(ifm_idx).shape().rank() == _ctx.at(ofm_idx).shape().rank());
+  assert(perm_shape.rank() == 1);
+  assert(input_shape.rank() == static_cast<uint32_t>(perm_shape.dim(0)));
+  assert(input_shape.rank() == output_shape.rank());
 }

 void OperationValidator::visit(const model::operation::ReduceMaxNode &node)
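The rewritten `visit(TransposeNode)` above makes the old code's implicit assumption explicit: the permutation operand must itself be a vector (`assert(perm_shape.rank() == 1)`) before its length is compared to the input rank. A self-contained sketch of the same three checks, with a hypothetical `ShapeView` type and free function standing in for the validator class:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for model::Shape; only rank() and dim() are used.
    struct ShapeView
    {
      std::vector<int32_t> dims;
      uint32_t rank() const { return static_cast<uint32_t>(dims.size()); }
      int32_t dim(uint32_t i) const { return dims[i]; }
    };

    // Mirrors the three assertions in the rewritten visit(TransposeNode):
    // the permutation must be a vector, its length must equal the input
    // rank, and transposing must preserve rank.
    void validateTranspose(const ShapeView &output_shape, const ShapeView &input_shape,
                           const ShapeView &perm_shape)
    {
      assert(perm_shape.rank() == 1);
      assert(input_shape.rank() == static_cast<uint32_t>(perm_shape.dim(0)));
      assert(input_shape.rank() == output_shape.rank());
      (void)output_shape; // silence unused-parameter warnings when
      (void)input_shape;  // asserts are compiled out (NDEBUG)
      (void)perm_shape;
    }

    int main()
    {
      // A 4-D tensor transposed with a 4-element permutation vector.
      validateTranspose({{2, 3, 4, 5}}, {{5, 2, 3, 4}}, {{4}});
      return 0;
    }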
diff --git a/runtimes/neurun/core/src/model/Shape.cc b/runtimes/neurun/core/src/model/Shape.cc
index 12d7886..fd30882 100644
--- a/runtimes/neurun/core/src/model/Shape.cc
+++ b/runtimes/neurun/core/src/model/Shape.cc
@@ -29,23 +29,6 @@ Shape::Shape(uint32_t rank) : nnfw::misc::tensor::Shape(rank)
   // DO NOTHING
 }

-int32_t Shape::asVector(void) const
-{
-  assert(rank() == 1);
-
-  return dim(0);
-}
-
-nnfw::misc::matrix::Shape Shape::asMatrix(void) const
-{
-  assert(rank() == 2);
-
-  const auto height = dim(0);
-  const auto width = dim(1);
-
-  return nnfw::misc::matrix::Shape(height, width);
-}
-
 FeatureShape Shape::asFeature(const Layout layout) const
 {
   assert(rank() == 4);
@@ -66,17 +49,6 @@ FeatureShape Shape::asFeature(const Layout layout) const
   return FeatureShape(batch, depth, height, width);
 }

-nnfw::misc::tensor::Shape Shape::asTensor(void) const
-{
-  nnfw::misc::tensor::Shape shape{};
-  for (uint32_t i = 0; i < rank(); ++i)
-  {
-    shape.append(dim(i));
-  }
-
-  return shape; // this shape represents shape of NNAPI
-}
-
 // Extended dimension is filled with 1.
 void Shape::extendRank(size_t to_rank)
 {
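Deleting `asMatrix` and `asTensor` also removes `nnfw::misc::matrix::Shape` and `nnfw::misc::tensor::Shape` from the return types of `model::Shape`'s interface, a step toward the decoupling the commit message mentions. Any future caller can get by with the surviving `rank()` and `dim()` accessors; a hedged sketch of the equivalents, using a hypothetical `Dims` type rather than the real class:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical rank/dim-based shape, standing in for model::Shape.
    struct Dims
    {
      std::vector<int32_t> d;
      uint32_t rank() const { return static_cast<uint32_t>(d.size()); }
      int32_t dim(uint32_t i) const { return d[i]; }
    };

    int main()
    {
      Dims s{{3, 4}};

      // asVector() -> dim(0) (rank-1 only) or num_elements().
      // asMatrix() -> read dim(0)/dim(1) directly at the call site.
      assert(s.rank() == 2);
      int32_t height = s.dim(0);
      int32_t width = s.dim(1);

      // asTensor() -> copy the dimensions in a loop, as its body did.
      std::vector<int32_t> copy;
      for (uint32_t i = 0; i < s.rank(); ++i)
        copy.push_back(s.dim(i));

      assert(height == 3 && width == 4 && copy.size() == 2);
      return 0;
    }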