Remove `model::Shape::asVector`, replacing its uses with `num_elements()` or `dim(0)`.
Also remove the unused `model::Shape::asMatrix` and `model::Shape::asTensor`.
This is a preparation step before removing the dependency of `model::Shape` on `nnfw::misc::tensor::Shape`.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
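For reviewers: `asVector()` asserted `rank() == 1` and returned `dim(0)` (see its removed definition below), while `num_elements()` comes from the `misc::tensor` base class and returns the total element count, so the two agree on rank-1 shapes. The standalone sketch below illustrates that equivalence; the `Shape` type is a simplified stand-in, and the `num_elements()` body is an assumption about the `misc` implementation, not a copy of it. Note also that the replacement sites declare the result as `int` explicitly, presumably because the element count is an unsigned type while the consuming loops index with `int32_t`.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for neurun::model::Shape, just enough to show the
// asVector() -> num_elements() equivalence on rank-1 shapes.
struct Shape
{
  std::vector<int32_t> dims;

  uint32_t rank() const { return static_cast<uint32_t>(dims.size()); }
  int32_t dim(uint32_t i) const { return dims[i]; }

  // Removed helper: only defined for rank-1 shapes.
  int32_t asVector() const
  {
    assert(rank() == 1);
    return dim(0);
  }

  // Replacement (assumed to mirror the misc::tensor base class): total
  // element count, well-defined for any rank.
  uint64_t num_elements() const
  {
    uint64_t n = 1;
    for (auto d : dims)
      n *= static_cast<uint64_t>(d);
    return n;
  }
};

int main()
{
  Shape vec{{4}};
  assert(vec.asVector() == 4);
  assert(vec.num_elements() == 4); // same value for rank-1 shapes
  return 0;
}
```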
auto startData_base = _ctx.at(startData_index).data().base();
auto endData_base = _ctx.at(endData_index).data().base();
auto stridesData_base = _ctx.at(stridesData_index).data().base();
- const auto startData_size = _ctx.at(startData_index).shape().asVector();
- const auto endData_size = _ctx.at(endData_index).shape().asVector();
- const auto stridesData_size = _ctx.at(stridesData_index).shape().asVector();
+ const int startData_size = _ctx.at(startData_index).shape().num_elements();
+ const int endData_size = _ctx.at(endData_index).shape().num_elements();
+ const int stridesData_size = _ctx.at(stridesData_index).shape().num_elements();
using neurun::model::DataType;
const auto rank = _ctx.at(ifm_idx).shape().rank();
std::vector<int32_t> pv;
const auto perm_base = _ctx.at(perm).data().base();
- const auto perm_size = _ctx.at(perm).shape().asVector();
+ const int perm_size = _ctx.at(perm).shape().num_elements();
assert(perm_base != nullptr);
for (int32_t n = 0; n < perm_size; ++n)
case 1: // vector
{
const auto axis_base = _ctx.at(axis_index).data().base();
- const auto axis_size = _ctx.at(axis_index).shape().asVector();
+ const auto &axis_shape = _ctx.at(axis_index).shape();
+ const int axis_size = axis_shape.num_elements();
// If axis's data does not exist as constant values and can be gotten as input data, we have
// to find a way to infer output shape when sinking output.
_tensor_builder->dimCorrection(ifm_index, false);
std::vector<uint32_t> l_axis;
- const auto axis_size = _ctx.at(axis_index).shape().asVector();
+ const auto &axis_shape = _ctx.at(axis_index).shape();
+ const int axis_size = axis_shape.num_elements();
auto axis_base = _ctx.at(axis_index).data().base();
// TODO Should support axis size > 1.
assert(axis_size == 1);
case 1: // vector
{
const auto axis_base = _ctx.at(axis_index).data().base();
- const auto axis_size = _ctx.at(axis_index).shape().asVector();
+ const auto &axis_shape = _ctx.at(axis_index).shape();
+ const int axis_size = axis_shape.num_elements();
// If axis's data does not exist as constant values and can be gotten as input data, we have
// to find a way to infer output shape when sinking output.
case 1: // vector
{
const auto axis_base = _ctx.at(axis_index).data().base();
- const auto axis_size = _ctx.at(axis_index).shape().asVector();
+ const auto &axis_shape = _ctx.at(axis_index).shape();
+ const int axis_size = axis_shape.num_elements();
// If axis's data does not exist as constant values and can be gotten as input data, we have
// to find a way to infer output shape when sinking output.
Shape(const std::initializer_list<int32_t> &dims) : nnfw::misc::tensor::Shape(dims) {}
public:
- int32_t asVector(void) const;
- nnfw::misc::matrix::Shape asMatrix(void) const;
FeatureShape asFeature(Layout layout = Layout::NHWC) const;
- nnfw::misc::tensor::Shape asTensor(void) const;
public:
/**
{
case 1:
{
- auto vec_size = shape.asVector();
+ auto vec_size = shape.dim(0);
for (int32_t n = 0; n < vec_size; ++n)
{
const T *from = reinterpret_cast<const T *>(base) + n;
{
case 1:
{
- auto vec_size = shape.asVector();
+ auto vec_size = shape.dim(0);
for (int32_t n = 0; n < vec_size; ++n)
{
const T *from = reinterpret_cast<const T *>(base) + n;
void OperationValidator::visit(const model::operation::TransposeNode &node)
{
- const auto ofm_idx{node.getOutputs().at(0)};
- const auto ifm_idx{node.getInputs().at(model::operation::TransposeNode::Input::INPUT)};
- const auto perm{node.param().perm};
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::TransposeNode::Input::INPUT)};
+ const auto perm_idx{node.param().perm};
+
+ const auto &output_shape = _ctx.at(output_index).shape();
+ const auto &input_shape = _ctx.at(input_index).shape();
+ const auto &perm_shape = _ctx.at(perm_idx).shape();
- UNUSED_RELEASE(ofm_idx);
- UNUSED_RELEASE(ifm_idx);
- UNUSED_RELEASE(perm);
+ UNUSED_RELEASE(output_shape);
+ UNUSED_RELEASE(input_shape);
+ UNUSED_RELEASE(perm_shape);
- assert(_ctx.at(ifm_idx).shape().rank() == static_cast<uint32_t>(_ctx.at(perm).shape().dim(0)));
- assert(_ctx.at(ifm_idx).shape().rank() == _ctx.at(ofm_idx).shape().rank());
+ assert(perm_shape.rank() == 1);
+ assert(input_shape.rank() == static_cast<uint32_t>(perm_shape.dim(0)));
+ assert(input_shape.rank() == output_shape.rank());
}
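The refactored validator keeps the two original invariants and adds an explicit rank-1 check on `perm`, which `asVector()` previously enforced through its internal assert, so no validation is lost. A standalone sketch of the same three checks (hypothetical helper, plain vectors instead of the neurun `Shape` class):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the validator's shape checks.
void validateTranspose(const std::vector<int32_t> &input_shape,
                       const std::vector<int32_t> &perm_shape,
                       const std::vector<int32_t> &output_shape)
{
  // The perm operand must be a vector (implicit in the old asVector()).
  assert(perm_shape.size() == 1);
  // It must list one axis index per input dimension.
  assert(input_shape.size() == static_cast<size_t>(perm_shape[0]));
  // Transposing permutes axes, so the rank is preserved.
  assert(input_shape.size() == output_shape.size());
}

int main()
{
  validateTranspose({2, 3, 4}, {3}, {4, 2, 3}); // e.g. perm = {2, 0, 1}
  return 0;
}
```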
void OperationValidator::visit(const model::operation::ReduceMaxNode &node)
{
  // DO NOTHING
}
-int32_t Shape::asVector(void) const
-{
- assert(rank() == 1);
-
- return dim(0);
-}
-
-nnfw::misc::matrix::Shape Shape::asMatrix(void) const
-{
- assert(rank() == 2);
-
- const auto height = dim(0);
- const auto width = dim(1);
-
- return nnfw::misc::matrix::Shape(height, width);
-}
-
FeatureShape Shape::asFeature(const Layout layout) const
{
assert(rank() == 4);
return FeatureShape(batch, depth, height, width);
}
-nnfw::misc::tensor::Shape Shape::asTensor(void) const
-{
- nnfw::misc::tensor::Shape shape{};
- for (uint32_t i = 0; i < rank(); ++i)
- {
- shape.append(dim(i));
- }
-
- return shape; // this shape represents shape of NNAPI
-}
-
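With `asTensor()` gone, any remaining caller that needs a `nnfw::misc::tensor::Shape` can inline the removed loop at the call site. A sketch of such a helper (hypothetical free function, includes omitted; it uses only the `rank()`, `dim()`, and `append()` calls visible in the removed code):

```cpp
// Hypothetical replacement helper, equivalent to the removed asTensor():
nnfw::misc::tensor::Shape toTensorShape(const neurun::model::Shape &shape)
{
  nnfw::misc::tensor::Shape misc_shape{};
  for (uint32_t i = 0; i < shape.rank(); ++i)
  {
    misc_shape.append(shape.dim(i));
  }
  return misc_shape;
}
```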
// Extended dimension is filled with 1.
void Shape::extendRank(size_t to_rank)
{