Add a constructor to mir::Shape that accepts the rank of the shape.
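
A minimal usage sketch of the new constructor (a hypothetical call site, not
part of this patch): a rank-4 shape is built in one step and its dimensions
filled in place, replacing the previous default-construct-then-resize()
pattern:

  // The rank constructor value-initializes all dimensions to 0
  // via the underlying std::vector<int32_t>.
  mir::Shape output_shape(4);
  for (int32_t axis = 0; axis < output_shape.rank(); ++axis)
    output_shape.dim(axis) = 1;
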
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
// Shape Deserialization
//
-static Shape deserializeFromMessage(const proto::TensorShapeProto& object_as_message)
-{
- Shape res;
- auto rank = (uint32_t) object_as_message.dims_size();
- res.resize((uint32_t) rank);
- for (uint32_t i = 0; i < rank; i++)
- {
- res.dim(i) = (uint32_t) object_as_message.dims(i);
- }
+static Shape deserializeFromMessage(const proto::TensorShapeProto& object_as_message) {
+  int32_t rank = object_as_message.dims_size();
+  Shape res(rank);
+  for (int32_t i = 0; i < rank; ++i)
+    res.dim(i) = static_cast<int32_t>(object_as_message.dims(i));
return res;
}
#include <algorithm>
#include <cassert>
-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {
constexpr int32_t Shape::autoDim;
-Shape::Shape(std::initializer_list<int32_t> &&l) : _dims{l}
-{
- // DO NOTHING
-}
-
-int32_t Shape::rank() const { return _dims.size(); }
-Shape &Shape::resize(int32_t size)
-{
+void Shape::resize(int32_t size) {
_dims.resize(size);
- return *this;
}
int32_t Shape::dim(int32_t axis) const {
return _dims.at(axis);
}
-int32_t Shape::numElements() const
-{
- if (rank() == 0)
- {
+int32_t Shape::numElements() const {
+ if (rank() == 0) {
return 0;
}
int32_t res = 1;
- for (int32_t axis = 0; axis < rank(); ++axis)
- {
+ for (int32_t axis = 0; axis < rank(); ++axis) {
assert(dim(axis) != Shape::autoDim);
res *= dim(axis);
}
return res;
}
-bool Shape::equal(const Shape &rhs) const
-{
- if (rank() != rhs.rank())
- {
- return false;
- }
-
- int32_t r = rank();
- for (int32_t axis = 0; axis < r; ++axis)
- {
- if (dim(axis) != rhs.dim(axis))
- {
- return false;
- }
- }
-
- return true;
-}
-
-std::ostream &operator<<(std::ostream &s, const Shape &sh)
-{
+std::ostream& operator<<(std::ostream& s, const Shape& sh) {
int32_t rank = sh.rank();
s << "[";
- for (int32_t axis = 0; axis < rank; ++axis)
- {
+ for (int32_t axis = 0; axis < rank; ++axis) {
if (axis != 0)
s << ", ";
if (sh.dim(axis) == Shape::autoDim)
assert(_paddingBefore.size() == 2);
assert(_paddingAfter.size() == 2);
- Shape output_shape;
- output_shape.resize(4);
+ Shape output_shape(4);
// Batch size and number of channels.
output_shape.dim(0) = input_shape.dim(0);
assert(kernel_shape.rank() == 4);
assert(kernel_shape.dim(3) == input_shape.dim(3));
- Shape output_shape;
- output_shape.resize(4);
+ Shape output_shape(4);
// Batch size and number of channels.
output_shape.dim(0) = input_shape.dim(0);
assert(_paddingBefore.size() == 2);
assert(_paddingAfter.size() == 2);
- Shape output_shape;
- output_shape.resize(4);
+ Shape output_shape(4);
// Batch size and number of channels.
output_shape.dim(0) = input_shape.dim(0);
assert(num_dims == getNumDim());
- Shape out_shape;
- out_shape.resize(num_dims);
+ Shape out_shape(num_dims);
for (int32_t dim = 0; dim < num_dims; ++dim) {
std::pair<int32_t, int32_t> padding = getPaddingForDim(dim);
out_shape.dim(dim) = padding.first + input_shape.dim(dim) + padding.second;
assert(_paddingBefore.size() == 2);
assert(_paddingAfter.size() == 2);
- Shape output_shape;
- output_shape.resize(4);
+ Shape output_shape(4);
// Batch size and number of channels.
output_shape.dim(0) = input_shape.dim(0);
const auto& input_shape = getInputShape(0);
int32_t input_rank = input_shape.rank();
- Shape output_shape;
- int32_t output_rank = 0;
std::vector<int32_t> dims_to_squeeze;
return;
}
+ int32_t output_rank = 0;
size_t squeezing_idx = 0;
- output_shape.resize(input_rank - dims_to_squeeze.size());
+  Shape output_shape(input_rank - static_cast<int32_t>(dims_to_squeeze.size()));
for (int32_t i = 0; i < input_rank; ++i) {
if (squeezing_idx < dims_to_squeeze.size() && i == dims_to_squeeze[squeezing_idx]) {
if (input_shape.dim(i) != 1)
void TransposeOp::inferOutputShapes() {
auto& input_shape = getInputShape(0);
- Shape output_shape;
- output_shape.resize(input_shape.rank());
+ Shape output_shape(input_shape.rank());
for (std::size_t i = 0; i < _axisOrder.size(); ++i)
- output_shape.dim(i) = input_shape.dim(static_cast<int32_t>(_axisOrder.at(i)));
+ output_shape.dim(static_cast<int32_t>(i)) = input_shape.dim(static_cast<int32_t>(_axisOrder.at(i)));
setOutputShape(0, output_shape);
}
#include <cstdint>
#include <ostream>
-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {
-class Shape
-{
+class Shape {
public:
static constexpr int32_t autoDim = -1;
Shape() = default;
-  Shape(std::initializer_list<int32_t> &&l);
-  explicit Shape(std::vector<int32_t> d) : _dims(d) {}
-  int32_t rank() const;
-  Shape &resize(int32_t size);
+  explicit Shape(int32_t rank) : _dims(static_cast<decltype(_dims)::size_type>(rank)) {}
+
+  Shape(std::initializer_list<int32_t> dims) : _dims(dims) {}
+
+  explicit Shape(const std::vector<int32_t>& dims) : _dims(dims) {}
+
+  int32_t rank() const { return static_cast<int32_t>(_dims.size()); }
+
+  void resize(int32_t size);
int32_t& dim(int32_t axis);
- int32_t dim(int32_t axis) const;
+
+ int32_t dim(int32_t axis) const;
int32_t numElements() const;
- bool operator==(const Shape& rhs) const
- {
- return equal(rhs);
- }
+ bool operator==(const Shape& rhs) const { return _dims == rhs._dims; }
- bool operator!=(const Shape& rhs) const
- {
- return !equal(rhs);
- }
+ bool operator!=(const Shape& rhs) const { return _dims != rhs._dims; }
private:
-
- bool equal(const Shape& rhs) const;
-
std::vector<int32_t> _dims;
};
-std::ostream &operator<<(std::ostream &s, const Shape &sh);
+std::ostream& operator<<(std::ostream& s, const Shape& sh);
} // namespace mir
} // namespace nnc
// Infer output shape based on given scales.
auto& input_shape = getInputShape(0);
assert(input_shape.rank() == 4 && _scales.size() == 4);
- Shape output_shape;
- output_shape.resize(input_shape.rank());
+ Shape output_shape(input_shape.rank());
for (int32_t i = 0; i < input_shape.rank(); ++i) {
output_shape.dim(i) = (int32_t)lroundf(_scales.at(i) * input_shape.dim(i));
return mir::Shape{1};
}
- mir::Shape sh;
- sh.resize(static_cast<int32_t>(size));
+ mir::Shape sh(static_cast<int32_t>(size));
unsigned int i = 0;
for (auto dim : iter)
string operation_name = out->name() + "_bias_add_layer";
// Reshape the IR biases tensor and generate the corresponding DOM tensor.
- Shape ir_biases_shape;
const auto ir_input_shape = op.getInputShape(0);
- ir_biases_shape.resize(ir_input_shape.rank());
+ Shape ir_biases_shape(ir_input_shape.rank());
// TODO remove this if after batch axis is restored in all operations in Model IR
if (op.getPrevNodes()[0].op->getType() == Operation::Type::fullyConnected) {
const auto& ir_scales = op.getWeights();
// Reshape the IR scales tensor and generate the corresponding DOM tensor.
- Shape ir_scales_shape;
const auto ir_input_shape = transposeShape<2, 1, 3, 0>(op.getInputShape(0));
- ir_scales_shape.resize(ir_input_shape.rank());
+ Shape ir_scales_shape(ir_input_shape.rank());
// ACL CLArithmeticDivision supports input tensors broadcasting.
for (int i = 0; i < ir_input_shape.rank(); ++i)
// Create a unit tensor with the rank = ir.shape.rank() and having all dimensions = 1. It is
// possible to use such a tensor in the operation because of the broadcasting support for the
// input tensors in the CLArithmeticDivision operation.
- Shape ir_unit_shape;
- ir_unit_shape.resize(ir_shape.rank());
+ Shape ir_unit_shape(ir_shape.rank());
for (int i = 0; i < ir_unit_shape.rank(); ++i)
ir_unit_shape.dim(i) = 1;
auto& input_shape = inputs[0]->getOutputShape(0);
assert(input_shape.rank() == 4);
mir::Tensor<int> out_shapes = mir::Tensor<int>(*params[0].get());
- Shape res_shape;
- res_shape.resize(4);
+ Shape res_shape(4);
res_shape.dim(0) = input_shape.dim(0);
res_shape.dim(1) = out_shapes.at(Index{0});
res_shape.dim(2) = out_shapes.at(Index{1});