This commit changes the dimension() function of ACLTensor so that indexing starts from the highest dimension (e.g. index 0 is N for an NCHW tensor).
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
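
The new convention: dimension(0) refers to the highest dimension of the frontend shape, while arm_compute keeps dimensions lowest-first, so the index is reversed against the tensor's rank before being passed to ACL. A minimal sketch of that reversal, assuming a rank-4 NCHW tensor (acl_index() is a hypothetical helper, not part of this patch):

  #include <cassert>
  #include <cstddef>

  // Frontend indexing counts from the highest dimension (N=0, C=1, H=2, W=3 for NCHW),
  // while ACL stores dimensions lowest-first (W=0, H=1, C=2, N=3).
  inline std::size_t acl_index(std::size_t frontend_index, std::size_t rank)
  {
    return (rank - frontend_index) - 1;
  }

  int main()
  {
    constexpr std::size_t rank = 4;
    assert(acl_index(0, rank) == 3); // N maps to ACL dimension 3
    assert(acl_index(3, rank) == 0); // W maps to ACL dimension 0
    return 0;
  }
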
{
if (axis == i)
continue;
- if (t1->dimension(i) != t2->dimension(i))
+ if (t1->info()->dimension(i) != t2->info()->dimension(i))
return false;
}
return true;
{
assert(_output_alloc->ptr()->layout() == input->ptr()->layout());
assert(matchSizeExceptAxis(_output_alloc->ptr(), input->ptr(), _axis));
- axis_sum += input->ptr()->dimension(_axis);
+ axis_sum += input->ptr()->info()->dimension(_axis);
}
- assert(_output_alloc->ptr()->dimension(_axis) == axis_sum);
+ assert(_output_alloc->ptr()->info()->dimension(_axis) == axis_sum);
}
VERBOSE(Concat_RUN) << "START Concat" << std::endl;
{
CLSubTensor::CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, bool extend_parent)
+ const arm_compute::Coordinates &coords, size_t rank, bool extend_parent)
: _cl_sub_tensor(std::make_shared<arm_compute::CLSubTensor>(parent->handle(), tensor_shape,
- coords, extend_parent))
+ coords, extend_parent)),
+ _rank{rank}
{
// DO NOTHING
}
public:
CLSubTensor(ICLTensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, bool extend_parent = false);
+ const arm_compute::Coordinates &coords, size_t rank, bool extend_parent = false);
+
+public:
+ size_t num_dimensions() const final { return _rank; }
public:
const arm_compute::CLSubTensor *handle() const override;
private:
std::shared_ptr<arm_compute::CLSubTensor> _cl_sub_tensor;
+ size_t _rank;
};
} // namespace operand
namespace operand
{
-CLTensor::CLTensor(const arm_compute::TensorInfo &info)
- : _cl_tensor(std::make_shared<arm_compute::CLTensor>())
+CLTensor::CLTensor(const arm_compute::TensorInfo &info, size_t rank)
+ : _cl_tensor(std::make_shared<arm_compute::CLTensor>()), _rank{rank}
{
allocator()->init(info);
}
CLTensor() = delete;
public:
- CLTensor(const arm_compute::TensorInfo &info);
+ CLTensor(const arm_compute::TensorInfo &info, size_t rank);
+
+public:
+ size_t num_dimensions() const final { return _rank; }
public:
const arm_compute::CLTensor *handle() const override;
private:
std::shared_ptr<arm_compute::CLTensor> _cl_tensor;
+ size_t _rank;
};
} // namespace operand
return nullptr;
}
- void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info)
+ void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
+ size_t rank)
{
- auto tensor = std::make_shared<T_Tensor>(info);
+ auto tensor = std::make_shared<T_Tensor>(info, rank);
_tensors[ind] = tensor;
}
namespace acl_common
{
+size_t IACLTensor::num_dimensions() const
+{
+ throw std::runtime_error("No definition of num_dimensions()");
+ return 0;
+}
+
+size_t IACLTensor::dimension(size_t index) const
+{
+ // Assume that index 0 refers to the highest dimension.
+ // e.g. N: 0, C: 1, H: 2, W: 3 for NCHW layout
+ // NOTE Dim correction must not have been applied to this tensor.
+ const ARMComputeAxis reversed{(num_dimensions() - index) - 1};
+ return info()->dimension(reversed.value());
+}
+
size_t IACLTensor::calcOffset(const neurun::util::Coordinates &coords)
{
const auto rank = coords.size();
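
For reference, a hedged usage sketch of the new indexing as seen by a caller (the tensor type is templated to avoid assuming the exact interface; the dump_nchw_shape() helper is illustrative only, not part of this patch):

  #include <cassert>
  #include <iostream>

  // T is any tensor exposing num_dimensions()/dimension() with the new
  // highest-dimension-first indexing (e.g. an IACLTensor-derived class).
  template <typename T> void dump_nchw_shape(const T &tensor)
  {
    assert(tensor.num_dimensions() == 4);
    // With the reversal in IACLTensor::dimension(), index 0 is N and index 3 is W,
    // even though ACL's info()->dimension(0) returns W.
    std::cout << tensor.dimension(0) << "x" // N
              << tensor.dimension(1) << "x" // C
              << tensor.dimension(2) << "x" // H
              << tensor.dimension(3)        // W
              << std::endl;
  }
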
public:
uint8_t *buffer() const final { return handle()->buffer(); }
size_t total_size() const final { return info()->total_size(); }
- size_t dimension(size_t index) const final { return info()->dimension(index); }
- size_t num_dimensions() const final { return info()->num_dimensions(); }
+ size_t dimension(size_t index) const final;
+ size_t num_dimensions() const override;
size_t calcOffset(const neurun::util::Coordinates &coords) final;
model::Layout layout() const final;
bool has_padding() const override { return info()->has_padding(); }
const auto &backend_layout = _tensor_layouts_map[root_parent].second;
auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), frontend_layout, backend_layout,
_apply_dim_correction_map[ind]);
- _mem_mgr->buildTensor(ind, tensor_info);
+ _mem_mgr->buildTensor(ind, tensor_info, info.shape().rank());
}
}
_apply_dim_correction_map[current]);
::arm_compute::Coordinates coordinates =
asTensorCoordinate(info.offset(), frontend_layout, backend_layout);
- auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, true);
+ auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates,
+ info.shape().rank(), true);
subtensors[current] = tensor;
stack.pop();
}
{
if (axis == i)
continue;
- if (t1->dimension(i) != t2->dimension(i))
+ if (t1->info()->dimension(i) != t2->info()->dimension(i))
return false;
}
return true;
{
assert(_output_alloc->layout() == input->layout());
assert(matchSizeExceptAxis(_output_alloc, input, _axis));
- axis_sum += input->dimension(_axis);
+ axis_sum += input->info()->dimension(_axis);
}
- assert(_output_alloc->dimension(_axis) == axis_sum);
+ assert(_output_alloc->info()->dimension(_axis) == axis_sum);
}
VERBOSE(Concat_RUN) << "START Concat" << std::endl;
{
NESubTensor::NESubTensor(INETensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, bool extend_parent)
+ const arm_compute::Coordinates &coords, size_t rank, bool extend_parent)
: _ne_sub_tensor(std::make_shared<arm_compute::SubTensor>(parent->handle(), tensor_shape,
- coords, extend_parent))
+ coords, extend_parent)),
+ _rank{rank}
{
// DO NOTHING
}
public:
NESubTensor(INETensor *parent, const arm_compute::TensorShape &tensor_shape,
- const arm_compute::Coordinates &coords, bool extend_parent = false);
+ const arm_compute::Coordinates &coords, size_t rank, bool extend_parent = false);
+
+public:
+ size_t num_dimensions() const final { return _rank; }
public:
const arm_compute::SubTensor *handle() const override;
private:
std::shared_ptr<arm_compute::SubTensor> _ne_sub_tensor;
+ size_t _rank;
};
} // namespace operand
namespace operand
{
-NETensor::NETensor(const arm_compute::TensorInfo &info)
- : _ne_tensor(std::make_shared<arm_compute::Tensor>())
+NETensor::NETensor(const arm_compute::TensorInfo &info, size_t rank)
+ : _ne_tensor(std::make_shared<arm_compute::Tensor>()), _rank{rank}
{
allocator()->init(info);
}
NETensor() = delete;
public:
- NETensor(const arm_compute::TensorInfo &info);
+ NETensor(const arm_compute::TensorInfo &info, size_t rank);
+
+public:
+ size_t num_dimensions() const final { return _rank; }
public:
const arm_compute::Tensor *handle() const override;
private:
std::shared_ptr<arm_compute::Tensor> _ne_tensor;
+ size_t _rank;
};
} // namespace operand
public:
View(::neurun::backend::operand::ITensor *tensor) : _tensor{tensor}
{
- // TODO Validate whether tensor is a feature map, or not
- _shape.N = tensor->dimension(3);
- _shape.C = tensor->dimension(2);
- _shape.H = tensor->dimension(1);
- _shape.W = tensor->dimension(0);
+ assert(tensor->num_dimensions() == 4 && tensor->layout() == model::Layout::NCHW);
+ _shape.N = tensor->dimension(0);
+ _shape.C = tensor->dimension(1);
+ _shape.H = tensor->dimension(2);
+ _shape.W = tensor->dimension(3);
}
public: