This commit introduces a check for whether a tensor has padding.

When a tensor has no padding and its rank is less than 4, the copy routines now fall back to a single memcpy instead of an element-wise loop.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
return info()->offset_element_in_bytes(coordinates);
}
+bool ICLTensor::has_padding() const { return info()->has_padding(); }
+
arm_compute::DataType ICLTensor::data_type() const { return info()->data_type(); }
uint8_t *ICLTensor::buffer() const { return handle()->buffer(); }
size_t dimension(size_t index) const override;
size_t num_dimensions() const override;
size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) override;
+ bool has_padding() const override;
public:
arm_compute::DataType data_type() const;
size_t num_dimensions() const override { return _info.shape().dims().size(); }
size_t total_size() const override { return _info.total_size(); }
size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) override;
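+  // A tensor described by compiler::TensorInfo reports no padding.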
+ bool has_padding() const override { return false; }
private:
compiler::TensorInfo _info;
virtual size_t dimension(size_t index) const = 0;
virtual size_t num_dimensions() const = 0;
virtual size_t calcOffset(const neurun::util::feature::Coordinate4D &coords) = 0;
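+  // Return true if the underlying buffer has padding.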
+ virtual bool has_padding() const = 0;
};
} // namespace operand
auto input_buffer = tensor.buffer();
auto rank = _shape.rank();
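+  // Fast path: without padding the buffer is contiguous, so rank < 4 tensors can be copied with a single memcpy.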
+ if (!tensor.has_padding() && rank < 4)
+ {
+ memcpy(_output_buffer, input_buffer, _output_size);
+ return;
+ }
+
switch (rank)
{
case 0:
auto output_buffer = tensor.buffer();
auto rank = _shape.rank();
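+  // Same fast path when writing into the tensor: a single memcpy when there is no padding and rank < 4.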
+ if (!tensor.has_padding() && rank < 4)
+ {
+ memcpy(output_buffer, _input_buffer, _input_size);
+ return;
+ }
+
switch (rank)
{
case 0: