This commit templatizes the Source/Sink classes (and the related feature View/Reader helpers) so they can handle element types other than float.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
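
For reviewers, a minimal standalone sketch of the templatization pattern this change applies (not taken from the neurun sources; class and function names below are illustrative only): the element type becomes a template parameter, and the byte-size check uses sizeof(T) instead of sizeof(float).

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Simplified stand-in for the templated VectorSink introduced here.
    template <typename T> class VectorSinkSketch
    {
    public:
      VectorSinkSketch(const int32_t vlen, T *base, const size_t size)
          : _vlen{vlen}, _base{base}, _size{size}
      {
        // The byte size of the user buffer must match element count * element size
        assert(size == _vlen * sizeof(T));
      }

      // Copy vlen elements of type T into the user-provided buffer
      void pull(const T *src) const { std::memcpy(_base, src, _size); }

    private:
      const int32_t _vlen;
      T *const _base;
      const size_t _size;
    };

    int main()
    {
      const int32_t data[4] = {1, 2, 3, 4};
      std::vector<int32_t> out(4);

      // Instantiation with int32_t; the same pattern covers float, uint8_t, etc.
      VectorSinkSketch<int32_t> sink{4, out.data(), out.size() * sizeof(int32_t)};
      sink.pull(data);
      return 0;
    }

The diff below applies this pattern to VectorSink, FeatureSink, VectorSource, FeatureSource, and the feature View/Reader helpers, and instantiates them with float at the existing call sites.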
namespace feature
{
-template <typename T> class View;
-
-template <> class View<float> final : public nnfw::util::feature::Reader<float>
+template <typename T> class View final : public nnfw::util::feature::Reader<T>
{
public:
View(::arm_compute::ITensor *tensor) : _tensor{tensor}
const ::nnfw::util::feature::Shape &shape(void) const { return _shape; }
public:
- float at(uint32_t ch, uint32_t row, uint32_t col) const override
+ T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
const auto offset = feature_index_to_byte_offset(ch, row, col);
- float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
return *ptr;
}
- float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
{
const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
- float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
return *ptr;
}
public:
- float &at(uint32_t ch, uint32_t row, uint32_t col)
+ T &at(uint32_t ch, uint32_t row, uint32_t col)
{
const auto offset = feature_index_to_byte_offset(ch, row, col);
- float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
return *ptr;
}
- float &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
+ T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
{
const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
- float *ptr = reinterpret_cast<float *>(_tensor->buffer() + offset);
+ T *ptr = reinterpret_cast<T *>(_tensor->buffer() + offset);
return *ptr;
}
//
// VectorSink
//
-class VectorSink final : public Sink
+template <typename T> class VectorSink final : public Sink
{
public:
- VectorSink(const int32_t vlen, uint8_t *base, const size_t size)
- : _vlen{vlen}, _base{base}, _size{size}
+ VectorSink(const int32_t vlen, T *base, const size_t size) : _vlen{vlen}, _base{base}, _size{size}
{
- assert(size >= _vlen * sizeof(float));
+ assert(size == _vlen * sizeof(T));
}
public:
private:
const int32_t _vlen;
- uint8_t *const _base;
+ T *const _base;
const size_t _size;
};
//
// FeatureSink
//
-class FeatureSink final : public Sink
+template <typename T> class FeatureSink final : public Sink
{
public:
- FeatureSink(const nnfw::util::feature::Shape &shape, uint8_t *base, const size_t size)
+ FeatureSink(const nnfw::util::feature::Shape &shape, T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
- // DO NOTHING
+ assert(size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
}
public:
}
else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
{
- const ::internal::arm_compute::feature::View<float> from{&tensor};
- ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
+ const ::internal::arm_compute::feature::View<T> from{&tensor};
+ ::internal::nnapi::feature::View<T> into{_shape, _base, _size};
::nnfw::util::feature::iterate(_shape)
<< [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
private:
const nnfw::util::feature::Shape _shape;
- uint8_t *const _base;
+ T *const _base;
const size_t _size;
};
//
// VectorSource
//
-class VectorSource final : public Source
+template <typename T> class VectorSource final : public Source
{
public:
- VectorSource(const int32_t vlen, const uint8_t *base, const size_t size)
+ VectorSource(const int32_t vlen, const T *base, const size_t size)
: _vlen{vlen}, _base{base}, _size{size}
{
- assert(size >= _vlen * sizeof(float));
+ assert(size == _vlen * sizeof(T));
}
public:
private:
const int32_t _vlen;
- const uint8_t *const _base;
+ const T *const _base;
const size_t _size;
};
//
// FeatureSource
//
-class FeatureSource final : public Source
+template <typename T> class FeatureSource final : public Source
{
public:
- FeatureSource(const nnfw::util::feature::Shape &shape, const uint8_t *base, const size_t size)
+ FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
- // DO NOTHING
+ assert(_size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
}
public:
}
else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
{
- const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
- ::internal::arm_compute::feature::View<float> into{&tensor};
+ const ::internal::nnapi::feature::Reader<T> from{_shape, _base, _size};
+ ::internal::arm_compute::feature::View<T> into{&tensor};
::nnfw::util::feature::iterate(_shape)
<< [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
private:
const nnfw::util::feature::Shape _shape;
- const uint8_t *const _base;
+ const T *const _base;
const size_t _size;
};
const auto len = operands.at(operand_index).shape().dim(1);
- execution->source<neurun::exec::VectorSource>(
- index, len, reinterpret_cast<const uint8_t *>(buffer), length);
+ execution->source<neurun::exec::VectorSource<float>>(
+ index, len, reinterpret_cast<const float *>(buffer), length);
}
else if (operands.at(operand_index).shape().rank() == 4)
{
const auto &operand_shape = operands.at(operand_index).shape().asFeature();
- execution->source<neurun::exec::FeatureSource>(
- index, operand_shape, reinterpret_cast<const uint8_t *>(buffer), length);
+ execution->source<neurun::exec::FeatureSource<float>>(
+ index, operand_shape, reinterpret_cast<const float *>(buffer), length);
}
else
{
const auto len = operands.at(operand_index).shape().dim(1);
- execution->sink<neurun::exec::VectorSink>(index, len, reinterpret_cast<uint8_t *>(buffer),
- length);
+ execution->sink<neurun::exec::VectorSink<float>>(index, len, reinterpret_cast<float *>(buffer),
+ length);
}
else if (operands.at(operand_index).shape().rank() == 4)
{
const auto &operand_shape = operands.at(operand_index).shape().asFeature();
- execution->sink<neurun::exec::FeatureSink>(index, operand_shape,
- reinterpret_cast<uint8_t *>(buffer), length);
+ execution->sink<neurun::exec::FeatureSink<float>>(index, operand_shape,
+ reinterpret_cast<float *>(buffer), length);
}
else
{
namespace feature
{
-template <typename T> class Reader;
-
-template <> class Reader<float> final : public nnfw::util::feature::Reader<float>
+template <typename T> class Reader final : public nnfw::util::feature::Reader<T>
{
public:
- Reader(const ::nnfw::util::feature::Shape &shape, const uint8_t *ptr, size_t len)
- : _shape{shape}, _ptr{ptr}, _len{len}
+ Reader(const ::nnfw::util::feature::Shape &shape, const T *ptr, size_t len)
+ : _shape{shape}, _ptr{ptr}
{
- // DO NOTHING
+ (void)len; // Suppress the unused-variable warning in release builds, where assert() is compiled out
+ assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
}
public:
const nnfw::util::feature::Shape &shape(void) const { return _shape; }
public:
- float at(uint32_t ch, uint32_t row, uint32_t col) const override
+ T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, ch, row, col);
- const auto arr = reinterpret_cast<const float *>(_ptr);
-
- return arr[index];
+ return _ptr[index];
}
- float at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
+ T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, batch, ch, row, col);
- const auto arr = reinterpret_cast<const float *>(_ptr);
-
- return arr[index];
+ return _ptr[index];
}
private:
nnfw::util::feature::Shape _shape;
private:
- const uint8_t *_ptr;
- const size_t _len;
+ const T *_ptr;
};
} // namespace feature
#ifndef __INTERNAL_NNAPI_FEATURE_VIEW_H__
#define __INTERNAL_NNAPI_FEATURE_VIEW_H__
+#include <cassert>
+
#include "internal/nnapi/feature/Utils.h"
#include "util/feature/Reader.h"
namespace feature
{
-template <typename T> class View final : public nnfw::util::feature::Reader<float>
+template <typename T> class View final : public nnfw::util::feature::Reader<T>
{
public:
- View(const ::nnfw::util::feature::Shape &shape, uint8_t *ptr, size_t len)
- : _shape{shape}, _ptr{ptr}, _len{len}
+ View(const ::nnfw::util::feature::Shape &shape, T *ptr, size_t len) : _shape{shape}, _ptr{ptr}
{
- // DO NOTHING
+ (void)len; // Suppress the unused-variable warning in release builds, where assert() is compiled out
+ assert(shape.N * shape.C * shape.H * shape.W * sizeof(T) == len);
}
public:
{
uint32_t index = index_of(_shape, ch, row, col);
- T *arr = reinterpret_cast<T *>(_ptr);
-
- return arr[index];
+ return _ptr[index];
}
T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, batch, ch, row, col);
- T *arr = reinterpret_cast<T *>(_ptr);
-
- return arr[index];
+ return _ptr[index];
}
T &at(uint32_t ch, uint32_t row, uint32_t col)
{
uint32_t index = index_of(_shape, ch, row, col);
- T *arr = reinterpret_cast<T *>(_ptr);
-
- return arr[index];
+ return _ptr[index];
}
T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
{
uint32_t index = index_of(_shape, batch, ch, row, col);
- T *arr = reinterpret_cast<T *>(_ptr);
-
- return arr[index];
+ return _ptr[index];
}
private:
nnfw::util::feature::Shape _shape;
private:
- uint8_t *_ptr;
- const size_t _len;
+ T *_ptr;
};
} // namespace feature
{
case Type::NHWC_TO_NCHW:
{
- const ::internal::nnapi::feature::Reader<float> from{feature, input_buffer, input_size};
+ const ::internal::nnapi::feature::Reader<float> from{
+ feature, reinterpret_cast<const float *>(input_buffer), input_size};
::internal::arm_compute::feature::View<float> into{_output};
// TODO Fix this workaround (We may need codegen::operand::Object instead of ITensor)
_input_cl->map(queue);
const ::internal::arm_compute::feature::View<float> from{_input};
- ::internal::nnapi::feature::View<float> into{feature, output_buffer, output_size};
+ ::internal::nnapi::feature::View<float> into{
+ feature, reinterpret_cast<float *>(output_buffer), output_size};
::nnfw::util::feature::iterate(feature)
<< [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {