const size_t _size;
};
+/**
+ * @brief Register a typed VectorSource for a rank-1/2 operand on the execution.
+ *
+ * Dispatches on the NNAPI operand type code and installs a VectorSource<T>
+ * with the matching element type.
+ *
+ * @param execution Execution to install the source on
+ * @param type      NNAPI operand type code (ANEURALNETWORKS_*)
+ * @param index     Input index within the execution
+ * @param len       Number of elements in the vector
+ * @param buffer    Caller-owned buffer holding the input data (not copied here)
+ * @param length    Size of @p buffer in bytes
+ * @throws std::runtime_error if @p type is not supported yet
+ */
+static void asVectorSource(ANeuralNetworksExecution *execution, int32_t type, int32_t index,
+                           int32_t len, const void *buffer, size_t length)
+{
+  switch (type)
+  {
+    case ANEURALNETWORKS_FLOAT32:
+    case ANEURALNETWORKS_TENSOR_FLOAT32:
+      execution->source<VectorSource<float>>(index, len, reinterpret_cast<const float *>(buffer),
+                                             length);
+      break;
+    case ANEURALNETWORKS_INT32:
+    case ANEURALNETWORKS_TENSOR_INT32:
+      execution->source<VectorSource<int32_t>>(index, len,
+                                               reinterpret_cast<const int32_t *>(buffer), length);
+      break;
+    case ANEURALNETWORKS_UINT32:
+      execution->source<VectorSource<uint32_t>>(
+          index, len, reinterpret_cast<const uint32_t *>(buffer), length);
+      break;
+    default:
+      // throw already exits the switch; an unreachable break here is noise.
+      throw std::runtime_error("Not supported, yet");
+  }
+}
+
+/**
+ * @brief Register a typed FeatureSource for a rank-4 (feature map) operand.
+ *
+ * Dispatches on the NNAPI operand type code and installs a FeatureSource<T>
+ * with the matching element type.
+ *
+ * @param execution Execution to install the source on
+ * @param type      NNAPI operand type code (ANEURALNETWORKS_*)
+ * @param index     Input index within the execution
+ * @param shape     Feature-map shape of the operand
+ * @param buffer    Caller-owned buffer holding the input data (not copied here)
+ * @param length    Size of @p buffer in bytes
+ * @throws std::runtime_error if @p type is not supported yet
+ */
+static void asFeatureSource(ANeuralNetworksExecution *execution, int32_t type, int32_t index,
+                            const nnfw::util::feature::Shape &shape, const void *buffer,
+                            size_t length)
+{
+  switch (type)
+  {
+    case ANEURALNETWORKS_FLOAT32:
+    case ANEURALNETWORKS_TENSOR_FLOAT32:
+      execution->source<FeatureSource<float>>(index, shape, reinterpret_cast<const float *>(buffer),
+                                              length);
+      break;
+    case ANEURALNETWORKS_INT32:
+    case ANEURALNETWORKS_TENSOR_INT32:
+      execution->source<FeatureSource<int32_t>>(index, shape,
+                                                reinterpret_cast<const int32_t *>(buffer), length);
+      break;
+    case ANEURALNETWORKS_UINT32:
+      execution->source<FeatureSource<uint32_t>>(
+          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
+      break;
+    default:
+      // throw already exits the switch; an unreachable break here is noise.
+      throw std::runtime_error("Not supported, yet");
+  }
+}
+
//
// NNAPI Implementation
//
if (operands.at(operand_index).shape().rank() == 1)
{
  const auto len = operands.at(operand_index).shape().dim(0);
-  if (type != nullptr && type->type == OperandCode::ANEURALNETWORKS_INT32)
-  {
-    execution->source<VectorSource<int32_t>>(index, len,
-                                             reinterpret_cast<const uint8_t *>(buffer), length);
-  }
-  else
-  {
-    execution->source<VectorSource<float>>(index, len, reinterpret_cast<const uint8_t *>(buffer),
-                                           length);
-  }
+
+  // NOTE `type` is an optional parameter in the NNAPI contract and may be
+  // nullptr (the removed code null-checked it before dereferencing). Fall
+  // back to float — the previous default — instead of dereferencing
+  // unconditionally.
+  const int32_t operand_type =
+      (type != nullptr) ? type->type : static_cast<int32_t>(ANEURALNETWORKS_TENSOR_FLOAT32);
+  asVectorSource(execution, operand_type, index, len, buffer, length);
}
else if (operands.at(operand_index).shape().rank() == 2)
{
  // TODO check whether the following assert is needed or not
  // assert(operands.at(operand_index).shape().dim(0) == 1);
-  int32_t len = 0;
-  if (operands.at(operand_index).shape().dim(0) == 1)
-  {
-    len = operands.at(operand_index).shape().dim(1);
-  }
-  else
-  {
-    len = operands.at(operand_index).shape().dim(0) * operands.at(operand_index).shape().dim(1);
-  }
-  execution->source<VectorSource<float>>(index, len, reinterpret_cast<const uint8_t *>(buffer),
-                                         length);
+  // dim(0) * dim(1) subsumes the old dim(0) == 1 special case.
+  const auto len =
+      operands.at(operand_index).shape().dim(0) * operands.at(operand_index).shape().dim(1);
+
+  // NOTE `type` is optional per NNAPI and may be nullptr; the removed code
+  // never read it here and always pushed float, so keep float as the default.
+  const int32_t operand_type =
+      (type != nullptr) ? type->type : static_cast<int32_t>(ANEURALNETWORKS_TENSOR_FLOAT32);
+  asVectorSource(execution, operand_type, index, len, buffer, length);
}
else if (operands.at(operand_index).shape().rank() == 4)
{
  const auto &operand_shape = operands.at(operand_index).shape().asFeature();
-  execution->source<FeatureSource>(index, operand_shape,
-                                   reinterpret_cast<const uint8_t *>(buffer), length);
+
+  // NOTE `type` is optional per NNAPI and may be nullptr; the removed code
+  // never read it here and always pushed float, so keep float as the default.
+  const int32_t operand_type =
+      (type != nullptr) ? type->type : static_cast<int32_t>(ANEURALNETWORKS_TENSOR_FLOAT32);
+  asFeatureSource(execution, operand_type, index, operand_shape, buffer, length);
}
else
{
#include "internal/nnapi/feature/Reader.h"
#include "internal/arm_compute/feature/View.h"
+// Source that pushes a 4-D (feature map) operand of element type T into an
+// ARM Compute tensor, iterating channel/row/col via nnfw::util::feature.
+// Templated so INT32/UINT32 feature maps can be fed, not only float.
-class FeatureSource final : public Source
+template <typename T> class FeatureSource final : public Source
{
public:
-  FeatureSource(const nnfw::util::feature::Shape &shape, const uint8_t *base, const size_t size)
+  // `base` is a caller-owned buffer of at least `size` bytes; it is NOT
+  // copied, so it must outlive this source.
+  FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
    : _shape{shape}, _base{base}, _size{size}
  {
    // DO NOTHING
public:
  void push(::arm_compute::ITensor &tensor) const override
  {
-    const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
-    ::internal::arm_compute::feature::View<float> into{&tensor};
+    // TODO Should replace the Construct parameter of Reader and View from uint8_t * with typename
+    // T.
+    // Inevitably casting must be done.
+    const ::internal::nnapi::feature::Reader<T> from{
+        _shape, reinterpret_cast<const uint8_t *>(_base), _size};
+    ::internal::arm_compute::feature::View<T> into{&tensor};
    ::nnfw::util::feature::iterate(_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
      const auto value = from.at(ch, row, col);
private:
  const nnfw::util::feature::Shape _shape;
-  const uint8_t *const _base;
+  const T *const _base; // non-owning view of the caller's buffer
  const size_t _size;
};
+// Source that copies a rank-1 operand of element type T into an ARM Compute
+// tensor, one element at a time.
template <typename T> class VectorSource final : public Source
{
public:
-  VectorSource(const int32_t vlen, const uint8_t *base, const size_t size)
-    : _vlen{vlen}, _base{base}
+  // `base` is a caller-owned buffer holding at least `vlen` elements of T;
+  // it is NOT copied, so it must outlive this source.
+  VectorSource(const int32_t vlen, const T *base, const size_t size) : _vlen{vlen}, _base{base}
  {
-    assert(size >= _vlen * sizeof(float));
+    assert(size >= _vlen * sizeof(T)); // element size now follows T
  }
public:
  void push(::arm_compute::ITensor &tensor) const override
  {
-    auto base = reinterpret_cast<const T *>(_base);
-
    for (int32_t n = 0; n < _vlen; ++n)
    {
-      auto from = base + n;
+      auto from = _base + n; // _base is already typed; no cast needed
      auto into = reinterpret_cast<T *>(tensor.ptr_to_element(::arm_compute::Coordinates{n}));
      *into = *from;
private:
  const int32_t _vlen;
-  const uint8_t *const _base;
+  const T *const _base; // non-owning view of the caller's buffer
};
#endif // __INTERNAL_VECTOR_SOURCE_H__