From 7070d99af5fd8c2362d5569d4a370d3620a6bede Mon Sep 17 00:00:00 2001
From: Jiseob Jang/Motion Control Lab(SR)/Engineer/Samsung Electronics
Date: Tue, 5 Jun 2018 17:42:15 +0900
Subject: [PATCH] Support other type of operand for setting input of execution (#1566)

This commit supports other operand types when setting an input of an execution in the pure runtime.

- enable class VectorSource to use other types.
- enable class FeatureSource to use other types.

Signed-off-by: jiseob.jang
---
 runtimes/pure_arm_compute/src/execution.cc        | 81 ++++++++++++++++------
 .../pure_arm_compute/src/internal/FeatureSource.h | 14 ++--
 .../pure_arm_compute/src/internal/VectorSource.h  | 11 ++-
 3 files changed, 71 insertions(+), 35 deletions(-)

diff --git a/runtimes/pure_arm_compute/src/execution.cc b/runtimes/pure_arm_compute/src/execution.cc
index 1a4442f..239e1e1 100644
--- a/runtimes/pure_arm_compute/src/execution.cc
+++ b/runtimes/pure_arm_compute/src/execution.cc
@@ -74,6 +74,57 @@ private:
   const size_t _size;
 };
 
+static void asVectorSource(ANeuralNetworksExecution *execution, int32_t type, int32_t index,
+                           int32_t len, const void *buffer, size_t length)
+{
+  switch (type)
+  {
+    case ANEURALNETWORKS_FLOAT32:
+    case ANEURALNETWORKS_TENSOR_FLOAT32:
+      execution->source<VectorSource<float>>(index, len, reinterpret_cast<const float *>(buffer),
+                                             length);
+      break;
+    case ANEURALNETWORKS_INT32:
+    case ANEURALNETWORKS_TENSOR_INT32:
+      execution->source<VectorSource<int32_t>>(index, len,
+                                               reinterpret_cast<const int32_t *>(buffer), length);
+      break;
+    case ANEURALNETWORKS_UINT32:
+      execution->source<VectorSource<uint32_t>>(index, len,
+                                                reinterpret_cast<const uint32_t *>(buffer), length);
+      break;
+    default:
+      throw std::runtime_error("Not supported, yet");
+      break;
+  }
+}
+
+static void asFeatureSource(ANeuralNetworksExecution *execution, int32_t type, int32_t index,
+                            const nnfw::util::feature::Shape &shape, const void *buffer,
+                            size_t length)
+{
+  switch (type)
+  {
+    case ANEURALNETWORKS_FLOAT32:
+    case ANEURALNETWORKS_TENSOR_FLOAT32:
+      execution->source<FeatureSource<float>>(index, shape, reinterpret_cast<const float *>(buffer),
+                                              length);
+      break;
+    case ANEURALNETWORKS_INT32:
+    case ANEURALNETWORKS_TENSOR_INT32:
+      execution->source<FeatureSource<int32_t>>(index, shape,
+                                                reinterpret_cast<const int32_t *>(buffer), length);
+      break;
+    case ANEURALNETWORKS_UINT32:
+      execution->source<FeatureSource<uint32_t>>(
+          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
+      break;
+    default:
+      throw std::runtime_error("Not supported, yet");
+      break;
+  }
+}
+
 //
 // NNAPI Implementation
 //
@@ -104,39 +155,23 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32_t index,
   if (operands.at(operand_index).shape().rank() == 1)
   {
     const auto len = operands.at(operand_index).shape().dim(0);
-    if (type != nullptr && type->type == OperandCode::ANEURALNETWORKS_INT32)
-    {
-      execution->source<VectorSource<int32_t>>(index, len,
-                                               reinterpret_cast<const int32_t *>(buffer), length);
-    }
-    else
-    {
-      execution->source<VectorSource<float>>(index, len, reinterpret_cast<const float *>(buffer),
-                                             length);
-    }
+
+    asVectorSource(execution, type->type, index, len, buffer, length);
   }
   else if (operands.at(operand_index).shape().rank() == 2)
   {
     // TODO check whether the following assert is needed or not
     // assert(operands.at(operand_index).shape().dim(0) == 1);
-    int32_t len = 0;
-    if (operands.at(operand_index).shape().dim(0) == 1)
-    {
-      len = operands.at(operand_index).shape().dim(1);
-    }
-    else
-    {
-      len = operands.at(operand_index).shape().dim(0) * operands.at(operand_index).shape().dim(1);
-    }
-    execution->source<VectorSource<float>>(index, len, reinterpret_cast<const float *>(buffer),
-                                           length);
+    const auto len =
+        operands.at(operand_index).shape().dim(0) * operands.at(operand_index).shape().dim(1);
+
+    asVectorSource(execution, type->type, index, len, buffer, length);
   }
   else if (operands.at(operand_index).shape().rank() == 4)
   {
     const auto &operand_shape = operands.at(operand_index).shape().asFeature();
-    execution->source<FeatureSource>(index, operand_shape,
-                                     reinterpret_cast<const uint8_t *>(buffer), length);
+    asFeatureSource(execution, type->type, index, operand_shape, buffer, length);
   }
   else
   {
diff --git a/runtimes/pure_arm_compute/src/internal/FeatureSource.h b/runtimes/pure_arm_compute/src/internal/FeatureSource.h
index e365fa6..f2479be 100644
--- a/runtimes/pure_arm_compute/src/internal/FeatureSource.h
+++ b/runtimes/pure_arm_compute/src/internal/FeatureSource.h
@@ -7,10 +7,10 @@
 #include "internal/nnapi/feature/Reader.h"
 #include "internal/arm_compute/feature/View.h"
 
-class FeatureSource final : public Source
+template <typename T> class FeatureSource final : public Source
 {
 public:
-  FeatureSource(const nnfw::util::feature::Shape &shape, const uint8_t *base, const size_t size)
+  FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
       : _shape{shape}, _base{base}, _size{size}
   {
     // DO NOTHING
@@ -19,8 +19,12 @@ public:
 public:
   void push(::arm_compute::ITensor &tensor) const override
   {
-    const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
-    ::internal::arm_compute::feature::View<float> into{&tensor};
+    // TODO Should replace the constructor parameter of Reader and View from uint8_t * with
+    //      typename T.
+    //      Inevitably casting must be done.
+    const ::internal::nnapi::feature::Reader<T> from{
+        _shape, reinterpret_cast<const uint8_t *>(_base), _size};
+    ::internal::arm_compute::feature::View<T> into{&tensor};
 
     ::nnfw::util::feature::iterate(_shape) << [&](uint32_t ch, uint32_t row, uint32_t col) {
       const auto value = from.at(ch, row, col);
@@ -30,7 +34,7 @@ public:
 
 private:
   const nnfw::util::feature::Shape _shape;
-  const uint8_t *const _base;
+  const T *const _base;
   const size_t _size;
 };
diff --git a/runtimes/pure_arm_compute/src/internal/VectorSource.h b/runtimes/pure_arm_compute/src/internal/VectorSource.h
index fefd5af..ada408e 100644
--- a/runtimes/pure_arm_compute/src/internal/VectorSource.h
+++ b/runtimes/pure_arm_compute/src/internal/VectorSource.h
@@ -6,20 +6,17 @@
 template <typename T> class VectorSource final : public Source
 {
 public:
-  VectorSource(const int32_t vlen, const uint8_t *base, const size_t size)
-      : _vlen{vlen}, _base{base}
+  VectorSource(const int32_t vlen, const T *base, const size_t size) : _vlen{vlen}, _base{base}
   {
-    assert(size >= _vlen * sizeof(float));
+    assert(size >= _vlen * sizeof(T));
   }
 
 public:
   void push(::arm_compute::ITensor &tensor) const override
   {
-    auto base = reinterpret_cast<const T *>(_base);
-
     for (int32_t n = 0; n < _vlen; ++n)
     {
-      auto from = base + n;
+      auto from = _base + n;
       auto into = reinterpret_cast<T *>(tensor.ptr_to_element(::arm_compute::Coordinates{n}));
 
       *into = *from;
@@ -28,7 +25,7 @@ public:
 
 private:
  const int32_t _vlen;
-  const uint8_t *const _base;
+  const T *const _base;
 };
 
 #endif // __INTERNAL_VECTOR_SOURCE_H__
-- 
2.7.4
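
Editor's note: for readers unfamiliar with the pattern, below is a minimal standalone sketch of the dispatch this patch introduces: an NNAPI-style operand code selects the element type T, and a templated source then copies T-typed elements out of an untyped buffer. This is not the runtime's actual API; the enum, TypedVectorSource, pushVector, and the plain output buffer are hypothetical stand-ins for ANEURALNETWORKS_* codes, VectorSource<T>, asVectorSource, and the arm_compute ITensor.

// sketch.cc - illustration only; names below are made up, not part of the nnfw runtime
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>

// Stand-ins for the ANEURALNETWORKS_* operand codes switched on in the patch.
enum TypeCode { FLOAT32 = 0, INT32 = 1, UINT32 = 2 };

// Counterpart of VectorSource<T>: reads `vlen` elements of type T from a raw buffer.
template <typename T> class TypedVectorSource
{
public:
  TypedVectorSource(int32_t vlen, const T *base, size_t size) : _vlen{vlen}, _base{base}
  {
    // Same guard as the patch: the buffer must hold at least vlen elements of T.
    assert(size >= static_cast<size_t>(_vlen) * sizeof(T));
  }

  // Copies element by element into a destination buffer
  // (the runtime instead pushes into an ::arm_compute::ITensor).
  void push(void *out) const
  {
    auto into = reinterpret_cast<T *>(out);
    for (int32_t n = 0; n < _vlen; ++n)
    {
      into[n] = _base[n];
    }
  }

private:
  const int32_t _vlen;
  const T *const _base;
};

// Counterpart of asVectorSource: the operand code picks the element type.
void pushVector(int32_t type, int32_t len, const void *buffer, size_t length, void *out)
{
  switch (type)
  {
    case FLOAT32:
      TypedVectorSource<float>{len, reinterpret_cast<const float *>(buffer), length}.push(out);
      break;
    case INT32:
      TypedVectorSource<int32_t>{len, reinterpret_cast<const int32_t *>(buffer), length}.push(out);
      break;
    case UINT32:
      TypedVectorSource<uint32_t>{len, reinterpret_cast<const uint32_t *>(buffer), length}.push(out);
      break;
    default:
      throw std::runtime_error("Not supported, yet");
  }
}

int main()
{
  const int32_t in[4] = {1, 2, 3, 4};
  int32_t out[4] = {};
  pushVector(INT32, 4, in, sizeof(in), out);
  std::cout << out[0] << " " << out[3] << std::endl; // prints "1 4"
  return 0;
}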