#include <arm_compute/core/ITensor.h>
+#include <cstring> // memcpy, used by Sink<T>::pull below
-#include <util/feature/Shape.h>
-#include <util/feature/IndexIterator.h>
-#include <util/matrix/Shape.h>
-#include <util/tensor/Shape.h>
-
-#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
-#include "util/feature/nhwc/View.h"
-#include "util/feature/nhwc/Reader.h"
-
namespace neurun
{
namespace exec
{
-struct Sink
+struct ISink
{
- virtual ~Sink() = default;
+ virtual ~ISink() = default;
virtual void pull(::arm_compute::ITensor &tensor) const = 0;
};
-//
-// VectorSink
-//
-template <typename T> class VectorSink final : public Sink
+template <typename T> class Sink final : public ISink
{
public:
- VectorSink(const int32_t vlen, T *base, const size_t size) : _vlen{vlen}, _base{base}, _size{size}
- {
- assert(size == _vlen * sizeof(T));
- }
+ Sink(T *base, const size_t size) : _base{base}, _size{size} {}
public:
  void pull(::arm_compute::ITensor &tensor) const override
+  {
+    // Copy the backend tensor's contents into the caller's output buffer.
+    // This assumes the tensor uses a linear, unpadded layout.
+    memcpy(_base, tensor.buffer(), _size);
+  }
private:
- const int32_t _vlen;
- T *const _base;
- const size_t _size;
-};
-
-//
-// MatrixSink
-//
-template <typename T> class MatrixSink final : public Sink
-{
-public:
- MatrixSink(const int32_t H, const int32_t W, T *base, const size_t size)
- : _height{H}, _width{W}, _base{base}, _size{size}
- {
- assert(size == _height * _width * sizeof(T));
- }
-
-public:
- void pull(::arm_compute::ITensor &tensor) const override
- {
- // TODO: This is just workaround codes, It needs to refactor.
- if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
- {
- memcpy(_base, tensor.buffer(), _size);
- }
- else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
- {
- using ::arm_compute::Window;
- using ::arm_compute::Iterator;
-
- Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
- Iterator it(&tensor, window);
-
- const auto &y = window[Window::DimY];
- for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
- {
- memcpy(_base + h * _width, it.ptr(), _width * sizeof(T));
- }
- }
- }
-
-private:
- const int32_t _height;
- const int32_t _width;
-
-private:
- T *const _base;
- const size_t _size;
-};
-
-//
-// Tensor3DSink
-//
-template <typename T> class Tensor3DSink final : public Sink
-{
-public:
- Tensor3DSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
- : _shape{shape}, _base{base}, _size{size}
- {
- assert(size == _shape.element_nums() * sizeof(T));
- }
-
-public:
- void pull(::arm_compute::ITensor &tensor) const override
- {
- // TODO: This is just workaround codes, It needs to refactor.
- if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
- {
- memcpy(_base, tensor.buffer(), _size);
- }
- else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
- {
- using ::arm_compute::Window;
- using ::arm_compute::Iterator;
-
- const int32_t height_width = _shape.dim(1) * _shape.dim(2);
- const int32_t width = _shape.dim(2);
-
- Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
- Iterator it(&tensor, window);
-
- const auto &z = window[Window::DimZ];
- const auto &y = window[Window::DimY];
- for (auto c = z.start(); c < z.end(); c += z.step(), it.increment(Window::DimZ))
- {
- for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
- {
- memcpy(_base + c * height_width + h * width, it.ptr(), width * sizeof(T));
- }
- }
- }
- }
-
-private:
- const nnfw::util::tensor::Shape _shape;
-
-private:
- T *const _base;
- const size_t _size;
-};
-
-//
-// FeatureSink
-//
-template <typename T> class FeatureSink final : public Sink
-{
-public:
- FeatureSink(const nnfw::util::feature::Shape &shape, T *base, const size_t size)
- : _shape{shape}, _base{base}, _size{size}
- {
- assert(size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
- }
-
-public:
- void pull(::arm_compute::ITensor &tensor) const override
- {
- // TODO: This is just workaround codes, It needs to refactor.
- if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
- {
- memcpy(_base, tensor.buffer(), _size);
- }
- else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
- {
- const util::feature::nchw::View<T> from{&tensor};
- util::feature::nhwc::View<T> into{_shape, _base, _size};
-
- ::nnfw::util::feature::iterate(_shape)
- << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(bat, ch, row, col);
- into.at(bat, ch, row, col) = value;
- };
- }
- }
-
-private:
- const nnfw::util::feature::Shape _shape;
T *const _base;
const size_t _size;
};
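+
+// Usage sketch (illustrative only; `out` and `result_tensor` are hypothetical):
+// a Sink<T> wraps a caller-owned output buffer, and pull() copies the executed
+// tensor's contents into it.
+//
+//   float out[16];
+//   neurun::exec::Sink<float> sink{out, sizeof(out)};
+//   sink.pull(result_tensor); // result_tensor is an ::arm_compute::ITensor &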
#include <cassert>
-#include <arm_compute/runtime/CL/CLTensor.h>
-
-#include <util/feature/IndexIterator.h>
-#include <util/feature/Shape.h>
-#include <util/matrix/Shape.h>
-#include <util/tensor/Shape.h>
-
-#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
-#include "util/feature/nhwc/Reader.h"
-#include "util/feature/nhwc/View.h"
-
-#include "util/feature/nchw/View.h"
+#include <arm_compute/core/ITensor.h>
+#include <cstring> // memcpy, used by Source<T>::push below
namespace neurun
{
namespace exec
{
-struct Source
+struct ISource
{
- virtual ~Source() = default;
+ virtual ~ISource() = default;
virtual void push(::arm_compute::ITensor &tensor) const = 0;
};
-//
-// VectorSource
-//
-template <typename T> class VectorSource final : public Source
+template <typename T> class Source final : public ISource
{
public:
- VectorSource(const int32_t vlen, const T *base, const size_t size)
- : _vlen{vlen}, _base{base}, _size{size}
- {
- assert(size == _vlen * sizeof(T));
- }
+ Source(const T *base, const size_t size) : _base{base}, _size{size} {}
public:
  void push(::arm_compute::ITensor &tensor) const override
+  {
+    // Copy the caller's input buffer into the backend tensor's buffer.
+    // This assumes the tensor uses a linear, unpadded layout.
+    memcpy(tensor.buffer(), _base, _size);
+  }
private:
- const int32_t _vlen;
- const T *const _base;
- const size_t _size;
-};
-
-//
-// MatrixSource
-//
-template <typename T> class MatrixSource final : public Source
-{
-public:
- MatrixSource(const nnfw::util::matrix::Shape &shape, const T *base, const size_t size)
- : _shape{shape}, _base{base}, _size{size}
- {
- assert(size == _shape.H * _shape.W * sizeof(T));
- }
-
-public:
- void push(::arm_compute::ITensor &tensor) const override
- {
- // TODO: This is just workaround codes, It needs to refactor.
- if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
- {
- memcpy(tensor.buffer(), _base, _size);
- }
- else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
- {
- using ::arm_compute::Window;
- using ::arm_compute::Iterator;
-
- Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
- Iterator it(&tensor, window);
-
- const auto &y = window[Window::DimY];
- for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
- {
- memcpy(it.ptr(), _base + h * _shape.W, _shape.W * sizeof(T));
- }
- }
- }
-
-private:
- const nnfw::util::matrix::Shape _shape;
- const T *const _base;
- const size_t _size;
-};
-
-//
-// Tensor3DSource
-//
-template <typename T> class Tensor3DSource final : public Source
-{
-public:
- Tensor3DSource(const nnfw::util::tensor::Shape &shape, const T *base, const size_t size)
- : _shape{shape}, _base{base}, _size{size}
- {
- assert(size == _shape.element_nums() * sizeof(T));
- }
-
-public:
- void push(::arm_compute::ITensor &tensor) const override
- {
- // TODO: This is just workaround codes, It needs to refactor.
- if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
- {
- memcpy(tensor.buffer(), _base, _size);
- }
- else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
- {
- using ::arm_compute::Window;
- using ::arm_compute::Iterator;
-
- const int32_t height_width = _shape.dim(1) * _shape.dim(2);
- const int32_t width = _shape.dim(2);
-
- Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
- Iterator it(&tensor, window);
-
- const auto &z = window[Window::DimZ];
- const auto &y = window[Window::DimY];
- for (auto c = z.start(); c < z.end(); c += z.step(), it.increment(Window::DimZ))
- {
- for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
- {
- memcpy(it.ptr(), _base + c * height_width + h * width, width * sizeof(T));
- }
- }
- }
- }
-
-private:
- const nnfw::util::tensor::Shape _shape;
-
-private:
- const T *const _base;
- const size_t _size;
-};
-
-//
-// FeatureSource
-//
-template <typename T> class FeatureSource final : public Source
-{
-public:
- FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
- : _shape{shape}, _base{base}, _size{size}
- {
- assert(_size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
- }
-
-public:
- void push(::arm_compute::ITensor &tensor) const override
- {
- // TODO: This is just workaround codes, It needs to refactor.
- if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
- {
- memcpy(tensor.buffer(), _base, _size);
- }
- else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
- {
- const util::feature::nhwc::Reader<T> from{_shape, _base, _size};
- util::feature::nchw::View<T> into{&tensor};
-
- ::nnfw::util::feature::iterate(_shape)
- << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(bat, ch, row, col);
- into.at(bat, ch, row, col) = value;
- };
- }
- }
-
-private:
- const nnfw::util::feature::Shape _shape;
const T *const _base;
const size_t _size;
};
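+
+// Usage sketch (illustrative only; `in` and `input_tensor` are hypothetical):
+// a Source<T> wraps a caller-owned input buffer, and push() copies it into the
+// backend tensor before execution.
+//
+//   const float in[16] = {};
+//   neurun::exec::Source<float> source{in, sizeof(in)};
+//   source.push(input_tensor); // input_tensor is an ::arm_compute::ITensor &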
#include "graph/operand/DataType.h"
#include "graph/operand/Index.h"
-inline void sourceAsVector(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- int32_t len, const void *buffer, size_t length)
+inline void source(ANeuralNetworksExecution *execution,
+ const ::neurun::graph::operand::DataType &type, int32_t index,
+ const void *buffer, size_t length)
{
using ::neurun::graph::operand::DataType;
switch (type)
{
case DataType::SCALAR_FLOAT32:
case DataType::TENSOR_FLOAT32:
- execution->source<::neurun::exec::VectorSource<float>>(
- index, len, reinterpret_cast<const float *>(buffer), length);
+ execution->source<::neurun::exec::Source<float>>(
+ index, reinterpret_cast<const float *>(buffer), length);
break;
case DataType::SCALAR_INT32:
case DataType::TENSOR_INT32:
- execution->source<::neurun::exec::VectorSource<int32_t>>(
- index, len, reinterpret_cast<const int32_t *>(buffer), length);
+ execution->source<::neurun::exec::Source<int32_t>>(
+ index, reinterpret_cast<const int32_t *>(buffer), length);
break;
case DataType::SCALAR_UINT32:
- execution->source<::neurun::exec::VectorSource<uint32_t>>(
- index, len, reinterpret_cast<const uint32_t *>(buffer), length);
+ execution->source<::neurun::exec::Source<uint32_t>>(
+ index, reinterpret_cast<const uint32_t *>(buffer), length);
break;
case DataType::TENSOR_QUANT8_ASYMM:
- execution->source<::neurun::exec::VectorSource<uint8_t>>(
- index, len, reinterpret_cast<const uint8_t *>(buffer), length);
+ execution->source<::neurun::exec::Source<uint8_t>>(
+ index, reinterpret_cast<const uint8_t *>(buffer), length);
break;
default:
throw std::runtime_error("Not supported, yet");
}
}
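+
+// Example (hypothetical execution and operand index): the switch above selects
+// the Source<T> element type from the operand's DataType, so a float input is
+// registered as follows.
+//
+//   float in[4] = {1.f, 2.f, 3.f, 4.f};
+//   source(execution, DataType::TENSOR_FLOAT32, 0, in, sizeof(in));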
-inline void sourceAsMatrix(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- const nnfw::util::matrix::Shape &shape, const void *buffer,
- size_t length)
+inline void sink(ANeuralNetworksExecution *execution,
+ const ::neurun::graph::operand::DataType &type, int32_t index, void *buffer,
+ size_t length)
{
using ::neurun::graph::operand::DataType;
switch (type)
{
case DataType::SCALAR_FLOAT32:
case DataType::TENSOR_FLOAT32:
- execution->source<::neurun::exec::MatrixSource<float>>(
- index, shape, reinterpret_cast<const float *>(buffer), length);
+ execution->sink<::neurun::exec::Sink<float>>(index, reinterpret_cast<float *>(buffer),
+ length);
break;
case DataType::SCALAR_INT32:
case DataType::TENSOR_INT32:
- execution->source<::neurun::exec::MatrixSource<int32_t>>(
- index, shape, reinterpret_cast<const int32_t *>(buffer), length);
+ execution->sink<::neurun::exec::Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer),
+ length);
break;
case DataType::SCALAR_UINT32:
- execution->source<::neurun::exec::MatrixSource<uint32_t>>(
- index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
+ execution->sink<::neurun::exec::Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer),
+ length);
break;
case DataType::TENSOR_QUANT8_ASYMM:
- execution->source<::neurun::exec::MatrixSource<uint8_t>>(
- index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-inline void sourceAsTensor3D(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- const nnfw::util::tensor::Shape &shape, const void *buffer,
- size_t length)
-{
- assert(shape.rank() == 3);
-
- using ::neurun::graph::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->source<::neurun::exec::Tensor3DSource<float>>(
- index, shape, reinterpret_cast<const float *>(buffer), length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->source<::neurun::exec::Tensor3DSource<int32_t>>(
- index, shape, reinterpret_cast<const int32_t *>(buffer), length);
- break;
- case DataType::SCALAR_UINT32:
- execution->source<::neurun::exec::Tensor3DSource<uint32_t>>(
- index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->source<::neurun::exec::Tensor3DSource<uint8_t>>(
- index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-inline void sourceAsFeature(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- const nnfw::util::feature::Shape &shape, const void *buffer,
- size_t length)
-{
- using ::neurun::graph::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->source<::neurun::exec::FeatureSource<float>>(
- index, shape, reinterpret_cast<const float *>(buffer), length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->source<::neurun::exec::FeatureSource<int32_t>>(
- index, shape, reinterpret_cast<const int32_t *>(buffer), length);
- break;
- case DataType::SCALAR_UINT32:
- execution->source<::neurun::exec::FeatureSource<uint32_t>>(
- index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->source<::neurun::exec::FeatureSource<uint8_t>>(
- index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-inline void sinkAsVector(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index, int32_t len,
- void *buffer, size_t length)
-{
- using ::neurun::graph::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->sink<::neurun::exec::VectorSink<float>>(index, len,
- reinterpret_cast<float *>(buffer), length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->sink<::neurun::exec::VectorSink<int32_t>>(
- index, len, reinterpret_cast<int32_t *>(buffer), length);
- break;
- case DataType::SCALAR_UINT32:
- execution->sink<::neurun::exec::VectorSink<uint32_t>>(
- index, len, reinterpret_cast<uint32_t *>(buffer), length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->sink<::neurun::exec::VectorSink<uint8_t>>(
- index, len, reinterpret_cast<uint8_t *>(buffer), length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-inline void sinkAsMatrix(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- const nnfw::util::matrix::Shape &shape, void *buffer, size_t length)
-{
- using ::neurun::graph::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->sink<::neurun::exec::MatrixSink<float>>(index, shape.H, shape.W,
- reinterpret_cast<float *>(buffer), length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->sink<::neurun::exec::MatrixSink<int32_t>>(
- index, shape.H, shape.W, reinterpret_cast<int32_t *>(buffer), length);
- break;
- case DataType::SCALAR_UINT32:
- execution->sink<::neurun::exec::MatrixSink<uint32_t>>(
- index, shape.H, shape.W, reinterpret_cast<uint32_t *>(buffer), length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->sink<::neurun::exec::MatrixSink<uint8_t>>(
- index, shape.H, shape.W, reinterpret_cast<uint8_t *>(buffer), length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-inline void sinkAsTensor3D(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- const nnfw::util::tensor::Shape &shape, void *buffer, size_t length)
-{
- assert(shape.rank() == 3);
-
- using ::neurun::graph::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->sink<::neurun::exec::Tensor3DSink<float>>(
- index, shape, reinterpret_cast<float *>(buffer), length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->sink<::neurun::exec::Tensor3DSink<int32_t>>(
- index, shape, reinterpret_cast<int32_t *>(buffer), length);
- break;
- case DataType::SCALAR_UINT32:
- execution->sink<::neurun::exec::Tensor3DSink<uint32_t>>(
- index, shape, reinterpret_cast<uint32_t *>(buffer), length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->sink<::neurun::exec::Tensor3DSink<uint8_t>>(
- index, shape, reinterpret_cast<uint8_t *>(buffer), length);
- break;
- default:
- throw std::runtime_error("Not supported, yet");
- break;
- }
-}
-
-inline void sinkAsFeature(ANeuralNetworksExecution *execution,
- const ::neurun::graph::operand::DataType &type, int32_t index,
- const nnfw::util::feature::Shape &shape, void *buffer, size_t length)
-{
- using ::neurun::graph::operand::DataType;
- switch (type)
- {
- case DataType::SCALAR_FLOAT32:
- case DataType::TENSOR_FLOAT32:
- execution->sink<::neurun::exec::FeatureSink<float>>(
- index, shape, reinterpret_cast<float *>(buffer), length);
- break;
- case DataType::SCALAR_INT32:
- case DataType::TENSOR_INT32:
- execution->sink<::neurun::exec::FeatureSink<int32_t>>(
- index, shape, reinterpret_cast<int32_t *>(buffer), length);
- break;
- case DataType::SCALAR_UINT32:
- execution->sink<::neurun::exec::FeatureSink<uint32_t>>(
- index, shape, reinterpret_cast<uint32_t *>(buffer), length);
- break;
- case DataType::TENSOR_QUANT8_ASYMM:
- execution->sink<::neurun::exec::FeatureSink<uint8_t>>(
- index, shape, reinterpret_cast<uint8_t *>(buffer), length);
+ execution->sink<::neurun::exec::Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer),
+ length);
break;
default:
      throw std::runtime_error("Not supported, yet");
  }
}
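+
+// Example (hypothetical execution and operand index): binding a float buffer
+// that will receive output operand 0 when the execution completes.
+//
+//   float out[4];
+//   sink(execution, DataType::TENSOR_FLOAT32, 0, out, sizeof(out));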
const auto data_type = operands.at(operand_index).typeInfo().type();
const auto operand_shape = operands.at(operand_index).shape();
- if (operand_shape.rank() == 1)
- {
- const auto len = operand_shape.dim(0);
-
- sourceAsVector(execution, data_type, index, len, buffer, length);
- }
- else if (operand_shape.rank() == 2)
- {
- const auto &input_shape = operand_shape.asMatrix();
-
- sourceAsMatrix(execution, data_type, index, input_shape, buffer, length);
- }
- else if (operand_shape.rank() == 3)
- {
- const auto &input_shape = operand_shape.asTensor();
-
- sourceAsTensor3D(execution, data_type, index, input_shape, buffer, length);
- }
- else if (operand_shape.rank() == 4)
- {
- const auto &input_shape = operand_shape.asFeature();
-
- sourceAsFeature(execution, data_type, index, input_shape, buffer, length);
- }
- else
- {
- throw std::runtime_error{"Not supported, yet"};
- }
+ source(execution, data_type, index, buffer, length);
return ANEURALNETWORKS_NO_ERROR;
}
const auto data_type = operands.at(operand_index).typeInfo().type();
const auto operand_shape = operands.at(operand_index).shape();
- if (operand_shape.rank() == 1)
- {
- const auto len = operand_shape.dim(0);
-
- sinkAsVector(execution, data_type, index, len, buffer, length);
- }
- else if (operand_shape.rank() == 2)
- {
- const auto &output_shape = operand_shape.asMatrix();
-
- sinkAsMatrix(execution, data_type, index, output_shape, buffer, length);
- }
- else if (operand_shape.rank() == 3)
- {
- const auto &output_shape = operand_shape.asTensor();
-
- sinkAsTensor3D(execution, data_type, index, output_shape, buffer, length);
- }
- else if (operand_shape.rank() == 4)
- {
- const auto &output_shape = operand_shape.asFeature();
-
- sinkAsFeature(execution, data_type, index, output_shape, buffer, length);
- }
- else
- {
- throw std::runtime_error{"Not supported, yet"};
- }
+ sink(execution, data_type, index, buffer, length);
return ANEURALNETWORKS_NO_ERROR;
}
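+
+// End-to-end sketch via the public NNAPI entry points (illustrative, not part
+// of this patch): with the rank-specialized helpers removed, setInput and
+// setOutput treat every operand as a flat byte buffer of the given length.
+//
+//   float in[4], out[4];
+//   ANeuralNetworksExecution_setInput(execution, 0, nullptr, in, sizeof(in));
+//   ANeuralNetworksExecution_setOutput(execution, 0, nullptr, out, sizeof(out));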