#include <util/feature/Shape.h>
#include <util/feature/IndexIterator.h>
+#include <util/matrix/Shape.h>
+#include <util/tensor/Shape.h>
#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
#include "internal/nnapi/feature/View.h"
};
//
+// MatrixSink
+//
+// Copies a 2-D (H x W) matrix out of a backend tensor into a caller-owned
+// row-major buffer of T. Counterpart of MatrixSource.
+template <typename T> class MatrixSink final : public Sink
+{
+public:
+ // base: destination buffer holding at least H * W elements; size: its byte size.
+ MatrixSink(const int32_t H, const int32_t W, T *base, const size_t size)
+ : _height{H}, _width{W}, _base{base}, _size{size}
+ {
+ assert(size == _height * _width * sizeof(T));
+ }
+
+public:
+ // Pull the tensor's contents into _base, dispatching on the concrete tensor type.
+ void pull(::arm_compute::ITensor &tensor) const override
+ {
+ // TODO: This is a workaround; it needs refactoring.
+ if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
+ {
+ // CPU tensor: assumed to share _base's contiguous row-major layout,
+ // so one flat copy suffices — TODO confirm no padding/strides.
+ memcpy(_base, tensor.buffer(), _size);
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ assert(tensor.info()->dimension(0) == _width);
+ assert(tensor.info()->dimension(1) == _height);
+
+ using ::arm_compute::Window;
+ using ::arm_compute::Iterator;
+ using ::arm_compute::Coordinates;
+ using ::arm_compute::execute_window_loop;
+
+ Window window;
+
+ // Collapse dimension X so each loop iteration covers one whole row.
+ window.use_tensor_dimensions(tensor.info()->tensor_shape(), ::arm_compute::Window::DimY);
+
+ Iterator it(&tensor, window);
+ execute_window_loop(window,
+ [&](const Coordinates &id) {
+ const auto row = id.y();
+ // it.ptr() is the start of row `row` since X is collapsed.
+ memcpy(_base + row * _width, it.ptr(), _width * sizeof(T));
+ },
+ it);
+ }
+ }
+
+private:
+ const int32_t _height;
+ const int32_t _width;
+
+private:
+ T *const _base;
+ const size_t _size;
+};
+
+//
+// Tensor3DSink
+//
+// Copies a rank-3 tensor out of a backend tensor into a caller-owned buffer
+// laid out as [dim(0)][dim(1)][dim(2)] with dim(2) contiguous.
+template <typename T> class Tensor3DSink final : public Sink
+{
+public:
+ // base: destination buffer of shape.element_nums() elements; size: its byte size.
+ Tensor3DSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
+ : _shape{shape}, _base{base}, _size{size}
+ {
+ assert(size == _shape.element_nums() * sizeof(T));
+ }
+
+public:
+ // Pull the tensor's contents into _base, dispatching on the concrete tensor type.
+ void pull(::arm_compute::ITensor &tensor) const override
+ {
+ // TODO: This is a workaround; it needs refactoring.
+ if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
+ {
+ // CPU tensor: assumed contiguous with the same layout as _base — TODO confirm.
+ memcpy(_base, tensor.buffer(), _size);
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ using ::arm_compute::Window;
+ using ::arm_compute::Iterator;
+ using ::arm_compute::Coordinates;
+ using ::arm_compute::execute_window_loop;
+
+ Window window;
+
+ // Collapse dimension X so each iteration copies one contiguous row of width elements.
+ window.use_tensor_dimensions(tensor.info()->tensor_shape(), ::arm_compute::Window::DimY);
+ int32_t height_width = _shape.dim(1) * _shape.dim(2);
+ int32_t width = _shape.dim(2);
+
+ Iterator it(&tensor, window);
+ execute_window_loop(window,
+ [&](const Coordinates &id) {
+ const auto z = id.z();
+ const auto y = id.y();
+ // Destination offset: z planes of (height*width) plus y rows of width.
+ memcpy(_base + z * height_width + y * width, it.ptr(),
+ width * sizeof(T));
+ },
+ it);
+ }
+ }
+
+private:
+ const nnfw::util::tensor::Shape _shape;
+
+private:
+ T *const _base;
+ const size_t _size;
+};
+
+//
// FeatureSink
//
class FeatureSink final : public Sink
#include <util/feature/Shape.h>
#include <util/feature/IndexIterator.h>
+#include <util/matrix/Shape.h>
+#include <util/tensor/Shape.h>
#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
#include "internal/nnapi/feature/Reader.h"
};
//
+// MatrixSource
+//
+// Copies a 2-D (H x W) matrix from a caller-owned row-major buffer into a
+// backend tensor. Counterpart of MatrixSink.
+template <typename T> class MatrixSource final : public Source
+{
+public:
+ // base: source buffer holding shape.H * shape.W elements; size: its byte size.
+ MatrixSource(const nnfw::util::matrix::Shape &shape, const T *base, const size_t size)
+ : _shape{shape}, _base{base}, _size{size}
+ {
+ assert(size == _shape.H * _shape.W * sizeof(T));
+ }
+
+public:
+ // Push _base's contents into the tensor, dispatching on the concrete tensor type.
+ void push(::arm_compute::ITensor &tensor) const override
+ {
+ // TODO: This is a workaround; it needs refactoring.
+ if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
+ {
+ // CPU tensor: assumed to share _base's contiguous row-major layout — TODO confirm.
+ memcpy(tensor.buffer(), _base, _size);
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ // NOTE(review): unlike MatrixSink::pull, this branch does not assert
+ // tensor dimensions against _shape — consider adding the same checks.
+ using ::arm_compute::Window;
+ using ::arm_compute::Iterator;
+ using ::arm_compute::Coordinates;
+ using ::arm_compute::execute_window_loop;
+
+ Window window;
+ // Collapse dimension X so each loop iteration covers one whole row.
+ window.use_tensor_dimensions(tensor.info()->tensor_shape(), ::arm_compute::Window::DimY);
+
+ int32_t width = _shape.W;
+
+ Iterator it(&tensor, window);
+ execute_window_loop(window,
+ [&](const Coordinates &id) {
+ const auto height = id.y();
+ // Copy row `height` from _base into the tensor row at it.ptr().
+ memcpy(it.ptr(), _base + height * width, width * sizeof(T));
+ },
+ it);
+ }
+ }
+
+private:
+ const nnfw::util::matrix::Shape _shape;
+ const T *const _base;
+ const size_t _size;
+};
+
+//
+// Tensor3DSource
+//
+// Copies a rank-3 tensor from a caller-owned buffer (laid out as
+// [dim(0)][dim(1)][dim(2)], dim(2) contiguous) into a backend tensor.
+template <typename T> class Tensor3DSource final : public Source
+{
+public:
+ // base: source buffer of shape.element_nums() elements; size: its byte size.
+ Tensor3DSource(const nnfw::util::tensor::Shape &shape, const T *base, const size_t size)
+ : _shape{shape}, _base{base}, _size{size}
+ {
+ assert(size == _shape.element_nums() * sizeof(T));
+ }
+
+public:
+ // Push _base's contents into the tensor, dispatching on the concrete tensor type.
+ void push(::arm_compute::ITensor &tensor) const override
+ {
+ // TODO: This is a workaround; it needs refactoring.
+ if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
+ {
+ // CPU tensor: assumed contiguous with the same layout as _base — TODO confirm.
+ memcpy(tensor.buffer(), _base, _size);
+ }
+ else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
+ {
+ using ::arm_compute::Window;
+ using ::arm_compute::Iterator;
+ using ::arm_compute::Coordinates;
+ using ::arm_compute::execute_window_loop;
+
+ Window window;
+
+ // Collapse dimension X so each iteration copies one contiguous row of width elements.
+ window.use_tensor_dimensions(tensor.info()->tensor_shape(), ::arm_compute::Window::DimY);
+ int32_t height_width = _shape.dim(1) * _shape.dim(2);
+ int32_t width = _shape.dim(2);
+
+ Iterator it(&tensor, window);
+ execute_window_loop(window,
+ [&](const Coordinates &id) {
+ const auto z = id.z();
+ const auto y = id.y();
+ // Source offset: z planes of (height*width) plus y rows of width.
+ memcpy(it.ptr(), _base + z * height_width + y * width,
+ width * sizeof(T));
+ },
+ it);
+ }
+ }
+
+private:
+ const nnfw::util::tensor::Shape _shape;
+
+private:
+ const T *const _base;
+ const size_t _size;
+};
+
+//
// FeatureSource
//
class FeatureSource final : public Source