[neurun] Remove Source/Sink code divided by dimensions (#3213)
author김수진/동작제어Lab(SR)/Engineer/삼성전자 <sjsujin.kim@samsung.com>
Wed, 17 Oct 2018 06:01:53 +0000 (15:01 +0900)
committer오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Wed, 17 Oct 2018 06:01:53 +0000 (15:01 +0900)
Related : #2874

Part of : #3178

Since #3208 and #3187 have been merged, we can remove the `Source`/`Sink` code that was divided by dimensions (VectorSource/MatrixSource/Tensor3DSource/FeatureSource and their Sink counterparts) in favor of a single rank-agnostic `Source`/`Sink` pair.

Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
runtimes/neurun/src/exec/Sink.h
runtimes/neurun/src/exec/Source.h
runtimes/neurun/src/frontend/execution.cc
runtimes/neurun/src/frontend/wrapper/execution.h

index 5837e8f..3cc7a05 100644 (file)
 
 #include <arm_compute/core/ITensor.h>
 
-#include <util/feature/Shape.h>
-#include <util/feature/IndexIterator.h>
-#include <util/matrix/Shape.h>
-#include <util/tensor/Shape.h>
-
-#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
-#include "util/feature/nhwc/View.h"
-#include "util/feature/nhwc/Reader.h"
-
 namespace neurun
 {
 namespace exec
 {
 
-struct Sink
+struct ISink
 {
-  virtual ~Sink() = default;
+  virtual ~ISink() = default;
 
   virtual void pull(::arm_compute::ITensor &tensor) const = 0;
 };
 
-//
-// VectorSink
-//
-template <typename T> class VectorSink final : public Sink
+template <typename T> class Sink final : public ISink
 {
 public:
-  VectorSink(const int32_t vlen, T *base, const size_t size) : _vlen{vlen}, _base{base}, _size{size}
-  {
-    assert(size == _vlen * sizeof(T));
-  }
+  Sink(T *base, const size_t size) : _base{base}, _size{size} {}
 
 public:
   void pull(::arm_compute::ITensor &tensor) const override
@@ -60,146 +45,6 @@ public:
   }
 
 private:
-  const int32_t _vlen;
-  T *const _base;
-  const size_t _size;
-};
-
-//
-// MatrixSink
-//
-template <typename T> class MatrixSink final : public Sink
-{
-public:
-  MatrixSink(const int32_t H, const int32_t W, T *base, const size_t size)
-      : _height{H}, _width{W}, _base{base}, _size{size}
-  {
-    assert(size == _height * _width * sizeof(T));
-  }
-
-public:
-  void pull(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(_base, tensor.buffer(), _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &y = window[Window::DimY];
-      for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-      {
-        memcpy(_base + h * _width, it.ptr(), _width * sizeof(T));
-      }
-    }
-  }
-
-private:
-  const int32_t _height;
-  const int32_t _width;
-
-private:
-  T *const _base;
-  const size_t _size;
-};
-
-//
-// Tensor3DSink
-//
-template <typename T> class Tensor3DSink final : public Sink
-{
-public:
-  Tensor3DSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.element_nums() * sizeof(T));
-  }
-
-public:
-  void pull(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(_base, tensor.buffer(), _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      const int32_t height_width = _shape.dim(1) * _shape.dim(2);
-      const int32_t width = _shape.dim(2);
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &z = window[Window::DimZ];
-      const auto &y = window[Window::DimY];
-      for (auto c = z.start(); c < z.end(); c += z.step(), it.increment(Window::DimZ))
-      {
-        for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-        {
-          memcpy(_base + c * height_width + h * width, it.ptr(), width * sizeof(T));
-        }
-      }
-    }
-  }
-
-private:
-  const nnfw::util::tensor::Shape _shape;
-
-private:
-  T *const _base;
-  const size_t _size;
-};
-
-//
-// FeatureSink
-//
-template <typename T> class FeatureSink final : public Sink
-{
-public:
-  FeatureSink(const nnfw::util::feature::Shape &shape, T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
-  }
-
-public:
-  void pull(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(_base, tensor.buffer(), _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      const util::feature::nchw::View<T> from{&tensor};
-      util::feature::nhwc::View<T> into{_shape, _base, _size};
-
-      ::nnfw::util::feature::iterate(_shape)
-          << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
-               const auto value = from.at(bat, ch, row, col);
-               into.at(bat, ch, row, col) = value;
-             };
-    }
-  }
-
-private:
-  const nnfw::util::feature::Shape _shape;
   T *const _base;
   const size_t _size;
 };
index b071921..47d1c73 100644 (file)
 
 #include <cassert>
 
-#include <arm_compute/runtime/CL/CLTensor.h>
-
-#include <util/feature/IndexIterator.h>
-#include <util/feature/Shape.h>
-#include <util/matrix/Shape.h>
-#include <util/tensor/Shape.h>
-
-#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
-#include "util/feature/nhwc/Reader.h"
-#include "util/feature/nhwc/View.h"
-
-#include "util/feature/nchw/View.h"
+#include <arm_compute/core/ITensor.h>
 
 namespace neurun
 {
 namespace exec
 {
 
-struct Source
+struct ISource
 {
-  virtual ~Source() = default;
+  virtual ~ISource() = default;
 
   virtual void push(::arm_compute::ITensor &tensor) const = 0;
 };
 
-//
-// VectorSource
-//
-template <typename T> class VectorSource final : public Source
+template <typename T> class Source final : public ISource
 {
 public:
-  VectorSource(const int32_t vlen, const T *base, const size_t size)
-      : _vlen{vlen}, _base{base}, _size{size}
-  {
-    assert(size == _vlen * sizeof(T));
-  }
+  Source(const T *base, const size_t size) : _base{base}, _size{size} {}
 
 public:
   void push(::arm_compute::ITensor &tensor) const override
@@ -63,143 +45,6 @@ public:
   }
 
 private:
-  const int32_t _vlen;
-  const T *const _base;
-  const size_t _size;
-};
-
-//
-// MatrixSource
-//
-template <typename T> class MatrixSource final : public Source
-{
-public:
-  MatrixSource(const nnfw::util::matrix::Shape &shape, const T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.H * _shape.W * sizeof(T));
-  }
-
-public:
-  void push(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(tensor.buffer(), _base, _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &y = window[Window::DimY];
-      for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-      {
-        memcpy(it.ptr(), _base + h * _shape.W, _shape.W * sizeof(T));
-      }
-    }
-  }
-
-private:
-  const nnfw::util::matrix::Shape _shape;
-  const T *const _base;
-  const size_t _size;
-};
-
-//
-// Tensor3DSource
-//
-template <typename T> class Tensor3DSource final : public Source
-{
-public:
-  Tensor3DSource(const nnfw::util::tensor::Shape &shape, const T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.element_nums() * sizeof(T));
-  }
-
-public:
-  void push(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(tensor.buffer(), _base, _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      const int32_t height_width = _shape.dim(1) * _shape.dim(2);
-      const int32_t width = _shape.dim(2);
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &z = window[Window::DimZ];
-      const auto &y = window[Window::DimY];
-      for (auto c = z.start(); c < z.end(); c += z.step(), it.increment(Window::DimZ))
-      {
-        for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-        {
-          memcpy(it.ptr(), _base + c * height_width + h * width, width * sizeof(T));
-        }
-      }
-    }
-  }
-
-private:
-  const nnfw::util::tensor::Shape _shape;
-
-private:
-  const T *const _base;
-  const size_t _size;
-};
-
-//
-// FeatureSource
-//
-template <typename T> class FeatureSource final : public Source
-{
-public:
-  FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(_size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
-  }
-
-public:
-  void push(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(tensor.buffer(), _base, _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      const util::feature::nhwc::Reader<T> from{_shape, _base, _size};
-      util::feature::nchw::View<T> into{&tensor};
-
-      ::nnfw::util::feature::iterate(_shape)
-          << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
-               const auto value = from.at(bat, ch, row, col);
-               into.at(bat, ch, row, col) = value;
-             };
-    }
-  }
-
-private:
-  const nnfw::util::feature::Shape _shape;
   const T *const _base;
   const size_t _size;
 };
index 30c9451..b4261d9 100644 (file)
 #include "graph/operand/DataType.h"
 #include "graph/operand/Index.h"
 
-inline void sourceAsVector(ANeuralNetworksExecution *execution,
-                           const ::neurun::graph::operand::DataType &type, int32_t index,
-                           int32_t len, const void *buffer, size_t length)
+inline void source(ANeuralNetworksExecution *execution,
+                   const ::neurun::graph::operand::DataType &type, int32_t index,
+                   const void *buffer, size_t length)
 {
   using ::neurun::graph::operand::DataType;
   switch (type)
   {
     case DataType::SCALAR_FLOAT32:
     case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::VectorSource<float>>(
-          index, len, reinterpret_cast<const float *>(buffer), length);
+      execution->source<::neurun::exec::Source<float>>(
+          index, reinterpret_cast<const float *>(buffer), length);
       break;
     case DataType::SCALAR_INT32:
     case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::VectorSource<int32_t>>(
-          index, len, reinterpret_cast<const int32_t *>(buffer), length);
+      execution->source<::neurun::exec::Source<int32_t>>(
+          index, reinterpret_cast<const int32_t *>(buffer), length);
       break;
     case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::VectorSource<uint32_t>>(
-          index, len, reinterpret_cast<const uint32_t *>(buffer), length);
+      execution->source<::neurun::exec::Source<uint32_t>>(
+          index, reinterpret_cast<const uint32_t *>(buffer), length);
       break;
     case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::VectorSource<uint8_t>>(
-          index, len, reinterpret_cast<const uint8_t *>(buffer), length);
+      execution->source<::neurun::exec::Source<uint8_t>>(
+          index, reinterpret_cast<const uint8_t *>(buffer), length);
       break;
     default:
       throw std::runtime_error("Not supported, yet");
@@ -56,223 +56,30 @@ inline void sourceAsVector(ANeuralNetworksExecution *execution,
   }
 }
 
-inline void sourceAsMatrix(ANeuralNetworksExecution *execution,
-                           const ::neurun::graph::operand::DataType &type, int32_t index,
-                           const nnfw::util::matrix::Shape &shape, const void *buffer,
-                           size_t length)
+inline void sink(ANeuralNetworksExecution *execution,
+                 const ::neurun::graph::operand::DataType &type, int32_t index, void *buffer,
+                 size_t length)
 {
   using ::neurun::graph::operand::DataType;
   switch (type)
   {
     case DataType::SCALAR_FLOAT32:
     case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::MatrixSource<float>>(
-          index, shape, reinterpret_cast<const float *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<float>>(index, reinterpret_cast<float *>(buffer),
+                                                   length);
       break;
     case DataType::SCALAR_INT32:
     case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::MatrixSource<int32_t>>(
-          index, shape, reinterpret_cast<const int32_t *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer),
+                                                     length);
       break;
     case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::MatrixSource<uint32_t>>(
-          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer),
+                                                      length);
       break;
     case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::MatrixSource<uint8_t>>(
-          index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sourceAsTensor3D(ANeuralNetworksExecution *execution,
-                             const ::neurun::graph::operand::DataType &type, int32_t index,
-                             const nnfw::util::tensor::Shape &shape, const void *buffer,
-                             size_t length)
-{
-  assert(shape.rank() == 3);
-
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::Tensor3DSource<float>>(
-          index, shape, reinterpret_cast<const float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::Tensor3DSource<int32_t>>(
-          index, shape, reinterpret_cast<const int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::Tensor3DSource<uint32_t>>(
-          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::Tensor3DSource<uint8_t>>(
-          index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sourceAsFeature(ANeuralNetworksExecution *execution,
-                            const ::neurun::graph::operand::DataType &type, int32_t index,
-                            const nnfw::util::feature::Shape &shape, const void *buffer,
-                            size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::FeatureSource<float>>(
-          index, shape, reinterpret_cast<const float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::FeatureSource<int32_t>>(
-          index, shape, reinterpret_cast<const int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::FeatureSource<uint32_t>>(
-          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::FeatureSource<uint8_t>>(
-          index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsVector(ANeuralNetworksExecution *execution,
-                         const ::neurun::graph::operand::DataType &type, int32_t index, int32_t len,
-                         void *buffer, size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::VectorSink<float>>(index, len,
-                                                         reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::VectorSink<int32_t>>(
-          index, len, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::VectorSink<uint32_t>>(
-          index, len, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::VectorSink<uint8_t>>(
-          index, len, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsMatrix(ANeuralNetworksExecution *execution,
-                         const ::neurun::graph::operand::DataType &type, int32_t index,
-                         const nnfw::util::matrix::Shape &shape, void *buffer, size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::MatrixSink<float>>(index, shape.H, shape.W,
-                                                         reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::MatrixSink<int32_t>>(
-          index, shape.H, shape.W, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::MatrixSink<uint32_t>>(
-          index, shape.H, shape.W, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::MatrixSink<uint8_t>>(
-          index, shape.H, shape.W, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsTensor3D(ANeuralNetworksExecution *execution,
-                           const ::neurun::graph::operand::DataType &type, int32_t index,
-                           const nnfw::util::tensor::Shape &shape, void *buffer, size_t length)
-{
-  assert(shape.rank() == 3);
-
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::Tensor3DSink<float>>(
-          index, shape, reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::Tensor3DSink<int32_t>>(
-          index, shape, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::Tensor3DSink<uint32_t>>(
-          index, shape, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::Tensor3DSink<uint8_t>>(
-          index, shape, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsFeature(ANeuralNetworksExecution *execution,
-                          const ::neurun::graph::operand::DataType &type, int32_t index,
-                          const nnfw::util::feature::Shape &shape, void *buffer, size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::FeatureSink<float>>(
-          index, shape, reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::FeatureSink<int32_t>>(
-          index, shape, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::FeatureSink<uint32_t>>(
-          index, shape, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::FeatureSink<uint8_t>>(
-          index, shape, reinterpret_cast<uint8_t *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer),
+                                                     length);
       break;
     default:
       throw std::runtime_error("Not supported, yet");
@@ -327,34 +134,7 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
   const auto data_type = operands.at(operand_index).typeInfo().type();
   const auto operand_shape = operands.at(operand_index).shape();
 
-  if (operand_shape.rank() == 1)
-  {
-    const auto len = operand_shape.dim(0);
-
-    sourceAsVector(execution, data_type, index, len, buffer, length);
-  }
-  else if (operand_shape.rank() == 2)
-  {
-    const auto &input_shape = operand_shape.asMatrix();
-
-    sourceAsMatrix(execution, data_type, index, input_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 3)
-  {
-    const auto &input_shape = operand_shape.asTensor();
-
-    sourceAsTensor3D(execution, data_type, index, input_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 4)
-  {
-    const auto &input_shape = operand_shape.asFeature();
-
-    sourceAsFeature(execution, data_type, index, input_shape, buffer, length);
-  }
-  else
-  {
-    throw std::runtime_error{"Not supported, yet"};
-  }
+  source(execution, data_type, index, buffer, length);
 
   return ANEURALNETWORKS_NO_ERROR;
 }
@@ -382,34 +162,7 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int3
   const auto data_type = operands.at(operand_index).typeInfo().type();
   const auto operand_shape = operands.at(operand_index).shape();
 
-  if (operand_shape.rank() == 1)
-  {
-    const auto len = operand_shape.dim(0);
-
-    sinkAsVector(execution, data_type, index, len, buffer, length);
-  }
-  else if (operand_shape.rank() == 2)
-  {
-    const auto &output_shape = operand_shape.asMatrix();
-
-    sinkAsMatrix(execution, data_type, index, output_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 3)
-  {
-    const auto &output_shape = operand_shape.asTensor();
-
-    sinkAsTensor3D(execution, data_type, index, output_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 4)
-  {
-    const auto &output_shape = operand_shape.asFeature();
-
-    sinkAsFeature(execution, data_type, index, output_shape, buffer, length);
-  }
-  else
-  {
-    throw std::runtime_error{"Not supported, yet"};
-  }
+  sink(execution, data_type, index, buffer, length);
 
   return ANEURALNETWORKS_NO_ERROR;
 }
index 374201e..0bef1a5 100644 (file)
@@ -38,7 +38,7 @@ private:
 
 public:
   // TODO Use InputIndex instead of int
-  void source(int n, std::unique_ptr<neurun::exec::Source> &&source)
+  void source(int n, std::unique_ptr<neurun::exec::ISource> &&source)
   {
     _sources.at(n) = std::move(source);
   }
@@ -48,22 +48,22 @@ public:
   }
 
 public:
-  const neurun::exec::Source &source(int n) const { return *(_sources.at(n)); }
+  const neurun::exec::ISource &source(int n) const { return *(_sources.at(n)); }
 
 public:
   // TODO Use OutputIndex instead of int
-  void sink(int n, std::unique_ptr<neurun::exec::Sink> &&sink) { _sinks.at(n) = std::move(sink); }
+  void sink(int n, std::unique_ptr<neurun::exec::ISink> &&sink) { _sinks.at(n) = std::move(sink); }
   template <typename T, typename... Args> void sink(int n, Args &&... args)
   {
     sink(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
   }
 
 public:
-  const neurun::exec::Sink &sink(int n) const { return *(_sinks.at(n)); }
+  const neurun::exec::ISink &sink(int n) const { return *(_sinks.at(n)); }
 
 private:
-  std::vector<std::unique_ptr<neurun::exec::Source>> _sources;
-  std::vector<std::unique_ptr<neurun::exec::Sink>> _sinks;
+  std::vector<std::unique_ptr<neurun::exec::ISource>> _sources;
+  std::vector<std::unique_ptr<neurun::exec::ISink>> _sinks;
 };
 
 #endif