This commit introduces CopySource and CopySink, which copy model input/output buffers to and from backend tensors while taking tensor padding and NHWC/NCHW layouts into account.
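CopySource pushes a model input buffer into a backend tensor and CopySink pulls
a backend tensor back into a model output buffer, using a plain memcpy when the
tensor has no padding and otherwise copying row by row through
Coordinate4D/calcOffset.

A minimal usage sketch (the execution object, index and buffer names, and the
float element type are hypothetical; only the source/sink helpers changed in
this patch are assumed):

    // Bind a model input: CopySource::push() copies user_buffer into the
    // backend tensor, honoring its padding and NHWC/NCHW layout.
    execution->source<CopySource<float>>(input_index, user_buffer, buffer_size,
                                         operand.shape());

    // Bind a model output: CopySink::pull() copies the backend tensor back
    // into user_buffer in the same way.
    execution->sink<CopySink<float>>(output_index, user_buffer, buffer_size,
                                     operand.shape());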
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
public:
/**
+ * @brief Set batch offset
+ * @param[in] n Batch offset
+ */
+ void n(int32_t n) { _n = n; }
+ /**
+ * @brief Set height offset
+ * @param[in] h Height offset
+ */
+ void h(int32_t h) { _h = h; }
+ /**
+ * @brief Set width offset
+ * @param[in] w Width offset
+ */
+ void w(int32_t w) { _w = w; }
+ /**
+ * @brief Set channel offset
+ * @param[in] c Channel offset
+ */
+ void c(int32_t c) { _c = c; }
+
+public:
+ /**
* @brief Return batch offset
* @return Batch offset
*/
return;
}
- source<Source<T>>(index, reinterpret_cast<const T *>(buffer), length);
+ source<CopySource<T>>(index, buffer, length, operand.shape());
}
template <typename T> void sink(const model::IOIndex &index, void *buffer, size_t length)
return;
}
- sink<Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer), length);
+ sink<CopySink<T>>(index, buffer, length, operand.shape());
}
template <typename T, typename... Args> void source(const model::IOIndex &index, Args &&... args)
#include <cassert>
#include "cpp14/memory.h"
+#include "graph/operand/Layout.h"
#include "util/feature/nhwc/View.h"
#include "util/feature/nchw/View.h"
#include <misc/feature/IndexIterator.h>
virtual void pull(::neurun::backend::operand::ITensor &tensor) const = 0;
};
-template <typename T> class Sink final : public ISink
-{
-public:
- Sink(T *base, const size_t size) : _base{base}, _size{size} {}
-
-public:
- void pull(::neurun::backend::operand::ITensor &tensor) const override
- {
- memcpy(_base, tensor.buffer(), _size);
- }
-
-private:
- T *const _base;
- const size_t _size;
-};
-
template <typename T> class PermutateSink final : public ISink
{
public:
const model::Shape _shape;
};
+// For now, only an NHWC-format frontend (NNAPI) is supported
+template <typename T> class CopySink final : public ISink
+{
+public:
+ CopySink(void *output_buffer, const size_t &output_size, const model::Shape &shape)
+ : _output_buffer{reinterpret_cast<T *>(output_buffer)}, _output_size{output_size},
+ _shape{shape}
+ {
+ }
+
+public:
+ void pull(neurun::backend::operand::ITensor &tensor) const override
+ {
+ auto input_buffer = tensor.buffer();
+ auto rank = _shape.rank();
+
+ if (!tensor.has_padding() && rank <= 4)
+ {
+ memcpy(_output_buffer, input_buffer, _output_size);
+ return;
+ }
+
+ switch (rank)
+ {
+ case 0:
+ case 1:
+ {
+ memcpy(_output_buffer, input_buffer, _output_size);
+ break;
+ }
+ case 2:
+ {
+ const int32_t copy_len = _shape.dim(1);
+
+        for (auto i = 0; i < _shape.dim(0); ++i) // w
+ {
+ neurun::util::feature::Coordinate4D coord;
+ if (tensor.layout() == neurun::graph::operand::Layout::NHWC)
+ {
+ coord.w(i);
+ }
+ else if (tensor.layout() == neurun::graph::operand::Layout::NCHW)
+ {
+ coord.h(i);
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+ memcpy(_output_buffer + i * copy_len, input_buffer + tensor.calcOffset(coord),
+ copy_len * sizeof(T));
+ }
+ break;
+ }
+ case 3:
+ {
+ const int32_t width = _shape.dim(1);
+ const int32_t copy_len = _shape.dim(2);
+
+        for (auto i = 0; i < _shape.dim(0); ++i) // h
+ {
+          for (auto j = 0; j < _shape.dim(1); ++j) // w
+ {
+ neurun::util::feature::Coordinate4D coord;
+ if (tensor.layout() == neurun::graph::operand::Layout::NHWC)
+ {
+ coord.h(i);
+ coord.w(j);
+ }
+ else if (tensor.layout() == neurun::graph::operand::Layout::NCHW)
+ {
+ coord.c(i);
+ coord.h(j);
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+ memcpy(_output_buffer + i * width * copy_len + j * copy_len,
+ input_buffer + tensor.calcOffset(coord), copy_len * sizeof(T));
+ }
+ }
+ break;
+ }
+ case 4:
+ {
+ const int32_t height = _shape.dim(1);
+ const int32_t width = _shape.dim(2);
+ const int32_t copy_len = _shape.dim(3);
+
+        for (auto i = 0; i < _shape.dim(0); ++i) // n
+ {
+          for (auto j = 0; j < _shape.dim(1); ++j) // h
+ {
+            for (auto k = 0; k < _shape.dim(2); ++k) // w
+ {
+ neurun::util::feature::Coordinate4D coord;
+ if (tensor.layout() == neurun::graph::operand::Layout::NHWC)
+ {
+ coord.n(i);
+ coord.h(j);
+ coord.w(k);
+ }
+ else if (tensor.layout() == neurun::graph::operand::Layout::NCHW)
+ {
+ coord.n(i);
+ coord.c(j);
+ coord.h(k);
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+ memcpy(_output_buffer + i * height * width * copy_len + j * width * copy_len +
+ k * copy_len,
+ input_buffer + tensor.calcOffset(coord), copy_len * sizeof(T));
+ }
+ }
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI");
+ break;
+ }
+ }
+
+private:
+ T *_output_buffer;
+ const size_t _output_size;
+ const model::Shape _shape;
+};
+
} // namespace exec
} // namespace neurun
#include <cassert>
#include "cpp14/memory.h"
+#include "graph/operand/Layout.h"
#include "util/feature/nchw/View.h"
#include "util/feature/nhwc/Reader.h"
#include "util/feature/Coordinate4D.h"
virtual void push(::neurun::backend::operand::ITensor &tensor) const = 0;
};
-template <typename T> class Source final : public ISource
-{
-public:
- Source(const T *base, const size_t size) : _base{base}, _size{size} {}
-
-public:
- void push(::neurun::backend::operand::ITensor &tensor) const override
- {
- memcpy(tensor.buffer(), _base, _size);
- }
-
-private:
- const T *const _base;
- const size_t _size;
-};
-
template <typename T> class PermutateSource final : public ISource
{
public:
const model::Shape _shape;
};
+// For now, only an NHWC-format frontend (NNAPI) is supported
+template <typename T> class CopySource final : public ISource
+{
+public:
+ CopySource(const void *input_buffer, const size_t &input_size, const model::Shape &shape)
+ : _input_buffer{reinterpret_cast<const T *>(input_buffer)}, _input_size{input_size},
+ _shape{shape}
+ {
+ }
+
+public:
+ void push(neurun::backend::operand::ITensor &tensor) const override
+ {
+ auto output_buffer = tensor.buffer();
+ auto rank = _shape.rank();
+
+ if (!tensor.has_padding() && rank <= 4)
+ {
+ memcpy(output_buffer, _input_buffer, _input_size);
+ return;
+ }
+
+ switch (rank)
+ {
+ case 0:
+ case 1:
+ {
+ memcpy(output_buffer, _input_buffer, _input_size);
+ break;
+ }
+ case 2:
+ {
+ const int32_t copy_len = _shape.dim(1);
+
+ for (auto i = 0; i < _shape.dim(0); ++i) // w
+ {
+ neurun::util::feature::Coordinate4D coord;
+ if (tensor.layout() == neurun::graph::operand::Layout::NHWC)
+ {
+ coord.w(i);
+ }
+ else if (tensor.layout() == neurun::graph::operand::Layout::NCHW)
+ {
+ coord.h(i);
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+ memcpy(output_buffer + tensor.calcOffset(coord), _input_buffer + i * copy_len,
+ copy_len * sizeof(T));
+ }
+ break;
+ }
+ case 3:
+ {
+ const int32_t width = _shape.dim(1);
+ const int32_t copy_len = _shape.dim(2);
+
+ for (auto i = 0; i < _shape.dim(0); ++i) // h
+ {
+ for (auto j = 0; j < _shape.dim(1); ++j) // w
+ {
+ neurun::util::feature::Coordinate4D coord;
+ if (tensor.layout() == neurun::graph::operand::Layout::NHWC)
+ {
+ coord.h(i);
+ coord.w(j);
+ }
+ else if (tensor.layout() == neurun::graph::operand::Layout::NCHW)
+ {
+ coord.c(i);
+ coord.h(j);
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+ memcpy(output_buffer + tensor.calcOffset(coord),
+ _input_buffer + i * width * copy_len + j * copy_len, copy_len * sizeof(T));
+ }
+ }
+ break;
+ }
+ case 4:
+ {
+ const int32_t height = _shape.dim(1);
+ const int32_t width = _shape.dim(2);
+ const int32_t copy_len = _shape.dim(3);
+ for (auto i = 0; i < _shape.dim(0); ++i) // n
+ {
+ for (auto j = 0; j < _shape.dim(1); ++j) // h
+ {
+ for (auto k = 0; k < _shape.dim(2); ++k) // w
+ {
+ neurun::util::feature::Coordinate4D coord;
+ if (tensor.layout() == neurun::graph::operand::Layout::NHWC)
+ {
+ coord.n(i);
+ coord.h(j);
+ coord.w(k);
+ }
+ else if (tensor.layout() == neurun::graph::operand::Layout::NCHW)
+ {
+ coord.n(i);
+ coord.c(j);
+ coord.h(k);
+ }
+ else
+ {
+ throw std::runtime_error("Wrong Layout");
+ }
+ memcpy(output_buffer + tensor.calcOffset(coord),
+ _input_buffer + i * height * width * copy_len + j * width * copy_len +
+ k * copy_len,
+ copy_len * sizeof(T));
+ }
+ }
+ }
+ break;
+ }
+ default:
+ throw std::runtime_error("NYI");
+ break;
+ }
+ }
+
+private:
+ const T *_input_buffer;
+ const size_t _input_size;
+ const model::Shape _shape;
+};
+
} // namespace exec
} // namespace neurun