From 8e0b51f535521b73e701d64e7a11b3c333c212f5 Mon Sep 17 00:00:00 2001
From: 김수진/동작제어Lab(SR)/Engineer/삼성전자
Date: Wed, 17 Oct 2018 15:01:53 +0900
Subject: [PATCH] [neurun] Remove dimension-specific Source/Sink classes (#3213)

Related: #2874
Part of: #3178

Now that #3208 and #3187 have been merged, we can remove the `Source`/`Sink`
classes that were split out per dimension.

Signed-off-by: sjsujinkim
---
 runtimes/neurun/src/exec/Sink.h                  | 163 +------------
 runtimes/neurun/src/exec/Source.h                | 165 +------------
 runtimes/neurun/src/frontend/execution.cc        | 295 ++---------------------
 runtimes/neurun/src/frontend/wrapper/execution.h |  12 +-
 4 files changed, 39 insertions(+), 596 deletions(-)

diff --git a/runtimes/neurun/src/exec/Sink.h b/runtimes/neurun/src/exec/Sink.h
index 5837e8f..3cc7a05 100644
--- a/runtimes/neurun/src/exec/Sink.h
+++ b/runtimes/neurun/src/exec/Sink.h
@@ -21,37 +21,22 @@
 #include

-#include
-#include
-#include
-#include
-
-#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
-#include "util/feature/nhwc/View.h"
-#include "util/feature/nhwc/Reader.h"
-
 namespace neurun
 {
 namespace exec
 {

-struct Sink
+struct ISink
 {
-  virtual ~Sink() = default;
+  virtual ~ISink() = default;
   virtual void pull(::arm_compute::ITensor &tensor) const = 0;
 };

-//
-// VectorSink
-//
-template <typename T> class VectorSink final : public Sink
+template <typename T> class Sink final : public ISink
 {
 public:
-  VectorSink(const int32_t vlen, T *base, const size_t size) : _vlen{vlen}, _base{base}, _size{size}
-  {
-    assert(size == _vlen * sizeof(T));
-  }
+  Sink(T *base, const size_t size) : _base{base}, _size{size} {}

 public:
   void pull(::arm_compute::ITensor &tensor) const override
@@ -60,146 +45,6 @@ public:
   }

 private:
-  const int32_t _vlen;
-  T *const _base;
-  const size_t _size;
-};
-
-//
-// MatrixSink
-//
-template <typename T> class MatrixSink final : public Sink
-{
-public:
-  MatrixSink(const int32_t H, const int32_t W, T *base, const size_t size)
-      : _height{H}, _width{W}, _base{base}, _size{size}
-  {
-    assert(size == _height * _width * sizeof(T));
-  }
-
-public:
-  void pull(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(_base, tensor.buffer(), _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &y = window[Window::DimY];
-      for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-      {
-        memcpy(_base + h * _width, it.ptr(), _width * sizeof(T));
-      }
-    }
-  }
-
-private:
-  const int32_t _height;
-  const int32_t _width;
-
-private:
-  T *const _base;
-  const size_t _size;
-};
-
-//
-// Tensor3DSink
-//
-template <typename T> class Tensor3DSink final : public Sink
-{
-public:
-  Tensor3DSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.element_nums() * sizeof(T));
-  }
-
-public:
-  void pull(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(_base, tensor.buffer(), _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      const int32_t height_width = _shape.dim(1) * _shape.dim(2);
-      const int32_t width = _shape.dim(2);
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &z = window[Window::DimZ];
-      const auto &y = window[Window::DimY];
-      for (auto c = z.start(); c < z.end(); c += z.step(), it.increment(Window::DimZ))
-      {
-        for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-        {
-          memcpy(_base + c * height_width + h * width, it.ptr(), width * sizeof(T));
-        }
-      }
-    }
-  }
-
-private:
-  const nnfw::util::tensor::Shape _shape;
-
-private:
-  T *const _base;
-  const size_t _size;
-};
-
-//
-// FeatureSink
-//
-template <typename T> class FeatureSink final : public Sink
-{
-public:
-  FeatureSink(const nnfw::util::feature::Shape &shape, T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
-  }
-
-public:
-  void pull(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(_base, tensor.buffer(), _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      const util::feature::nchw::View<T> from{&tensor};
-      util::feature::nhwc::View<T> into{_shape, _base, _size};
-
-      ::nnfw::util::feature::iterate(_shape)
-          << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
-               const auto value = from.at(bat, ch, row, col);
-               into.at(bat, ch, row, col) = value;
-             };
-    }
-  }
-
-private:
-  const nnfw::util::feature::Shape _shape;
   T *const _base;
   const size_t _size;
 };
diff --git a/runtimes/neurun/src/exec/Source.h b/runtimes/neurun/src/exec/Source.h
index b071921..47d1c73 100644
--- a/runtimes/neurun/src/exec/Source.h
+++ b/runtimes/neurun/src/exec/Source.h
@@ -19,42 +19,24 @@
 #include
-#include
-
-#include
-#include
-#include
-#include
-
-#include "backend/cpu/operand/Tensor.h" // TODO Remove this dependency to backend
-#include "util/feature/nhwc/Reader.h"
-#include "util/feature/nhwc/View.h"
-
-#include "util/feature/nchw/View.h"
+#include

 namespace neurun
 {
 namespace exec
 {

-struct Source
+struct ISource
 {
-  virtual ~Source() = default;
+  virtual ~ISource() = default;
   virtual void push(::arm_compute::ITensor &tensor) const = 0;
 };

-//
-// VectorSource
-//
-template <typename T> class VectorSource final : public Source
+template <typename T> class Source final : public ISource
 {
 public:
-  VectorSource(const int32_t vlen, const T *base, const size_t size)
-      : _vlen{vlen}, _base{base}, _size{size}
-  {
-    assert(size == _vlen * sizeof(T));
-  }
+  Source(const T *base, const size_t size) : _base{base}, _size{size} {}

 public:
   void push(::arm_compute::ITensor &tensor) const override
@@ -63,143 +45,6 @@ public:
   }

 private:
-  const int32_t _vlen;
-  const T *const _base;
-  const size_t _size;
-};
-
-//
-// MatrixSource
-//
-template <typename T> class MatrixSource final : public Source
-{
-public:
-  MatrixSource(const nnfw::util::matrix::Shape &shape, const T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.H * _shape.W * sizeof(T));
-  }
-
-public:
-  void push(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(tensor.buffer(), _base, _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &y = window[Window::DimY];
-      for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-      {
-        memcpy(it.ptr(), _base + h * _shape.W, _shape.W * sizeof(T));
-      }
-    }
-  }
-
-private:
-  const nnfw::util::matrix::Shape _shape;
-  const T *const _base;
-  const size_t _size;
-};
-
-//
-// Tensor3DSource
-//
-template <typename T> class Tensor3DSource final : public Source
-{
-public:
-  Tensor3DSource(const nnfw::util::tensor::Shape &shape, const T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(size == _shape.element_nums() * sizeof(T));
-  }
-
-public:
-  void push(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(tensor.buffer(), _base, _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      using ::arm_compute::Window;
-      using ::arm_compute::Iterator;
-
-      const int32_t height_width = _shape.dim(1) * _shape.dim(2);
-      const int32_t width = _shape.dim(2);
-
-      Window window;
-      window.use_tensor_dimensions(tensor.info()->tensor_shape(), Window::DimY);
-
-      Iterator it(&tensor, window);
-
-      const auto &z = window[Window::DimZ];
-      const auto &y = window[Window::DimY];
-      for (auto c = z.start(); c < z.end(); c += z.step(), it.increment(Window::DimZ))
-      {
-        for (auto h = y.start(); h < y.end(); h += y.step(), it.increment(Window::DimY))
-        {
-          memcpy(it.ptr(), _base + c * height_width + h * width, width * sizeof(T));
-        }
-      }
-    }
-  }
-
-private:
-  const nnfw::util::tensor::Shape _shape;
-
-private:
-  const T *const _base;
-  const size_t _size;
-};
-
-//
-// FeatureSource
-//
-template <typename T> class FeatureSource final : public Source
-{
-public:
-  FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
-      : _shape{shape}, _base{base}, _size{size}
-  {
-    assert(_size == _shape.N * _shape.H * _shape.W * _shape.C * sizeof(T));
-  }
-
-public:
-  void push(::arm_compute::ITensor &tensor) const override
-  {
-    // TODO: This is just workaround codes, It needs to refactor.
-    if (typeid(tensor) == typeid(neurun::backend::cpu::operand::Tensor))
-    {
-      memcpy(tensor.buffer(), _base, _size);
-    }
-    else if (typeid(tensor) == typeid(::arm_compute::CLTensor))
-    {
-      const util::feature::nhwc::Reader<T> from{_shape, _base, _size};
-      util::feature::nchw::View<T> into{&tensor};
-
-      ::nnfw::util::feature::iterate(_shape)
-          << [&](uint32_t bat, uint32_t ch, uint32_t row, uint32_t col) {
-               const auto value = from.at(bat, ch, row, col);
-               into.at(bat, ch, row, col) = value;
-             };
-    }
-  }
-
-private:
-  const nnfw::util::feature::Shape _shape;
   const T *const _base;
   const size_t _size;
 };
diff --git a/runtimes/neurun/src/frontend/execution.cc b/runtimes/neurun/src/frontend/execution.cc
index 30c9451..b4261d9 100644
--- a/runtimes/neurun/src/frontend/execution.cc
+++ b/runtimes/neurun/src/frontend/execution.cc
@@ -25,30 +25,30 @@
 #include "graph/operand/DataType.h"
 #include "graph/operand/Index.h"

-inline void sourceAsVector(ANeuralNetworksExecution *execution,
-                           const ::neurun::graph::operand::DataType &type, int32_t index,
-                           int32_t len, const void *buffer, size_t length)
+inline void source(ANeuralNetworksExecution *execution,
+                   const ::neurun::graph::operand::DataType &type, int32_t index,
+                   const void *buffer, size_t length)
 {
   using ::neurun::graph::operand::DataType;
   switch (type)
   {
     case DataType::SCALAR_FLOAT32:
     case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::VectorSource<float>>(
-          index, len, reinterpret_cast<const float *>(buffer), length);
+      execution->source<::neurun::exec::Source<float>>(
+          index, reinterpret_cast<const float *>(buffer), length);
       break;
     case DataType::SCALAR_INT32:
     case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::VectorSource<int32_t>>(
-          index, len, reinterpret_cast<const int32_t *>(buffer), length);
+      execution->source<::neurun::exec::Source<int32_t>>(
+          index, reinterpret_cast<const int32_t *>(buffer), length);
       break;
     case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::VectorSource<uint32_t>>(
-          index, len, reinterpret_cast<const uint32_t *>(buffer), length);
+      execution->source<::neurun::exec::Source<uint32_t>>(
+          index, reinterpret_cast<const uint32_t *>(buffer), length);
       break;
     case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::VectorSource<uint8_t>>(
-          index, len, reinterpret_cast<const uint8_t *>(buffer), length);
+      execution->source<::neurun::exec::Source<uint8_t>>(
+          index, reinterpret_cast<const uint8_t *>(buffer), length);
       break;
     default:
       throw std::runtime_error("Not supported, yet");
@@ -56,223 +56,30 @@ inline void sourceAsVector(ANeuralNetworksExecution *execution,
   }
 }

-inline void sourceAsMatrix(ANeuralNetworksExecution *execution,
-                           const ::neurun::graph::operand::DataType &type, int32_t index,
-                           const nnfw::util::matrix::Shape &shape, const void *buffer,
-                           size_t length)
+inline void sink(ANeuralNetworksExecution *execution,
+                 const ::neurun::graph::operand::DataType &type, int32_t index, void *buffer,
+                 size_t length)
 {
   using ::neurun::graph::operand::DataType;
   switch (type)
   {
     case DataType::SCALAR_FLOAT32:
     case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::MatrixSource<float>>(
-          index, shape, reinterpret_cast<const float *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<float>>(index, reinterpret_cast<float *>(buffer),
+                                                   length);
       break;
     case DataType::SCALAR_INT32:
     case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::MatrixSource<int32_t>>(
-          index, shape, reinterpret_cast<const int32_t *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer),
+                                                     length);
       break;
     case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::MatrixSource<uint32_t>>(
-          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer),
+                                                      length);
       break;
     case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::MatrixSource<uint8_t>>(
-          index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sourceAsTensor3D(ANeuralNetworksExecution *execution,
-                             const ::neurun::graph::operand::DataType &type, int32_t index,
-                             const nnfw::util::tensor::Shape &shape, const void *buffer,
-                             size_t length)
-{
-  assert(shape.rank() == 3);
-
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::Tensor3DSource<float>>(
-          index, shape, reinterpret_cast<const float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::Tensor3DSource<int32_t>>(
-          index, shape, reinterpret_cast<const int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::Tensor3DSource<uint32_t>>(
-          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::Tensor3DSource<uint8_t>>(
-          index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sourceAsFeature(ANeuralNetworksExecution *execution,
-                            const ::neurun::graph::operand::DataType &type, int32_t index,
-                            const nnfw::util::feature::Shape &shape, const void *buffer,
-                            size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->source<::neurun::exec::FeatureSource<float>>(
-          index, shape, reinterpret_cast<const float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->source<::neurun::exec::FeatureSource<int32_t>>(
-          index, shape, reinterpret_cast<const int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->source<::neurun::exec::FeatureSource<uint32_t>>(
-          index, shape, reinterpret_cast<const uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->source<::neurun::exec::FeatureSource<uint8_t>>(
-          index, shape, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsVector(ANeuralNetworksExecution *execution,
-                         const ::neurun::graph::operand::DataType &type, int32_t index, int32_t len,
-                         void *buffer, size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::VectorSink<float>>(index, len,
-                                                         reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::VectorSink<int32_t>>(
-          index, len, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::VectorSink<uint32_t>>(
-          index, len, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::VectorSink<uint8_t>>(
-          index, len, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsMatrix(ANeuralNetworksExecution *execution,
-                         const ::neurun::graph::operand::DataType &type, int32_t index,
-                         const nnfw::util::matrix::Shape &shape, void *buffer, size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::MatrixSink<float>>(index, shape.H, shape.W,
-                                                         reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::MatrixSink<int32_t>>(
-          index, shape.H, shape.W, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::MatrixSink<uint32_t>>(
-          index, shape.H, shape.W, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::MatrixSink<uint8_t>>(
-          index, shape.H, shape.W, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsTensor3D(ANeuralNetworksExecution *execution,
-                           const ::neurun::graph::operand::DataType &type, int32_t index,
-                           const nnfw::util::tensor::Shape &shape, void *buffer, size_t length)
-{
-  assert(shape.rank() == 3);
-
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::Tensor3DSink<float>>(
-          index, shape, reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::Tensor3DSink<int32_t>>(
-          index, shape, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::Tensor3DSink<uint32_t>>(
-          index, shape, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::Tensor3DSink<uint8_t>>(
-          index, shape, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
-  }
-}
-
-inline void sinkAsFeature(ANeuralNetworksExecution *execution,
-                          const ::neurun::graph::operand::DataType &type, int32_t index,
-                          const nnfw::util::feature::Shape &shape, void *buffer, size_t length)
-{
-  using ::neurun::graph::operand::DataType;
-  switch (type)
-  {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      execution->sink<::neurun::exec::FeatureSink<float>>(
-          index, shape, reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      execution->sink<::neurun::exec::FeatureSink<int32_t>>(
-          index, shape, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      execution->sink<::neurun::exec::FeatureSink<uint32_t>>(
-          index, shape, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::TENSOR_QUANT8_ASYMM:
-      execution->sink<::neurun::exec::FeatureSink<uint8_t>>(
-          index, shape, reinterpret_cast<uint8_t *>(buffer), length);
+      execution->sink<::neurun::exec::Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer),
+                                                     length);
       break;
     default:
       throw std::runtime_error("Not supported, yet");
@@ -327,34 +134,7 @@ int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution *execution, int32
   const auto data_type = operands.at(operand_index).typeInfo().type();
   const auto operand_shape = operands.at(operand_index).shape();

-  if (operand_shape.rank() == 1)
-  {
-    const auto len = operand_shape.dim(0);
-
-    sourceAsVector(execution, data_type, index, len, buffer, length);
-  }
-  else if (operand_shape.rank() == 2)
-  {
-    const auto &input_shape = operand_shape.asMatrix();
-
-    sourceAsMatrix(execution, data_type, index, input_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 3)
-  {
-    const auto &input_shape = operand_shape.asTensor();
-
-    sourceAsTensor3D(execution, data_type, index, input_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 4)
-  {
-    const auto &input_shape = operand_shape.asFeature();
-
-    sourceAsFeature(execution, data_type, index, input_shape, buffer, length);
-  }
-  else
-  {
-    throw std::runtime_error{"Not supported, yet"};
-  }
+  source(execution, data_type, index, buffer, length);

   return ANEURALNETWORKS_NO_ERROR;
 }
@@ -382,34 +162,7 @@ int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution *execution, int3
   const auto data_type = operands.at(operand_index).typeInfo().type();
   const auto operand_shape = operands.at(operand_index).shape();

-  if (operand_shape.rank() == 1)
-  {
-    const auto len = operand_shape.dim(0);
-
-    sinkAsVector(execution, data_type, index, len, buffer, length);
-  }
-  else if (operand_shape.rank() == 2)
-  {
-    const auto &output_shape = operand_shape.asMatrix();
-
-    sinkAsMatrix(execution, data_type, index, output_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 3)
-  {
-    const auto &output_shape = operand_shape.asTensor();
-
-    sinkAsTensor3D(execution, data_type, index, output_shape, buffer, length);
-  }
-  else if (operand_shape.rank() == 4)
-  {
-    const auto &output_shape = operand_shape.asFeature();
-
-    sinkAsFeature(execution, data_type, index, output_shape, buffer, length);
-  }
-  else
-  {
-    throw std::runtime_error{"Not supported, yet"};
-  }
+  sink(execution, data_type, index, buffer, length);

   return ANEURALNETWORKS_NO_ERROR;
 }
diff --git a/runtimes/neurun/src/frontend/wrapper/execution.h b/runtimes/neurun/src/frontend/wrapper/execution.h
index 374201e..0bef1a5 100644
--- a/runtimes/neurun/src/frontend/wrapper/execution.h
+++ b/runtimes/neurun/src/frontend/wrapper/execution.h
@@ -38,7 +38,7 @@ private:

 public:
   // TODO Use InputIndex instead of int
-  void source(int n, std::unique_ptr<neurun::exec::Source> &&source)
+  void source(int n, std::unique_ptr<neurun::exec::ISource> &&source)
   {
     _sources.at(n) = std::move(source);
   }
@@ -48,22 +48,22 @@ public:
   }

 public:
-  const neurun::exec::Source &source(int n) const { return *(_sources.at(n)); }
+  const neurun::exec::ISource &source(int n) const { return *(_sources.at(n)); }

 public:
   // TODO Use OutputIndex instead of int
-  void sink(int n, std::unique_ptr<neurun::exec::Sink> &&sink) { _sinks.at(n) = std::move(sink); }
+  void sink(int n, std::unique_ptr<neurun::exec::ISink> &&sink) { _sinks.at(n) = std::move(sink); }
   template <typename T, typename... Args> void sink(int n, Args &&... args)
   {
     sink(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
   }

 public:
-  const neurun::exec::Sink &sink(int n) const { return *(_sinks.at(n)); }
+  const neurun::exec::ISink &sink(int n) const { return *(_sinks.at(n)); }

 private:
-  std::vector<std::unique_ptr<neurun::exec::Source>> _sources;
-  std::vector<std::unique_ptr<neurun::exec::Sink>> _sinks;
+  std::vector<std::unique_ptr<neurun::exec::ISource>> _sources;
+  std::vector<std::unique_ptr<neurun::exec::ISink>> _sinks;
 };
-- 
2.7.4
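
Appendix (not part of the patch): a minimal, self-contained sketch of the shape-agnostic pattern this change converges on, namely a single templated Sink behind an ISink interface that copies the whole output buffer with memcpy and is selected per operand data type only. The Buffer struct below stands in for ::arm_compute::ITensor, and makeSink is a hypothetical helper; both are illustrative assumptions, not neurun API.

#include <cstdint>
#include <cstring>
#include <memory>
#include <stdexcept>
#include <vector>

// Stand-in for ::arm_compute::ITensor (illustrative only).
struct Buffer
{
  std::vector<uint8_t> data;
};

// Mirrors the shape-agnostic ISink/Sink<T> introduced by this patch.
struct ISink
{
  virtual ~ISink() = default;
  virtual void pull(const Buffer &tensor) const = 0;
};

template <typename T> class Sink final : public ISink
{
public:
  Sink(T *base, size_t size) : _base{base}, _size{size} {}

  // Whole-buffer copy; no per-rank layout handling is needed any more.
  void pull(const Buffer &tensor) const override { std::memcpy(_base, tensor.data.data(), _size); }

private:
  T *const _base;
  const size_t _size;
};

enum class DataType
{
  TENSOR_FLOAT32,
  TENSOR_INT32,
  TENSOR_QUANT8_ASYMM
};

// Mirrors the simplified frontend dispatch: pick Sink<T> by data type alone.
std::unique_ptr<ISink> makeSink(DataType type, void *buffer, size_t length)
{
  switch (type)
  {
    case DataType::TENSOR_FLOAT32:
      return std::make_unique<Sink<float>>(reinterpret_cast<float *>(buffer), length);
    case DataType::TENSOR_INT32:
      return std::make_unique<Sink<int32_t>>(reinterpret_cast<int32_t *>(buffer), length);
    case DataType::TENSOR_QUANT8_ASYMM:
      return std::make_unique<Sink<uint8_t>>(reinterpret_cast<uint8_t *>(buffer), length);
    default:
      throw std::runtime_error("Not supported, yet");
  }
}

int main()
{
  Buffer out;
  out.data = {0, 0, 128, 63}; // 1.0f in little-endian IEEE-754
  float result = 0.0f;
  makeSink(DataType::TENSOR_FLOAT32, &result, sizeof(result))->pull(out);
  return result == 1.0f ? 0 : 1;
}

The input side collapses the same way: Source<T> is simply the const mirror of Sink<T>, which is why the rank-specific VectorSource/MatrixSource/Tensor3DSource/FeatureSource variants could be deleted once layout permutation moved out of the frontend (#3208, #3187).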