From b37be196d6109ccbc86f3a670603d2705ee7310b Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=98=A4=ED=98=95=EC=84=9D/On-Device=20Lab=28SR=29/Staff?=
 =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
 <hseok82.oh@samsung.com>
Date: Tue, 26 Feb 2019 16:48:35 +0900
Subject: [PATCH] Set input and output for interpreter (#4498)

Collect input and output information for interpreter
Prepare set input and output

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
---
 runtimes/neurun/src/exec/interp/Interpreter.cc | 65 ++++++++++++++++++++++----
 runtimes/neurun/src/exec/interp/Interpreter.h  | 23 ++++++++-
 2 files changed, 79 insertions(+), 9 deletions(-)

diff --git a/runtimes/neurun/src/exec/interp/Interpreter.cc b/runtimes/neurun/src/exec/interp/Interpreter.cc
index b635318..f1d207b 100644
--- a/runtimes/neurun/src/exec/interp/Interpreter.cc
+++ b/runtimes/neurun/src/exec/interp/Interpreter.cc
@@ -23,18 +23,67 @@ namespace exec
 namespace interp
 {
 
-void Interpreter::setInput(const neurun::model::operand::IO::Index &,
-                           const neurun::model::operand::TypeInfo &,
-                           const neurun::model::operand::Shape &, const void *, size_t)
+void Interpreter::setInput(const neurun::model::operand::IO::Index &index,
+                           const neurun::model::operand::TypeInfo &type,
+                           const neurun::model::operand::Shape &shape, const void *buffer,
+                           size_t length)
 {
-  throw std::runtime_error{"NYI: Interpreter setInput"};
+  using ::neurun::model::operand::DataType;
+  switch (type.type())
+  {
+    case DataType::SCALAR_FLOAT32:
+    case DataType::TENSOR_FLOAT32:
+      source<Source<float>>(index, reinterpret_cast<const float *>(buffer), length);
+      break;
+    case DataType::SCALAR_INT32:
+    case DataType::TENSOR_INT32:
+      source<Source<int32_t>>(index, reinterpret_cast<const int32_t *>(buffer), length);
+      break;
+    case DataType::SCALAR_UINT32:
+      source<Source<uint32_t>>(index, reinterpret_cast<const uint32_t *>(buffer), length);
+      break;
+    case DataType::TENSOR_QUANT8_ASYMM:
+      source<Source<uint8_t>>(index, reinterpret_cast<const uint8_t *>(buffer), length);
+      break;
+    default:
+      throw std::runtime_error("Not supported, yet");
+      break;
+  }
+
+  const auto input_index = _model->inputs.at(index);
+  const auto info = compiler::TensorInfo(shape, type);
+  _tensor_info_map.insert({input_index, info});
 }
 
-void Interpreter::setOutput(const neurun::model::operand::IO::Index &,
-                            const neurun::model::operand::TypeInfo &,
-                            const neurun::model::operand::Shape &, void *, size_t)
+void Interpreter::setOutput(const neurun::model::operand::IO::Index &index,
+                            const neurun::model::operand::TypeInfo &type,
+                            const neurun::model::operand::Shape &shape, void *buffer, size_t length)
 {
-  throw std::runtime_error{"NYI: Interpreter setOutput"};
+  using ::neurun::model::operand::DataType;
+  switch (type.type())
+  {
+    case DataType::SCALAR_FLOAT32:
+    case DataType::TENSOR_FLOAT32:
+      sink<Sink<float>>(index, reinterpret_cast<float *>(buffer), length);
+      break;
+    case DataType::SCALAR_INT32:
+    case DataType::TENSOR_INT32:
+      sink<Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer), length);
+      break;
+    case DataType::SCALAR_UINT32:
+      sink<Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer), length);
+      break;
+    case DataType::TENSOR_QUANT8_ASYMM:
+      sink<Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer), length);
+      break;
+    default:
+      throw std::runtime_error("Not supported, yet");
+      break;
+  }
+
+  const auto output_index = _model->outputs.at(index);
+  const auto info = compiler::TensorInfo(shape, type);
+  _tensor_info_map.insert({output_index, info});
 }
 
 void Interpreter::execute(void) { throw std::runtime_error{"NYI: Interpreter execute"}; }
diff --git a/runtimes/neurun/src/exec/interp/Interpreter.h b/runtimes/neurun/src/exec/interp/Interpreter.h
index 913f067..8148bc4 100644
--- a/runtimes/neurun/src/exec/interp/Interpreter.h
+++ b/runtimes/neurun/src/exec/interp/Interpreter.h
@@ -21,8 +21,11 @@
 #ifndef __NEURUN_EXEC_INTERP_INTERPRETER_H_
 #define __NEURUN_EXEC_INTERP_INTERPRETER_H_
 
-#include "compiler/Plan.h"
+#include "model/operand/IndexMap.h"
+#include "compiler/TensorInfo.h"
 #include "exec/IExecutor.h"
+#include "exec/Source.h"
+#include "exec/Sink.h"
 
 namespace neurun
 {
@@ -75,7 +78,25 @@ public:
   void execute(void);
 
 private:
+  template <typename T, typename... Args>
+  void source(const neurun::model::operand::IO::Index &index, Args &&... args)
+  {
+    _sources.at(index.value()) = std::move(std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
+  }
+  template <typename T, typename... Args>
+  void sink(const neurun::model::operand::IO::Index &index, Args &&... args)
+  {
+    _sinks.at(index.value()) = std::move(std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
+  }
+
+private:
   std::shared_ptr<const model::Model> _model;
+  // TODO use own TensorInfo instead of using compiler's TensorInfo struct
+  // or define independent TensorInfo struct to use both compiler and interpreter
+  // TODO use execution environment to handle tensor for each inference
+  model::operand::IndexMap<compiler::TensorInfo> _tensor_info_map;
+  std::vector<std::unique_ptr<ISource>> _sources;
+  std::vector<std::unique_ptr<ISink>> _sinks;
 };
 
 } // namespace interp
-- 
2.7.4