From: Hyeongseok Oh (오형석)/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Thu, 21 Mar 2019 10:48:06 +0000 (+0900)
Subject: Set input and output tensor info for interpreter (#4798)
X-Git-Tag: submit/tizen/20190325.013700~10
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=30e7e6a3a67621c3320569ffce9a4a293dc35722;p=platform%2Fcore%2Fml%2Fnnfw.git

Set input and output tensor info for interpreter (#4798)

- Set input and output tensor info for interpreter
- Fix TensorInfo copy constructor bug
- Remove sink/source implementation

Signed-off-by: Hyeongseok Oh
---

diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.cc b/runtimes/neurun/core/src/exec/interp/ExecManager.cc
index 5873d2d..0e9c5d4 100644
--- a/runtimes/neurun/core/src/exec/interp/ExecManager.cc
+++ b/runtimes/neurun/core/src/exec/interp/ExecManager.cc
@@ -25,69 +25,34 @@ namespace interp
 
 void ExecManager::setInput(const neurun::model::operand::IO::Index &index,
                            const neurun::model::operand::TypeInfo &type,
-                           const neurun::model::operand::Shape &shape, const void *buffer,
-                           size_t length)
+                           const neurun::model::operand::Shape &shape, const void *, size_t length)
 {
-  using ::neurun::model::operand::DataType;
-  switch (type.type())
+  const auto input_index = _model->inputs.at(index);
+  const TensorInfo info{shape, type};
+  _tensor_info_map.insert({input_index, info});
+
+  if (length < info.total_size())
   {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      source<Source<float>>(index, reinterpret_cast<const float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      source<Source<int32_t>>(index, reinterpret_cast<const int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      source<Source<uint32_t>>(index, reinterpret_cast<const uint32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_BOOL:
-    case DataType::TENSOR_BOOL8:
-    case DataType::TENSOR_QUANT8_ASYMM:
-      source<Source<uint8_t>>(index, reinterpret_cast<const uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
+    throw std::runtime_error{"Too small length"};
   }
 
-  const auto input_index = _model->inputs.at(index);
-  const auto info = compiler::TensorInfo(shape, type);
-  _tensor_info_map.insert({input_index, info});
+  // TODO Make interpreter tensor using buffer and info
 }
 
 void ExecManager::setOutput(const neurun::model::operand::IO::Index &index,
                             const neurun::model::operand::TypeInfo &type,
-                            const neurun::model::operand::Shape &shape, void *buffer, size_t length)
+                            const neurun::model::operand::Shape &shape, void *, size_t length)
 {
-  using ::neurun::model::operand::DataType;
-  switch (type.type())
+  const auto output_index = _model->outputs.at(index);
+  const TensorInfo info{shape, type};
+  _tensor_info_map.insert({output_index, info});
+
+  if (length < info.total_size())
   {
-    case DataType::SCALAR_FLOAT32:
-    case DataType::TENSOR_FLOAT32:
-      sink<Sink<float>>(index, reinterpret_cast<float *>(buffer), length);
-      break;
-    case DataType::SCALAR_INT32:
-    case DataType::TENSOR_INT32:
-      sink<Sink<int32_t>>(index, reinterpret_cast<int32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_UINT32:
-      sink<Sink<uint32_t>>(index, reinterpret_cast<uint32_t *>(buffer), length);
-      break;
-    case DataType::SCALAR_BOOL:
-    case DataType::TENSOR_BOOL8:
-    case DataType::TENSOR_QUANT8_ASYMM:
-      sink<Sink<uint8_t>>(index, reinterpret_cast<uint8_t *>(buffer), length);
-      break;
-    default:
-      throw std::runtime_error("Not supported, yet");
-      break;
+    throw std::runtime_error{"Too small length"};
   }
 
-  const auto output_index = _model->outputs.at(index);
-  const auto info = compiler::TensorInfo(shape, type);
-  _tensor_info_map.insert({output_index, info});
+  // TODO Make interpreter tensor using buffer and info
 }
 
 void ExecManager::execute(void) { throw std::runtime_error{"NYI: ExecManager execute"}; }
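For orientation, a minimal standalone sketch of the check that the new setInput()/setOutput() bodies perform, and that the tests below exercise with lengths 4, 12, and 16. It assumes TensorInfo::total_size() returns the tensor's byte size (element count times element size); the helpers total_size() and check_length() and the std::vector shape are illustrative stand-ins, not neurun API:

    // Standalone sketch (not part of the patch), assuming total_size()
    // is "number of elements * element size in bytes".
    #include <cstddef>
    #include <cstdint>
    #include <numeric>
    #include <stdexcept>
    #include <vector>

    std::size_t total_size(const std::vector<int32_t> &dims, std::size_t element_size)
    {
      // e.g. {1, 2, 2, 1} with 4-byte elements -> 4 * 4 = 16 bytes
      return std::accumulate(dims.begin(), dims.end(), element_size,
                             [](std::size_t acc, int32_t d) { return acc * static_cast<std::size_t>(d); });
    }

    void check_length(const std::vector<int32_t> &dims, std::size_t element_size, std::size_t length)
    {
      // Mirrors the patch: a buffer smaller than the tensor's byte size is rejected
      if (length < total_size(dims, element_size))
        throw std::runtime_error{"Too small length"};
    }

    int main()
    {
      check_length({1, 2, 2, 1}, sizeof(int32_t), 16); // OK, exact fit
      // check_length({1, 2, 2, 1}, sizeof(int32_t), 12); // would throw: 12 < 16
      return 0;
    }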
diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.h b/runtimes/neurun/core/src/exec/interp/ExecManager.h
index 8ddbf5e..d3021bb 100644
--- a/runtimes/neurun/core/src/exec/interp/ExecManager.h
+++ b/runtimes/neurun/core/src/exec/interp/ExecManager.h
@@ -23,10 +23,8 @@
 #define __NEURUN_EXEC_INTERP_EXEC_MANAGER_H_
 
 #include "model/operand/IndexMap.h"
-#include "compiler/TensorInfo.h"
 #include "exec/IExecutor.h"
-#include "exec/Source.h"
-#include "exec/Sink.h"
+#include "TensorInfo.h"
 
 namespace neurun
 {
@@ -79,25 +77,9 @@ public:
   void execute(void);
 
 private:
-  template <typename T, typename... Args>
-  void source(const neurun::model::operand::IO::Index &index, Args &&... args)
-  {
-    _sources.at(index.value()) = std::move(std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
-  }
-  template <typename T, typename... Args>
-  void sink(const neurun::model::operand::IO::Index &index, Args &&... args)
-  {
-    _sinks.at(index.value()) = std::move(std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
-  }
-
-private:
   std::shared_ptr<const model::Model> _model;
-  // TODO use own TensorInfo instead of using compiler's TensorInfo struct
-  //      or define independent TensorInfo struct to use both compiler and interpreter
   // TODO use execution environment to handle tensor for each inference
-  model::operand::IndexMap<compiler::TensorInfo> _tensor_info_map;
-  std::vector<std::unique_ptr<ISource>> _sources;
-  std::vector<std::unique_ptr<ISink>> _sinks;
+  model::operand::IndexMap<TensorInfo> _tensor_info_map;
 };
 
 } // namespace interp
diff --git a/runtimes/neurun/core/src/exec/interp/TensorInfo.h b/runtimes/neurun/core/src/exec/interp/TensorInfo.h
index 167f83a..8dfb5a0 100644
--- a/runtimes/neurun/core/src/exec/interp/TensorInfo.h
+++ b/runtimes/neurun/core/src/exec/interp/TensorInfo.h
@@ -59,7 +59,7 @@ public:
    * @brief Construct a new Tensor Info object
    * @param[in] origin Tensor info for copy
    */
-  TensorInfo(const TensorInfo &origin) : _shape(origin.shape), _typeInfo(origin.typeInfo)
+  TensorInfo(const TensorInfo &origin) : _shape(origin.shape()), _typeInfo(origin.typeInfo())
   {
     // DO NOTHING
   }
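The TensorInfo.h hunk above is the "Fix TensorInfo copy constructor bug" item: shape() and typeInfo() are accessor functions, so the old initializers named the member functions instead of calling them, which is ill-formed C++. A self-contained sketch of the fix, using simplified stand-in Shape/TypeInfo types rather than neurun's real definitions:

    #include <cstdint>
    #include <vector>

    // Simplified placeholders for neurun's Shape and TypeInfo
    struct Shape { std::vector<int32_t> dims; };
    struct TypeInfo { int32_t type; };

    class TensorInfo
    {
    public:
      TensorInfo(const Shape &shape, const TypeInfo &type) : _shape(shape), _typeInfo(type) {}
      // Fixed copy constructor: origin.shape() and origin.typeInfo() are calls;
      // the pre-fix `origin.shape` merely named the member function and did not compile
      TensorInfo(const TensorInfo &origin) : _shape(origin.shape()), _typeInfo(origin.typeInfo()) {}

      const Shape &shape() const { return _shape; }
      const TypeInfo &typeInfo() const { return _typeInfo; }

    private:
      Shape _shape;
      TypeInfo _typeInfo;
    };

    int main()
    {
      TensorInfo a{Shape{{1, 2, 2, 1}}, TypeInfo{0}};
      TensorInfo b{a}; // exercises the fixed copy constructor
      (void)b;
      return 0;
    }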
diff --git a/runtimes/neurun/test/interp/ExecManager.cc b/runtimes/neurun/test/interp/ExecManager.cc
index f710ef1..233b12a 100644
--- a/runtimes/neurun/test/interp/ExecManager.cc
+++ b/runtimes/neurun/test/interp/ExecManager.cc
@@ -42,8 +42,6 @@ protected:
     //   model output: add result
     //   lhs, rhs, result shape: {1, 2, 2, 1}
     //   activation: none (constant)
-    ::neurun::graph::Graph graph;
-
     operand::Shape shape{4};
     shape.dim(0) = 1;
     shape.dim(1) = 2;
@@ -82,6 +80,7 @@ protected:
   }
 
   virtual void TearDown() { _executor = nullptr; }
 
+  ::neurun::graph::Graph graph{};
   std::unique_ptr<exec::IExecutor> _executor{nullptr};
   const int32_t _activation_value{0};
 };
@@ -98,4 +97,48 @@ TEST_F(InterpExecManagerTest, create_simple)
 
   ASSERT_NE(_executor, nullptr);
 }
 
+TEST_F(InterpExecManagerTest, setInput)
+{
+  CreateSimpleModel();
+
+  auto input1 = operand::IO::Index{0};
+  auto input1_idx = graph.getInputs().at(input1);
+
+  auto input1_type = graph.operands().at(input1_idx).typeInfo();
+  auto input1_shape = graph.operands().at(input1_idx).shape();
+
+  const int32_t input1_buffer[4] = {1, 0, -1, -2};
+
+  EXPECT_THROW(_executor->setInput(input1, input1_type, input1_shape,
+                                   reinterpret_cast<const void *>(input1_buffer), 4),
+               std::runtime_error);
+  EXPECT_THROW(_executor->setInput(input1, input1_type, input1_shape,
+                                   reinterpret_cast<const void *>(input1_buffer), 12),
+               std::runtime_error);
+  EXPECT_NO_THROW(_executor->setInput(input1, input1_type, input1_shape,
+                                      reinterpret_cast<const void *>(input1_buffer), 16));
+}
+
+TEST_F(InterpExecManagerTest, setOutput)
+{
+  CreateSimpleModel();
+
+  auto output = operand::IO::Index{0};
+  auto output_idx = graph.getOutputs().at(output);
+
+  auto output_type = graph.operands().at(output_idx).typeInfo();
+  auto output_shape = graph.operands().at(output_idx).shape();
+
+  int32_t output_buffer[4] = {};
+
+  EXPECT_THROW(_executor->setOutput(output, output_type, output_shape,
+                                    reinterpret_cast<void *>(output_buffer), 4),
+               std::runtime_error);
+  EXPECT_THROW(_executor->setOutput(output, output_type, output_shape,
+                                    reinterpret_cast<void *>(output_buffer), 12),
+               std::runtime_error);
+  EXPECT_NO_THROW(_executor->setOutput(output, output_type, output_shape,
+                                       reinterpret_cast<void *>(output_buffer), 16));
+}
+
 } // namespace