From: 오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 Date: Mon, 25 Mar 2019 03:21:16 +0000 (+0900) Subject: Introduce tensor interface and class for interpreter (#4818) X-Git-Tag: accepted/tizen/unified/20190430.113441~125 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1d5238e39054ad69fd72ed277bc283a1082ddfda;p=platform%2Fcore%2Fml%2Fnnfw.git Introduce tensor interface and class for interpreter (#4818) Introduce tensor interface for interpreter Introduce ROTensor (read-only tensor) and Tensor (writable tensor) class Generate interpreter tensor when input/output is set Signed-off-by: Hyeongseok Oh --- diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.cc b/runtimes/neurun/core/src/exec/interp/ExecManager.cc index 0e9c5d4..7b70e68 100644 --- a/runtimes/neurun/core/src/exec/interp/ExecManager.cc +++ b/runtimes/neurun/core/src/exec/interp/ExecManager.cc @@ -25,33 +25,38 @@ namespace interp void ExecManager::setInput(const neurun::model::operand::IO::Index &index, const neurun::model::operand::TypeInfo &type, - const neurun::model::operand::Shape &shape, const void *, size_t length) + const neurun::model::operand::Shape &shape, const void *buffer, + size_t length) { const auto input_index = _model->inputs.at(index); const TensorInfo info{shape, type}; - _tensor_info_map.insert({input_index, info}); if (length < info.total_size()) { throw std::runtime_error{"Too small length"}; } - // TODO Make interpreter tensor using buffer and info + auto input_tensor = std::make_shared<ROTensor>(info); + input_tensor->setBuffer(reinterpret_cast<const uint8_t *>(buffer)); + _tensor_map.insert({input_index, input_tensor}); } void ExecManager::setOutput(const neurun::model::operand::IO::Index &index, const neurun::model::operand::TypeInfo &type, - const neurun::model::operand::Shape &shape, void *, size_t length) + const neurun::model::operand::Shape &shape, void *buffer, size_t length) { const auto output_index = _model->outputs.at(index); const TensorInfo info{shape, type}; - 
_tensor_info_map.insert({output_index, info}); if (length < info.total_size()) { throw std::runtime_error{"Too small length"}; } + auto output_tensor = std::make_shared<Tensor>(info); + output_tensor->setBuffer(reinterpret_cast<uint8_t *>(buffer)); + _tensor_map.insert({output_index, output_tensor}); + // TODO Make interpreter tensor using buffer and info } diff --git a/runtimes/neurun/core/src/exec/interp/ExecManager.h b/runtimes/neurun/core/src/exec/interp/ExecManager.h index d3021bb..7181f2a 100644 --- a/runtimes/neurun/core/src/exec/interp/ExecManager.h +++ b/runtimes/neurun/core/src/exec/interp/ExecManager.h @@ -24,7 +24,7 @@ #include "model/operand/IndexMap.h" #include "exec/IExecutor.h" -#include "TensorInfo.h" +#include "Tensor.h" namespace neurun { @@ -79,7 +79,7 @@ public: private: std::shared_ptr _model; // TODO use execution environment to handle tensor for each inference - model::operand::IndexMap<TensorInfo> _tensor_info_map; + model::operand::IndexMap<std::shared_ptr<ITensor>> _tensor_map; }; } // namespace interp diff --git a/runtimes/neurun/core/src/exec/interp/Tensor.cc b/runtimes/neurun/core/src/exec/interp/Tensor.cc new file mode 100644 index 0000000..df00d61 --- /dev/null +++ b/runtimes/neurun/core/src/exec/interp/Tensor.cc @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "Tensor.h" + +#define NO_USE(a) (void)(a) + +namespace neurun +{ +namespace exec +{ +namespace interp +{ + +size_t ROTensor::calcOffset(const neurun::util::feature::Coordinate4D &coords) +{ + NO_USE(coords); + throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now."); +} + +size_t Tensor::calcOffset(const neurun::util::feature::Coordinate4D &coords) +{ + NO_USE(coords); + throw std::runtime_error("offset_element_in_bytes is not supported for cpu::Tensor now."); +} + +} // namespace interp +} // namespace exec +} // namespace neurun diff --git a/runtimes/neurun/core/src/exec/interp/Tensor.h b/runtimes/neurun/core/src/exec/interp/Tensor.h new file mode 100644 index 0000000..5989848 --- /dev/null +++ b/runtimes/neurun/core/src/exec/interp/Tensor.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @file Tensor.h + * @brief This file contains ITensor interface, ROTensor class, and Tensor class + */ +#ifndef __NEURUN_EXEC_INTERP_TENSOR_H__ +#define __NEURUN_EXEC_INTERP_TENSOR_H__ + +#include "util/feature/Coordinate4D.h" +#include "TensorInfo.h" + +namespace neurun +{ +namespace exec +{ +namespace interp +{ + +/** + * @brief Interface to handle Tensor in interpreter + * @note TODO merge or inherit backend::ITensor + */ +class ITensor +{ +public: + virtual ~ITensor() = default; + +public: + virtual uint8_t *buffer() const = 0; + /** + * @brief Return read-only buffer pointer + * @return Read-only buffer pointer + */ + virtual const uint8_t *bufferRO() const = 0; + /** + * @brief Set the buffer + * @param[in] buffer Buffer pointer + */ + virtual void setBuffer(uint8_t *buffer) = 0; + /** + * @brief Set the read-only buffer + * @param[in] buffer Buffer pointer to set read-only + */ + virtual void setBuffer(const uint8_t *buffer) = 0; + virtual size_t total_size() const = 0; + virtual size_t dimension(size_t index) const = 0; + virtual size_t num_dimensions() const = 0; + virtual size_t calcOffset(const util::feature::Coordinate4D &coords) = 0; + virtual bool has_padding() const = 0; + /** + * @brief Return data type of tensor + * @return Data type of tensor + */ + virtual model::operand::DataType data_type() const = 0; + /** + * @brief Return TensorInfo + * @return TensorInfo + */ + virtual const TensorInfo &tensorInfo() const = 0; + /** + * @brief Return number of elements + * @return Number of elements + */ + virtual uint64_t element_nums() const = 0; +}; + +/** + * @brief Class to handle tensor in interpreter as read-only + */ +class ROTensor final : public ITensor +{ +public: + ROTensor() = delete; + ROTensor(const TensorInfo &info) : _info(info) + { + // DO NOTHING + } + +public: + uint8_t *buffer() const override { throw std::runtime_error{"Read only tensor"}; } + const uint8_t *bufferRO() const override { return _buffer; } + void 
setBuffer(uint8_t *buffer) override { _buffer = buffer; } + void setBuffer(const uint8_t *buffer) override { _buffer = buffer; } + size_t total_size() const override { return _info.total_size(); } + size_t dimension(size_t index) const override { return _info.shape().dim(index); } + size_t num_dimensions() const override { return _info.shape().dims().size(); } + size_t calcOffset(const util::feature::Coordinate4D &coords) override; + bool has_padding() const override { return false; } + model::operand::DataType data_type() const override { return _info.typeInfo().type(); } + const TensorInfo &tensorInfo() const override { return _info; } + uint64_t element_nums() const override { return _info.shape().element_nums(); }; + +private: + const TensorInfo _info; + const uint8_t *_buffer{nullptr}; +}; + +/** + * @brief Class to handle tensor in interpreter as writable + */ +class Tensor final : public ITensor +{ +public: + Tensor() = delete; + Tensor(const TensorInfo &info) : _info(info) + { + // DO NOTHING + } + +public: + uint8_t *buffer() const override { return _buffer; } + const uint8_t *bufferRO() const override { return _buffer; } + void setBuffer(uint8_t *buffer) override { _buffer = buffer; } + void setBuffer(const uint8_t *) override { throw std::runtime_error{"Writeable tensor"}; } + size_t total_size() const override { return _info.total_size(); } + size_t dimension(size_t index) const override { return _info.shape().dim(index); } + size_t num_dimensions() const override { return _info.shape().dims().size(); } + size_t calcOffset(const util::feature::Coordinate4D &coords) override; + bool has_padding() const override { return false; } + model::operand::DataType data_type() const override { return _info.typeInfo().type(); } + const interp::TensorInfo &tensorInfo() const override { return _info; } + uint64_t element_nums() const override { return _info.shape().element_nums(); }; + +private: + const TensorInfo _info; + uint8_t *_buffer{nullptr}; +}; + +} // 
namespace interp +} // namespace exec +} // namespace neurun + +#endif // __NEURUN_EXEC_INTERP_TENSOR_H__