void ExecManager::setInput(const neurun::model::operand::IO::Index &index,
const neurun::model::operand::TypeInfo &type,
- const neurun::model::operand::Shape &shape, const void *, size_t length)
+ const neurun::model::operand::Shape &shape, const void *buffer,
+ size_t length)
{
const auto input_index = _model->inputs.at(index);
const TensorInfo info{shape, type};
- _tensor_info_map.insert({input_index, info});
if (length < info.total_size())
{
throw std::runtime_error{"Too small length"};
}
- // TODO Make interpreter tensor using buffer and info
+ auto input_tensor = std::make_shared<ROTensor>(info);
+ input_tensor->setBuffer(reinterpret_cast<const uint8_t *>(buffer));
+ _tensor_map.insert({input_index, input_tensor});
}
void ExecManager::setOutput(const neurun::model::operand::IO::Index &index,
const neurun::model::operand::TypeInfo &type,
- const neurun::model::operand::Shape &shape, void *, size_t length)
+ const neurun::model::operand::Shape &shape, void *buffer, size_t length)
{
const auto output_index = _model->outputs.at(index);
const TensorInfo info{shape, type};
- _tensor_info_map.insert({output_index, info});
if (length < info.total_size())
{
throw std::runtime_error{"Too small length"};
}
+ auto output_tensor = std::make_shared<Tensor>(info);
+ output_tensor->setBuffer(reinterpret_cast<uint8_t *>(buffer));
+ _tensor_map.insert({output_index, output_tensor});
- // TODO Make interpreter tensor using buffer and info
}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Tensor.h"
+
+#define NO_USE(a) (void)(a)
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+size_t ROTensor::calcOffset(const neurun::util::feature::Coordinate4D &coords)
+{
+ NO_USE(coords);
+  throw std::runtime_error("offset_element_in_bytes is not supported for interp::ROTensor now.");
+}
+
+size_t Tensor::calcOffset(const neurun::util::feature::Coordinate4D &coords)
+{
+ NO_USE(coords);
+  throw std::runtime_error("offset_element_in_bytes is not supported for interp::Tensor now.");
+}
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file Tensor.h
+ * @brief This file contains ITensor interface, ROTensor class, and Tensor class
+ */
+#ifndef __NEURUN_EXEC_INTERP_TENSOR_H__
+#define __NEURUN_EXEC_INTERP_TENSOR_H__
+
+#include <cstdint>
+#include <stdexcept>
+
+#include "util/feature/Coordinate4D.h"
+#include "TensorInfo.h"
+
+namespace neurun
+{
+namespace exec
+{
+namespace interp
+{
+
+/**
+ * @brief Interface to handle Tensor in interpreter
+ * @note TODO merge or inherit backend::ITensor
+ */
+class ITensor
+{
+public:
+ virtual ~ITensor() = default;
+
+public:
+ virtual uint8_t *buffer() const = 0;
+ /**
+ * @brief Return read-only buffer pointer
+ * @return Read-only buffer pointer
+ */
+ virtual const uint8_t *bufferRO() const = 0;
+ /**
+ * @brief Set the buffer
+ * @param[in] buffer Buffer pointer
+ */
+ virtual void setBuffer(uint8_t *buffer) = 0;
+ /**
+ * @brief Set the read-only buffer
+ * @param[in] buffer Buffer pointer to set read-only
+ */
+ virtual void setBuffer(const uint8_t *buffer) = 0;
+ virtual size_t total_size() const = 0;
+ virtual size_t dimension(size_t index) const = 0;
+ virtual size_t num_dimensions() const = 0;
+ virtual size_t calcOffset(const util::feature::Coordinate4D &coords) = 0;
+ virtual bool has_padding() const = 0;
+ /**
+ * @brief Return data type of tensor
+ * @return Data type of tensor
+ */
+ virtual model::operand::DataType data_type() const = 0;
+ /**
+ * @brief Return TensorInfo
+ * @return TensorInfo
+ */
+ virtual const TensorInfo &tensorInfo() const = 0;
+ /**
+ * @brief Return number of elements
+ * @return Number of elements
+ */
+ virtual uint64_t element_nums() const = 0;
+};
+
+/**
+ * @brief Class to handle tensor in interpreter as read-only
+ */
+class ROTensor final : public ITensor
+{
+public:
+ ROTensor() = delete;
+ ROTensor(const TensorInfo &info) : _info(info)
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint8_t *buffer() const override { throw std::runtime_error{"Read only tensor"}; }
+ const uint8_t *bufferRO() const override { return _buffer; }
+ void setBuffer(uint8_t *buffer) override { _buffer = buffer; }
+ void setBuffer(const uint8_t *buffer) override { _buffer = buffer; }
+ size_t total_size() const override { return _info.total_size(); }
+ size_t dimension(size_t index) const override { return _info.shape().dim(index); }
+ size_t num_dimensions() const override { return _info.shape().dims().size(); }
+ size_t calcOffset(const util::feature::Coordinate4D &coords) override;
+ bool has_padding() const override { return false; }
+ model::operand::DataType data_type() const override { return _info.typeInfo().type(); }
+ const TensorInfo &tensorInfo() const override { return _info; }
+  uint64_t element_nums() const override { return _info.shape().element_nums(); }
+
+private:
+ const TensorInfo _info;
+ const uint8_t *_buffer{nullptr};
+};
+
+/**
+ * @brief Class to handle tensor in interpreter as writable
+ */
+class Tensor final : public ITensor
+{
+public:
+ Tensor() = delete;
+ Tensor(const TensorInfo &info) : _info(info)
+ {
+ // DO NOTHING
+ }
+
+public:
+ uint8_t *buffer() const override { return _buffer; }
+ const uint8_t *bufferRO() const override { return _buffer; }
+ void setBuffer(uint8_t *buffer) override { _buffer = buffer; }
+ void setBuffer(const uint8_t *) override { throw std::runtime_error{"Writeable tensor"}; }
+ size_t total_size() const override { return _info.total_size(); }
+ size_t dimension(size_t index) const override { return _info.shape().dim(index); }
+ size_t num_dimensions() const override { return _info.shape().dims().size(); }
+ size_t calcOffset(const util::feature::Coordinate4D &coords) override;
+ bool has_padding() const override { return false; }
+ model::operand::DataType data_type() const override { return _info.typeInfo().type(); }
+ const interp::TensorInfo &tensorInfo() const override { return _info; }
+  uint64_t element_nums() const override { return _info.shape().element_nums(); }
+
+private:
+ const TensorInfo _info;
+ uint8_t *_buffer{nullptr};
+};
+
+} // namespace interp
+} // namespace exec
+} // namespace neurun
+
+#endif // __NEURUN_EXEC_INTERP_TENSOR_H__