}
auto input_tensor = std::make_shared<ROTensor>(info);
- input_tensor->setBuffer(reinterpret_cast<const uint8_t *>(buffer));
+ input_tensor->setData(std::make_shared<const model::ExternalData>(
+ reinterpret_cast<const uint8_t *>(buffer), length));
_tensor_map.insert({input_index, input_tensor});
}
}
auto input_tensor = std::make_shared<ROTensor>(info);
- input_tensor->setBuffer(reinterpret_cast<const uint8_t *>(buffer));
+ input_tensor->setData(std::make_shared<const model::ExternalData>(
+ reinterpret_cast<const uint8_t *>(buffer), length));
_tensor_map.insert({input_index, input_tensor});
}
}
auto output_tensor = std::make_shared<Tensor>(info);
- output_tensor->setBuffer(reinterpret_cast<uint8_t *>(buffer));
+ output_tensor->setBuffer(
+ std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(buffer), length));
_tensor_map.insert({output_index, output_tensor});
}
}
auto output_tensor = std::make_shared<Tensor>(info);
- output_tensor->setBuffer(reinterpret_cast<uint8_t *>(buffer));
+ output_tensor->setBuffer(
+ std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(buffer), length));
_tensor_map.insert({output_index, output_tensor});
}
auto const_tensor = std::make_shared<ROTensor>(obj.info());
// Assume that interpreter's tensor layout is same with model (NHWC)
- const_tensor->setBuffer(obj.data().base());
+ const_tensor->setData(
+ std::make_shared<model::ExternalData>(obj.data().base(), obj.info().total_size()));
interp_env->assignTensor(ind, const_tensor);
}
});
#ifndef __NEURUN_EXEC_INTERP_TENSOR_H__
#define __NEURUN_EXEC_INTERP_TENSOR_H__
+#include "Buffer.h"
+
#include "model/OperandInfo.h"
#include "backend/operand/ITensor.h"
#include "graph/operand/Layout.h"
public:
virtual uint8_t *buffer() const = 0;
/**
+ * @brief Return shared pointer for buffer
+ * @return Buffer shared pointer
+ */
+ virtual std::shared_ptr<const Buffer> shareBuffer() const = 0;
+ /**
* @brief Return read-only buffer pointer
* @return Read-only buffer pointer
*/
virtual const uint8_t *bufferRO() const = 0;
/**
- * @brief Set the buffer
+ * @brief Return shared pointer for data
+ * @return Data shared pointer
+ */
+ virtual std::shared_ptr<const model::Data> shareData() const = 0;
+ /**
+ * @brief Set internal/external buffer
* @param[in] buffer Buffer pointer
*/
- virtual void setBuffer(uint8_t *buffer) = 0;
+ virtual void setBuffer(std::shared_ptr<const Buffer> buffer) = 0;
/**
- * @brief Set the read-only buffer
- * @param[in] buffer Buffer pointer to set read-only
+ * @brief Set data reference (including constant, input)
+ * @param[in] data Data pointer
*/
- virtual void setBuffer(const uint8_t *buffer) = 0;
+ virtual void setData(std::shared_ptr<const model::Data> data) = 0;
+ virtual void releaseData() = 0;
+
virtual size_t total_size() const = 0;
virtual size_t dimension(size_t index) const = 0;
virtual size_t num_dimensions() const = 0;
public:
uint8_t *buffer() const override { throw std::runtime_error{"Read only tensor"}; }
- const uint8_t *bufferRO() const override { return _buffer; }
- void setBuffer(uint8_t *buffer) override { _buffer = buffer; }
- void setBuffer(const uint8_t *buffer) override { _buffer = buffer; }
+ std::shared_ptr<const Buffer> shareBuffer() const override
+ {
+ throw std::runtime_error{"Read only tensor"};
+ }
+ const uint8_t *bufferRO() const override { return _data->base(); }
+ std::shared_ptr<const model::Data> shareData() const override { return _data; }
+ void setBuffer(std::shared_ptr<const Buffer> buffer) override { _data = buffer; }
+ void setData(std::shared_ptr<const model::Data> data) override { _data = data; }
+ void releaseData() override { _data = nullptr; }
+
size_t total_size() const override { return _info.total_size(); }
size_t dimension(size_t index) const override { return _info.shape().dim(index); }
size_t num_dimensions() const override { return _info.shape().dims().size(); }
private:
const model::OperandInfo _info;
- const uint8_t *_buffer{nullptr};
+ std::shared_ptr<const model::Data> _data{nullptr};
};
/**
}
public:
- uint8_t *buffer() const override { return _buffer; }
- const uint8_t *bufferRO() const override { return _buffer; }
- void setBuffer(uint8_t *buffer) override { _buffer = buffer; }
- void setBuffer(const uint8_t *) override { throw std::runtime_error{"Writeable tensor"}; }
+ uint8_t *buffer() const override { return _buffer->baseWritable(); }
+ std::shared_ptr<const Buffer> shareBuffer() const override { return _buffer; }
+ const uint8_t *bufferRO() const override { return _buffer->base(); }
+ std::shared_ptr<const model::Data> shareData() const override { return _buffer; }
+ void setBuffer(std::shared_ptr<const Buffer> buffer) override { _buffer = buffer; }
+ void setData(std::shared_ptr<const model::Data>) override
+ {
+ throw std::runtime_error{"Passed data may be read-only"};
+ }
+ void releaseData() override { _buffer = nullptr; }
+
size_t total_size() const override { return _info.total_size(); }
size_t dimension(size_t index) const override { return _info.shape().dim(index); }
size_t num_dimensions() const override { return _info.shape().dims().size(); }
private:
const model::OperandInfo _info;
- uint8_t *_buffer{nullptr};
+ std::shared_ptr<const Buffer> _buffer{nullptr};
};
} // namespace interp