[Interp] Use Buffer class in interpreter tensor (#5436)
author Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Thu, 20 Jun 2019 02:10:24 +0000 (11:10 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Thu, 20 Jun 2019 02:10:24 +0000 (11:10 +0900)
* [Interp] Use Buffer class in interpreter tensor

Instead of using a raw pointer directly, use the Buffer class in the interpreter tensor.
Rename the tensor data pointer field setter.

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
* Fix error message

* Update Tensor.h

- Introduce a getter to share the Data shared pointer
- Change the setter to set Data instead of ExternalData
- Add the const qualifier to the Data object
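
Buffer.h itself is not part of this diff, so the sketch below is a minimal reconstruction of what the call sites appear to assume, not the actual header: Buffer extends the read-only model::Data interface (base()/size()) with a writable view, InternalBuffer owns its allocation, and ExternalBuffer wraps caller-owned memory it never frees. All names and signatures here are inferred from the hunks that follow.

#include <cstddef>
#include <cstdint>
#include <memory>

namespace neurun { namespace model {
// Assumed read-only data interface (consistent with ExternalData usage below)
struct Data
{
  virtual ~Data() = default;
  virtual size_t size() const = 0;
  virtual const uint8_t *base() const = 0;
};
}} // namespace neurun::model

namespace neurun { namespace exec { namespace interp {
// Buffer adds a writable view on top of model::Data
struct Buffer : public model::Data
{
  virtual uint8_t *baseWritable() const = 0;
};

// InternalBuffer owns its storage; it is freed when the last shared_ptr drops
class InternalBuffer final : public Buffer
{
public:
  InternalBuffer(size_t size) : _base{new uint8_t[size]}, _size{size} {}
  size_t size() const override { return _size; }
  const uint8_t *base() const override { return _base.get(); }
  uint8_t *baseWritable() const override { return _base.get(); }

private:
  std::unique_ptr<uint8_t[]> _base;
  size_t _size;
};

// ExternalBuffer wraps caller-owned storage and never frees it
class ExternalBuffer final : public Buffer
{
public:
  ExternalBuffer(uint8_t *base, size_t size) : _base{base}, _size{size} {}
  size_t size() const override { return _size; }
  const uint8_t *base() const override { return _base; }
  uint8_t *baseWritable() const override { return _base; }

private:
  uint8_t *_base;
  size_t _size;
};
}}} // namespace neurun::exec::interp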

runtimes/neurun/core/src/exec/interp/ExecEnv.h
runtimes/neurun/core/src/exec/interp/ExecManager.cc
runtimes/neurun/core/src/exec/interp/Tensor.h

index a090ca9..ab44b95 100644 (file)
@@ -104,7 +104,7 @@ public:
     }
 
     auto tensor = std::make_shared<Tensor>(info);
-    tensor->setBuffer(new uint8_t[tensor->total_size()]);
+    tensor->setBuffer(std::make_shared<InternalBuffer>(tensor->total_size()));
     assignTensor(index, tensor);
     _buffers.insert(index);
   }
@@ -118,7 +118,7 @@ public:
   {
     if (_buffers.find(index) != _buffers.end())
     {
-      delete[] _tensors.at(index)->buffer();
+      _tensors.at(index)->releaseData();
     }
   }
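
Note on ownership: with the hunks above, the array backing an internal tensor is owned by the InternalBuffer's shared_ptr rather than managed manually, so releaseData() just drops the tensor's reference and the storage is freed by ~InternalBuffer once nothing else shares it. A minimal sketch (reusing the assumed classes above; `info` is a placeholder model::OperandInfo):

// Before: setBuffer(new uint8_t[...]) paired with an explicit delete[].
// After: the shared_ptr owns the array; dropping the reference is enough.
auto tensor = std::make_shared<Tensor>(info);
tensor->setBuffer(std::make_shared<InternalBuffer>(tensor->total_size()));
tensor->releaseData(); // last reference dropped; ~InternalBuffer frees the array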
 
index f1740d0..f2c940f 100644 (file)
@@ -41,7 +41,8 @@ void ExecManager::setInput(const neurun::model::IOIndex &index, const neurun::mo
   }
 
   auto input_tensor = std::make_shared<ROTensor>(info);
-  input_tensor->setBuffer(reinterpret_cast<const uint8_t *>(buffer));
+  input_tensor->setData(std::make_shared<const model::ExternalData>(
+      reinterpret_cast<const uint8_t *>(buffer), length));
   _tensor_map.insert({input_index, input_tensor});
 }
 
@@ -56,7 +57,8 @@ void ExecManager::setInput(const neurun::model::IOIndex &index, const void *buff
   }
 
   auto input_tensor = std::make_shared<ROTensor>(info);
-  input_tensor->setBuffer(reinterpret_cast<const uint8_t *>(buffer));
+  input_tensor->setData(std::make_shared<const model::ExternalData>(
+      reinterpret_cast<const uint8_t *>(buffer), length));
   _tensor_map.insert({input_index, input_tensor});
 }
 
@@ -73,7 +75,8 @@ void ExecManager::setOutput(const neurun::model::IOIndex &index,
   }
 
   auto output_tensor = std::make_shared<Tensor>(info);
-  output_tensor->setBuffer(reinterpret_cast<uint8_t *>(buffer));
+  output_tensor->setBuffer(
+      std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(buffer), length));
   _tensor_map.insert({output_index, output_tensor});
 }
 
@@ -88,7 +91,8 @@ void ExecManager::setOutput(const neurun::model::IOIndex &index, void *buffer, s
   }
 
   auto output_tensor = std::make_shared<Tensor>(info);
-  output_tensor->setBuffer(reinterpret_cast<uint8_t *>(buffer));
+  output_tensor->setBuffer(
+      std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(buffer), length));
   _tensor_map.insert({output_index, output_tensor});
 }
 
@@ -138,7 +142,8 @@ void ExecManager::execute(void)
 
       auto const_tensor = std::make_shared<ROTensor>(obj.info());
      // Assume that the interpreter's tensor layout is the same as the model's (NHWC)
-      const_tensor->setBuffer(obj.data().base());
+      const_tensor->setData(
+          std::make_shared<model::ExternalData>(obj.data().base(), obj.info().total_size()));
       interp_env->assignTensor(ind, const_tensor);
     }
   });
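
The pattern across these hunks: read-only caller memory (model inputs and constant operands) is wrapped in model::ExternalData and attached via setData() on an ROTensor, while writable caller memory (outputs) is wrapped in ExternalBuffer and attached via setBuffer() on a Tensor. Because ROTensor::bufferRO() returns the wrapped pointer (see the Tensor.h diff below), input reads are zero-copy. A hedged fragment, assuming `info` describes four floats:

float user_input[4] = {1.f, 2.f, 3.f, 4.f};
auto ro = std::make_shared<ROTensor>(info);
ro->setData(std::make_shared<const model::ExternalData>(
    reinterpret_cast<const uint8_t *>(user_input), sizeof(user_input)));

// Zero-copy: the tensor reads straight from the caller's array
assert(ro->bufferRO() == reinterpret_cast<const uint8_t *>(user_input));
// ro->buffer() would throw: ROTensor exposes no writable view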
index 1a4723d..48d7a13 100644 (file)
@@ -21,6 +21,8 @@
 #ifndef __NEURUN_EXEC_INTERP_TENSOR_H__
 #define __NEURUN_EXEC_INTERP_TENSOR_H__
 
+#include "Buffer.h"
+
 #include "model/OperandInfo.h"
 #include "backend/operand/ITensor.h"
 #include "graph/operand/Layout.h"
@@ -43,20 +45,32 @@ public:
 public:
   virtual uint8_t *buffer() const = 0;
   /**
+   * @brief   Return shared pointer for buffer
+   * @return  Buffer shared pointer
+   */
+  virtual std::shared_ptr<const Buffer> shareBuffer() const = 0;
+  /**
    * @brief   Return read-only buffer pointer
    * @return  Read-only buffer pointer
    */
   virtual const uint8_t *bufferRO() const = 0;
   /**
-   * @brief     Set the buffer
+   * @brief   Return shared pointer for data
+   * @return  Data shared pointer
+   */
+  virtual std::shared_ptr<const model::Data> shareData() const = 0;
+  /**
+   * @brief     Set internal/external buffer
    * @param[in] buffer  Buffer pointer
    */
-  virtual void setBuffer(uint8_t *buffer) = 0;
+  virtual void setBuffer(std::shared_ptr<const Buffer> buffer) = 0;
   /**
-   * @brief     Set the read-only buffer
-   * @param[in] buffer  Buffer pointer to set read-only
+   * @brief     Set data reference (including constant, input)
+   * @param[in] data  Data pointer
    */
-  virtual void setBuffer(const uint8_t *buffer) = 0;
+  virtual void setData(std::shared_ptr<const model::Data> data) = 0;
+  virtual void releaseData() = 0;
+
   virtual size_t total_size() const = 0;
   virtual size_t dimension(size_t index) const = 0;
   virtual size_t num_dimensions() const = 0;
@@ -95,9 +109,16 @@ public:
 
 public:
   uint8_t *buffer() const override { throw std::runtime_error{"Read only tensor"}; }
-  const uint8_t *bufferRO() const override { return _buffer; }
-  void setBuffer(uint8_t *buffer) override { _buffer = buffer; }
-  void setBuffer(const uint8_t *buffer) override { _buffer = buffer; }
+  std::shared_ptr<const Buffer> shareBuffer() const override
+  {
+    throw std::runtime_error{"Read only tensor"};
+  }
+  const uint8_t *bufferRO() const override { return _data->base(); }
+  std::shared_ptr<const model::Data> shareData() const override { return _data; }
+  void setBuffer(std::shared_ptr<const Buffer> buffer) override { _data = buffer; }
+  void setData(std::shared_ptr<const model::Data> data) override { _data = data; }
+  void releaseData() override { _data = nullptr; }
+
   size_t total_size() const override { return _info.total_size(); }
   size_t dimension(size_t index) const override { return _info.shape().dim(index); }
   size_t num_dimensions() const override { return _info.shape().dims().size(); }
@@ -110,7 +131,7 @@ public:
 
 private:
   const model::OperandInfo _info;
-  const uint8_t *_buffer{nullptr};
+  std::shared_ptr<const model::Data> _data{nullptr};
 };
 
 /**
@@ -126,10 +147,17 @@ public:
   }
 
 public:
-  uint8_t *buffer() const override { return _buffer; }
-  const uint8_t *bufferRO() const override { return _buffer; }
-  void setBuffer(uint8_t *buffer) override { _buffer = buffer; }
-  void setBuffer(const uint8_t *) override { throw std::runtime_error{"Writeable tensor"}; }
+  uint8_t *buffer() const override { return _buffer->baseWritable(); }
+  std::shared_ptr<const Buffer> shareBuffer() const override { return _buffer; }
+  const uint8_t *bufferRO() const override { return _buffer->base(); }
+  std::shared_ptr<const model::Data> shareData() const override { return _buffer; }
+  void setBuffer(std::shared_ptr<const Buffer> buffer) override { _buffer = buffer; }
+  void setData(std::shared_ptr<const model::Data>) override
+  {
+    throw std::runtime_error{"Passed data may read-only"};
+  }
+  void releaseData() override { _buffer = nullptr; }
+
   size_t total_size() const override { return _info.total_size(); }
   size_t dimension(size_t index) const override { return _info.shape().dim(index); }
   size_t num_dimensions() const override { return _info.shape().dims().size(); }
@@ -142,7 +170,7 @@ public:
 
 private:
   const model::OperandInfo _info;
-  uint8_t *_buffer{nullptr};
+  std::shared_ptr<const Buffer> _buffer{nullptr};
 };
 
 } // namespace interp
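
Taken together, a hedged end-to-end sketch of the resulting API (a fragment; `info` is a placeholder model::OperandInfo, and the Buffer classes are the assumed ones sketched near the top):

auto tensor = std::make_shared<Tensor>(info);
tensor->setBuffer(std::make_shared<InternalBuffer>(tensor->total_size()));

uint8_t *w = tensor->buffer();         // writable view (Buffer::baseWritable)
w[0] = 42;
const uint8_t *r = tensor->bufferRO(); // read-only view (Buffer::base)

// shareData() upcasts the Buffer shared_ptr to model::Data, so a read-only
// tensor can alias the same storage without copying:
auto ro = std::make_shared<ROTensor>(info);
ro->setData(tensor->shareData());

tensor->releaseData(); // ro still reads valid memory
ro->releaseData();     // last reference released; InternalBuffer frees the array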