[MemoryManager] Apply MemoryManager to TensorBuilder on CPU (#5406)
author김용섭/On-Device Lab(SR)/Engineer/삼성전자 <yons.kim@samsung.com>
Tue, 18 Jun 2019 04:41:44 +0000 (13:41 +0900)
committer이춘석/On-Device Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Tue, 18 Jun 2019 04:41:44 +0000 (13:41 +0900)
The CPU backend's MemoryManager now owns the MemoryPlanner, Allocator and
Tensors, which were previously owned directly by the CPU TensorBuilder.

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
runtimes/neurun/backend/cpu/TensorBuilder.cc
runtimes/neurun/backend/cpu/TensorBuilder.h

index 237041c..34c6e8e 100644 (file)
@@ -18,9 +18,7 @@
 
 #include <cassert>
 
-#include <backend/operand/Object.h>
 #include "util/logging.h"
-#include "MemoryPlannerFactory.h"
 
 namespace neurun
 {
@@ -29,17 +27,11 @@ namespace backend
 namespace cpu
 {
 
-TensorBuilder::TensorBuilder() : _mem_planner{createMemoryPlanner()}
+TensorBuilder::TensorBuilder() : _mem_mgr{new MemoryManager()}
 {
   // DO NOTHING
 }
 
-IMemoryPlanner *TensorBuilder::createMemoryPlanner()
-{
-  auto planner_id = config::ConfigManager::instance().get<std::string>(config::CPU_MEMORY_PLANNER);
-  return MemoryPlannerFactory::instance().create(planner_id);
-}
-
 void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
                                        const model::OperandInfo &info,
                                        const graph::operand::Layout &)
@@ -59,38 +51,15 @@ void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
 {
   assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
   const auto &info = _tensor_info_map.at(ind);
+  _mem_mgr->buildTensor(ind, info);
 
   const auto size = info.total_size();
-  _mem_planner->claim(ind, size);
+  _mem_mgr->claimPlan(ind, size);
 }
 
-void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _mem_planner->release(ind); }
-
-void TensorBuilder::prepare(void)
-{
-  assert(_tensors.size() == 0);
-
-  _mem_alloc = std::make_shared<Allocator>(_mem_planner->capacity());
-  assert(_mem_alloc->base());
-
-  for (auto &mem_plan : _mem_planner->memory_plans())
-  {
-    auto ind = mem_plan.first;
-    auto mem_blk = mem_plan.second;
-    const auto &info = _tensor_info_map.at(ind);
-
-    uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
-    auto tensor = std::make_shared<operand::Tensor>(info);
-    tensor->setBuffer(buffer);
-    _tensors[ind] = tensor;
-
-    VERBOSE(CPU_TENSORBUILDER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
-                               << std::endl;
+void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _mem_mgr->releasePlan(ind); }
 
-    // If we do not make tensor here currently, stages would cause segmentation fault.
-    // See also : Comments in `allocate` method.
-  }
-}
+void TensorBuilder::prepare(void) { _mem_mgr->allocate(); }
 
 void TensorBuilder::allocate(void)
 {
@@ -101,24 +70,17 @@ void TensorBuilder::allocate(void)
 std::shared_ptr<::neurun::backend::operand::ITensor>
 TensorBuilder::tensorAt(const model::OperandIndex &ind)
 {
-  return _tensors.at(ind);
+  return _mem_mgr->tensors().at(ind);
 }
 
 std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
 {
-  if (_objects.find(ind) != _objects.end())
-  {
-    return _objects.at(ind);
-  }
-  else
-  {
-    return _objects[ind] = std::make_shared<::neurun::backend::operand::Object>(_tensors.at(ind));
-  }
+  return _mem_mgr->wrapTensor(ind);
 }
 
 void TensorBuilder::iterate(const IterateFunction &fn)
 {
-  for (auto it : _tensors)
+  for (auto it : _mem_mgr->tensors())
   {
     fn(it.first);
   }
@@ -126,7 +88,7 @@ void TensorBuilder::iterate(const IterateFunction &fn)
 
 std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
 {
-  return _tensors.at(ind);
+  return _mem_mgr->tensors().at(ind);
 }
 
 } // namespace cpu
index 4c219b9..d1d97b6 100644 (file)
@@ -23,7 +23,7 @@
 #include <backend/operand/Object.h>
 #include "operand/Tensor.h"
 #include "model/OperandIndexMap.h"
-#include "MemoryPlanner.h"
+#include "MemoryManager.h"
 
 namespace neurun
 {
@@ -34,9 +34,6 @@ namespace cpu
 
 class TensorBuilder : public ITensorBuilder
 {
-private:
-  IMemoryPlanner *createMemoryPlanner();
-
 public:
   TensorBuilder();
 
@@ -71,15 +68,14 @@ public:
 
   void iterate(const IterateFunction &fn) override;
 
+  // TODO Pass MemoryManager to Executor instead of TensorBuilder
+  // std::unique_ptr<IMemoryManager> releaseMemoryManager(void) override;
+
   std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
 
 private:
+  std::unique_ptr<MemoryManager> _mem_mgr;
   model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
-  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
-  model::OperandIndexMap<std::shared_ptr<::neurun::backend::operand::Object>> _objects;
-  model::OperandIndexMap<Block> _tensor_mem_map;
-  std::shared_ptr<IMemoryPlanner> _mem_planner;
-  std::shared_ptr<Allocator> _mem_alloc;
 };
 
 } // namespace cpu