[neurun] Implement classes inheriting from ITensorManager (#7083)
author Yongseop Kim/On-Device Lab(SR)/Engineer/Samsung Electronics <yons.kim@samsung.com>
Wed, 4 Sep 2019 11:54:29 +0000 (20:54 +0900)
committer Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Wed, 4 Sep 2019 11:54:29 +0000 (20:54 +0900)
* [neurun] Implement classes inheriting from ITensorManager

Implement classes that inherit from ITensorManager, such as cpu/TensorManager
and AclTensorManager.

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
* Add findTensorAsParent()

* Merge the two buildXXXTensor functions into one with an as_const param
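
  For illustration, the merged entry point dispatches on the as_const flag;
  a minimal sketch of a call site (the "before" names are hypothetical,
  reconstructed from the commit message):

    // Before (hypothetical): one builder per constness
    // mgr->buildConstTensor(ind, info);
    // mgr->buildNonconstTensor(ind, info);

    // After: a single entry point; the manager routes the tensor to its
    // constant or non-constant MemoryManager internally
    mgr->buildTensor(ind, info, /*as_const=*/true);  // constant operand
    mgr->buildTensor(ind, info, /*as_const=*/false); // activation tensor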

runtimes/neurun/backend/acl_cl/MemoryManager.h
runtimes/neurun/backend/acl_common/AclTensorManager.h [new file with mode: 0644]
runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
runtimes/neurun/backend/acl_neon/MemoryManager.h
runtimes/neurun/backend/cpu/TensorBuilder.h
runtimes/neurun/backend/cpu/TensorManager.cc [new file with mode: 0644]
runtimes/neurun/backend/cpu/TensorManager.h [new file with mode: 0644]

diff --git a/runtimes/neurun/backend/acl_cl/MemoryManager.h b/runtimes/neurun/backend/acl_cl/MemoryManager.h
index ef15a49..ec509ac 100644
--- a/runtimes/neurun/backend/acl_cl/MemoryManager.h
+++ b/runtimes/neurun/backend/acl_cl/MemoryManager.h
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+// TODO Rename this to TensorManager.h
 #ifndef __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
 #define __NEURUN_BACKEND_ACL_CL_MEMORY_MANAGER_H__
 
@@ -25,6 +26,8 @@
 
 #include <AclMemoryManager.h>
 #include <AclLinearMemoryManager.h>
+#include <AclInternalBufferManager.h>
+#include <AclTensorManager.h>
 
 #include "operand/CLTensor.h"
 #include "operand/CLSubTensor.h"
@@ -49,6 +52,7 @@ using LinearMemoryManager = ::neurun::backend::acl_common::AclLinearMemoryManage
     ::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator,
     ::arm_compute::CLMemoryGroup>;
 
+// TODO Remove this
 MemoryManager *createMemoryManager()
 {
   const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
@@ -65,6 +69,35 @@ MemoryManager *createMemoryManager()
   }
 }
 
+// TODO Enable this
+/*
+using InternalBufferManager = ::neurun::backend::acl_common::AclInternalBufferManager<
+    ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
+    ::arm_compute::BlobLifetimeManager, ::arm_compute::CLBufferAllocator>;
+
+using TensorManager =
+    ::neurun::backend::acl_common::AclTensorManager<::neurun::backend::acl_cl::operand::ICLTensor,
+                                                    operand::CLTensor, operand::CLSubTensor,
+                                                    operand::Object>;
+
+TensorManager *createTensorManager()
+{
+  const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
+
+  if (executor_str == "Linear")
+  {
+    VERBOSE(acl_cl_createTensorManager) << "AclTensorManager as Linear" << std::endl;
+    return new TensorManager(new MemoryManager(), new LinearMemoryManager(),
+                             new InternalBufferManager());
+  }
+  else
+  {
+    VERBOSE(acl_cl_createTensorManager) << "AclTensorManager" << std::endl;
+    return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
+  }
+}
+*/
+
 } // namespace acl_cl
 } // namespace backend
 } // namespace neurun
diff --git a/runtimes/neurun/backend/acl_common/AclTensorManager.h b/runtimes/neurun/backend/acl_common/AclTensorManager.h
new file mode 100644
index 0000000..c4935ad
--- /dev/null
+++ b/runtimes/neurun/backend/acl_common/AclTensorManager.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
+
+#include <arm_compute/runtime/IMemoryManager.h>
+
+#include "backend/ITensorManager.h"
+#include "AclMemoryManager.h"
+#include "AclInternalBufferManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+class AclTensorManager : public backend::ITensorManager
+{
+public:
+  using T_AclMemoryManager = AclMemoryManager<T_Tensor, T_SubTensor, T_Object>;
+
+  AclTensorManager(T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
+                   IInternalBufferManager *inter_mgr);
+
+  virtual ~AclTensorManager() = default;
+
+  void allocateConsts(void) override;
+  void allocateNonconsts(void) override;
+  void deallocateConsts(void) override;
+  void deallocateNonconsts(void) override;
+
+  void allocateInternalBufferManager(void);
+  void deallocateInternalBufferManager(void);
+
+  void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
+                   bool as_const);
+  void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child,
+                      const ::arm_compute::TensorShape &shape,
+                      const ::arm_compute::Coordinates &coordinates);
+
+  std::shared_ptr<T_ITensor> findTensorAsParent(const model::OperandIndex &ind);
+
+  void startLifetime(const model::OperandIndex &ind);
+  void finishLifetime(const model::OperandIndex &ind);
+
+  std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+  std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
+
+  model::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
+  model::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
+  model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
+
+  std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void);
+
+  void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+  void tryDeallocConstants(void);
+
+private:
+  std::unique_ptr<T_AclMemoryManager> _const_mgr;
+  std::unique_ptr<T_AclMemoryManager> _nonconst_mgr;
+  std::unique_ptr<IInternalBufferManager> _inter_mgr;
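+  // Maps each operand index to the memory manager (const or non-const) that owns it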
+  model::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
+};
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#include <cassert>
+#include "util/logging.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace acl_common
+{
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::AclTensorManager(
+    T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
+    IInternalBufferManager *inter_mgr)
+    : _const_mgr{const_mgr}, _nonconst_mgr{nonconst_mgr}, _inter_mgr{inter_mgr}
+{
+  // DO NOTHING
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateConsts(void)
+{
+  _const_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateNonconsts(void)
+{
+  _nonconst_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateConsts(void)
+{
+  _const_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateNonconsts(void)
+{
+  _nonconst_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocateInternalBufferManager(
+    void)
+{
+  _inter_mgr->allocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateInternalBufferManager(
+    void)
+{
+  _inter_mgr->deallocate();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor(
+    const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, bool as_const)
+{
+  assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+  if (as_const)
+  {
+    _const_mgr->buildTensor(ind, info);
+    _ind_to_mgr.insert({ind, *_const_mgr});
+  }
+  else
+  {
+    _nonconst_mgr->buildTensor(ind, info);
+    _ind_to_mgr.insert({ind, *_nonconst_mgr});
+  }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubtensor(
+    const model::OperandIndex &parent, const model::OperandIndex &child,
+    const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates)
+{
+  assert(_ind_to_mgr.find(child) == _ind_to_mgr.end());
+  std::shared_ptr<T_ITensor> parent_tensor = findTensorAsParent(parent);
+  assert(parent_tensor);
+  _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates);
+  _ind_to_mgr.insert({child, *_nonconst_mgr});
+}
+
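+// Look up `ind` among the non-const tensors and subtensors so the result can
+// serve as the parent of a new subtensor; returns nullptr if not registered.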
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_ITensor>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::findTensorAsParent(
+    const model::OperandIndex &ind)
+{
+  auto &tensors = _nonconst_mgr->tensors();
+  auto &subtensors = _nonconst_mgr->subtensors();
+  if (tensors.find(ind) != tensors.end())
+  {
+    // Parent is allocated as tensor
+    return tensors[ind];
+  }
+  else if (subtensors.find(ind) != subtensors.end())
+  {
+    // Parent is allocated as subtensor
+    return subtensors[ind];
+  }
+  else
+  {
+    return nullptr;
+  }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::startLifetime(
+    const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).startLifetime(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::finishLifetime(
+    const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).finishLifetime(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<backend::operand::IObject>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::wrapTensor(
+    const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<T_ITensor> AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::at(
+    const ::neurun::model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+
+  auto &tensors = _ind_to_mgr.at(ind).tensors();
+  if (tensors.find(ind) != tensors.end())
+  {
+    return tensors.at(ind);
+  }
+  else
+  {
+    return _ind_to_mgr.at(ind).subtensors().at(ind);
+  }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::constTensors(void)
+{
+  return _const_mgr->tensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::nonconstTensors(void)
+{
+  return _nonconst_mgr->tensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::nonconstSubtensors(void)
+{
+  return _nonconst_mgr->subtensors();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+std::shared_ptr<::arm_compute::IMemoryManager>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::internal_buffer_manager(void)
+{
+  return _inter_mgr->internal_buffer_manager();
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::iterate(
+    const std::function<void(const model::OperandIndex &)> &fn)
+{
+  for (const auto &it : _nonconst_mgr->tensors())
+    fn(it.first);
+
+  for (const auto &it : _nonconst_mgr->subtensors())
+    fn(it.first);
+
+  for (const auto &it : _const_mgr->tensors())
+    fn(it.first);
+}
+
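+// Free constant tensors whose ACL handle reports the memory is no longer used.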
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::tryDeallocConstants(void)
+{
+  auto &tensors = _const_mgr->tensors();
+  auto &objects = _const_mgr->objects();
+
+  for (auto it = tensors.begin(); it != tensors.end();)
+  {
+    const auto &ind = it->first;
+    auto tensor = it->second;
+    if (tensor->handle() && !tensor->handle()->is_used())
+    {
+      VERBOSE(AclTensorManager) << "Tensor #" << ind.value()
+                                << " will be deallocated as an unused constant tensor" << std::endl;
+      tensor->allocator()->free();
+      tensor.reset();
+      it = tensors.erase(it);
+      objects.erase(ind);
+    }
+    else
+    {
+      ++it;
+    }
+  }
+}
+
+} // namespace acl_common
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
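
A reference sketch of how a backend would drive this manager once the disabled
createTensorManager() factories are enabled (the wiring into TensorBuilder is
not part of this commit, so the call sequence below is an assumption):

    // Build all tensors first, then allocate grouped by constness
    tensor_mgr->buildTensor(ind, info, as_const);
    tensor_mgr->buildSubtensor(parent_ind, child_ind, shape, coordinates);
    tensor_mgr->allocateConsts();                // constants live for the whole run
    tensor_mgr->allocateNonconsts();             // non-constants (activations)
    tensor_mgr->allocateInternalBufferManager(); // ACL internal/scratch buffers
    // ... run inference ...
    tensor_mgr->tryDeallocConstants();           // drop constants ACL marked unused
    tensor_mgr->deallocateNonconsts();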
diff --git a/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h b/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
index 19d6c3d..c8913ef 100644
--- a/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
+++ b/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
@@ -23,7 +23,8 @@
 #include <arm_compute/core/Types.h>
 #include <backend/ITensorBuilder.h>
 #include "model/OperandIndexMap.h"
-#include "AclMemoryManager.h"
+#include "AclMemoryManager.h" // TODO Remove this
+#include "AclTensorManager.h"
 #include "cpp14/memory.h"
 #include <util/Utils.h>
 
@@ -111,7 +112,8 @@ private:
   model::OperandIndexMap<compiler::SubTensorInfo> _subtensor_info_map;
   model::OperandIndexMap<bool> _apply_dim_correction_map;
   model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
-  ;
+
+  // TODO Replace this with TensorManager
   std::unique_ptr<T_AclMemoryManager> _mem_mgr;
 
   // TODO Consider dividing TensorBuilder into Linear and others
diff --git a/runtimes/neurun/backend/acl_neon/MemoryManager.h b/runtimes/neurun/backend/acl_neon/MemoryManager.h
index ef7b60a..ac7c91a 100644
--- a/runtimes/neurun/backend/acl_neon/MemoryManager.h
+++ b/runtimes/neurun/backend/acl_neon/MemoryManager.h
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+// TODO Rename this to TensorManager.h
 #ifndef __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
 #define __NEURUN_BACKEND_ACL_NEON_MEMORY_MANAGER_H__
 
@@ -25,6 +26,8 @@
 
 #include <AclMemoryManager.h>
 #include <AclLinearMemoryManager.h>
+#include <AclInternalBufferManager.h>
+#include <AclTensorManager.h>
 
 #include "operand/NETensor.h"
 #include "operand/NESubTensor.h"
@@ -49,6 +52,7 @@ using LinearMemoryManager = ::neurun::backend::acl_common::AclLinearMemoryManage
     ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
     ::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator, ::arm_compute::MemoryGroup>;
 
+// TODO Remove this
 MemoryManager *createMemoryManager()
 {
   const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
@@ -64,6 +68,34 @@ MemoryManager *createMemoryManager()
   }
 }
 
+// TODO Enable this instead of createMemoryManager()
+/*
+using InternalBufferManager = ::neurun::backend::acl_common::AclInternalBufferManager<
+    ::arm_compute::MemoryManagerOnDemand, ::arm_compute::PoolManager,
+    ::arm_compute::OffsetLifetimeManager, ::arm_compute::Allocator>;
+
+using TensorManager =
+    ::neurun::backend::acl_common::AclTensorManager<::neurun::backend::acl_neon::operand::INETensor,
+                                                    operand::NETensor, operand::NESubTensor,
+                                                    ::neurun::backend::operand::Object>;
+
+TensorManager *createTensorManager()
+{
+  const std::string executor_str = util::getConfigString(util::config::EXECUTOR);
+  if (executor_str == "Linear")
+  {
+    VERBOSE(acl_neon_createTensorManager) << "AclTensorManager as Linear" << std::endl;
+    return new TensorManager(new MemoryManager(), new LinearMemoryManager(),
+                             new InternalBufferManager());
+  }
+  else
+  {
+    VERBOSE(acl_neon_createTensorManager) << "AclTensorManager" << std::endl;
+    return new TensorManager(new MemoryManager(), new MemoryManager(), new InternalBufferManager());
+  }
+}
+*/
+
 } // namespace acl_neon
 } // namespace backend
 } // namespace neurun
diff --git a/runtimes/neurun/backend/cpu/TensorBuilder.h b/runtimes/neurun/backend/cpu/TensorBuilder.h
index 275793f..5bcbb6c 100644
--- a/runtimes/neurun/backend/cpu/TensorBuilder.h
+++ b/runtimes/neurun/backend/cpu/TensorBuilder.h
@@ -23,7 +23,8 @@
 #include <backend/operand/Object.h>
 #include "operand/Tensor.h"
 #include "model/OperandIndexMap.h"
-#include "MemoryManager.h"
+#include "MemoryManager.h" // TODO Remove this
+#include "TensorManager.h"
 
 namespace neurun
 {
@@ -83,6 +84,7 @@ public:
   std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
 
 private:
+  // TODO Replace this with TensorManager
   std::unique_ptr<MemoryManager> _mem_mgr;
   model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
   model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
diff --git a/runtimes/neurun/backend/cpu/TensorManager.cc b/runtimes/neurun/backend/cpu/TensorManager.cc
new file mode 100644
index 0000000..22d874b
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/TensorManager.cc
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorManager.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+TensorManager::TensorManager() : _const_mgr{new MemoryManager()}, _nonconst_mgr{new MemoryManager()}
+{
+  // DO NOTHING
+}
+
+void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }
+
+void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }
+
+void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }
+
+void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
+
+void TensorManager::buildTensor(const model::OperandIndex &ind,
+                                const model::OperandInfo &tensor_info, bool as_const)
+{
+  assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
+  if (as_const)
+  {
+    _const_mgr->buildTensor(ind, tensor_info);
+    _ind_to_mgr.insert({ind, *_const_mgr});
+  }
+  else
+  {
+    _nonconst_mgr->buildTensor(ind, tensor_info);
+    _ind_to_mgr.insert({ind, *_nonconst_mgr});
+  }
+}
+
+void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).claimPlan(ind, size);
+}
+
+void TensorManager::releasePlan(const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  _ind_to_mgr.at(ind).releasePlan(ind);
+}
+
+std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).wrapTensor(ind);
+}
+
+std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+{
+  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
+  return _ind_to_mgr.at(ind).tensors().at(ind);
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+{
+  return _const_mgr->tensors();
+}
+
+model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+{
+  return _nonconst_mgr->tensors();
+}
+
+void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+{
+  for (const auto &it : _nonconst_mgr->tensors())
+    fn(it.first);
+
+  for (const auto &it : _const_mgr->tensors())
+    fn(it.first);
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
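
A usage sketch for the claim/release planning above (the operand index, size,
and exact planning sequence are illustrative; the expected caller is the cpu
TensorBuilder):

    TensorManager mgr;
    mgr.buildTensor(ind, info, /*as_const=*/false);
    mgr.claimPlan(ind, info.total_size()); // at the operand's first use
    // ... operations that read or write the operand ...
    mgr.releasePlan(ind);                  // at the operand's last use
    mgr.allocateNonconsts();               // allocate once planning is complete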
diff --git a/runtimes/neurun/backend/cpu/TensorManager.h b/runtimes/neurun/backend/cpu/TensorManager.h
new file mode 100644
index 0000000..c1f4a00
--- /dev/null
+++ b/runtimes/neurun/backend/cpu/TensorManager.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_CPU_TENSOR_MANAGER_H__
+#define __NEURUN_BACKEND_CPU_TENSOR_MANAGER_H__
+
+#include "backend/ITensorManager.h"
+#include "MemoryManager.h"
+#include "model/OperandIndexMap.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+class TensorManager : public backend::ITensorManager
+{
+public:
+  TensorManager();
+  virtual ~TensorManager() = default;
+
+  void allocateConsts(void) override;
+  void allocateNonconsts(void) override;
+  void deallocateConsts(void) override;
+  void deallocateNonconsts(void) override;
+
+  void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info,
+                   bool as_const);
+
+  void claimPlan(const model::OperandIndex &ind, uint32_t size);
+  void releasePlan(const model::OperandIndex &ind);
+
+  std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
+  std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+
+  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
+  model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
+
+  void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+
+private:
+  std::unique_ptr<MemoryManager> _const_mgr;
+  std::unique_ptr<MemoryManager> _nonconst_mgr;
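+  // Maps each operand index to the MemoryManager (const or non-const) that owns it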
+  model::OperandIndexMap<MemoryManager &> _ind_to_mgr;
+};
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_CPU_TENSOR_MANAGER_H__