[neurun] Appends APIs to ITensorBuilder (#6973)
author Yongseop Kim/On-Device Lab(SR)/Engineer/Samsung Electronics <yons.kim@samsung.com>
Tue, 3 Sep 2019 05:39:38 +0000 (14:39 +0900)
committer Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Tue, 3 Sep 2019 05:39:38 +0000 (14:39 +0900)
* [neurun] Appends APIs to ITensorBuilder

Appends APIs to ITensorBuilder for deallocating constant tensors when they
are unused, and for separating constant tensors from non-constant tensors

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
* Change afterFunctionPrepare to postFunctionPrepare

* Fix default parameter of registerTensorInfo
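
The call order these hooks are meant to enable: register every operand
(now tagged with its constness), allocate the constant pool, allocate the
rest, prepare the kernels, then release constants that are no longer
needed. A minimal sketch of that lifecycle (hypothetical driver code, not
part of this patch; real callers still use allocate() for now):

  #include "backend/ITensorBuilder.h"

  // Hypothetical driver showing the intended order of the new hooks.
  void buildTensors(neurun::backend::ITensorBuilder &tensor_builder)
  {
    // registerTensorInfo(ind, info, frontend_layout, backend_layout, as_const)
    // has been called per operand, tagging constants.
    tensor_builder.prepare();

    // Allocate constants and non-constants separately so the constant
    // pool can later be freed on its own.
    tensor_builder.allocateConsts();
    tensor_builder.allocateNonconsts();

    // ... kernel generation / function prepare() happens here ...

    // Constants consumed during kernel preparation can now be released.
    tensor_builder.postFunctionPrepare();
    tensor_builder.finalize();
  }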

runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
runtimes/neurun/backend/cpu/TensorBuilder.cc
runtimes/neurun/backend/cpu/TensorBuilder.h
runtimes/neurun/core/include/backend/ITensorBuilder.h
runtimes/neurun/core/src/compiler/ExecutorFactory.cc
runtimes/neurun/core/src/compiler/Linear.cc

index 4c723f9..6d7d41c 100644 (file)
@@ -55,7 +55,8 @@ public:
    * @param[in] layout Tensor data layout
    */
   void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
-                          model::Layout frontend_layout, model::Layout backend_layout) override;
+                          model::Layout frontend_layout, model::Layout backend_layout,
+                          bool as_const) override;
   /**
    * @brief     Register subtensor information to allocate on ACL-CL backend
    * @param[in] ind   Operand index
@@ -70,6 +71,12 @@ public:
   void prepare(void) override;
   void allocate(void) override;
 
+  // TODO Fill these
+  void allocateConsts() override {}
+  void allocateNonconsts() override {}
+  void postFunctionPrepare() override {}
+  void finalize() override {}
+
   std::shared_ptr<::neurun::backend::operand::ITensor>
   tensorAt(const model::OperandIndex &ind) override;
   std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
@@ -150,8 +157,9 @@ TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::TemplTensorBuild
 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
 void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
     const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout frontend_layout,
-    model::Layout backend_layout)
+    model::Layout backend_layout, bool /*as_const*/)
 {
+  // TODO Add handling for const tensors (use the as_const flag)
   assert(_mem_mgr->tensors().size() == 0);
 
   _tensor_info_map.emplace(ind, info);
index 09ba9c1..87e81bd 100644 (file)
@@ -60,8 +60,10 @@ TensorBuilder::TensorBuilder() : _mem_mgr{new MemoryManager()}
 
 void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
                                        const model::OperandInfo &info,
-                                       model::Layout frontend_layout, model::Layout backend_layout)
+                                       model::Layout frontend_layout, model::Layout backend_layout,
+                                       bool /*as_const*/)
 {
+  // TODO Add handling for const tensors (use the as_const flag)
   _tensor_info_map.emplace(ind, info);
   _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
 }
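
One way the TODO above could be filled in is to record the flag rather
than ignore it. A minimal sketch, assuming a new member (not in this
patch) that would later let allocateConsts()/allocateNonconsts()
partition the operands:

  // Hypothetical follow-up: remember which operands are constants.
  void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
                                         const model::OperandInfo &info,
                                         model::Layout frontend_layout,
                                         model::Layout backend_layout, bool as_const)
  {
    _tensor_info_map.emplace(ind, info);
    _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
    if (as_const)
      _constants.push_back(ind); // assumed member: std::vector<model::OperandIndex> _constants;
  }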
index 1afe363..275793f 100644 (file)
@@ -44,7 +44,8 @@ public:
    * @param[in] layout Operand data layout
    */
   void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
-                          model::Layout frontend_layout, model::Layout backend_layout) override;
+                          model::Layout frontend_layout, model::Layout backend_layout,
+                          bool as_const) override;
   /**
    * @brief     Register subtensor information to allocate on CPU backend
    * @param[in] ind   Operand index
@@ -61,6 +62,12 @@ public:
 
   void allocate(void) override;
 
+  // TODO Fill these
+  void allocateConsts() override {}
+  void allocateNonconsts() override {}
+  void postFunctionPrepare() override {}
+  void finalize() override {}
+
   std::shared_ptr<::neurun::backend::operand::ITensor>
   tensorAt(const model::OperandIndex &ind) override;
 
index ef126e0..3735d54 100644 (file)
@@ -44,7 +44,8 @@ struct ITensorBuilder
    * @brief Register tensor information to allocate on backend
    */
   virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &,
-                                  model::Layout frontend_layout, model::Layout backend_layout) = 0;
+                                  model::Layout frontend_layout, model::Layout backend_layout,
+                                  bool as_const) = 0;
   /**
    * @brief     Register subtensor information to allocate on backend
    */
@@ -55,8 +56,14 @@ struct ITensorBuilder
   virtual void notifyLastUse(const model::OperandIndex &) = 0;
 
   virtual void prepare(void) = 0;
+  // TODO Remove after all of the appended APIs land
   virtual void allocate(void) = 0;
 
+  virtual void allocateConsts() = 0;
+  virtual void allocateNonconsts() = 0;
+  virtual void postFunctionPrepare() = 0;
+  virtual void finalize() = 0;
+
   virtual std::shared_ptr<::neurun::backend::operand::ITensor>
   tensorAt(const model::OperandIndex &ind) = 0;
   virtual std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) = 0;
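
The interface split suggests backends keeping two memory pools, one for
constants and one for everything else, so the constant pool can be torn
down on its own once kernels have consumed the data. A sketch of one
conforming implementation (the two-manager layout and the deallocate()
call are assumptions, not part of this patch):

  #include <memory>

  // Hypothetical backend TensorBuilder honoring the new contract.
  class TensorBuilder : public ITensorBuilder
  {
  public:
    void allocateConsts() override { _const_mgr->allocate(); }
    void allocateNonconsts() override { _nonconst_mgr->allocate(); }
    void postFunctionPrepare() override
    {
      // Kernels are prepared and constant data has been consumed,
      // so the whole constant pool can be released here.
      _const_mgr->deallocate(); // assumed MemoryManager API
    }
    void finalize() override { /* release remaining build-time resources */ }
    // ... other ITensorBuilder overrides omitted ...

  private:
    std::unique_ptr<MemoryManager> _const_mgr;    // assumed members
    std::unique_ptr<MemoryManager> _nonconst_mgr;
  };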
index a619eaf..8c562c6 100644 (file)
@@ -227,7 +227,7 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo
           frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
         }
         const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
-        tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout);
+        tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, false);
         // Workaround: use the static memory planner so that this is never deallocated
         tensor_builder->notifyFirstUse(ind);
       }
index 2182c49..059d0d2 100644 (file)
@@ -221,7 +221,7 @@ void Linear::planTensors()
           frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
         }
         const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
-        tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout);
+        tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, false);
       }
 
       tensor_builder_map[ind] = tensor_builder;
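
Both call sites hard-code false for now, keeping behavior unchanged. Once
backends honor the flag, the callers could derive it from the operand
itself; a hedged example (assuming an isConstant() query on the operand):

  // Hypothetical follow-up at the call sites:
  const auto &obj = graph.operands().at(ind);
  tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout,
                                     obj.isConstant());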