Replace registering tensors with using TensorRegister (#9158)
author Jiseob Jang/On-Device Lab(SR)/Engineer/Samsung Electronics <jiseob.jang@samsung.com>
Mon, 25 Nov 2019 09:40:08 +0000 (18:40 +0900)
committer Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Mon, 25 Nov 2019 09:40:08 +0000 (18:40 +0900)
This commit replaces registering tensors one by one in ExecutorFactory and Linear with using each backend's TensorRegister, which registers all tensors of a subgraph at once. The compiler now only registers leftover operands that appear in no subgraph, and the frontend layout is temporarily passed as model::Layout::UNKNOWN (see the TODO comments in the diff).
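
At a glance, the new flow looks like the following minimal sketch, assuming
the BackendContext accessors used in the ExecutorFactory.cc and Linear.cc
hunks below; the wrapper name registerSubgraphTensors is hypothetical and
not part of this commit:

    #include "backend/ITensorRegister.h"

    // Each backend's ITensorRegister walks a subgraph and registers its
    // tensors (including SubTensors) with the backend's TensorBuilder, so
    // callers no longer choose between registerTensorInfo() and
    // registerSubTensorInfo() themselves.
    void registerSubgraphTensors(graph::Graph &graph) // hypothetical helper
    {
      graph.subgraphs().iterate(
          [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
            const auto backend = graph.getLowerInfo(subg_index)->backend();
            const auto tensor_register =
                graph.backend_resolver()->getBackendContext(backend)->tensor_register;
            tensor_register->registerTensors(subg, graph.getLowerInfo());
          });
    }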

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
runtime/neurun/backend/acl_common/TemplTensorBuilder.h
runtime/neurun/backend/cpu/TensorBuilder.cc
runtime/neurun/backend/cpu/TensorBuilder.h
runtime/neurun/core/src/compiler/ExecutorFactory.cc
runtime/neurun/core/src/compiler/Linear.cc

diff --git a/runtime/neurun/backend/acl_common/TemplTensorBuilder.h b/runtime/neurun/backend/acl_common/TemplTensorBuilder.h
index 22a3e9a..41faecd 100644
@@ -156,7 +156,7 @@ TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::TemplTensorBuild
 
 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
 void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
-    const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout frontend_layout,
+    const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout,
     model::Layout backend_layout, bool as_const)
 {
   assert(_tensor_mgr->constTensors().size() == 0);
@@ -164,7 +164,7 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTen
 
   _tensor_info_map.emplace(ind, info);
   _apply_dim_correction_map.emplace(ind, true);
-  _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
+  _tensor_layouts_map.insert({ind, std::make_pair(model::Layout::UNKNOWN, backend_layout)});
   if (as_const)
     _constants.append(ind);
 
@@ -332,10 +332,9 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor
     const auto &info = entry.second;
     // NOTE SubTensor's layout must be the same with layout of parent tensor
     const auto &root_parent = findRootParent(ind);
-    const auto &frontend_layout = _tensor_layouts_map[root_parent].first;
     const auto &backend_layout = _tensor_layouts_map[root_parent].second;
-    auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), frontend_layout, backend_layout,
-                                    _apply_dim_correction_map[ind]);
+    auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), model::Layout::UNKNOWN,
+                                    backend_layout, _apply_dim_correction_map[ind]);
     _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), _constants.contains(ind));
   }
 }
@@ -396,13 +395,12 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubten
 
       // NOTE SubTensor's layout must be the same with layout of parent tensor
       const auto &root_parent = findRootParent(parent);
-      const auto &frontend_layout = _tensor_layouts_map[root_parent].first;
       const auto &backend_layout = _tensor_layouts_map[root_parent].second;
 
-      auto shape = asTensorShape(info.shape(), frontend_layout, backend_layout,
+      auto shape = asTensorShape(info.shape(), model::Layout::UNKNOWN, backend_layout,
                                  _apply_dim_correction_map[current]);
       ::arm_compute::Coordinates coordinates =
-          asTensorCoordinate(info.offset(), frontend_layout, backend_layout);
+          asTensorCoordinate(info.offset(), model::Layout::UNKNOWN, backend_layout);
       _tensor_mgr->buildSubtensor(parent, current, shape, coordinates, info.shape().rank(), true);
       stack.pop();
     }
diff --git a/runtime/neurun/backend/cpu/TensorBuilder.cc b/runtime/neurun/backend/cpu/TensorBuilder.cc
index 9c185fd..991ce99 100644
@@ -59,12 +59,10 @@ TensorBuilder::TensorBuilder() : _tensor_mgr{new TensorManager()}
 }
 
 void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
-                                       const model::OperandInfo &info,
-                                       model::Layout frontend_layout, model::Layout backend_layout,
+                                       const model::OperandInfo &info, model::Layout, model::Layout,
                                        bool as_const)
 {
   _tensor_info_map.emplace(ind, info);
-  _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
 
   if (as_const)
     _constants.append(ind);
@@ -80,7 +78,7 @@ void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
 void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
 {
   assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
-  const auto tensor_info = asTensorInfo(_tensor_info_map.at(ind), _tensor_layouts_map[ind].first);
+  const auto tensor_info = asTensorInfo(_tensor_info_map.at(ind), model::Layout::UNKNOWN);
   const auto size = tensor_info.total_size();
   _tensor_mgr->buildTensor(ind, tensor_info, _constants.contains(ind));
   _tensor_mgr->claimPlan(ind, size);
diff --git a/runtime/neurun/backend/cpu/TensorBuilder.h b/runtime/neurun/backend/cpu/TensorBuilder.h
index f749c1d..22de58b 100644
@@ -82,7 +82,6 @@ public:
 private:
   std::unique_ptr<TensorManager> _tensor_mgr;
   model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
-  model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
   model::OperandIndexSequence _constants;
 };
 
diff --git a/runtime/neurun/core/src/compiler/ExecutorFactory.cc b/runtime/neurun/core/src/compiler/ExecutorFactory.cc
index 4cc6836..9b54ee3 100644
@@ -30,6 +30,7 @@
 #include "backend/IConstantInitializer.h"
 #include "backend/IKernelGenerator.h"
 #include "backend/IShapeFixer.h"
+#include "backend/ITensorRegister.h"
 #include "cpp14/memory.h"
 
 namespace neurun
@@ -207,53 +208,42 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo
     subg.accept(subtensor_analyzer);
   });
 
-  // Fix shapes
+  // Fix shapes and register tensors
   graph.subgraphs().iterate(
       [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
         auto backend = graph.getLowerInfo(subg_index)->backend();
         auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer;
         shape_fixer->setLowerInfoMap(graph.getLowerInfo());
         shape_fixer->fix(subg);
+        const auto tensor_register =
+            graph.backend_resolver()->getBackendContext(backend)->tensor_register;
+        tensor_register->registerTensors(subg, graph.getLowerInfo());
       });
 
   graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
     const auto lower_info = graph.getLowerInfo(ind);
     for (auto factor : lower_info->def_factors())
     {
-      bool isSubTensor = false;
       auto backend = factor.backend();
       auto tensor_builder = graph.backend_resolver()->getBackendContext(backend)->tensor_builder;
 
-      if (backend->config()->SupportSubTensorAlloc())
+      if (!tensor_builder->isRegistered(ind))
       {
-        const auto parentInfo = obj.parent_info();
-        if (parentInfo != nullptr)
-        {
-          isSubTensor = true;
-        }
-      }
+        // These tensors do not appear in any subgraph (no use and no def),
+        // so they cannot be SubTensors
+        assert(obj.parent_info() == nullptr);
 
-      if (isSubTensor)
-      {
-        const compiler::SubTensorInfo info(obj);
-        tensor_builder->registerSubTensorInfo(ind, info);
-      }
-      else
-      {
         const auto info = obj.info();
-        // NOTE This assumes an operand can have one layout, and only Permutate can have
-        // different layouts for input and output
-        const auto &def = *obj.getDef().list().cbegin();
-        auto frontend_layout =
-            graph.subgraphs().at(graph.subgraphs().getOperation(def)).getLayout();
-        if (frontend_layout == model::Layout::UNKNOWN)
-        {
-          const auto &use = *obj.getUses().list().cbegin();
-          frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
-        }
+        // TODO Get layout of this operand on frontend
+        const auto frontend_layout = model::Layout::UNKNOWN;
         const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
         tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout,
                                            obj.isConstant());
+      }
+
+      // Not a SubTensor?
+      if (!backend->config()->SupportSubTensorAlloc() || obj.parent_info() == nullptr)
+      {
         // To make this never be deallocated, this is a workaround to use static memory planner
         tensor_builder->notifyFirstUse(ind);
       }
diff --git a/runtime/neurun/core/src/compiler/Linear.cc b/runtime/neurun/core/src/compiler/Linear.cc
index 126afeb..3eb547d 100644
@@ -21,6 +21,7 @@
 #include "backend/IShapeFixer.h"
 #include "backend/IConfig.h"
 #include "backend/IConstantInitializer.h"
+#include "backend/ITensorRegister.h"
 #include "backend/Backend.h"
 #include "compiler/SubTensorInfo.h"
 
@@ -157,6 +158,13 @@ void Linear::planTensors()
   model::OperandIndexMap<uint32_t> def_map;
   model::OperandIndexSequence constants;
 
+  iterate([&](const neurun::compiler::Linear::Element &element) {
+    const auto backend = element.lower_info->backend();
+    const auto tensor_register =
+        _graph.backend_resolver()->getBackendContext(backend)->tensor_register;
+    tensor_register->registerTensors(*element.subgraph, _graph.getLowerInfo());
+  });
+
   // Prepare scanning
   _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
     const auto lower_info = _graph.getLowerInfo(ind);
@@ -179,46 +187,20 @@ void Linear::planTensors()
       constants.append(ind);
     }
 
-    model::Subgraphs &subgraphs = _graph.subgraphs();
     for (auto factor : lower_info->def_factors())
     {
-      bool isSubTensor = false;
       auto backend = factor.backend();
       auto tensor_builder = _graph.backend_resolver()->getBackendContext(backend)->tensor_builder;
 
-      if (backend->config()->SupportSubTensorAlloc())
+      if (!tensor_builder->isRegistered(ind))
       {
-        const auto parentInfo = obj.parent_info();
-        if (parentInfo != nullptr)
-        {
-          isSubTensor = true;
-        }
-      }
+        // These tensors do not appear in any subgraph (no use and no def),
+        // so they cannot be SubTensors
+        assert(obj.parent_info() == nullptr);
 
-      if (isSubTensor)
-      {
-        const compiler::SubTensorInfo info(obj);
-        tensor_builder->registerSubTensorInfo(ind, info);
-      }
-      else
-      {
         const auto info = obj.info();
-
-        // NOTE This assumes an operand can have one layout, and only Permutate can have
-        // different layouts for input and output
-        const auto &def = *obj.getDef().list().cbegin();
+        // TODO Get layout of this operand on frontend
         auto frontend_layout = model::Layout::UNKNOWN;
-
-        if (subgraphs.containsOperation(def))
-        {
-          frontend_layout = subgraphs.at(subgraphs.getOperation(def)).getLayout();
-          if (frontend_layout == model::Layout::UNKNOWN)
-          {
-            const auto &use = *obj.getUses().list().cbegin();
-            frontend_layout = subgraphs.at(subgraphs.getOperation(use)).getLayout();
-          }
-        }
-
         const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
         tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const);
       }