[onert] Do not use TensorBuilder in Executors (#4023)
author: Hanjoung Lee <hanjoung.lee@samsung.com>
Fri, 28 Aug 2020 05:39:07 +0000 (14:39 +0900)
committer: GitHub <noreply@github.com>
Fri, 28 Aug 2020 05:39:07 +0000 (14:39 +0900)
Do not use TensorBuilder in Executors, as what executors need is just
TensorRegistries and TensorManagers.

This is part of a refactoring effort (distinguishing TensorBuilder from
TensorRegistry).

ONE-DCO-1.0-Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>

runtime/onert/core/src/compiler/ExecutorFactory.cc
runtime/onert/core/src/compiler/ExecutorFactory.h
runtime/onert/core/src/compiler/TensorRegistries.h
runtime/onert/core/src/exec/DataflowExecutor.cc
runtime/onert/core/src/exec/DataflowExecutor.h
runtime/onert/core/src/exec/ExecutorBase.cc
runtime/onert/core/src/exec/ExecutorBase.h
runtime/onert/core/src/exec/LinearExecutor.h
runtime/onert/core/src/exec/ParallelExecutor.cc
runtime/onert/core/src/exec/ParallelExecutor.h

index d826c8e..074545a 100644 (file)
@@ -25,6 +25,7 @@
 #include "compiler/ExecutionBuilder.h"
 #include "exec/ExecTime.h"
 #include "compiler/Linear.h"
+#include "compiler/TensorBuilders.h"
 #include "backend/IConstantInitializer.h"
 #include "backend/IKernelGenerator.h"
 #include "backend/IOptimizer.h"
@@ -64,6 +65,23 @@ private:
   std::shared_ptr<backend::IConfig> _config;
 };
 
+// TODO Think of a better way to manage TensorManagers
+backend::TensorManagerSet createTensorManagerSet(const compiler::TensorBuilders &tensor_builders)
+{
+  backend::TensorManagerSet tensor_mgrs;
+  for (auto &tensor_builder : tensor_builders)
+  {
+    auto s_tensor_manager = tensor_builder->releaseStaticTensorManager();
+    if (s_tensor_manager != nullptr)
+      tensor_mgrs.insert(std::move(s_tensor_manager));
+
+    auto d_tensor_manager = tensor_builder->releaseDynamicTensorManager();
+    if (d_tensor_manager != nullptr)
+      tensor_mgrs.insert(std::move(d_tensor_manager));
+  }
+  return tensor_mgrs;
+}
+
 } // namespace
 } // namespace onert
 
@@ -352,9 +370,11 @@ ExecutorFactory::createLinearExecutor(std::unique_ptr<compiler::LoweredGraph> lo
     });
   }
 
-  auto exec =
-      new exec::LinearExecutor{std::move(lowered_graph), input_tensors,       output_tensors,
-                               tensor_builders,          std::move(code_map), order};
+  TensorRegistries tensor_regs{lowered_graph->backend_contexts(), true};
+  backend::TensorManagerSet tensor_mgrs = createTensorManagerSet(tensor_builders);
+  auto exec = new exec::LinearExecutor{
+      std::move(lowered_graph), input_tensors,       output_tensors, tensor_regs,
+      std::move(tensor_mgrs),   std::move(code_map), order};
 
   if (!options.trace_filepath.empty())
   {
@@ -457,17 +477,21 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(
     });
   }
 
+  TensorRegistries tensor_regs{lowered_graph->backend_contexts(), true};
+  backend::TensorManagerSet tensor_mgrs = createTensorManagerSet(tensor_builders);
+
   exec::ExecutorBase *exec = nullptr;
   if (parallel)
   {
-    exec = new exec::ParallelExecutor{std::move(lowered_graph), input_tensors, output_tensors,
-                                      tensor_builders, std::move(code_map)};
+    exec = new exec::ParallelExecutor{std::move(lowered_graph), input_tensors,
+                                      output_tensors,           tensor_regs,
+                                      std::move(tensor_mgrs),   std::move(code_map)};
   }
   else
   {
-    auto dataflow_exec =
-        new exec::DataflowExecutor{std::move(lowered_graph), input_tensors, output_tensors,
-                                   tensor_builders, std::move(code_map)};
+    auto dataflow_exec = new exec::DataflowExecutor{std::move(lowered_graph), input_tensors,
+                                                    output_tensors,           tensor_regs,
+                                                    std::move(tensor_mgrs),   std::move(code_map)};
     if (options.he_profiling_mode)
     {
       std::vector<const backend::Backend *> backends;
index 26a5fee..b8893c0 100644 (file)
@@ -22,7 +22,6 @@
 #include "backend/ITensor.h"
 #include "exec/IExecutor.h"
 #include "compiler/LoweredGraph.h"
-#include "TensorBuilders.h"
 #include "TensorRegistries.h"
 
 namespace onert
index 16cd35d..811d086 100644 (file)
@@ -69,7 +69,7 @@ public:
     return _cf_tensor_reg;
   }
 
-  std::shared_ptr<backend::ITensor> getITensor(ir::OperandIndex ind)
+  std::shared_ptr<backend::ITensor> getITensor(ir::OperandIndex ind) const
   {
     for (auto &tensor_reg : _tensor_regs)
     {
index 77c9667..a69ae9c 100644 (file)
@@ -81,8 +81,10 @@ DataflowExecutor::DataflowExecutor(
     std::unique_ptr<compiler::LoweredGraph> lowered_graph,
     const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
     const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-    const compiler::TensorBuilders &tensor_builders, compiler::CodeMap &&code_map)
-    : ExecutorBase{std::move(lowered_graph), input_tensors, output_tensors, tensor_builders},
+    const compiler::TensorRegistries &tensor_regs, backend::TensorManagerSet &&tensor_mgrs,
+    compiler::CodeMap &&code_map)
+    : ExecutorBase{std::move(lowered_graph), input_tensors, output_tensors, tensor_regs,
+                   std::move(tensor_mgrs)},
       _code_map{std::move(code_map)}
 {
   VERBOSE(DataflowExecutor) << "Constructing Dataflow Executor" << std::endl;
index d414ce4..8d60e3e 100644 (file)
@@ -52,7 +52,8 @@ public:
   DataflowExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
                    const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
                    const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-                   const compiler::TensorBuilders &tensor_builders, compiler::CodeMap &&code_map);
+                   const compiler::TensorRegistries &tensor_regs,
+                   backend::TensorManagerSet &&tensor_mgrs, compiler::CodeMap &&code_map);
 
   void executeImpl() override;
 
index 0bd8cbb..f835a96 100644 (file)
@@ -29,9 +29,11 @@ namespace exec
 ExecutorBase::ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_graph,
                            const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
                            const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-                           const compiler::TensorBuilders &tensor_builders)
+                           const compiler::TensorRegistries &tensor_regs,
+                           backend::TensorManagerSet &&tensor_mgrs)
     : _lowered_graph{std::move(lowered_graph)}, _graph{_lowered_graph->graph()},
-      _input_tensors{input_tensors}, _output_tensors{output_tensors}, _mutex()
+      _input_tensors{input_tensors}, _output_tensors{output_tensors},
+      _tensor_mgrs{std::move(tensor_mgrs)}, _mutex()
 {
   // TODO Fix the way of knowing whether it is primary or not
   bool primary_executor = !(_input_tensors.empty() && _output_tensors.empty());
@@ -41,20 +43,10 @@ ExecutorBase::ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_gra
       std::vector<std::shared_ptr<backend::ITensor>> list;
       for (auto ind : ind_seq)
       {
-        std::shared_ptr<backend::ITensor> tensor;
-        for (auto &tensor_builder : tensor_builders)
-        {
-          auto tensor_registry = tensor_builder->tensorRegistry();
-          assert(tensor_registry);
-          tensor = tensor_registry->getNativeITensor(ind);
-          if (tensor != nullptr)
-          {
-            DynAllocInfo dyn_alloc_info{ind};
-            _input_to_dyn_alloc_info.emplace(tensor, dyn_alloc_info);
-            break;
-          }
-        }
+        std::shared_ptr<backend::ITensor> tensor = tensor_regs.getITensor(ind);
         assert(tensor != nullptr);
+        DynAllocInfo dyn_alloc_info{ind};
+        _input_to_dyn_alloc_info.emplace(tensor, dyn_alloc_info);
         list.push_back(tensor);
       }
       return list;
@@ -63,20 +55,10 @@ ExecutorBase::ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_gra
       std::vector<std::shared_ptr<backend::ITensor>> list;
       for (auto ind : ind_seq)
       {
-        std::shared_ptr<backend::ITensor> tensor;
-        for (auto &tensor_builder : tensor_builders)
-        {
-          auto tensor_registry = tensor_builder->tensorRegistry();
-          assert(tensor_registry);
-          tensor = tensor_registry->getNativeITensor(ind);
-          if (tensor != nullptr)
-          {
-            DynAllocInfo dyn_alloc_info{ind};
-            _output_to_dyn_alloc_info.emplace(tensor, dyn_alloc_info);
-            break;
-          }
-        }
+        std::shared_ptr<backend::ITensor> tensor = tensor_regs.getITensor(ind);
         assert(tensor != nullptr);
+        DynAllocInfo dyn_alloc_info{ind};
+        _output_to_dyn_alloc_info.emplace(tensor, dyn_alloc_info);
         list.push_back(tensor);
       }
       return list;
@@ -86,10 +68,6 @@ ExecutorBase::ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_gra
   }
   else
   {
-    // If primary graph, all the inputs and outputs belong to controlflow backend
-    auto cf_dyn_tensor_builder = tensor_builders.getControlflowTensorBuilder();
-    assert(cf_dyn_tensor_builder);
-
     assert(input_tensors.size() == _graph.getInputs().size());
     assert(output_tensors.size() == _graph.getOutputs().size());
     for (uint32_t i = 0; i < input_tensors.size(); i++)
@@ -107,18 +85,6 @@ ExecutorBase::ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_gra
       _output_to_dyn_alloc_info.emplace(tensor, dyn_alloc_info);
     }
   }
-
-  // Prepare each TensorManager on each backend
-  for (auto &tensor_builder : tensor_builders)
-  {
-    auto s_tensor_manager = tensor_builder->releaseStaticTensorManager();
-    if (s_tensor_manager != nullptr)
-      _tensor_mgrs.insert(std::move(s_tensor_manager));
-
-    auto d_tensor_manager = tensor_builder->releaseDynamicTensorManager();
-    if (d_tensor_manager != nullptr)
-      _tensor_mgrs.insert(std::move(d_tensor_manager));
-  }
 }
 
 void ExecutorBase::execute(const std::vector<std::shared_ptr<backend::ITensor>> &src_tensors,
index 0d0f80b..a13be7d 100644 (file)
@@ -33,9 +33,8 @@
 #include "exec/IFunction.h"
 #include "backend/IDynamicTensorManager.h"
 #include "backend/ITensorManager.h"
-#include "backend/ITensorBuilder.h"
 #include "exec/ExecutionObservee.h"
-#include "compiler/TensorBuilders.h"
+#include "compiler/TensorRegistries.h"
 #include <list>
 
 namespace onert
@@ -54,7 +53,8 @@ public:
   ExecutorBase(std::unique_ptr<compiler::LoweredGraph> &&lowered_graph,
                const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
                const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-               const compiler::TensorBuilders &tensor_builders);
+               const compiler::TensorRegistries &tensor_regs,
+               backend::TensorManagerSet &&tensor_mgrs);
 
   virtual ~ExecutorBase() = default;
 
index 5da178a..c224d3f 100644 (file)
@@ -49,9 +49,11 @@ public:
   LinearExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
                  const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
                  const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-                 const compiler::TensorBuilders &tensor_builders, compiler::CodeMap &&code_map,
+                 const compiler::TensorRegistries &tensor_regs,
+                 backend::TensorManagerSet &&tensor_mgrs, compiler::CodeMap &&code_map,
                  const std::vector<ir::OpSequenceIndex> &order)
-      : ExecutorBase{std::move(lowered_graph), input_tensors, output_tensors, tensor_builders}
+      : ExecutorBase{std::move(lowered_graph), input_tensors, output_tensors, tensor_regs,
+                     std::move(tensor_mgrs)}
   {
     for (auto index : order)
     {
index 3cce7eb..ab234aa 100644 (file)
@@ -63,9 +63,10 @@ ParallelExecutor::ParallelExecutor(
     std::unique_ptr<compiler::LoweredGraph> lowered_graph,
     const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
     const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-    const compiler::TensorBuilders &tensor_builders, compiler::CodeMap &&code_map)
-    : DataflowExecutor{std::move(lowered_graph), input_tensors, output_tensors, tensor_builders,
-                       std::move(code_map)}
+    const compiler::TensorRegistries &tensor_regs, backend::TensorManagerSet &&tensor_mgrs,
+    compiler::CodeMap &&code_map)
+    : DataflowExecutor{std::move(lowered_graph), input_tensors,      output_tensors, tensor_regs,
+                       std::move(tensor_mgrs),   std::move(code_map)}
 {
   VERBOSE(ParallelExecutor) << "Constructing Parallel Executor" << std::endl;
 }
index 223de0b..929edfc 100644 (file)
@@ -53,7 +53,8 @@ public:
   ParallelExecutor(std::unique_ptr<compiler::LoweredGraph> lowered_graph,
                    const std::vector<std::shared_ptr<backend::ITensor>> &input_tensors,
                    const std::vector<std::shared_ptr<backend::ITensor>> &output_tensors,
-                   const compiler::TensorBuilders &tensor_builders, compiler::CodeMap &&code_map);
+                   const compiler::TensorRegistries &tensor_regs,
+                   backend::TensorManagerSet &&tensor_mgrs, compiler::CodeMap &&code_map);
 
   void executeImpl() override;