Introduce the layout into TensorBuilder (#5233)
author Jiseob Jang/On-Device Lab(SR)/Engineer/Samsung Electronics <jiseob.jang@samsung.com>
Mon, 27 May 2019 07:35:45 +0000 (16:35 +0900)
committer Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Mon, 27 May 2019 07:35:45 +0000 (16:35 +0900)
* Introduce the layout into TensorBuilder

This commit introduces the layout into TensorBuilder.
  - Introduce a layout parameter into TensorBuilder::registerTensorInfo()
  - Remove the hard-coded layout selection (ACL_DEFAULT_LAYOUT parsing) from TensorBuilder

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
* Update TensorBuilder.h
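
For reference, a minimal sketch of the call pattern under the new interface; the identifiers (lower_info, registerTensorInfo, graph::operand::Layout) come from the diff below, while the surrounding wiring is illustrative:

    // The compiler now decides the layout per operand (via its LowerInfo) and
    // hands it to the backend, instead of the backend parsing
    // ACL_DEFAULT_LAYOUT from the configuration on its own.
    const auto layout = lower_info->layout(); // e.g. graph::operand::Layout::NHWC
    tensor_builder->registerTensorInfo(ind, obj.info(), layout);
    tensor_builder->notifyFirstUse(ind);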

runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
runtimes/neurun/backend/cpu/TensorBuilder.cc
runtimes/neurun/backend/cpu/TensorBuilder.h
runtimes/neurun/core/include/backend/ITensorBuilder.h
runtimes/neurun/core/include/graph/operand/LowerInfo.h
runtimes/neurun/core/src/compiler/ExecutorFactory.cc
runtimes/neurun/core/src/linear/Linear.cc

diff --git a/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h b/runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
index 0228328..71278cd 100644
@@ -38,10 +38,12 @@ public:
 
   /**
    * @brief     Register tensor information to allocate on ACL-CL backend
-   * @param[in] ind   Operand index
-   * @param[in] info  Tensor information
+   * @param[in] ind    Operand index
+   * @param[in] info   Tensor information
+   * @param[in] layout Tensor data layout
    */
-  void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info) override;
+  void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+                          const graph::operand::Layout &layout) override;
   /**
    * @brief     Register subtensor information to allocate on ACL-CL backend
    * @param[in] ind   Operand index
@@ -79,6 +81,7 @@ private:
   model::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
   model::OperandIndexMap<std::shared_ptr<T_SubTensor>> _subtensors;
   model::OperandIndexMap<std::shared_ptr<T_Object>> _objects;
+  graph::operand::Layout _layout;
 };
 
 } // namespace acl_common
@@ -107,12 +110,14 @@ TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::TemplTensorBuild
 
 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
 void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
-    const model::OperandIndex &ind, const model::OperandInfo &info)
+    const model::OperandIndex &ind, const model::OperandInfo &info,
+    const graph::operand::Layout &layout)
 {
   assert(_tensors.size() == 0);
 
   _tensor_info_map.insert({ind, info});
   _apply_dim_correction_map.insert({ind, true});
+  _layout = layout;
 }
 
 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
@@ -149,30 +154,12 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::prepare(voi
   //      Allocated subtensor will be mapped to _subtensors instead of _tensors
   assert(_subtensors.size() == 0);
 
-  const std::string layout_str =
-      config::ConfigManager::instance().get<std::string>(config::ACL_DEFAULT_LAYOUT);
-  ::neurun::graph::operand::Layout default_layout;
-  if (layout_str == "NHWC")
-  {
-    default_layout = ::neurun::graph::operand::Layout::NHWC;
-  }
-  else if (layout_str == "NCHW")
-  {
-    default_layout = ::neurun::graph::operand::Layout::NCHW;
-  }
-  else
-  {
-    throw std::runtime_error("Invalid ACL_DEFAULT_LAYOUT settings");
-  }
-
   for (auto &entry : _tensor_info_map)
   {
     auto ind = entry.first;
     const auto &info = entry.second;
-    // The default data_layout of tensors depends on data_layout of front-end
-    // TODO Change to set data_layout for each front-end
     auto tensor_info =
-        asTensorInfo(info.shape(), info.typeInfo(), default_layout, _apply_dim_correction_map[ind]);
+        asTensorInfo(info.shape(), info.typeInfo(), _layout, _apply_dim_correction_map[ind]);
     auto tensor = std::make_shared<T_Tensor>(tensor_info);
     _tensors[ind] = tensor;
   }
@@ -233,7 +220,7 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::prepare(voi
       assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
       assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
       // TODO Change to set data_layout for each front-end
-      auto shape = asTensorShape(info.shape(), default_layout, _apply_dim_correction_map[current]);
+      auto shape = asTensorShape(info.shape(), _layout, _apply_dim_correction_map[current]);
 
       // Only support axis: 3 (channel)
       ::arm_compute::Coordinates coordinates;
diff --git a/runtimes/neurun/backend/cpu/TensorBuilder.cc b/runtimes/neurun/backend/cpu/TensorBuilder.cc
index 1ecaae1..237041c 100644
@@ -41,9 +41,11 @@ IMemoryPlanner *TensorBuilder::createMemoryPlanner()
 }
 
 void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
-                                       const model::OperandInfo &info)
+                                       const model::OperandInfo &info,
+                                       const graph::operand::Layout &)
 {
   _tensor_info_map.insert({ind, info});
+  // TODO set the layout
 }
 
 void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
diff --git a/runtimes/neurun/backend/cpu/TensorBuilder.h b/runtimes/neurun/backend/cpu/TensorBuilder.h
index 68f1c6e..4c219b9 100644
@@ -42,10 +42,12 @@ public:
 
   /**
    * @brief     Register tensor information to allocate on CPU backend
-   * @param[in] ind   Operand index
-   * @param[in] info  Operand information
+   * @param[in] ind    Operand index
+   * @param[in] info   Operand information
+   * @param[in] layout Operand data layout
    */
-  void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info) override;
+  void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+                          const graph::operand::Layout &layout) override;
   /**
    * @brief     Register subtensor information to allocate on CPU backend
    * @param[in] ind   Operand index
diff --git a/runtimes/neurun/core/include/backend/ITensorBuilder.h b/runtimes/neurun/core/include/backend/ITensorBuilder.h
index 1763b02..4bddca1 100644
@@ -24,6 +24,7 @@
 #include "operand/IObject.h"
 #include "compiler/SubTensorInfo.h"
 #include "operand/ITensor.h"
+#include "graph/operand/Layout.h"
 
 namespace neurun
 {
@@ -40,7 +41,8 @@ struct ITensorBuilder
   /**
    * @brief Register tensor information to allocate on backend
    */
-  virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &) = 0;
+  virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &,
+                                  const graph::operand::Layout &) = 0;
   /**
    * @brief     Register subtensor information to allocate on backend
    */
diff --git a/runtimes/neurun/core/include/graph/operand/LowerInfo.h b/runtimes/neurun/core/include/graph/operand/LowerInfo.h
index 42540e8..e4f1c86 100644
@@ -72,7 +72,7 @@ public:
 
 public:
   void setLayout(const Layout &layout) { _layout = layout; }
-  const Layout &layout() { return _layout; }
+  Layout layout() const { return _layout; }
 
 private:
   Shape4D _shape;
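
The accessor change above is a const-correctness fix: the old non-const layout() could not be called through a const reference to LowerInfo, and returning the enum by value is as cheap as returning a reference. A minimal sketch of the difference; the function dump() is hypothetical:

    // Before: const Layout &layout() { return _layout; }  -- rejected on const objects
    // After:  Layout layout() const { return _layout; }   -- fine, returns by value
    void dump(const neurun::graph::operand::LowerInfo &lower_info)
    {
      auto layout = lower_info.layout(); // compiles with the const-qualified accessor
    }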
diff --git a/runtimes/neurun/core/src/compiler/ExecutorFactory.cc b/runtimes/neurun/core/src/compiler/ExecutorFactory.cc
index 407a60f..d75c4d5 100644
@@ -132,7 +132,8 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph)
     {
       auto tensor_builder = backend->tensor_builder();
       const auto info = obj.info();
-      tensor_builder->registerTensorInfo(ind, info);
+      const auto layout = lower_info->layout();
+      tensor_builder->registerTensorInfo(ind, info, layout);
       tensor_builder->notifyFirstUse(ind);
       tensor_builders.insert(tensor_builder);
     }
@@ -220,7 +221,8 @@ exec::IExecutor *ExecutorFactory::createParallelExecutor(graph::Graph &graph)
     {
       auto tensor_builder = backend->tensor_builder();
       const auto info = obj.info();
-      tensor_builder->registerTensorInfo(ind, info);
+      const auto layout = lower_info->layout();
+      tensor_builder->registerTensorInfo(ind, info, layout);
       tensor_builder->notifyFirstUse(ind);
       tensor_builders.insert(tensor_builder);
     }
diff --git a/runtimes/neurun/core/src/linear/Linear.cc b/runtimes/neurun/core/src/linear/Linear.cc
index f6147d4..d0c1120 100644
@@ -206,7 +206,8 @@ backend::TensorBuilderSet Linear::planTensors()
       else
       {
         const auto info = obj.info();
-        tensor_builder->registerTensorInfo(ind, info);
+        const auto layout = lower_info->layout();
+        tensor_builder->registerTensorInfo(ind, info, layout);
       }
 
       // Prepare tensor builders to be returned
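
Both executor-creation paths and the linear planner now follow the same pattern: read the layout from the operand's lower_info and forward it to the backend. A sketch of the end-to-end flow, assuming the layout was recorded earlier through LowerInfo::setLayout() (shown above); the wiring around these calls is illustrative:

    // Lowering phase: record the operand's layout on its LowerInfo.
    lower_info->setLayout(neurun::graph::operand::Layout::NHWC);

    // Planning phase: forward it to the backend together with the operand info.
    const auto layout = lower_info->layout();
    tensor_builder->registerTensorInfo(ind, obj.info(), layout);

    // Backend prepare(): the stored layout then feeds shape conversion, e.g.
    // asTensorInfo(info.shape(), info.typeInfo(), _layout, ...) in the ACL
    // TemplTensorBuilder above.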