Find and apply the frontend layout of operands (#7065)
author Jiseob Jang/On-Device Lab(SR)/Engineer/Samsung Electronics <jiseob.jang@samsung.com>
Mon, 2 Sep 2019 04:08:32 +0000 (13:08 +0900)
committer Hanjoung Lee/On-Device Lab(SR)/Engineer/Samsung Electronics <hanjoung.lee@samsung.com>
Mon, 2 Sep 2019 04:08:32 +0000 (13:08 +0900)
This commit finds the frontend layout of each operand and applies it when
registering tensor info, instead of assuming an NHWC frontend.
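
Both ExecutorFactory.cc and Linear.cc gain the same lookup: take the layout
of the subgraph that contains the operand's defining operation, and fall back
to the subgraph of its first use when that layout is UNKNOWN. Below is a
minimal standalone sketch of this fallback logic; Layout, OperandInfo, and
findFrontendLayout are simplified, hypothetical stand-ins for illustration,
not the real neurun types:

    #include <iostream>
    #include <vector>

    // Hypothetical stand-ins for neurun's model::Layout and the subgraph
    // layout lookup; none of these names come from the real codebase.
    enum class Layout { UNKNOWN, NHWC, NCHW };

    struct OperandInfo
    {
      Layout def_layout;               // layout of the subgraph defining the operand
      std::vector<Layout> use_layouts; // layouts of the subgraphs using it
    };

    // Prefer the defining subgraph's layout; if it is UNKNOWN (e.g. the
    // operand is defined by a PermuteNode, which carries no single layout),
    // fall back to the layout of the first use, as this commit does.
    Layout findFrontendLayout(const OperandInfo &obj)
    {
      Layout frontend_layout = obj.def_layout;
      if (frontend_layout == Layout::UNKNOWN && !obj.use_layouts.empty())
        frontend_layout = obj.use_layouts.front();
      return frontend_layout;
    }

    int main()
    {
      // An operand defined by a PermuteNode: the definition yields UNKNOWN,
      // so the layout comes from the first use (NCHW here).
      OperandInfo from_permute{Layout::UNKNOWN, {Layout::NCHW}};
      std::cout << (findFrontendLayout(from_permute) == Layout::NCHW) << '\n'; // prints 1
    }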

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
runtimes/neurun/core/src/compiler/ExecutorFactory.cc
runtimes/neurun/core/src/compiler/Linear.cc
runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc

diff --git a/runtimes/neurun/core/src/compiler/ExecutorFactory.cc b/runtimes/neurun/core/src/compiler/ExecutorFactory.cc
index df88945..a619eaf 100644
@@ -216,9 +216,18 @@ exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bo
       else
       {
         const auto info = obj.info();
-        const auto layout = lower_info->def_factors().getOnlyElement().layout();
-        // TODO Support NCHW frontend
-        tensor_builder->registerTensorInfo(ind, info, model::Layout::NHWC, layout);
+        // NOTE This assumes an operand has a single layout; only PermuteNode may have
+        // different layouts for its input and output
+        const auto &def = *obj.getDef().list().cbegin();
+        auto frontend_layout =
+            graph.subgraphs().at(graph.subgraphs().getOperation(def)).getLayout();
+        if (frontend_layout == model::Layout::UNKNOWN)
+        {
+          const auto &use = *obj.getUses().list().cbegin();
+          frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
+        }
+        const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
+        tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout);
         // Workaround: use the static memory planner so that this tensor is never deallocated
         tensor_builder->notifyFirstUse(ind);
       }
diff --git a/runtimes/neurun/core/src/compiler/Linear.cc b/runtimes/neurun/core/src/compiler/Linear.cc
index 2a933d2..2182c49 100644
@@ -210,9 +210,18 @@ void Linear::planTensors()
       else
       {
         const auto info = obj.info();
-        const auto layout = lower_info->def_factors().getOnlyElement().layout();
-        // TODO Support NCHW frontend
-        tensor_builder->registerTensorInfo(ind, info, model::Layout::NHWC, layout);
+
+        // NOTE This assumes an operand has a single layout; only PermuteNode may have
+        // different layouts for its input and output
+        const auto &def = *obj.getDef().list().cbegin();
+        auto frontend_layout = _subgraphs->at(_subgraphs->getOperation(def)).getLayout();
+        if (frontend_layout == model::Layout::UNKNOWN)
+        {
+          const auto &use = *obj.getUses().list().cbegin();
+          frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
+        }
+        const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
+        tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout);
       }
 
       tensor_builder_map[ind] = tensor_builder;
diff --git a/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc b/runtimes/neurun/core/src/graph/pass/PermutationInsertionPass.cc
index 76e0d8c..1f54334 100644
@@ -136,15 +136,11 @@ PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index
   }
 
   // Find PermuteNode information
-  const auto input_layout =
-      _graph.getLowerInfo(operand_index)->def_factors().getOnlyElement().layout();
-  const auto output_layout = factor.layout();
   auto input_backend = _graph.getLowerInfo(operand_index)->def_factors().getOnlyElement().backend();
   auto output_backend = factor.backend();
   // NOTE PermuteNode may not have a specific layout because the layouts of its input and
   // output may differ.
-  const auto permute_node_layout =
-      input_layout == output_layout ? output_layout : model::Layout::UNKNOWN;
+  const auto permute_node_layout = model::Layout::UNKNOWN;
   const auto permute_node_backend = backend::BackendManager::instance().getDefault();
   const operand::PermuteFactor permute_node_factor{permute_node_backend, permute_node_layout};
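
The PermutationInsertionPass change above makes every PermuteNode carry
Layout::UNKNOWN unconditionally, even when its input and output layouts
happen to match. This appears to be exactly the case the frontend-layout
fallback relies on: an operand defined by a PermuteNode reports UNKNOWN from
its definition, so its layout is taken from its first use instead.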