Make helper conversion functions support NCHW (#6606)
author장지섭/On-Device Lab(SR)/Engineer/삼성전자 <jiseob.jang@samsung.com>
Fri, 16 Aug 2019 00:37:03 +0000 (09:37 +0900)
committer오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 16 Aug 2019 00:37:03 +0000 (09:37 +0900)
This commit makes the helper conversion functions support the NCHW layout by passing both the frontend and backend layouts explicitly.

Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
runtimes/neurun/backend/acl_cl/KernelGenerator.cc
runtimes/neurun/backend/acl_common/Convert.cc
runtimes/neurun/backend/acl_common/Convert.h
runtimes/neurun/backend/acl_common/TemplTensorBuilder.h
runtimes/neurun/backend/acl_neon/KernelGenerator.cc

index a58242a..7c98131 100644 (file)
@@ -464,10 +464,13 @@ void KernelGenerator::visit(const model::operation::FullyConnectedNode &node)
   auto fn = nnfw::cpp14::make_unique<arm_compute::CLFullyConnectedReshapingLayer>(
       _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
-  fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
-                output_alloc->handle(), needs_reshape,
-                ::neurun::backend::acl_common::asTensorShape(
-                    reshape, ::neurun::backend::acl_common::asRuntimeLayout(acl_layout)));
+  fn->configure(
+      input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(),
+      needs_reshape,
+      ::neurun::backend::acl_common::asTensorShape(/* TODO Support NCHW frontend */
+                                                   reshape, model::Layout::NHWC,
+                                                   ::neurun::backend::acl_common::asRuntimeLayout(
+                                                       acl_layout)));
 
   auto acl_fn = asAclFunction(std::move(fn));
 
index a7bad1d..b814587 100644 (file)
@@ -45,9 +45,10 @@ namespace backend
 namespace acl_common
 {
 
-// TODO Support front-end as NCHW for this function
 ::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape,
-                                         ::neurun::model::Layout layout, bool apply_dim_correction)
+                                         ::neurun::model::Layout frontend_layout,
+                                         ::neurun::model::Layout backend_layout,
+                                         bool apply_dim_correction)
 {
   const uint32_t rank = shape.rank();
 
@@ -64,24 +65,16 @@ namespace acl_common
     // However, if the dimension correction is applied to input_to_input_weights with input_size
     // equal to 1, it will be changed to 1-D.
     // So input_to_input_weights is not used by the weight of FullyConnected.
-    if (rank == 4 && layout == ::neurun::model::Layout::NCHW)
-    {
-      // NHWC -> NCHW
-      int32_t permutation[4] = {0, 2, 3, 1};
-      res.set(ToARMComputeAxis(rank, permutation[axis]).value(), shape.dim(axis),
-              apply_dim_correction);
-    }
-    else
-    {
-      res.set(ToARMComputeAxis(rank, axis).value(), shape.dim(axis), apply_dim_correction);
-    }
+    res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), shape.dim(axis),
+            apply_dim_correction);
   }
 
   return res;
 }
 
 ::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord,
-                                              ::neurun::model::Layout layout)
+                                              ::neurun::model::Layout frontend_layout,
+                                              ::neurun::model::Layout backend_layout)
 {
   const uint32_t rank = coord.size();
 
@@ -91,16 +84,7 @@ namespace acl_common
 
   for (uint32_t axis = 0; axis < rank; ++axis)
   {
-    if (rank == 4 && layout == ::neurun::model::Layout::NCHW)
-    {
-      // NHWC -> NCHW
-      int32_t permutation[4] = {0, 2, 3, 1};
-      res.set(ToARMComputeAxis(rank, permutation[axis]).value(), coord[axis]);
-    }
-    else
-    {
-      res.set(ToARMComputeAxis(rank, axis).value(), coord[axis]);
-    }
+    res.set(ToARMComputeAxis(rank, axis, frontend_layout, backend_layout).value(), coord[axis]);
   }
 
   return res;
@@ -131,15 +115,16 @@ namespace acl_common
   return ::arm_compute::QuantizationInfo(scale, offset);
 }
 
-// TODO Support front-end as NCHW for this function
 ::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape,
                                        const ::neurun::model::TypeInfo &typeInfo,
-                                       ::neurun::model::Layout layout, bool apply_dim_correction)
+                                       ::neurun::model::Layout frontend_layout,
+                                       ::neurun::model::Layout backend_layout,
+                                       bool apply_dim_correction)
 {
-  ::arm_compute::TensorInfo info(asTensorShape(shape, layout, apply_dim_correction), 1,
-                                 asDataType(typeInfo.type()),
-                                 asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
-  info.set_data_layout(asDataLayout(layout));
+  ::arm_compute::TensorInfo info(
+      asTensorShape(shape, frontend_layout, backend_layout, apply_dim_correction), 1,
+      asDataType(typeInfo.type()), asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
+  info.set_data_layout(asDataLayout(backend_layout));
   return info;
 }
 
index e98d839..37bb296 100644 (file)
@@ -42,14 +42,17 @@ namespace acl_common
 {
 
 ::arm_compute::TensorShape asTensorShape(const ::neurun::model::Shape &shape,
-                                         ::neurun::model::Layout layout,
+                                         ::neurun::model::Layout frontend_layout,
+                                         ::neurun::model::Layout backend_layout,
                                          bool apply_dim_correction = true);
 ::arm_compute::Coordinates asTensorCoordinate(const ::neurun::util::Coordinates &coord,
-                                              ::neurun::model::Layout layout);
+                                              ::neurun::model::Layout frontend_layout,
+                                              ::neurun::model::Layout backend_layout);
 ::arm_compute::DataType asDataType(const ::neurun::model::DataType &type);
 ::arm_compute::TensorInfo asTensorInfo(const ::neurun::model::Shape &shape,
                                        const ::neurun::model::TypeInfo &typeInfo,
-                                       ::neurun::model::Layout layout,
+                                       ::neurun::model::Layout frontend_layout,
+                                       ::neurun::model::Layout backend_layout,
                                        bool apply_dim_correction = true);
 
 ::arm_compute::PadStrideInfo asPadStrideInfo(const model::ExplicitPadding &padding,
index a10ed8c..6fba82d 100644 (file)
@@ -311,8 +311,9 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensor
   {
     auto ind = entry.first;
     const auto &info = entry.second;
-    auto tensor_info =
-        asTensorInfo(info.shape(), info.typeInfo(), _layout, _apply_dim_correction_map[ind]);
+    // TODO Support NCHW frontend
+    auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), model::Layout::NHWC, _layout,
+                                    _apply_dim_correction_map[ind]);
     _mem_mgr->buildTensor(ind, tensor_info);
   }
 }
@@ -383,8 +384,10 @@ void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubten
       assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
       assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
       // TODO Change to set data_layout for each front-end
-      auto shape = asTensorShape(info.shape(), _layout, _apply_dim_correction_map[current]);
-      ::arm_compute::Coordinates coordinates = asTensorCoordinate(info.offset(), _layout);
+      auto shape = asTensorShape(info.shape(), model::Layout::NHWC, _layout,
+                                 _apply_dim_correction_map[current]);
+      ::arm_compute::Coordinates coordinates =
+          asTensorCoordinate(info.offset(), model::Layout::NHWC, _layout);
       auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, true);
       subtensors[current] = tensor;
       stack.pop();
index 2bd41a6..6d84768 100644 (file)
@@ -553,10 +553,13 @@ void KernelGenerator::visit(const model::operation::FullyConnectedNode &node)
   auto fn = nnfw::cpp14::make_unique<arm_compute::NEFullyConnectedReshapingLayer>(
       _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
-  fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
-                output_alloc->handle(), needs_reshape,
-                ::neurun::backend::acl_common::asTensorShape(
-                    reshape, ::neurun::backend::acl_common::asRuntimeLayout(acl_layout)));
+  fn->configure(
+      input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(), output_alloc->handle(),
+      needs_reshape,
+      ::neurun::backend::acl_common::asTensorShape(/* TODO Support NCHW frontend */
+                                                   reshape, model::Layout::NHWC,
+                                                   ::neurun::backend::acl_common::asRuntimeLayout(
+                                                       acl_layout)));
 
   auto acl_fn = asAclFunction(std::move(fn));