[pureacl] Use fused_act parameter in Conv and DepthwiseConv acl layer (#4851)
author윤지영/On-Device Lab(SR)/Staff Engineer/삼성전자 <jy910.yun@samsung.com>
Thu, 28 Mar 2019 07:57:28 +0000 (16:57 +0900)
committer박세희/On-Device Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Thu, 28 Mar 2019 07:57:28 +0000 (16:57 +0900)
* [pureacl] Use fused_act parameter in Conv and DepthwiseConv acl layer

ACL v19.03 supports the fused_act parameter in the Conv and DepthwiseConv layers.

Signed-off-by: Jiyoung Yun <jy910.yun@samsung.com>
* Revert DepthwiseConvolution changes

Activation layer information works only for QASYMM8 in DepthwiseConvolutionLayer, so the DepthwiseConvolution changes are reverted.

* Add comments explaining the explicit default arguments passed to ConvolutionLayer::configure

runtimes/pure_arm_compute/src/compilation.cc

index 03c2e92..dcf7a30 100644 (file)
@@ -168,6 +168,33 @@ Padding same_padding(const nnfw::misc::feature::Shape &ifm_shape,
                                       ::arm_compute::DimensionRoundingType::FLOOR};
 }
 
+::arm_compute::ActivationLayerInfo asActInfo(FuseCode act)
+{
+  if (act == ANEURALNETWORKS_FUSED_NONE)
+  {
+    return ::arm_compute::ActivationLayerInfo();
+  }
+  else if (act == ANEURALNETWORKS_FUSED_RELU)
+  {
+    return ::arm_compute::ActivationLayerInfo(
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
+  }
+  else if (act == ANEURALNETWORKS_FUSED_RELU1)
+  {
+    return ::arm_compute::ActivationLayerInfo(
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 1.0f, -1.0f);
+  }
+  else if (act == ANEURALNETWORKS_FUSED_RELU6)
+  {
+    return ::arm_compute::ActivationLayerInfo(
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f);
+  }
+  else
+  {
+    throw std::runtime_error("Not supported, yet");
+  }
+}
+
 struct IAllocationContext
 {
   virtual ~IAllocationContext() = default;
@@ -981,13 +1008,17 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Implicit::Node &node)
     auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
 
     const auto conv_info = asPadStrideInfo(param.padding, param.stride);
+    const auto fused_act = asActInfo(param.activation);
 
     if (::internal::arm_compute::isGpuMode())
     {
       std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
 
+      // To pass the fused_act parameter, it calls the WeightsInfo() and Size2D(1U, 1U) (dilation)
+      // functions like the default parameter.
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
-                    conv_info);
+                    conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U),
+                    fused_act);
 
       builder.append("Conv2D", std::move(fn));
     }
@@ -995,12 +1026,13 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Implicit::Node &node)
     {
       std::unique_ptr<::arm_compute::NEConvolutionLayer> fn{new ::arm_compute::NEConvolutionLayer};
 
-      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
+      // To pass the fused_act parameter, it calls the WeightsInfo() and Size2D(1U, 1U) (dilation)
+      // functions like the default parameter.
+      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info,
+                    ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), fused_act);
 
       builder.append("Conv2D", std::move(fn));
     }
-
-    ActivationBuilder{builder}.append(param.activation, ofm_alloc);
   };
 
   _builder.addStage(stage);
@@ -1146,13 +1178,17 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node)
     auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
 
     const auto conv_info = asPadStrideInfo(param.padding, param.stride);
+    const auto fused_act = asActInfo(param.activation);
 
     if (::internal::arm_compute::isGpuMode())
     {
       std::unique_ptr<::arm_compute::CLConvolutionLayer> fn{new ::arm_compute::CLConvolutionLayer};
 
+      // To pass the fused_act parameter, it calls the WeightsInfo() and Size2D(1U, 1U) (dilation)
+      // functions like the default parameter.
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
-                    conv_info);
+                    conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U),
+                    fused_act);
 
       builder.append("Conv2D", std::move(fn));
     }
@@ -1160,12 +1196,13 @@ void Planner::visit(const ::internal::tflite::op::Conv2D::Explicit::Node &node)
     {
       std::unique_ptr<::arm_compute::NEConvolutionLayer> fn{new ::arm_compute::NEConvolutionLayer};
 
-      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
+      // To pass the fused_act parameter, it calls the WeightsInfo() and Size2D(1U, 1U) (dilation)
+      // functions like the default parameter.
+      fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info,
+                    ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), fused_act);
 
       builder.append("Conv2D", std::move(fn));
     }
-
-    ActivationBuilder{builder}.append(param.activation, ofm_alloc);
   };
 
   _builder.addStage(stage);