Support ReLU activation (#571)
author Jonghyun Park / Motion Control Lab (SR) / Senior Engineer / Samsung Electronics <jh1302.park@samsung.com>
Wed, 11 Apr 2018 06:26:06 +0000 (15:26 +0900)
committer Jeonghyun Kim / Motion Control Lab (SR) / Senior Engineer / Samsung Electronics <jh0822.kim@samsung.com>
Wed, 11 Apr 2018 06:26:06 +0000 (15:26 +0900)
This commit revises the Conv2D operation kernel to support fused ReLU activation.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
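
The change follows a small chaining pattern: rather than running a single CLConvolutionLayer, the kernel now builds an ordered list of arm_compute::IFunction objects, appends a CLActivationLayer configured for an in-place ReLU when the fused activation requests it, and runs every function in order. Below is a minimal sketch of that pattern, not the kernel itself; the helper name append_fused_relu is illustrative only, and the usual ARM Compute Library CL runtime headers are assumed.

#include <memory>
#include <vector>

#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/CL/CLTensor.h>
#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>

// Appends an in-place ReLU to a chain of ACL functions. 'output' is the
// tensor the convolution writes to; passing nullptr as the activation's
// output tensor makes the layer update 'output' in place.
void append_fused_relu(std::vector<std::shared_ptr<arm_compute::IFunction>> &fns,
                       arm_compute::CLTensor &output, bool fused_relu)
{
  if (!fused_relu)
    return;

  auto relu_f = std::make_shared<arm_compute::CLActivationLayer>();
  const arm_compute::ActivationLayerInfo relu_info{
      arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
  relu_f->configure(&output, nullptr, relu_info);
  fns.emplace_back(relu_f);
}

The convolution function is appended first, so one loop over the list (for (const auto &fn : fns) fn->run();) executes the whole chain, and the in-place configuration avoids allocating a second output tensor.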
src/kernel/acl/src/cl/Conv2D_acl.cpp
src/kernel/acl/src/cl/Conv2D_acl.test.cpp

diff --git a/src/kernel/acl/src/cl/Conv2D_acl.cpp b/src/kernel/acl/src/cl/Conv2D_acl.cpp
index 7e5c693..ebfaffa 100644
@@ -1,4 +1,6 @@
 #include <OperationsUtils.h>
+#include <NeuralNetworks.h>
+
 #include <arm_compute/core/TensorShape.h>
 #include <arm_compute/core/TensorInfo.h>
 #include "../IO_accessor.h"
@@ -137,8 +139,27 @@ bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
   CLUniqueTensor bias(arm_compute::TensorInfo(bias_shape, arm_compute::Format::F32));
   CLUniqueTensor filter(arm_compute::TensorInfo(filter_shape, arm_compute::Format::F32));
 
-  arm_compute::CLConvolutionLayer conv_f;
-  conv_f.configure(input.ptr(), filter.ptr(), bias.ptr(), output.ptr(), conv_info);
+  assert(activation == ANEURALNETWORKS_FUSED_NONE || activation == ANEURALNETWORKS_FUSED_RELU);
+
+  std::vector<std::shared_ptr<arm_compute::IFunction>> fns;
+
+  auto conv_f = std::make_shared<arm_compute::CLConvolutionLayer>();
+
+  conv_f->configure(input.ptr(), filter.ptr(), bias.ptr(), output.ptr(), conv_info);
+
+  fns.emplace_back(conv_f);
+
+  if (ANEURALNETWORKS_FUSED_RELU == activation)
+  {
+    auto relu_f = std::make_shared<arm_compute::CLActivationLayer>();
+
+    const arm_compute::ActivationLayerInfo relu_info{arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
+
+    // Do in-place update
+    relu_f->configure(output.ptr(), nullptr, relu_info);
+
+    fns.emplace_back(relu_f);
+  }
 
   input.allocate();
   output.allocate();
@@ -149,7 +170,10 @@ bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
   TensorAccess<BiasAccessor>(bias.ref(), biasData, biasShape);
   TensorAccess<WeightAccessor>(filter.ref(), filterData, filterShape);
 
-  conv_f.run();
+  for (const auto &fn : fns)
+  {
+    fn->run();
+  }
 
   arm_compute::CLScheduler::get().sync();
 
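For reference, the expected values in the new test below follow directly from the fixture: with an all-ones 3x3 input, an all-ones 3x3 filter, one pixel of zero padding on each side, and a bias of -5.0f, the raw convolution sums are 4 at the corners, 6 at the edges, and 9 at the center; adding the bias gives -1, 1, and 4, and the fused ReLU clamps the negatives to zero. A standalone check of that arithmetic, assuming util::initData fills a buffer with the given value:

#include <algorithm>
#include <cstdio>

// Hand-rolled 3x3 "same" convolution over an all-ones input and filter,
// followed by bias and ReLU, reproducing the test's expected output.
int main()
{
  const float bias = -5.0f;
  float expect[3][3];

  for (int y = 0; y < 3; ++y)
  {
    for (int x = 0; x < 3; ++x)
    {
      float acc = 0.0f;
      // 3x3 window centered at (y, x); out-of-range taps hit zero padding.
      for (int dy = -1; dy <= 1; ++dy)
        for (int dx = -1; dx <= 1; ++dx)
          if (y + dy >= 0 && y + dy < 3 && x + dx >= 0 && x + dx < 3)
            acc += 1.0f * 1.0f; // input value times filter value
      expect[y][x] = std::max(0.0f, acc + bias); // bias, then ReLU
    }
  }

  // Prints 0.0 1.0 0.0 / 1.0 4.0 1.0 / 0.0 1.0 0.0,
  // matching expectData in convFloat32_3x3to3x3_RELU.
  for (int y = 0; y < 3; ++y)
    std::printf("%.1f %.1f %.1f\n", expect[y][0], expect[y][1], expect[y][2]);
  return 0;
}
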
diff --git a/src/kernel/acl/src/cl/Conv2D_acl.test.cpp b/src/kernel/acl/src/cl/Conv2D_acl.test.cpp
index ac946de..e25678e 100644
@@ -99,6 +99,50 @@ TEST(KernelACL_TC, convFloat32_3x3to3x3)
   EXPECT_EQ(bret, true);
 }
 
+TEST(KernelACL_TC, convFloat32_3x3to3x3_RELU)
+{
+  float inputData[9];
+  const android::nn::Shape inputShape = { OperandType::FLOAT32, {1,3,3,1}, 1.0, 0 };
+  float filterData[9];
+  const android::nn::Shape filterShape = { OperandType::FLOAT32, {1,3,3,1}, 1.0, 0 };
+  float biasData[1] = { -5.0f };
+  const android::nn::Shape biasShape = { OperandType::FLOAT32, {1}, 1.0, 0 };
+  int32_t padding_left = 1;
+  int32_t padding_right = 1;
+  int32_t padding_top = 1;
+  int32_t padding_bottom = 1;
+  int32_t stride_width = 1;
+  int32_t stride_height = 1;
+  int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+  float outputData[9];
+  const android::nn::Shape outputShape = { OperandType::FLOAT32, {1,3,3,1}, 1.0, 0 };
+  bool bret;
+
+  util::initData(inputData, sizeof(inputData) / sizeof(inputData[0]), 1.0);
+  util::initData(filterData, sizeof(filterData) / sizeof(filterData[0]), 1.0);
+  util::initData(outputData, sizeof(outputData) / sizeof(outputData[0]), 0.0);
+
+  bret = convFloat32(inputData, inputShape,
+                     filterData, filterShape,
+                     biasData, biasShape,
+                     padding_left, padding_right,
+                     padding_top, padding_bottom,
+                     stride_width, stride_height,
+                     activation,
+                     outputData, outputShape);
+  EXPECT_EQ(bret, true);
+
+  float expectData[] =
+  {
+    0.0f, 1.0f, 0.0f,
+    1.0f, 4.0f, 1.0f,
+    0.0f, 1.0f, 0.0f
+  };
+
+  bret = util::compareData(outputData, expectData, outputShape);
+  EXPECT_EQ(bret, true);
+}
+
 TEST(KernelACL_TC, convFloat32_3x5to3x3)
 {
   float inputData[15] = {