Add PReLU layer in PACL (#3901)
author Prasanna R/SNAP/SRI-Bangalore/Engineer/Samsung Electronics <prasanna.r@samsung.com>
Fri, 7 Dec 2018 06:43:21 +0000 (12:13 +0530)
committer Hyeongseok Oh/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Fri, 7 Dec 2018 06:43:21 +0000 (15:43 +0900)
This patch adds a PReLU layer to PACL.
Issue: #3459 for Epic4

PReLU is implemented as a composition of existing functions:

      f(x) = ReLU(x) + (Neg(alpha) * ReLU(Neg(x)))
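
which is element-wise equivalent to the textbook definition
f(x) = max(0, x) + alpha * min(0, x).

As a sanity check, here is a minimal standalone sketch (plain C++, no ARM
Compute dependency; prelu_decomposed is an illustrative helper, not part of
this patch) verifying the decomposition:

      #include <algorithm>
      #include <cassert>

      // PReLU via the decomposition used in this patch:
      //   ReLU(x) + (-alpha) * ReLU(-x)
      static float prelu_decomposed(float x, float alpha)
      {
        float relu_x = std::max(0.0f, x);      // positive part of x
        float relu_neg_x = std::max(0.0f, -x); // magnitude of negative part of x
        return relu_x + (-alpha) * relu_neg_x;
      }

      int main()
      {
        const float alpha = 0.25f;
        for (float x : {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f})
        {
          // Textbook form: max(0, x) + alpha * min(0, x).
          // Equality is exact here: the two forms differ only in operand signs.
          float expected = std::max(0.0f, x) + alpha * std::min(0.0f, x);
          assert(prelu_decomposed(x, alpha) == expected);
        }
        return 0;
      }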

Signed-off-by: prasannar <prasanna.r@samsung.com>
runtimes/pure_arm_compute/src/internal/layers/PReLULayer.cc [new file with mode: 0644]
runtimes/pure_arm_compute/src/internal/layers/PReLULayer.h [new file with mode: 0644]

diff --git a/runtimes/pure_arm_compute/src/internal/layers/PReLULayer.cc b/runtimes/pure_arm_compute/src/internal/layers/PReLULayer.cc
new file mode 100644 (file)
index 0000000..fff06c3
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PReLULayer.h"
+#include "internal/arm_compute.h"
+
+void PReLULayer::configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *alpha,
+                           ::arm_compute::ITensor *output)
+{
+  _input = input;
+  _alpha = alpha;
+  _output = output;
+
+  if (::internal::arm_compute::isGpuMode())
+  {
+    _cl_relu_x.allocator()->init(*input->info());
+    _cl_neg_x.allocator()->init(*input->info());
+    _cl_relu_neg_x.allocator()->init(*input->info());
+    _cl_neg_alpha.allocator()->init(*alpha->info());
+    _cl_mul_result.allocator()->init(*output->info());
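+    // Each intermediate tensor mirrors the shape of its source: the x-derived
+    // tensors match the input, Neg(alpha) matches alpha, the product matches the output.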
+
+    const ::arm_compute::ActivationLayerInfo act_info{
+        ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
+    // Perform ReLU(x)
+    _cl_relu_of_x.configure(CAST_CL(_input), &_cl_relu_x, act_info);
+    // Perform Neg(x)
+    _cl_neg_of_x.configure(CAST_CL(_input), &_cl_neg_x);
+    // Perform Neg(x, activation=ReLU)
+    _cl_relu_of_neg_x.configure(&_cl_neg_x, &_cl_relu_neg_x, act_info);
+    // Perform Neg(alpha)
+    _cl_neg_of_alpha.configure(CAST_CL(_alpha), &_cl_neg_alpha);
+    // Perform Neg(alpha) * Neg(x, activation=ReLU)
+    _cl_mul.configure(&_cl_neg_alpha, &_cl_relu_neg_x, &_cl_mul_result,
+                      1.0f, // scale
+                      ::arm_compute::ConvertPolicy::SATURATE,
+                      ::arm_compute::RoundingPolicy::TO_NEAREST_EVEN);
+    // Perform f(x) = ReLU(x) + (Neg(alpha) * Neg(x, activation=ReLU))
+    _cl_add.configure(&_cl_relu_x, &_cl_mul_result, CAST_CL(_output),
+                      ::arm_compute::ConvertPolicy::SATURATE);
+
+    // NOTE intermediate tensors are inaccessible from outside,
+    // and thus it is safe to invoke allocate here.
+    _cl_relu_x.allocator()->allocate();
+    _cl_neg_x.allocator()->allocate();
+    _cl_relu_neg_x.allocator()->allocate();
+    _cl_neg_alpha.allocator()->allocate();
+    _cl_mul_result.allocator()->allocate();
+  }
+  else
+  {
+    throw std::runtime_error("Not supported, yet");
+  }
+}
+
+void PReLULayer::run(void)
+{
+  if (::internal::arm_compute::isGpuMode())
+  {
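+    // Run the sub-functions in dependency order: the branches on x and alpha
+    // first, then the multiply, then the final add.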
+    _cl_relu_of_x.run();
+    _cl_neg_of_x.run();
+    _cl_relu_of_neg_x.run();
+    _cl_neg_of_alpha.run();
+    _cl_mul.run();
+    _cl_add.run();
+  }
+  else
+  {
+    throw std::runtime_error("Not supported, yet");
+  }
+}
diff --git a/runtimes/pure_arm_compute/src/internal/layers/PReLULayer.h b/runtimes/pure_arm_compute/src/internal/layers/PReLULayer.h
new file mode 100644 (file)
index 0000000..83f6a58
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file        PReLULayer.h
+ * @brief       This file contains the PReLULayer class
+ * @ingroup     COM_AI_RUNTIME
+ */
+
+#ifndef __GENERIC_PRELU_LAYER_H__
+#define __GENERIC_PRELU_LAYER_H__
+
+#include <arm_compute/core/ITensor.h>
+#include <arm_compute/runtime/IFunction.h>
+
+#include <arm_compute/runtime/Tensor.h>
+#include <arm_compute/runtime/CL/CLTensor.h>
+
+#include <arm_compute/runtime/CL/functions/CLArithmeticAddition.h>
+#include <arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h>
+#include <arm_compute/runtime/CL/functions/CLActivationLayer.h>
+#include <arm_compute/runtime/CL/functions/CLNeg.h>
+
+/**
+ * @brief Class to run PReLU operation
+ */
+class PReLULayer : public ::arm_compute::IFunction
+{
+public:
+  PReLULayer(void)
+      : _input(nullptr), _alpha(nullptr), _output(nullptr), _cl_relu_x{}, _cl_neg_x{},
+        _cl_relu_neg_x{}, _cl_neg_alpha{}, _cl_mul_result{}, _cl_relu_of_x{}, _cl_neg_of_x{},
+        _cl_relu_of_neg_x{}, _cl_neg_of_alpha{}, _cl_add{}, _cl_mul{}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /**
+   * @brief Configure the layer
+   * @param[in] input The source tensor
+   * @param[in] alpha The alpha tensor (slope applied to negative inputs)
+   * @param[out] output The destination tensor
+   * @return N/A
+   */
+  void configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *alpha,
+                 ::arm_compute::ITensor *output);
+
+public:
+  /**
+   * @brief Run the operation. Must be called after configure().
+   * @return N/A
+   */
+  void run(void) override;
+
+private:
+  ::arm_compute::ITensor *_input;
+  ::arm_compute::ITensor *_alpha;
+  ::arm_compute::ITensor *_output;
+  ::arm_compute::CLTensor _cl_relu_x;
+  ::arm_compute::CLTensor _cl_neg_x;
+  ::arm_compute::CLTensor _cl_relu_neg_x;
+  ::arm_compute::CLTensor _cl_neg_alpha;
+  ::arm_compute::CLTensor _cl_mul_result;
+
+private:
+  ::arm_compute::CLActivationLayer _cl_relu_of_x;
+  ::arm_compute::CLNeg _cl_neg_of_x;
+  ::arm_compute::CLActivationLayer _cl_relu_of_neg_x;
+  ::arm_compute::CLNeg _cl_neg_of_alpha;
+  ::arm_compute::CLArithmeticAddition _cl_add;
+  ::arm_compute::CLPixelWiseMultiplication _cl_mul;
+};
+
+#endif // __GENERIC_PRELU_LAYER_H__
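
For context, here is a minimal usage sketch. It is hypothetical (the shapes
and the direct CLScheduler setup are illustrative; in PACL the runtime performs
this setup and drives PReLULayer through the model compilation path), and it
assumes GPU mode so that configure() takes the CL branch:

      #include "internal/layers/PReLULayer.h"

      #include <arm_compute/core/TensorInfo.h>
      #include <arm_compute/runtime/CL/CLScheduler.h>

      void prelu_example(void)
      {
        // One-time CL setup (normally done by the runtime)
        ::arm_compute::CLScheduler::get().default_init();

        // Hypothetical shapes: alpha matches the input element-wise
        const ::arm_compute::TensorInfo info{::arm_compute::TensorShape{16U, 16U, 8U}, 1,
                                             ::arm_compute::DataType::F32};

        ::arm_compute::CLTensor input, alpha, output;
        input.allocator()->init(info);
        alpha.allocator()->init(info);
        output.allocator()->init(info);

        PReLULayer prelu;
        prelu.configure(&input, &alpha, &output);

        input.allocator()->allocate();
        alpha.allocator()->allocate();
        output.allocator()->allocate();

        // ... map the tensors and fill input/alpha here ...

        prelu.run();
        ::arm_compute::CLScheduler::get().sync();
      }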