[nnc] Acl backend activations and reshape (#1952)
author Timur Ablyazimov / AI Tools Lab / SRR / Staff Engineer / Samsung Electronics <t.ablyazimov@samsung.com>
Thu, 25 Oct 2018 17:47:49 +0000 (20:47 +0300)
committer Roman Rusyaev / AI Tools Lab / SRR / Staff Engineer / Samsung Electronics <r.rusyaev@samsung.com>
Thu, 25 Oct 2018 17:47:49 +0000 (20:47 +0300)
Support for the ReLU, Capped ReLU, Tanh and Reshape operations in the ACL C++ backend generator.
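
For each of these activations the generator emits an arm_compute::CLActivationLayer into
the artifact: an ActivationLayerInfo is built as a local variable in the artifact
constructor, the layer is configured there, and its run() is invoked from the inference
method. Roughly, the emitted code looks like the following sketch (the in/out variable
names are hypothetical; the real ones are derived from the tensor names):

    // Artifact constructor:
    arm_compute::ActivationLayerInfo out_activation_layer_activation_info(
        arm_compute::ActivationLayerInfo::ActivationFunction::RELU, 0.000000, 0.000000);
    _out_activation_layer.configure(&_in, &_out, out_activation_layer_activation_info);

    // Inference method:
    _out_activation_layer.run();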

Signed-off-by: Timur Ablyazimov <t.ablyazimov@samsung.com>
contrib/nnc/include/passes/acl_soft_backend/AclCppOpGenerator.h
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp

index af26d4d..dc1f899 100644
@@ -78,6 +78,16 @@ private:
                       const std::string& suffix);
 
   /**
+   * @brief Generates an activation function layer: ReLU, Tanh etc.
+   * @param activation_name - name of the activation function as used in ACL: RELU, TANH etc.
+   * @param a - alpha parameter used by some activation functions: BOUNDED_RELU, LU_BOUNDED_RELU,
+   *            LINEAR, TANH.
+   * @param b - beta parameter used by some activation functions: LINEAR, LU_BOUNDED_RELU, TANH.
+   */
+  void genActivation(mir::INode* node, mir::OpDescription& op, const std::string& activation_name,
+                     float a = 0, float b = 0);
+
+  /**
    * @brief Generates a unique name for the tensor.
    */
   std::string tensorName(mir::INode* node) const;
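
The alpha and beta semantics above follow the definitions of arm_compute::ActivationFunction
(documented in ACL's Types.h): RELU is f(x) = max(0, x); BOUNDED_RELU is f(x) = min(a, max(0, x));
LU_BOUNDED_RELU is f(x) = min(a, max(b, x)); TANH is f(x) = a * tanh(b * x); LINEAR is
f(x) = a * x + b.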
index 27901ce..94b8353 100644
@@ -7,6 +7,10 @@
 #include "core/modelIR/operations/variable_op.h"
 #include "core/modelIR/operations/softmax_op.h"
 #include "core/modelIR/operations/conv_2d_op.h"
+#include "core/modelIR/operations/relu_op.h"
+#include "core/modelIR/operations/capped_relu_op.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/reshape_op.h"
 #include "core/modelIR/operations/depthwise_conv2d_op.h"
 #include "core/modelIR/operations/fully_connected_op.h"
 #include "core/modelIR/operations/concat_op.h"
@@ -159,7 +163,8 @@ void AclCppOpGenerator::visit(INode* node, ops::FullyConnectedOp& op) {
   _infBlock->call("run", {}, fully_layer);
 }
 
-void AclCppOpGenerator::visit(INode* node, ops::CappedReluOp&) {
+void AclCppOpGenerator::visit(INode* node, ops::CappedReluOp& op) {
+  genActivation(node, op, "LU_BOUNDED_RELU", op.getCap());
 }
 
 void AclCppOpGenerator::visit(INode* node, ops::BiasAddOp&) {
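
For a capped ReLU with cap 6 (i.e. ReLU6) the generated ActivationLayerInfo would then be
something like (a sketch with a hypothetical variable name):

    arm_compute::ActivationLayerInfo out_activation_layer_activation_info(
        arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
        6.000000, 0.000000);  // computes min(6, max(0, x))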
@@ -169,10 +174,31 @@ void AclCppOpGenerator::visit(INode* node, ops::VariableOp& op) {
   genTensor(node, op);
 }
 
-void AclCppOpGenerator::visit(INode* node, ops::ReluOp&) {
+void AclCppOpGenerator::visit(INode* node, ops::ReluOp& op) {
+  genActivation(node, op, "RELU");
 }
 
-void AclCppOpGenerator::visit(INode* node, ops::ReshapeOp&) {
+void AclCppOpGenerator::visit(INode* node, ops::ReshapeOp& op) {
+  auto& prev_nodes = node->getPrevNodes();
+  assert(prev_nodes.size() == 1);
+
+  // Get the id of the input tensor in the generated artifact.
+  auto in_node = prev_nodes[0].node;
+  auto in = AF::id(tensorName(in_node));
+
+  // Create the output tensor in the DOM and return its id.
+  auto out = genTensor(node, op);
+
+  // Create an instance of the CLReshapeLayer class as a member of the artifact class.
+  auto reshape_layer_var = _artifactClass->var(false, "arm_compute::CLReshapeLayer",
+                                               out->name() + "_reshape_layer");
+  auto reshape_layer = reshape_layer_var->use();
+
+  // Generate the call: reshape_layer.configure(&in, &out);
+  _constrBlock->call("configure", {AF::ref(in), AF::ref(out)}, reshape_layer);
+
+  // Generate the call: reshape_layer.run();
+  _infBlock->call("run", {}, reshape_layer);
 }
 
 void AclCppOpGenerator::visit(INode* node, ops::ScaleOp&) {
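
The generated code for a reshape thus reduces to three pieces (a sketch, hypothetical names):

    arm_compute::CLReshapeLayer _out_reshape_layer;  // member of the artifact class
    _out_reshape_layer.configure(&_in, &_out);       // in the artifact constructor
    _out_reshape_layer.run();                        // in the inference method

CLReshapeLayer takes no shape argument: the target shape is presumably that of the output
tensor, which genTensor() creates from the shape recorded in the model IR node.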
@@ -185,6 +211,7 @@ void AclCppOpGenerator::visit(INode* node, ops::DropoutOp&) {
 }
 
 void AclCppOpGenerator::visit(INode* node, ops::TanhOp& op) {
+  genActivation(node, op, "TANH");
 }
 
 void AclCppOpGenerator::visit(INode* node, ops::ElementwiseOp& op) {
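
Note that ACL defines TANH as f(x) = a * tanh(b * x), so the plain hyperbolic tangent needs
alpha = beta = 1; leaving the default zeros would make the layer output identically zero.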
@@ -198,7 +225,7 @@ void AclCppOpGenerator::visit(INode* node, ops::EluOp& op) {
 
 template <typename Op>
 void AclCppOpGenerator::genConvolution(INode* node, Op& op, const string& acl_func_name,
-                      const string& suffix) {
+                                       const string& suffix) {
   const TensorVariant& ir_weights = op.getKernel();
   const Shape& ir_weights_shape = ir_weights.getShape();
   assert(ir_weights_shape.rank() == 4);
@@ -234,6 +261,40 @@ void AclCppOpGenerator::genConvolution(INode* node, Op& op, const string& acl_fu
   _infBlock->call("run", {}, conv_layer);
 }
 
+void AclCppOpGenerator::genActivation(INode* node, OpDescription& op,
+                                      const std::string& activation_name, float a, float b) {
+  auto &prev_nodes = node->getPrevNodes();
+  assert(prev_nodes.size() == 1);
+
+  // Get the id of the input tensor.
+  auto in_node = prev_nodes[0].node;
+  auto in = AF::id(tensorName(in_node));
+
+  // Create the output tensor in the DOM and return its id.
+  auto out = genTensor(node, op);
+  auto prefix = out->name() + "_activation_layer";
+
+  // Create an instance of the ActivationLayerInfo class as a local variable in the artifact
+  // constructor. This instance provides information about the concrete activation function,
+  // such as ReLU or Tanh, and two optional parameters (alpha and beta) needed by some activations.
+  auto activation_info_var = _constrBlock->var(
+    "arm_compute::ActivationLayerInfo", prefix + "_activation_info", {},
+    {AF::lit("arm_compute::ActivationLayerInfo::ActivationFunction::" + activation_name),
+     AF::lit(to_string(a)), AF::lit(to_string(b))});
+  auto activation_info = activation_info_var->use();
+
+  // Create an instance of the CLActivationLayer class as a member of the artifact class.
+  auto activation_layer_var = _artifactClass->var(false, "arm_compute::CLActivationLayer",
+                                                  prefix);
+  auto activation_layer = activation_layer_var->use();
+
+  // Generate the call: activation_layer.configure(&in, &out, activation_info);
+  _constrBlock->call("configure", {AF::ref(in), AF::ref(out), activation_info}, activation_layer);
+
+  // Generate the call: activation_layer.run();
+  _infBlock->call("run", {}, activation_layer);
+}
+
 string AclCppOpGenerator::tensorName(INode* node) const {
   string tensor_name;