[nnc] Add LeakyRelu activation (#2757)
authorVladimir Plazun/AI Tools Lab /SRR/Engineer/삼성전자 <v.plazun@samsung.com>
Wed, 9 Jan 2019 11:46:20 +0000 (14:46 +0300)
committerРоман Михайлович Русяев/AI Tools Lab /SRR/Staff Engineer/삼성전자 <r.rusyaev@samsung.com>
Wed, 9 Jan 2019 11:46:20 +0000 (14:46 +0300)
* [nnc] Add LeakyRelu activation

Implement leaky ReLU activation function (ReLU with a configurable negative slope).

Signed-off-by: Vladimir Plazun <v.plazun@partner.samsung.com>
20 files changed:
contrib/nnc/core/modelIR/IrDotDumper.cpp
contrib/nnc/core/modelIR/Operation.cpp
contrib/nnc/include/core/modelIR/IrDotDumper.h
contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h [new file with mode: 0644]
contrib/nnc/include/core/modelIR/operations/operations.lst.h
contrib/nnc/include/passes/interpreter/Interpreter.h
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/interpreter/Interpreter.cpp
contrib/nnc/passes/soft_backend/CPPGenerator.cpp
contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
contrib/nnc/passes/soft_backend/ModelAnalyzer.h
contrib/nnc/passes/soft_backend/SBSerializer.cpp
contrib/nnc/passes/soft_backend/SBSerializer.h
contrib/nnc/passes/soft_backend/code_snippets/cpp_leaky_relu.def [new file with mode: 0644]
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
contrib/nnc/unittests/soft_backend/CPPOperations.cpp

index b430c72..4ab778f 100644 (file)
@@ -31,6 +31,7 @@
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GatherOp.h"
 #include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
@@ -349,6 +350,15 @@ void IrDotDumper::visit(ops::SigmoidOp& op) {
   dotBuilder.updateWithOp(&op, node_info);
 }
 
+void IrDotDumper::visit(mir::ops::LeakyReluOp& op) {
+  auto node_info = DotIrNodeInfo().withType("LeakyReluOp", op.getName())
+    .withInShapes(getInputShapes(op))
+    .withOutShapes(getOutputShapes(op))
+    .withMisc("alpha", op.getAlpha());
+
+  dotBuilder.updateWithOp(&op, node_info);
+}
+
 } // namespace mir
 } // namespace nnc
 
index 2eb6eda..50dd174 100644 (file)
@@ -29,6 +29,7 @@
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GatherOp.h"
 #include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
index 0b3128e..ec67fda 100644 (file)
@@ -45,6 +45,7 @@ public:
   void visit(ops::FullyConnectedOp& op) override;
   void visit(ops::GatherOp& op) override;
   void visit(ops::GemmOp& op) override;
+  void visit(mir::ops::LeakyReluOp& op) override;
   void visit(ops::PadOp& op) override;
   void visit(ops::PoolOp& op) override;
   void visit(ops::ReduceFOp& op) override;
diff --git a/contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h b/contrib/nnc/include/core/modelIR/operations/LeakyReluOp.h
new file mode 100644 (file)
index 0000000..b968a07
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_LEAKY_RELU_H_
+#define _NNC_CORE_IR_MODEL_LEAKY_RELU_H_
+
+#include "core/modelIR/Operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+class LeakyReluOp : public Operation {
+public:
+  explicit LeakyReluOp(const IODescriptor& arg, float alpha)
+  : Operation(Type::ReLU, {arg}), _alpha(alpha) {
+    // Infer output shape.
+    setOutputShape(0, getInputShape(0));
+  }
+
+  float getAlpha() const {
+    return _alpha;
+  }
+
+private:
+  float _alpha;
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_LEAKY_RELU_H_
index c3fe3d3..85b4372 100644 (file)
@@ -47,3 +47,4 @@ HANDLE_OP(pad, PadOp)
 HANDLE_OP(sqrt, SqrtOp)
 HANDLE_OP(reduceF, ReduceFOp)
 HANDLE_OP(transpose, TransposeOp)
+HANDLE_OP(leakyReLU, LeakyReluOp)
index e6f10d6..23dfaf4 100644 (file)
@@ -51,6 +51,7 @@ public:
   void visit(ops::EluOp& op) override;
   void visit(ops::FullyConnectedOp& op) override;
   void visit(ops::GatherOp& op) override;
+  void visit(ops::LeakyReluOp& op) override;
   void visit(ops::PadOp& op) override;
   void visit(ops::PoolOp& op) override;
   void visit(ops::ReduceFOp& op) override;
index ea381f3..f7698ee 100644 (file)
@@ -34,6 +34,7 @@
 #include "core/modelIR/operations/ElementwiseOp.h"
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
 #include "core/modelIR/operations/ReluOp.h"
@@ -944,5 +945,9 @@ void AclCppOpGenerator::visit(ops::SigmoidOp& op) {
   genActivation(op, "LOGISTIC");
 }
 
+void AclCppOpGenerator::visit(mir::ops::LeakyReluOp& op) {
+  genActivation(op, "LEAKY_RELU", op.getAlpha());
+}
+
 }
 // namespace nnc
index f48eb3d..c306a2b 100644 (file)
@@ -61,6 +61,7 @@ public:
   void visit(mir::ops::FullyConnectedOp& op) override;
   void visit(mir::ops::GatherOp& op) override;
   void visit(mir::ops::GemmOp& op) override;
+  void visit(mir::ops::LeakyReluOp& op) override;
   void visit(mir::ops::PadOp& op) override;
   void visit(mir::ops::PoolOp& op) override;
   void visit(mir::ops::ReduceFOp& op) override;
index 58c7c27..74b1570 100644 (file)
@@ -26,6 +26,7 @@
 #include "core/modelIR/operations/EluOp.h"
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GatherOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
@@ -457,14 +458,20 @@ CaffeOpCreator::convertReshape(const caffe::LayerParameter& layer,
 
 void CaffeOpCreator::checkReLU(const ReLUParameter& opts,
                                std::set<std::string>& problems_op_set) {
-  if (opts.has_negative_slope())
-    problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
 }
 
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
                             const std::vector<mir::IODescriptor>& inputs) {
-  auto relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
+  mir::Operation* relu;
+  if (layer.relu_param().has_negative_slope()) {
+    float alpha = layer.relu_param().negative_slope();
+    relu = createOp<ops::LeakyReluOp>(layer.name(), inputs[0], alpha);
+  } else {
+    relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
+  }
+
+
   return {relu->getOutput(0)};
 }
 
index 8e195d4..f0fd1c8 100644 (file)
@@ -30,6 +30,7 @@
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GatherOp.h"
 #include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
@@ -438,4 +439,17 @@ void NNInterpreter::visit(ops::GatherOp& op) {
   var(op.getId()) = Gather(data, indices, op)();
 }
 
+void NNInterpreter::visit(ops::LeakyReluOp& op) {
+  auto operand = op.getPrevNodes()[0];
+  float alpha = op.getAlpha();
+  Tensor<float> input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = Fill<float>(
+    op.getOutputShape(0), [&input, alpha](const Index& id) {
+      float val = input.at(id);
+      return val > 0.0f ? val : val * alpha;
+    })();
+
+  DUMP(op, false);
+}
+
 } // namespace nnc
index 9ab20a7..da2a9b1 100644 (file)
@@ -39,6 +39,7 @@ using namespace std;
 #include "cpp_sigmoid.generated.h"
 #include "cpp_sqrt.generated.h"
 #include "cpp_relu.generated.h"
+#include "cpp_leaky_relu.generated.h"
 #include "cpp_reduce.generated.h"
 #include "cpp_resize.generated.h"
 #include "cpp_softmax.generated.h"
@@ -307,6 +308,7 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
   out.write(cpp_scale, sizeof(cpp_scale));
   out.write(cpp_dropout, sizeof(cpp_dropout));
   out.write(cpp_batchnorm, sizeof(cpp_batchnorm));
+  out.write(cpp_leaky_relu, sizeof(cpp_leaky_relu));
 
   // gen NN constructor
   out << className << "::" << className << "(const string &parametersPath)\n"
index b4fb578..1127e36 100644 (file)
@@ -37,6 +37,7 @@
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GatherOp.h"
 #include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
@@ -345,4 +346,8 @@ void ModelAnalyzer::visit(mir::ops::SigmoidOp& op) {
   addOpDescr(&op, "sigmoid");
 }
 
+void ModelAnalyzer::visit(mir::ops::LeakyReluOp& op) {
+  addOpDescr(&op, "leakyRelu");
+}
+
 } // namespace nnc
index 04a56cb..702e905 100644 (file)
@@ -100,6 +100,7 @@ public:
   void visit(mir::ops::EluOp& op) override;
   void visit(mir::ops::FullyConnectedOp& op) override;
   void visit(mir::ops::GatherOp& op) override;
+  void visit(mir::ops::LeakyReluOp& op) override;
   void visit(mir::ops::GemmOp& op) override;
   void visit(mir::ops::PadOp& op) override;
   void visit(mir::ops::PoolOp& op) override;
index 9eeace8..ffd6c20 100644 (file)
@@ -34,6 +34,7 @@
 #include "core/modelIR/operations/FullyConnectedOp.h"
 #include "core/modelIR/operations/GatherOp.h"
 #include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
@@ -380,4 +381,10 @@ void Serializer::visit(mir::ops::SigmoidOp& op) {
   _curOp->_paramStartOffset = _buffer.size();
 }
 
+void Serializer::visit(mir::ops::LeakyReluOp& op) {
+  _curOp->_paramStartOffset = _buffer.size();
+  serializeT<float>(op.getAlpha());
+  serializeShape(op.getOutputShape(0));
+}
+
 } // namespace nnc
index 789be93..6e5c7cc 100644 (file)
@@ -53,6 +53,7 @@ public:
   void visit(mir::ops::ElementwiseOp& op) override;
   void visit(mir::ops::EluOp& op) override;
   void visit(mir::ops::FullyConnectedOp& op) override;
+  void visit(mir::ops::LeakyReluOp& op) override;
   void visit(mir::ops::GatherOp& op) override;
   void visit(mir::ops::GemmOp& op) override;
   void visit(mir::ops::PadOp& op) override;
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_leaky_relu.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_leaky_relu.def
new file mode 100644 (file)
index 0000000..3526004
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+void leakyRelu(Tensor& out, const char* params, const Tensor& in) {
+  const float* input = in.getData();
+  out.reShape(in.getShape());
+  float* output = out.getData();
+  const float alpha = deserializeT<float>(params);
+
+  size_t data_length = in.getShape().getNumElems();
+
+  for( int i = 0; i < data_length; ++i ) {
+    float val = input[i];
+    float res = val > 0 ? val : val * alpha;
+    output[i] = res;
+  }
+}
index e55e068..de060f0 100644 (file)
@@ -112,6 +112,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
     case BuiltinOperator_RELU6:
     case BuiltinOperator_TRANSPOSE:
     case BuiltinOperator_STRIDED_SLICE:
+    case BuiltinOperator_LEAKY_RELU:
       // No checks
       break;
     default:
@@ -276,6 +277,10 @@ void TfliteImporter::walkOperator(const Operator* op) {
       outputs = _opCreator->createStridedSlice(
         inputs, params, op->builtin_options_as<StridedSliceOptions>());
       break;
+    case BuiltinOperator_LEAKY_RELU:
+      outputs = _opCreator->createLeakyRelu(inputs, params,
+                                            op->builtin_options_as<LeakyReluOptions>());
+      break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
   }
index be8a0ae..6d039b3 100644 (file)
@@ -25,6 +25,7 @@
 #include "core/modelIR/operations/DepthwiseConv2DOp.h"
 #include "core/modelIR/operations/ElementwiseOp.h"
 #include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
@@ -485,4 +486,12 @@ TFLiteOpCreator::createStridedSlice(InputOps& inputs, const InputParams& params,
                                   slice_outputs[0]->getOutput(0), squeeze_dims);
 }
 
+std::vector<mir::Operation*>
+TFLiteOpCreator::createLeakyRelu(TFLiteOpCreator::InputOps& inputs, const TFLiteOpCreator::InputParams&,
+                                 const ::tflite::LeakyReluOptions* opts) {
+  float alpha = opts->alpha();
+
+  return createOp<ops::LeakyReluOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), alpha);
+}
+
 } // namespace nnc
index 482e2e3..de79a64 100644 (file)
@@ -136,6 +136,13 @@ public:
   std::vector<mir::Operation*> createStridedSlice(InputOps&, const InputParams&,
                                                   const ::tflite::StridedSliceOptions*);
 
+  /**
+   * @brief Create leaky relu activation
+   * @return
+   */
+  std::vector<mir::Operation*> createLeakyRelu(InputOps&, const InputParams&,
+                                               const ::tflite::LeakyReluOptions*);
+
   void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
 
   void checkConcatenation(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
index 1007db6..da93b33 100644 (file)
@@ -51,6 +51,7 @@
 
 #include "code_snippets/cpp_operations.def"
 #include "code_snippets/cpp_scale.def"
+#include "code_snippets/cpp_leaky_relu.def"
 
 // soft backend part
 
@@ -71,6 +72,7 @@
 #include "core/modelIR/operations/PoolOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
 #include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
@@ -798,6 +800,19 @@ TEST(cpp_operations_test, relu) {
   createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
 }
 
+TEST(cpp_operations_test, leaky_relu) {
+  // test prerequisites
+  vector<int> shape_data{2, 3, 4, 5};
+  Tensor input_atensor;
+  vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+  fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+  auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::LeakyReluOp>("y", inputs[0], 0.1);
+  };
+
+  createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
+}
+
 TEST(cpp_operations_test, sigmoid) {
   // test prerequisites
   vector<int> shape_data{2, 3, 4, 5};