[nnc] Support for Sigmoid activation function (#2685)
authorСергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 <s.barannikov@samsung.com>
Mon, 17 Dec 2018 12:18:00 +0000 (15:18 +0300)
committerEfimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Mon, 17 Dec 2018 12:18:00 +0000 (15:18 +0300)
Add support for Sigmoid activation function:
* In Model IR
* In C++ CPU backend
* In interpreter
* In Caffe frontend
* In TFLite frontend
* In ONNX frontend

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
30 files changed:
contrib/nnc/core/modelIR/IrDotDumper.cpp
contrib/nnc/core/modelIR/Operation.cpp
contrib/nnc/include/core/modelIR/IrDotDumper.h
contrib/nnc/include/core/modelIR/operations/SigmoidOp.h [new file with mode: 0644]
contrib/nnc/include/core/modelIR/operations/operations.lst.h
contrib/nnc/include/passes/interpreter/Interpreter.h
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h
contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h
contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
contrib/nnc/passes/interpreter/Interpreter.cpp
contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp
contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h
contrib/nnc/passes/soft_backend/CPPGenerator.cpp
contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
contrib/nnc/passes/soft_backend/ModelAnalyzer.h
contrib/nnc/passes/soft_backend/SBSerializer.cpp
contrib/nnc/passes/soft_backend/SBSerializer.h
contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def
contrib/nnc/passes/soft_backend/code_snippets/cpp_sigmoid.def [new file with mode: 0644]
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
contrib/nnc/unittests/soft_backend/CPPOperations.cpp

index 23669ed..7e059a2 100644 (file)
@@ -38,6 +38,7 @@
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
@@ -322,7 +323,19 @@ void IrDotDumper::visit(ops::TransposeOp& op) {
 }
 
 void IrDotDumper::visit(ops::GatherOp& op) {
-  auto node_info = DotIrNodeInfo().withType("GatherOp", op.getName());
+  auto node_info = DotIrNodeInfo().withType("GatherOp", op.getName())
+      .withInShapes(getInputShapes(op))
+      .withOutShapes(getOutputShapes(op));
+
+  dotBuilder.updateWithOp(&op, node_info);
+}
+
+void IrDotDumper::visit(ops::SigmoidOp& op) {
+  auto node_info = DotIrNodeInfo().withType("SigmoidOp", op.getName())
+      .withInShapes(getInputShapes(op))
+      .withOutShapes(getOutputShapes(op));
+
+  dotBuilder.updateWithOp(&op, node_info);
 }
 
 } // namespace mir
index 0ecfc66..187e08c 100644 (file)
@@ -36,6 +36,7 @@
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
index 6245bb1..5770e54 100644 (file)
@@ -52,9 +52,10 @@ public:
   void visit(ops::ReshapeOp& op) override;
   void visit(ops::ResizeOp& op) override;
   void visit(ops::ScaleOp& op) override;
+  void visit(ops::SigmoidOp& op) override;
   void visit(ops::SoftmaxOp& op) override;
-  void visit(ops::SqueezeOp& op) override;
   void visit(ops::SqrtOp& op) override;
+  void visit(ops::SqueezeOp& op) override;
   void visit(ops::TanhOp& op) override;
   void visit(ops::TransposeOp& op) override;
   void visit(ops::VariableOp& op) override;
diff --git a/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h b/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h
new file mode 100644 (file)
index 0000000..13494c0
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_SIGMOID_H_
+#define _NNC_CORE_IR_MODEL_SIGMOID_H_
+
+#include "core/modelIR/Operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+class SigmoidOp : public Operation {
+public:
+  explicit SigmoidOp(const IODescriptor& arg) : Operation(Type::sigmoid, {arg}) {
+    // Infer output shape.
+    setOutputShape(0, getInputShape(0));
+  }
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_SIGMOID_H_
index 14fa493..2fd8e9d 100644 (file)
@@ -34,6 +34,7 @@ HANDLE_OP(ReLU, ReluOp)
 HANDLE_OP(reshape, ReshapeOp)
 HANDLE_OP(resizeIm, ResizeOp)
 HANDLE_OP(scale, ScaleOp)
+HANDLE_OP(sigmoid, SigmoidOp)
 HANDLE_OP(batchNorm, BatchNormOp)
 HANDLE_OP(dropout, DropoutOp)
 HANDLE_OP(tanh, TanhOp)
index f7136f0..bceb810 100644 (file)
@@ -57,9 +57,10 @@ public:
   void visit(ops::ReshapeOp& op) override;
   void visit(ops::ResizeOp& op) override;
   void visit(ops::ScaleOp& op) override;
+  void visit(ops::SigmoidOp& op) override;
   void visit(ops::SoftmaxOp& op) override;
-  void visit(ops::SqueezeOp& op) override;
   void visit(ops::SqrtOp& op) override;
+  void visit(ops::SqueezeOp& op) override;
   void visit(ops::TanhOp& op) override;
   void visit(ops::TransposeOp& op) override;
   void visit(ops::VariableOp& op) override;
index 36dd622..21b21f0 100644 (file)
@@ -1,3 +1,19 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 #include "AclCppOpGenerator.h"
 #include "passes/acl_soft_backend/AclCppException.h"
 #include "core/modelIR/ShapeRange.h"
@@ -897,5 +913,10 @@ void AclCppOpGenerator::visit(mir::ops::TransposeOp& op) {
 void AclCppOpGenerator::visit(mir::ops::GatherOp& op) {
   assert(false && "Unimplemented operation: GatherOp");
 }
+
+void AclCppOpGenerator::visit(mir::ops::SigmoidOp& op) {
+  assert(false && "Unimplemented operation: SigmoidOp");
+}
+
 }
 // namespace nnc
index 044cf03..e97e1ba 100644 (file)
@@ -68,9 +68,10 @@ public:
   void visit(mir::ops::ReshapeOp& op) override;
   void visit(mir::ops::ResizeOp& op) override;
   void visit(mir::ops::ScaleOp& op) override;
+  void visit(mir::ops::SigmoidOp& op) override;
   void visit(mir::ops::SoftmaxOp& op) override;
-  void visit(mir::ops::SqueezeOp& op) override;
   void visit(mir::ops::SqrtOp& op) override;
+  void visit(mir::ops::SqueezeOp& op) override;
   void visit(mir::ops::TanhOp& op) override;
   void visit(mir::ops::TransposeOp& op) override;
   void visit(mir::ops::VariableOp& op) override;
index 537f7dd..3ed8086 100644 (file)
@@ -130,6 +130,7 @@ void Caffe2Importer::collectUnsupportedOp(const OperatorDef& op) {
     case SupportedCaffe2OpType::dropout:
     case SupportedCaffe2OpType::givenTensorFill:
     case SupportedCaffe2OpType::relu:
+    case SupportedCaffe2OpType::sigmoid:
     case SupportedCaffe2OpType::softmax:
     case SupportedCaffe2OpType::sum:
       _opCreator->commonCheck(op, _problemsOpSet);
@@ -199,6 +200,9 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
     case SupportedCaffe2OpType::relu:
       outputs = _opCreator->convertRelu(inputs);
       break;
+    case SupportedCaffe2OpType::sigmoid:
+      outputs = _opCreator->convertSigmoid(inputs);
+      break;
     case SupportedCaffe2OpType::softmax:
       outputs = _opCreator->convertSoftmax(inputs, op);
       break;
@@ -257,6 +261,7 @@ std::vector<mir::IODescriptor> Caffe2Importer::getInputMIROps(const OperatorDef&
     case SupportedCaffe2OpType::maxPool:
     case SupportedCaffe2OpType::mul:
     case SupportedCaffe2OpType::relu:
+    case SupportedCaffe2OpType::sigmoid:
     case SupportedCaffe2OpType::softmax:
     case SupportedCaffe2OpType::spatialBN:
       inputs.push_back(_blobNameToIODescriptor[op.input(0)]);
@@ -293,6 +298,7 @@ const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorType
         {"MaxPool",         SupportedCaffe2OpType::maxPool},
         {"Mul",             SupportedCaffe2OpType::mul},
         {"Relu",            SupportedCaffe2OpType::relu},
+        {"Sigmoid",         SupportedCaffe2OpType::sigmoid},
         {"Softmax",         SupportedCaffe2OpType::softmax},
         {"SpatialBN",       SupportedCaffe2OpType::spatialBN},
         {"Sum",             SupportedCaffe2OpType::sum}
index f77c76a..8a679a9 100644 (file)
@@ -27,6 +27,7 @@
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/TransposeOp.h"
 #include "core/modelIR/operations/VariableOp.h"
@@ -48,7 +49,6 @@
 #include <vector>
 #include "option/Options.h"
 
-
 namespace nnc {
 
 using namespace ::caffe2;
@@ -339,11 +339,18 @@ Caffe2OpCreator::convertMul(const std::vector<mir::IODescriptor>& inputs,
   return {convertMIRToCaffe(mul->getOutput(0))};
 }
 
-std::vector<IODescriptor> Caffe2OpCreator::convertRelu(const std::vector<IODescriptor>& inputs) {
+std::vector<IODescriptor>
+Caffe2OpCreator::convertRelu(const std::vector<IODescriptor>& inputs) {
   auto relu = createOp<ops::ReluOp>(inputs[0]);
   return {relu->getOutput(0)};
 }
 
+std::vector<IODescriptor>
+Caffe2OpCreator::convertSigmoid(const std::vector<IODescriptor>& inputs) {
+  auto result = createOp<ops::SigmoidOp>(inputs[0]);
+  return {result->getOutput(0)};
+}
+
 std::vector<IODescriptor> Caffe2OpCreator::convertSoftmax(const std::vector<IODescriptor>& inputs,
                                                           const ::caffe2::OperatorDef& op) {
   int axis = getSingleArgument(op, "axis", 1);
index cee363f..5bbbf9d 100644 (file)
@@ -82,6 +82,8 @@ public:
 
   std::vector<mir::IODescriptor> convertRelu(const std::vector<mir::IODescriptor>&);
 
+  std::vector<mir::IODescriptor> convertSigmoid(const std::vector<mir::IODescriptor>&);
+
   std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>&,
                                                 const ::caffe2::OperatorDef&);
 
index 8ac7260..2a1de48 100644 (file)
@@ -31,6 +31,7 @@ enum class SupportedCaffe2OpType : uint8_t {
   maxPool,
   mul,
   relu,
+  sigmoid,
   softmax,
   spatialBN,
   sum
index 646486e..cc4a8eb 100644 (file)
@@ -119,24 +119,27 @@ void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& layer) {
     case CaffeOpType::dropout:
       outputs = _opCreator->convertDropout(layer, inputs);
       break;
-    case CaffeOpType ::tanh:
+    case CaffeOpType::tanh:
       outputs = _opCreator->convertTanH(layer, inputs);
       break;
-    case CaffeOpType ::ELU:
+    case CaffeOpType::ELU:
       outputs = _opCreator->convertELU(layer, inputs);
       break;
-    case CaffeOpType ::eltwise:
+    case CaffeOpType::eltwise:
       outputs = _opCreator->convertEltwise(layer, inputs);
       break;
     case CaffeOpType::embed:
       outputs = _opCreator->convertEmbed(layer, inputs);
       break;
-    case CaffeOpType ::deconvolution:
+    case CaffeOpType::deconvolution:
       outputs = _opCreator->convertDeconvolution(layer, inputs);
       break;
     case CaffeOpType::split:
       outputs = _opCreator->convertSplit(layer, inputs);
       break;
+    case CaffeOpType::sigmoid:
+      outputs = _opCreator->convertSigmoid(layer, inputs);
+      break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
   }
@@ -166,6 +169,7 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
     case CaffeOpType::eltwise:
     case CaffeOpType::ELU:
     case CaffeOpType::embed:
+    case CaffeOpType::sigmoid:
     case CaffeOpType::tanh:
       // No checks
       break;
index 1432e0a..b9731a7 100644 (file)
@@ -30,6 +30,7 @@
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/TanhOp.h"
 #include "core/modelIR/operations/TransposeOp.h"
@@ -560,6 +561,13 @@ CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer,
 }
 
 std::vector<mir::IODescriptor>
+CaffeOpCreator::convertSigmoid(const caffe::LayerParameter& layer,
+                               const std::vector<mir::IODescriptor>& inputs) {
+  auto result = createOp<ops::SigmoidOp>(layer.name(), inputs[0]);
+  return {result->getOutput(0)};
+}
+
+std::vector<mir::IODescriptor>
 CaffeOpCreator::convertTanH(const caffe::LayerParameter& layer,
                             const std::vector<mir::IODescriptor>& inputs) {
   auto tanh = createOp<ops::TanhOp>(layer.name(), inputs[0]);
index d8e4b57..25e8265 100644 (file)
@@ -93,6 +93,10 @@ public:
                const std::vector<mir::IODescriptor>& inputs);
 
   std::vector<mir::IODescriptor>
+  convertSigmoid(const caffe::LayerParameter& layer,
+                 const std::vector<mir::IODescriptor>& inputs);
+
+  std::vector<mir::IODescriptor>
   convertTanH(const caffe::LayerParameter& layer,
               const std::vector<mir::IODescriptor>& inputs);
 
index 0160b02..c7f2e76 100644 (file)
  * limitations under the License.
  */
 
-#include <cmath>
-#include <cassert>
-#include <vector>
-
-#include "pass/PassException.h"
-
 #include "passes/interpreter/Interpreter.h"
 
 #include "core/modelIR/operations/BatchNormOp.h"
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
-#include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/TanhOp.h"
 #include "core/modelIR/operations/TransposeOp.h"
 #include "core/modelIR/operations/VariableOp.h"
 #include "ops/Softmax.h"
 #include "ops/Transpose.h"
 
+#include <vector>
+#include <cmath>
+#include <cassert>
+
 namespace nnc {
 
 using namespace nnc::mir;
@@ -141,6 +140,15 @@ void NNInterpreter::visit(ops::ReluOp& op) {
       op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })();
 }
 
+void NNInterpreter::visit(ops::SigmoidOp& op) {
+  mapByName(&op);
+  auto operand = op.getPrevNodes()[0];
+  Tensor<float> input(var(operand.op->getId())[operand.index]);
+  var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input](const Index& id) {
+    return 1.f / (1.f + std::exp(-input.at(id)));
+  })();
+}
+
 void NNInterpreter::visit(ops::SoftmaxOp& op) {
   mapByName(&op);
   auto operand = op.getPrevNodes()[0];
index f960ead..b0c05ae 100644 (file)
@@ -61,6 +61,7 @@ static void collectUnsupportedOps(std::unique_ptr<onnx::ModelProto>& model) {
       case ONNXOpCode::opMul:
       case ONNXOpCode::opRelu:
       case ONNXOpCode::opReshape:
+      case ONNXOpCode::opSigmoid:
       case ONNXOpCode::opScale:
       case ONNXOpCode::opSoftmax:
       case ONNXOpCode::opSum:
@@ -282,6 +283,9 @@ mir::Graph *ONNXImporterImpl::createIR() {
       case ONNXOpCode::opRelu:
         outputs = _opCreator.convertRelu(input_nodes);
         break;
+      case ONNXOpCode::opSigmoid:
+        outputs = _opCreator.convertSigmoid(input_nodes);
+        break;
       case ONNXOpCode::opSoftmax:
         outputs = _opCreator.convertSoftmax(input_nodes, onnx_node);
         break;
index 1f89611..78ec2b6 100644 (file)
@@ -36,6 +36,7 @@
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/VariableOp.h"
 #include "core/modelIR/operations/ElementwiseOp.h"
@@ -222,6 +223,11 @@ std::vector<Operation*> ONNXOpCreator::convertRelu(InputOps& inputs) {
   return createOp<ops::ReluOp>(inputs[0]->getOutput(0));
 }
 
+std::vector<Operation*> ONNXOpCreator::convertSigmoid(InputOps& inputs) {
+  assert(inputs.size() == 1);
+  return createOp<ops::SigmoidOp>(inputs[0]->getOutput(0));
+}
+
 std::vector<Operation*> ONNXOpCreator::convertElementwise(InputOps& inputs,
                                                          mir::ops::ElementwiseOp::OpType op_type) {
   std::vector<IODescriptor> descriptors;
index 592efca..0eda45c 100644 (file)
@@ -45,6 +45,7 @@ public:
   std::vector<mir::Operation*> convertSoftmax(InputOps& inputs, const onnx::NodeProto& onnx_node);
   std::vector<mir::Operation*> convertReshape(mir::Operation* input_data, mir::Shape output_shape);
   std::vector<mir::Operation*> convertRelu(InputOps& inputs);
+  std::vector<mir::Operation*> convertSigmoid(InputOps& inputs);
   std::vector<mir::Operation*> convertElementwise(InputOps& inputs,
                                                  mir::ops::ElementwiseOp::OpType op_type);
   std::vector<mir::Operation*> convertScale(InputOps& inputs, const onnx::NodeProto& node);
index 6f678b4..b0513a2 100644 (file)
@@ -36,6 +36,7 @@ using namespace std;
 #include "cpp_depthwise_conv.generated.h"
 #include "cpp_fully_connected.generated.h"
 #include "cpp_pool.generated.h"
+#include "cpp_sigmoid.generated.h"
 #include "cpp_sqrt.generated.h"
 #include "cpp_relu.generated.h"
 #include "cpp_reduce.generated.h"
@@ -283,6 +284,7 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
   out.write(cpp_conv, sizeof(cpp_conv));
   out.write(cpp_depthwise_conv, sizeof(cpp_depthwise_conv));
   out.write(cpp_fully_connected, sizeof(cpp_fully_connected));
+  out.write(cpp_sigmoid, sizeof(cpp_sigmoid));
   out.write(cpp_pool, sizeof(cpp_pool));
   out.write(cpp_relu, sizeof(cpp_relu));
   out.write(cpp_reduce, sizeof(cpp_reduce));
index 0008e8c..206e18b 100644 (file)
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
-#include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/TanhOp.h"
 #include "core/modelIR/operations/TransposeOp.h"
 #include "core/modelIR/operations/VariableOp.h"
@@ -318,4 +319,8 @@ void ModelAnalyzer::visit(mir::ops::GatherOp& op) {
   addOpDescr(&op, "gather");
 }
 
+void ModelAnalyzer::visit(mir::ops::SigmoidOp& op) {
+  addOpDescr(&op, "sigmoid");
+}
+
 } // namespace nnc
index 4f87e91..f6965fc 100644 (file)
@@ -110,9 +110,10 @@ public:
   void visit(mir::ops::ReshapeOp& op) override;
   void visit(mir::ops::ResizeOp& op) override;
   void visit(mir::ops::ScaleOp& op) override;
+  void visit(mir::ops::SigmoidOp& op) override;
   void visit(mir::ops::SoftmaxOp& op) override;
-  void visit(mir::ops::SqueezeOp& op) override;
   void visit(mir::ops::SqrtOp& op) override;
+  void visit(mir::ops::SqueezeOp& op) override;
   void visit(mir::ops::TanhOp& op) override;
   void visit(mir::ops::TransposeOp& op) override;
   void visit(mir::ops::VariableOp& op) override;
index 3c8b7f6..9314f21 100644 (file)
@@ -378,4 +378,8 @@ void Serializer::visit(mir::ops::GatherOp& op) {
   serializeShape(op.getOutputShape(0));
 }
 
+void Serializer::visit(mir::ops::SigmoidOp& op) {
+  _curOp->_paramStartOffset = _buffer.size();
+}
+
 } // namespace nnc
index a5ca3ed..92c22c4 100644 (file)
@@ -62,9 +62,10 @@ public:
   void visit(mir::ops::ReshapeOp& op) override;
   void visit(mir::ops::ResizeOp& op) override;
   void visit(mir::ops::ScaleOp& op) override;
+  void visit(mir::ops::SigmoidOp& op) override;
   void visit(mir::ops::SoftmaxOp& op) override;
-  void visit(mir::ops::SqueezeOp& op) override;
   void visit(mir::ops::SqrtOp& op) override;
+  void visit(mir::ops::SqueezeOp& op) override;
   void visit(mir::ops::TanhOp& op) override;
   void visit(mir::ops::TransposeOp& op) override;
   void visit(mir::ops::VariableOp& op) override;
index bcbc917..d99913f 100644 (file)
@@ -453,6 +453,12 @@ void relu(Tensor &out, const char *params, const Tensor &in)
   Relu(input, input_d, out.getData(), input_d);
 }
 
+void sigmoid(Tensor& out, const char* params, const Tensor& in) {
+  out.reShape(in.getShape());
+  Logistic(shapeToRuntimeShape(in.getShape()), in.getData(),
+           shapeToRuntimeShape(out.getShape()), out.getData());
+}
+
 void elu(Tensor &out, const char* params, const Tensor& in) {
   const float* input = in.getData();
   const Dims<4> inp_d = shapeToDims(in.getShape());
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_sigmoid.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_sigmoid.def
new file mode 100644 (file)
index 0000000..96d72f9
--- /dev/null
@@ -0,0 +1,22 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+inline void Logistic(const RuntimeShape& input_shape, const float* input_data,
+                     const RuntimeShape& output_shape, float* output_data) {
+  const int flat_size = MatchingFlatSize(input_shape, output_shape);
+
+  for (int i = 0; i < flat_size; i++) {
+    float val = input_data[i];
+    float result = 1.f / (1.f + std::exp(-val));
+    output_data[i] = result;
+  }
+}
index 62c62a0..caba51b 100644 (file)
@@ -94,6 +94,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
     case BuiltinOperator_RESHAPE:
     case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
     case BuiltinOperator_SQUEEZE:
+    case BuiltinOperator_LOGISTIC:
     case BuiltinOperator_SQRT:
     case BuiltinOperator_PAD:
     case BuiltinOperator_ADD:
@@ -204,6 +205,9 @@ void TfliteImporter::walkOperator(const Operator* op) {
     case BuiltinOperator_SQUEEZE:
       outputs = _opCreator->createSqueeze(inputs, params, op->builtin_options_as<SqueezeOptions>());
       break;
+    case BuiltinOperator_LOGISTIC:
+      outputs = _opCreator->createLogistic(inputs, params);
+      break;
     case BuiltinOperator_SQRT:
       outputs = _opCreator->createSqrt(inputs, params);
       break;
index f02ed08..5e56930 100644 (file)
 #include "tflite_op_creator.h"
 #include "schema_generated.h"
 
+#include "core/modelIR/operations/BiasAddOp.h"
+#include "core/modelIR/operations/CappedReluOp.h"
 #include "core/modelIR/operations/ConcatOp.h"
 #include "core/modelIR/operations/Conv2DOp.h"
+#include "core/modelIR/operations/Deconv2DOp.h"
 #include "core/modelIR/operations/DepthwiseConv2DOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
 #include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/PadOp.h"
+#include "core/modelIR/operations/PoolOp.h"
+#include "core/modelIR/operations/ReduceFOp.h"
 #include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
-#include "core/modelIR/operations/CappedReluOp.h"
-#include "core/modelIR/operations/TanhOp.h"
-#include "core/modelIR/operations/ElementwiseOp.h"
-#include "core/modelIR/operations/Deconv2DOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
-#include "core/modelIR/operations/ReduceFOp.h"
-#include "core/modelIR/operations/PoolOp.h"
-#include "core/modelIR/operations/BiasAddOp.h"
-#include "core/modelIR/operations/ReshapeOp.h"
-#include "core/modelIR/operations/SqueezeOp.h"
-#include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/TanhOp.h"
+
 #include "core/modelIR/Tensor.h"
 #include "core/modelIR/ShapeRange.h"
 #include "pass/PassException.h"
@@ -378,4 +380,9 @@ TFLiteOpCreator::createSqrt(InputOps& inputs, InputParams&) {
   return createOp<ops::SqrtOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
 }
 
+std::vector<mir::Operation*>
+TFLiteOpCreator::createLogistic(InputOps& inputs, InputParams&) {
+  return createOp<ops::SigmoidOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
+}
+
 } // namespace nnc
index 3342f1c..99aff24 100644 (file)
@@ -75,7 +75,9 @@ public:
   std::vector<mir::Operation*> convertResizeNN(InputOps, InputParams,
                                                const ::tflite::ResizeNearestNeighborOptions*);
 
-  std::vector<mir::Operation*> createSqrt(InputOps, InputParams);
+  std::vector<mir::Operation*> createLogistic(InputOps& inputs, InputParams& params);
+
+  std::vector<mir::Operation*> createSqrt(InputOps& inputs, InputParams& params);
 
   std::vector<mir::Operation*> createSqueeze(InputOps& inputs, InputParams& params,
                                              const ::tflite::SqueezeOptions* opts);
index 06d6f2c..234f048 100644 (file)
@@ -37,6 +37,7 @@
 #include "code_snippets/cpp_elu.def"
 #include "code_snippets/cpp_fully_connected.def"
 #include "code_snippets/cpp_gather.def"
+#include "code_snippets/cpp_sigmoid.def"
 #include "code_snippets/cpp_pad.def"
 #include "code_snippets/cpp_pool.def"
 #include "code_snippets/cpp_reduce.def"
 #include "SBSerializer.h"
 
 // operations part
-#include "core/modelIR/operations/VariableOp.h"
-#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/BiasAddOp.h"
+#include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ConcatOp.h"
 #include "core/modelIR/operations/Conv2DOp.h"
+#include "core/modelIR/operations/Deconv2DOp.h"
 #include "core/modelIR/operations/DepthwiseConv2DOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
+#include "core/modelIR/operations/EluOp.h"
+#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/PadOp.h"
 #include "core/modelIR/operations/PoolOp.h"
-#include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
-#include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
-#include "core/modelIR/operations/ConcatOp.h"
-#include "core/modelIR/operations/BiasAddOp.h"
-#include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
+#include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/SqrtOp.h"
-#include "core/modelIR/operations/EluOp.h"
-#include "core/modelIR/operations/ElementwiseOp.h"
-#include "core/modelIR/operations/Deconv2DOp.h"
 #include "core/modelIR/operations/TanhOp.h"
 #include "core/modelIR/operations/TransposeOp.h"
-#include "core/modelIR/operations/PadOp.h"
+#include "core/modelIR/operations/VariableOp.h"
 
 // various headers
 #include "core/modelIR/TensorVariant.h"
@@ -722,6 +724,19 @@ TEST(cpp_operations_test, relu) {
   createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
 }
 
+TEST(cpp_operations_test, sigmoid) {
+  // test prerequisites
+  vector<int> shape_data{2, 3, 4, 5};
+  Tensor input_atensor;
+  vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+  fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+  auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+    return g.create<mir::ops::SigmoidOp>("y", inputs[0]);
+  };
+
+  createAndRunTestGraph(opGenerator, sigmoid, input_ntensors, input_atensor);
+}
+
 TEST(cpp_operations_test, elu) {
   // test prerequisites
   vector<int> shape_data{2, 3, 4, 5};