From: Сергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 Date: Mon, 17 Dec 2018 12:18:00 +0000 (+0300) Subject: [nnc] Support for Sigmoid activation function (#2685) X-Git-Tag: nncc_backup~1058 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8de3f2cf757f8052fb30fa1f871005f19dbf82f7;p=platform%2Fcore%2Fml%2Fnnfw.git [nnc] Support for Sigmoid activation function (#2685) Add support for Sigmoid activation function: * In Model IR * In C++ CPU backend * In interpreter * In Caffe frontend * In TFLite frontend * In ONNX frontend Signed-off-by: Sergei Barannikov --- diff --git a/contrib/nnc/core/modelIR/IrDotDumper.cpp b/contrib/nnc/core/modelIR/IrDotDumper.cpp index 23669ed..7e059a2 100644 --- a/contrib/nnc/core/modelIR/IrDotDumper.cpp +++ b/contrib/nnc/core/modelIR/IrDotDumper.cpp @@ -38,6 +38,7 @@ #include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ResizeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/SqrtOp.h" #include "core/modelIR/operations/SqueezeOp.h" @@ -322,7 +323,19 @@ void IrDotDumper::visit(ops::TransposeOp& op) { } void IrDotDumper::visit(ops::GatherOp& op) { - auto node_info = DotIrNodeInfo().withType("GatherOp", op.getName()); + auto node_info = DotIrNodeInfo().withType("GatherOp", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)); + + dotBuilder.updateWithOp(&op, node_info); +} + +void IrDotDumper::visit(ops::SigmoidOp& op) { + auto node_info = DotIrNodeInfo().withType("SigmoidOp", op.getName()) + .withInShapes(getInputShapes(op)) + .withOutShapes(getOutputShapes(op)); + + dotBuilder.updateWithOp(&op, node_info); +} } // namespace mir diff --git a/contrib/nnc/core/modelIR/Operation.cpp b/contrib/nnc/core/modelIR/Operation.cpp index 0ecfc66..187e08c 100644 --- a/contrib/nnc/core/modelIR/Operation.cpp +++ b/contrib/nnc/core/modelIR/Operation.cpp @@ 
-36,6 +36,7 @@ #include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ResizeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/SqueezeOp.h" #include "core/modelIR/operations/SqrtOp.h" diff --git a/contrib/nnc/include/core/modelIR/IrDotDumper.h b/contrib/nnc/include/core/modelIR/IrDotDumper.h index 6245bb1..5770e54 100644 --- a/contrib/nnc/include/core/modelIR/IrDotDumper.h +++ b/contrib/nnc/include/core/modelIR/IrDotDumper.h @@ -52,9 +52,10 @@ public: void visit(ops::ReshapeOp& op) override; void visit(ops::ResizeOp& op) override; void visit(ops::ScaleOp& op) override; + void visit(ops::SigmoidOp& op) override; void visit(ops::SoftmaxOp& op) override; - void visit(ops::SqueezeOp& op) override; void visit(ops::SqrtOp& op) override; + void visit(ops::SqueezeOp& op) override; void visit(ops::TanhOp& op) override; void visit(ops::TransposeOp& op) override; void visit(ops::VariableOp& op) override; diff --git a/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h b/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h new file mode 100644 index 0000000..13494c0 --- /dev/null +++ b/contrib/nnc/include/core/modelIR/operations/SigmoidOp.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef _NNC_CORE_IR_MODEL_SIGMOID_H_ +#define _NNC_CORE_IR_MODEL_SIGMOID_H_ + +#include "core/modelIR/Operation.h" + +namespace nnc { +namespace mir { +namespace ops { + +class SigmoidOp : public Operation { +public: + explicit SigmoidOp(const IODescriptor& arg) : Operation(Type::sigmoid, {arg}) { + // Infer output shape. + setOutputShape(0, getInputShape(0)); + } +}; + +} // namespace ops +} // namespace mir +} // namespace nnc + +#endif //_NNC_CORE_IR_MODEL_SIGMOID_H_ diff --git a/contrib/nnc/include/core/modelIR/operations/operations.lst.h b/contrib/nnc/include/core/modelIR/operations/operations.lst.h index 14fa493..2fd8e9d 100644 --- a/contrib/nnc/include/core/modelIR/operations/operations.lst.h +++ b/contrib/nnc/include/core/modelIR/operations/operations.lst.h @@ -34,6 +34,7 @@ HANDLE_OP(ReLU, ReluOp) HANDLE_OP(reshape, ReshapeOp) HANDLE_OP(resizeIm, ResizeOp) HANDLE_OP(scale, ScaleOp) +HANDLE_OP(sigmoid, SigmoidOp) HANDLE_OP(batchNorm, BatchNormOp) HANDLE_OP(dropout, DropoutOp) HANDLE_OP(tanh, TanhOp) diff --git a/contrib/nnc/include/passes/interpreter/Interpreter.h b/contrib/nnc/include/passes/interpreter/Interpreter.h index f7136f0..bceb810 100644 --- a/contrib/nnc/include/passes/interpreter/Interpreter.h +++ b/contrib/nnc/include/passes/interpreter/Interpreter.h @@ -57,9 +57,10 @@ public: void visit(ops::ReshapeOp& op) override; void visit(ops::ResizeOp& op) override; void visit(ops::ScaleOp& op) override; + void visit(ops::SigmoidOp& op) override; void visit(ops::SoftmaxOp& op) override; - void visit(ops::SqueezeOp& op) override; void visit(ops::SqrtOp& op) override; + void visit(ops::SqueezeOp& op) override; void visit(ops::TanhOp& op) override; void visit(ops::TransposeOp& op) override; void visit(ops::VariableOp& op) override; diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp index 36dd622..21b21f0 100644 --- 
a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp +++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #include "AclCppOpGenerator.h" #include "passes/acl_soft_backend/AclCppException.h" #include "core/modelIR/ShapeRange.h" @@ -897,5 +913,10 @@ void AclCppOpGenerator::visit(mir::ops::TransposeOp& op) { void AclCppOpGenerator::visit(mir::ops::GatherOp& op) { assert(false && "Unimplemented operation: GatherOp"); } + +void AclCppOpGenerator::visit(mir::ops::SigmoidOp& op) { + assert(false && "Unimplemented operation: SigmoidOp"); +} + } // namespace nnc diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h index 044cf03..e97e1ba 100644 --- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h +++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.h @@ -68,9 +68,10 @@ public: void visit(mir::ops::ReshapeOp& op) override; void visit(mir::ops::ResizeOp& op) override; void visit(mir::ops::ScaleOp& op) override; + void visit(mir::ops::SigmoidOp& op) override; void visit(mir::ops::SoftmaxOp& op) override; - void visit(mir::ops::SqueezeOp& op) override; void visit(mir::ops::SqrtOp& op) override; + void visit(mir::ops::SqueezeOp& op) override; void visit(mir::ops::TanhOp& op) override; void visit(mir::ops::TransposeOp& 
op) override; void visit(mir::ops::VariableOp& op) override; diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp index 537f7dd..3ed8086 100644 --- a/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp +++ b/contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp @@ -130,6 +130,7 @@ void Caffe2Importer::collectUnsupportedOp(const OperatorDef& op) { case SupportedCaffe2OpType::dropout: case SupportedCaffe2OpType::givenTensorFill: case SupportedCaffe2OpType::relu: + case SupportedCaffe2OpType::sigmoid: case SupportedCaffe2OpType::softmax: case SupportedCaffe2OpType::sum: _opCreator->commonCheck(op, _problemsOpSet); @@ -199,6 +200,9 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) { case SupportedCaffe2OpType::relu: outputs = _opCreator->convertRelu(inputs); break; + case SupportedCaffe2OpType::sigmoid: + outputs = _opCreator->convertSigmoid(inputs); + break; case SupportedCaffe2OpType::softmax: outputs = _opCreator->convertSoftmax(inputs, op); break; @@ -257,6 +261,7 @@ std::vector Caffe2Importer::getInputMIROps(const OperatorDef& case SupportedCaffe2OpType::maxPool: case SupportedCaffe2OpType::mul: case SupportedCaffe2OpType::relu: + case SupportedCaffe2OpType::sigmoid: case SupportedCaffe2OpType::softmax: case SupportedCaffe2OpType::spatialBN: inputs.push_back(_blobNameToIODescriptor[op.input(0)]); @@ -293,6 +298,7 @@ const std::map Caffe2Importer::_operatorType {"MaxPool", SupportedCaffe2OpType::maxPool}, {"Mul", SupportedCaffe2OpType::mul}, {"Relu", SupportedCaffe2OpType::relu}, + {"Sigmoid", SupportedCaffe2OpType::sigmoid}, {"Softmax", SupportedCaffe2OpType::softmax}, {"SpatialBN", SupportedCaffe2OpType::spatialBN}, {"Sum", SupportedCaffe2OpType::sum} diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp index f77c76a..8a679a9 100644 --- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp 
+++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp @@ -27,6 +27,7 @@ #include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/TransposeOp.h" #include "core/modelIR/operations/VariableOp.h" @@ -48,7 +49,6 @@ #include #include "option/Options.h" - namespace nnc { using namespace ::caffe2; @@ -339,11 +339,18 @@ Caffe2OpCreator::convertMul(const std::vector& inputs, return {convertMIRToCaffe(mul->getOutput(0))}; } -std::vector Caffe2OpCreator::convertRelu(const std::vector& inputs) { +std::vector +Caffe2OpCreator::convertRelu(const std::vector& inputs) { auto relu = createOp(inputs[0]); return {relu->getOutput(0)}; } +std::vector +Caffe2OpCreator::convertSigmoid(const std::vector& inputs) { + auto result = createOp(inputs[0]); + return {result->getOutput(0)}; +} + std::vector Caffe2OpCreator::convertSoftmax(const std::vector& inputs, const ::caffe2::OperatorDef& op) { int axis = getSingleArgument(op, "axis", 1); diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h index cee363f..5bbbf9d 100644 --- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h +++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h @@ -82,6 +82,8 @@ public: std::vector convertRelu(const std::vector&); + std::vector convertSigmoid(const std::vector&); + std::vector convertSoftmax(const std::vector&, const ::caffe2::OperatorDef&); diff --git a/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h b/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h index 8ac7260..2a1de48 100644 --- a/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h +++ b/contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h @@ -31,6 +31,7 @@ enum class SupportedCaffe2OpType : uint8_t { maxPool, mul, relu, + sigmoid, softmax, 
spatialBN, sum diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp index 646486e..cc4a8eb 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp @@ -119,24 +119,27 @@ void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& layer) { case CaffeOpType::dropout: outputs = _opCreator->convertDropout(layer, inputs); break; - case CaffeOpType ::tanh: + case CaffeOpType::tanh: outputs = _opCreator->convertTanH(layer, inputs); break; - case CaffeOpType ::ELU: + case CaffeOpType::ELU: outputs = _opCreator->convertELU(layer, inputs); break; - case CaffeOpType ::eltwise: + case CaffeOpType::eltwise: outputs = _opCreator->convertEltwise(layer, inputs); break; case CaffeOpType::embed: outputs = _opCreator->convertEmbed(layer, inputs); break; - case CaffeOpType ::deconvolution: + case CaffeOpType::deconvolution: outputs = _opCreator->convertDeconvolution(layer, inputs); break; case CaffeOpType::split: outputs = _opCreator->convertSplit(layer, inputs); break; + case CaffeOpType::sigmoid: + outputs = _opCreator->convertSigmoid(layer, inputs); + break; default: assert(false && "All unsupported types should have been found before this pass."); } @@ -166,6 +169,7 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) { case CaffeOpType::eltwise: case CaffeOpType::ELU: case CaffeOpType::embed: + case CaffeOpType::sigmoid: case CaffeOpType::tanh: // No checks break; diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp index 1432e0a..b9731a7 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp @@ -30,6 +30,7 @@ #include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include 
"core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/TanhOp.h" #include "core/modelIR/operations/TransposeOp.h" @@ -560,6 +561,13 @@ CaffeOpCreator::convertEmbed(const caffe::LayerParameter& layer, } std::vector +CaffeOpCreator::convertSigmoid(const caffe::LayerParameter& layer, + const std::vector& inputs) { + auto result = createOp(layer.name(), inputs[0]); + return {result->getOutput(0)}; +} + +std::vector CaffeOpCreator::convertTanH(const caffe::LayerParameter& layer, const std::vector& inputs) { auto tanh = createOp(layer.name(), inputs[0]); diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h index d8e4b57..25e8265 100644 --- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h +++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h @@ -93,6 +93,10 @@ public: const std::vector& inputs); std::vector + convertSigmoid(const caffe::LayerParameter& layer, + const std::vector& inputs); + + std::vector convertTanH(const caffe::LayerParameter& layer, const std::vector& inputs); diff --git a/contrib/nnc/passes/interpreter/Interpreter.cpp b/contrib/nnc/passes/interpreter/Interpreter.cpp index 0160b02..c7f2e76 100644 --- a/contrib/nnc/passes/interpreter/Interpreter.cpp +++ b/contrib/nnc/passes/interpreter/Interpreter.cpp @@ -14,12 +14,6 @@ * limitations under the License. 
*/ -#include -#include -#include - -#include "pass/PassException.h" - #include "passes/interpreter/Interpreter.h" #include "core/modelIR/operations/BatchNormOp.h" @@ -42,9 +36,10 @@ #include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ResizeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" -#include "core/modelIR/operations/SqueezeOp.h" #include "core/modelIR/operations/SqrtOp.h" +#include "core/modelIR/operations/SqueezeOp.h" #include "core/modelIR/operations/TanhOp.h" #include "core/modelIR/operations/TransposeOp.h" #include "core/modelIR/operations/VariableOp.h" @@ -66,6 +61,10 @@ #include "ops/Softmax.h" #include "ops/Transpose.h" +#include +#include +#include + namespace nnc { using namespace nnc::mir; @@ -141,6 +140,15 @@ void NNInterpreter::visit(ops::ReluOp& op) { op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })(); } +void NNInterpreter::visit(ops::SigmoidOp& op) { + mapByName(&op); + auto operand = op.getPrevNodes()[0]; + Tensor input(var(operand.op->getId())[operand.index]); + var(op.getId()) = Fill(op.getOutputShape(0), [&input](const Index& id) { + return 1.f / (1.f + std::exp(-input.at(id))); + })(); +} + void NNInterpreter::visit(ops::SoftmaxOp& op) { mapByName(&op); auto operand = op.getPrevNodes()[0]; diff --git a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp index f960ead..b0c05ae 100644 --- a/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp +++ b/contrib/nnc/passes/onnx_frontend/ONNXImporterImpl.cpp @@ -61,6 +61,7 @@ static void collectUnsupportedOps(std::unique_ptr& model) { case ONNXOpCode::opMul: case ONNXOpCode::opRelu: case ONNXOpCode::opReshape: + case ONNXOpCode::opSigmoid: case ONNXOpCode::opScale: case ONNXOpCode::opSoftmax: case ONNXOpCode::opSum: @@ -282,6 +283,9 @@ mir::Graph *ONNXImporterImpl::createIR() { 
case ONNXOpCode::opRelu: outputs = _opCreator.convertRelu(input_nodes); break; + case ONNXOpCode::opSigmoid: + outputs = _opCreator.convertSigmoid(input_nodes); + break; case ONNXOpCode::opSoftmax: outputs = _opCreator.convertSoftmax(input_nodes, onnx_node); break; diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp index 1f89611..78ec2b6 100644 --- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp +++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.cpp @@ -36,6 +36,7 @@ #include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/VariableOp.h" #include "core/modelIR/operations/ElementwiseOp.h" @@ -222,6 +223,11 @@ std::vector ONNXOpCreator::convertRelu(InputOps& inputs) { return createOp(inputs[0]->getOutput(0)); } +std::vector ONNXOpCreator::convertSigmoid(InputOps& inputs) { + assert(inputs.size() == 1); + return createOp(inputs[0]->getOutput(0)); +} + std::vector ONNXOpCreator::convertElementwise(InputOps& inputs, mir::ops::ElementwiseOp::OpType op_type) { std::vector descriptors; diff --git a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h index 592efca..0eda45c 100644 --- a/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h +++ b/contrib/nnc/passes/onnx_frontend/ONNXOpCreator.h @@ -45,6 +45,7 @@ public: std::vector convertSoftmax(InputOps& inputs, const onnx::NodeProto& onnx_node); std::vector convertReshape(mir::Operation* input_data, mir::Shape output_shape); std::vector convertRelu(InputOps& inputs); + std::vector convertSigmoid(InputOps& inputs); std::vector convertElementwise(InputOps& inputs, mir::ops::ElementwiseOp::OpType op_type); std::vector convertScale(InputOps& inputs, const onnx::NodeProto& node); diff --git 
a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp index 6f678b4..b0513a2 100644 --- a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp +++ b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp @@ -36,6 +36,7 @@ using namespace std; #include "cpp_depthwise_conv.generated.h" #include "cpp_fully_connected.generated.h" #include "cpp_pool.generated.h" +#include "cpp_sigmoid.generated.h" #include "cpp_sqrt.generated.h" #include "cpp_relu.generated.h" #include "cpp_reduce.generated.h" @@ -283,6 +284,7 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co out.write(cpp_conv, sizeof(cpp_conv)); out.write(cpp_depthwise_conv, sizeof(cpp_depthwise_conv)); out.write(cpp_fully_connected, sizeof(cpp_fully_connected)); + out.write(cpp_sigmoid, sizeof(cpp_sigmoid)); out.write(cpp_pool, sizeof(cpp_pool)); out.write(cpp_relu, sizeof(cpp_relu)); out.write(cpp_reduce, sizeof(cpp_reduce)); diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp index 0008e8c..206e18b 100644 --- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp +++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp @@ -44,9 +44,10 @@ #include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" -#include "core/modelIR/operations/SqueezeOp.h" #include "core/modelIR/operations/SqrtOp.h" +#include "core/modelIR/operations/SqueezeOp.h" #include "core/modelIR/operations/TanhOp.h" #include "core/modelIR/operations/TransposeOp.h" #include "core/modelIR/operations/VariableOp.h" @@ -318,4 +319,8 @@ void ModelAnalyzer::visit(mir::ops::GatherOp& op) { addOpDescr(&op, "gather"); } +void ModelAnalyzer::visit(mir::ops::SigmoidOp& op) { + addOpDescr(&op, "sigmoid"); +} + } // namespace nnc diff --git 
a/contrib/nnc/passes/soft_backend/ModelAnalyzer.h b/contrib/nnc/passes/soft_backend/ModelAnalyzer.h index 4f87e91..f6965fc 100644 --- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.h +++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.h @@ -110,9 +110,10 @@ public: void visit(mir::ops::ReshapeOp& op) override; void visit(mir::ops::ResizeOp& op) override; void visit(mir::ops::ScaleOp& op) override; + void visit(mir::ops::SigmoidOp& op) override; void visit(mir::ops::SoftmaxOp& op) override; - void visit(mir::ops::SqueezeOp& op) override; void visit(mir::ops::SqrtOp& op) override; + void visit(mir::ops::SqueezeOp& op) override; void visit(mir::ops::TanhOp& op) override; void visit(mir::ops::TransposeOp& op) override; void visit(mir::ops::VariableOp& op) override; diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.cpp b/contrib/nnc/passes/soft_backend/SBSerializer.cpp index 3c8b7f6..9314f21 100644 --- a/contrib/nnc/passes/soft_backend/SBSerializer.cpp +++ b/contrib/nnc/passes/soft_backend/SBSerializer.cpp @@ -378,4 +378,8 @@ void Serializer::visit(mir::ops::GatherOp& op) { serializeShape(op.getOutputShape(0)); } +void Serializer::visit(mir::ops::SigmoidOp& op) { + _curOp->_paramStartOffset = _buffer.size(); +} + } // namespace nnc diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.h b/contrib/nnc/passes/soft_backend/SBSerializer.h index a5ca3ed..92c22c4 100644 --- a/contrib/nnc/passes/soft_backend/SBSerializer.h +++ b/contrib/nnc/passes/soft_backend/SBSerializer.h @@ -62,9 +62,10 @@ public: void visit(mir::ops::ReshapeOp& op) override; void visit(mir::ops::ResizeOp& op) override; void visit(mir::ops::ScaleOp& op) override; + void visit(mir::ops::SigmoidOp& op) override; void visit(mir::ops::SoftmaxOp& op) override; - void visit(mir::ops::SqueezeOp& op) override; void visit(mir::ops::SqrtOp& op) override; + void visit(mir::ops::SqueezeOp& op) override; void visit(mir::ops::TanhOp& op) override; void visit(mir::ops::TransposeOp& op) override; void 
visit(mir::ops::VariableOp& op) override; diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def index bcbc917..d99913f 100644 --- a/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def +++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def @@ -453,6 +453,12 @@ void relu(Tensor &out, const char *params, const Tensor &in) Relu(input, input_d, out.getData(), input_d); } +void sigmoid(Tensor& out, const char* params, const Tensor& in) { + out.reShape(in.getShape()); + Logistic(shapeToRuntimeShape(in.getShape()), in.getData(), + shapeToRuntimeShape(out.getShape()), out.getData()); +} + void elu(Tensor &out, const char* params, const Tensor& in) { const float* input = in.getData(); const Dims<4> inp_d = shapeToDims(in.getShape()); diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_sigmoid.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_sigmoid.def new file mode 100644 index 0000000..96d72f9 --- /dev/null +++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_sigmoid.def @@ -0,0 +1,22 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +inline void Logistic(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + + for (int i = 0; i < flat_size; i++) { + float val = input_data[i]; + float result = 1.f / (1.f + std::exp(-val)); + output_data[i] = result; + } +} diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp index 62c62a0..caba51b 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp @@ -94,6 +94,7 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) { case BuiltinOperator_RESHAPE: case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: case BuiltinOperator_SQUEEZE: + case BuiltinOperator_LOGISTIC: case BuiltinOperator_SQRT: case BuiltinOperator_PAD: case BuiltinOperator_ADD: @@ -204,6 +205,9 @@ void TfliteImporter::walkOperator(const Operator* op) { case BuiltinOperator_SQUEEZE: outputs = _opCreator->createSqueeze(inputs, params, op->builtin_options_as()); break; + case BuiltinOperator_LOGISTIC: + outputs = _opCreator->createLogistic(inputs, params); + break; case BuiltinOperator_SQRT: outputs = _opCreator->createSqrt(inputs, params); break; diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp index f02ed08..5e56930 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp @@ -17,24 +17,26 @@ #include "tflite_op_creator.h" #include "schema_generated.h" +#include "core/modelIR/operations/BiasAddOp.h" +#include "core/modelIR/operations/CappedReluOp.h" #include "core/modelIR/operations/ConcatOp.h" #include "core/modelIR/operations/Conv2DOp.h" +#include "core/modelIR/operations/Deconv2DOp.h" 
#include "core/modelIR/operations/DepthwiseConv2DOp.h" +#include "core/modelIR/operations/ElementwiseOp.h" #include "core/modelIR/operations/FullyConnectedOp.h" +#include "core/modelIR/operations/PadOp.h" +#include "core/modelIR/operations/PoolOp.h" +#include "core/modelIR/operations/ReduceFOp.h" #include "core/modelIR/operations/ReluOp.h" +#include "core/modelIR/operations/ReshapeOp.h" #include "core/modelIR/operations/ResizeOp.h" -#include "core/modelIR/operations/CappedReluOp.h" -#include "core/modelIR/operations/TanhOp.h" -#include "core/modelIR/operations/ElementwiseOp.h" -#include "core/modelIR/operations/Deconv2DOp.h" +#include "core/modelIR/operations/SigmoidOp.h" #include "core/modelIR/operations/SoftmaxOp.h" -#include "core/modelIR/operations/ReduceFOp.h" -#include "core/modelIR/operations/PoolOp.h" -#include "core/modelIR/operations/BiasAddOp.h" -#include "core/modelIR/operations/ReshapeOp.h" -#include "core/modelIR/operations/SqueezeOp.h" -#include "core/modelIR/operations/PadOp.h" #include "core/modelIR/operations/SqrtOp.h" +#include "core/modelIR/operations/SqueezeOp.h" +#include "core/modelIR/operations/TanhOp.h" + #include "core/modelIR/Tensor.h" #include "core/modelIR/ShapeRange.h" #include "pass/PassException.h" @@ -378,4 +380,9 @@ TFLiteOpCreator::createSqrt(InputOps& inputs, InputParams&) { return createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0)); } +std::vector +TFLiteOpCreator::createLogistic(InputOps& inputs, InputParams&) { + return createOp(ActivationFunctionType_NONE, inputs[0]->getOutput(0)); +} + } // namespace nnc diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h index 3342f1c..99aff24 100644 --- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h +++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.h @@ -75,7 +75,9 @@ public: std::vector convertResizeNN(InputOps, InputParams, const ::tflite::ResizeNearestNeighborOptions*); - std::vector 
createSqrt(InputOps, InputParams); + std::vector createLogistic(InputOps& inputs, InputParams& params); + + std::vector createSqrt(InputOps& inputs, InputParams& params); std::vector createSqueeze(InputOps& inputs, InputParams& params, const ::tflite::SqueezeOptions* opts); diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp index 06d6f2c..234f048 100644 --- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp +++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp @@ -37,6 +37,7 @@ #include "code_snippets/cpp_elu.def" #include "code_snippets/cpp_fully_connected.def" #include "code_snippets/cpp_gather.def" +#include "code_snippets/cpp_sigmoid.def" #include "code_snippets/cpp_pad.def" #include "code_snippets/cpp_pool.def" #include "code_snippets/cpp_reduce.def" @@ -55,26 +56,27 @@ #include "SBSerializer.h" // operations part -#include "core/modelIR/operations/VariableOp.h" -#include "core/modelIR/operations/FullyConnectedOp.h" +#include "core/modelIR/operations/BiasAddOp.h" +#include "core/modelIR/operations/CappedReluOp.h" +#include "core/modelIR/operations/ConcatOp.h" #include "core/modelIR/operations/Conv2DOp.h" +#include "core/modelIR/operations/Deconv2DOp.h" #include "core/modelIR/operations/DepthwiseConv2DOp.h" +#include "core/modelIR/operations/ElementwiseOp.h" +#include "core/modelIR/operations/EluOp.h" +#include "core/modelIR/operations/FullyConnectedOp.h" +#include "core/modelIR/operations/PadOp.h" #include "core/modelIR/operations/PoolOp.h" -#include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ReduceFOp.h" -#include "core/modelIR/operations/CappedReluOp.h" +#include "core/modelIR/operations/ReluOp.h" #include "core/modelIR/operations/ReshapeOp.h" -#include "core/modelIR/operations/ConcatOp.h" -#include "core/modelIR/operations/BiasAddOp.h" -#include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/ScaleOp.h" +#include 
"core/modelIR/operations/SigmoidOp.h" +#include "core/modelIR/operations/SoftmaxOp.h" #include "core/modelIR/operations/SqrtOp.h" -#include "core/modelIR/operations/EluOp.h" -#include "core/modelIR/operations/ElementwiseOp.h" -#include "core/modelIR/operations/Deconv2DOp.h" #include "core/modelIR/operations/TanhOp.h" #include "core/modelIR/operations/TransposeOp.h" -#include "core/modelIR/operations/PadOp.h" +#include "core/modelIR/operations/VariableOp.h" // various headers #include "core/modelIR/TensorVariant.h" @@ -722,6 +724,19 @@ TEST(cpp_operations_test, relu) { createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor); } +TEST(cpp_operations_test, sigmoid) { + // test prerequisites + vector shape_data{2, 3, 4, 5}; + Tensor input_atensor; + vector> input_ntensors(1); + fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f); + auto opGenerator = [](mir::Graph& g, const std::vector& inputs) { + return g.create("y", inputs[0]); + }; + + createAndRunTestGraph(opGenerator, sigmoid, input_ntensors, input_atensor); +} + TEST(cpp_operations_test, elu) { // test prerequisites vector shape_data{2, 3, 4, 5};