#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ResizeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/SqrtOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
}
void IrDotDumper::visit(ops::GatherOp& op) {
- auto node_info = DotIrNodeInfo().withType("GatherOp", op.getName());
+ auto node_info = DotIrNodeInfo().withType("GatherOp", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op));
+
+ // Register the node and its I/O shapes with the .dot graph builder.
+ dotBuilder.updateWithOp(&op, node_info);
+}
+
+// Dumps a Sigmoid node (type, name, input/output shapes) into the .dot graph,
+// mirroring the other visit() overloads of IrDotDumper.
+void IrDotDumper::visit(ops::SigmoidOp& op) {
+ auto node_info = DotIrNodeInfo().withType("SigmoidOp", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op));
+
+ dotBuilder.updateWithOp(&op, node_info);
}
} // namespace mir
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ResizeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/SqrtOp.h"
void visit(ops::ReshapeOp& op) override;
void visit(ops::ResizeOp& op) override;
void visit(ops::ScaleOp& op) override;
+ void visit(ops::SigmoidOp& op) override;
void visit(ops::SoftmaxOp& op) override;
- void visit(ops::SqueezeOp& op) override;
void visit(ops::SqrtOp& op) override;
+ void visit(ops::SqueezeOp& op) override;
void visit(ops::TanhOp& op) override;
void visit(ops::TransposeOp& op) override;
void visit(ops::VariableOp& op) override;
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_SIGMOID_H_
+#define _NNC_CORE_IR_MODEL_SIGMOID_H_
+
+#include "core/modelIR/Operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+/**
+ * @brief Element-wise sigmoid (logistic) activation operation.
+ *
+ * Takes a single input; the output shape equals the input shape,
+ * so shape inference is performed directly in the constructor.
+ */
+class SigmoidOp : public Operation {
+public:
+ explicit SigmoidOp(const IODescriptor& arg) : Operation(Type::sigmoid, {arg}) {
+ // Infer output shape.
+ setOutputShape(0, getInputShape(0));
+ }
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_SIGMOID_H_
HANDLE_OP(reshape, ReshapeOp)
HANDLE_OP(resizeIm, ResizeOp)
HANDLE_OP(scale, ScaleOp)
+HANDLE_OP(sigmoid, SigmoidOp)
HANDLE_OP(batchNorm, BatchNormOp)
HANDLE_OP(dropout, DropoutOp)
HANDLE_OP(tanh, TanhOp)
void visit(ops::ReshapeOp& op) override;
void visit(ops::ResizeOp& op) override;
void visit(ops::ScaleOp& op) override;
+ void visit(ops::SigmoidOp& op) override;
void visit(ops::SoftmaxOp& op) override;
- void visit(ops::SqueezeOp& op) override;
void visit(ops::SqrtOp& op) override;
+ void visit(ops::SqueezeOp& op) override;
void visit(ops::TanhOp& op) override;
void visit(ops::TransposeOp& op) override;
void visit(ops::VariableOp& op) override;
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
#include "AclCppOpGenerator.h"
#include "passes/acl_soft_backend/AclCppException.h"
#include "core/modelIR/ShapeRange.h"
void AclCppOpGenerator::visit(mir::ops::GatherOp& op) {
assert(false && "Unimplemented operation: GatherOp");
}
+
+// SigmoidOp is not implemented for the ACL C++ backend yet; fail fast if reached.
+void AclCppOpGenerator::visit(mir::ops::SigmoidOp& op) {
+ assert(false && "Unimplemented operation: SigmoidOp");
+}
+
}
// namespace nnc
void visit(mir::ops::ReshapeOp& op) override;
void visit(mir::ops::ResizeOp& op) override;
void visit(mir::ops::ScaleOp& op) override;
+ void visit(mir::ops::SigmoidOp& op) override;
void visit(mir::ops::SoftmaxOp& op) override;
- void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::SqrtOp& op) override;
+ void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::TanhOp& op) override;
void visit(mir::ops::TransposeOp& op) override;
void visit(mir::ops::VariableOp& op) override;
case SupportedCaffe2OpType::dropout:
case SupportedCaffe2OpType::givenTensorFill:
case SupportedCaffe2OpType::relu:
+ case SupportedCaffe2OpType::sigmoid:
case SupportedCaffe2OpType::softmax:
case SupportedCaffe2OpType::sum:
_opCreator->commonCheck(op, _problemsOpSet);
case SupportedCaffe2OpType::relu:
outputs = _opCreator->convertRelu(inputs);
break;
+ case SupportedCaffe2OpType::sigmoid:
+ outputs = _opCreator->convertSigmoid(inputs);
+ break;
case SupportedCaffe2OpType::softmax:
outputs = _opCreator->convertSoftmax(inputs, op);
break;
case SupportedCaffe2OpType::maxPool:
case SupportedCaffe2OpType::mul:
case SupportedCaffe2OpType::relu:
+ case SupportedCaffe2OpType::sigmoid:
case SupportedCaffe2OpType::softmax:
case SupportedCaffe2OpType::spatialBN:
inputs.push_back(_blobNameToIODescriptor[op.input(0)]);
{"MaxPool", SupportedCaffe2OpType::maxPool},
{"Mul", SupportedCaffe2OpType::mul},
{"Relu", SupportedCaffe2OpType::relu},
+ {"Sigmoid", SupportedCaffe2OpType::sigmoid},
{"Softmax", SupportedCaffe2OpType::softmax},
{"SpatialBN", SupportedCaffe2OpType::spatialBN},
{"Sum", SupportedCaffe2OpType::sum}
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/TransposeOp.h"
#include "core/modelIR/operations/VariableOp.h"
#include <vector>
#include "option/Options.h"
-
namespace nnc {
using namespace ::caffe2;
return {convertMIRToCaffe(mul->getOutput(0))};
}
-std::vector<IODescriptor> Caffe2OpCreator::convertRelu(const std::vector<IODescriptor>& inputs) {
+std::vector<IODescriptor>
+Caffe2OpCreator::convertRelu(const std::vector<IODescriptor>& inputs) {
auto relu = createOp<ops::ReluOp>(inputs[0]);
return {relu->getOutput(0)};
}
+// Converts a Caffe2 Sigmoid operator into a MIR SigmoidOp node
+// (no attributes; single input, single output).
+std::vector<IODescriptor>
+Caffe2OpCreator::convertSigmoid(const std::vector<IODescriptor>& inputs) {
+ auto result = createOp<ops::SigmoidOp>(inputs[0]);
+ return {result->getOutput(0)};
+}
+
std::vector<IODescriptor> Caffe2OpCreator::convertSoftmax(const std::vector<IODescriptor>& inputs,
const ::caffe2::OperatorDef& op) {
int axis = getSingleArgument(op, "axis", 1);
std::vector<mir::IODescriptor> convertRelu(const std::vector<mir::IODescriptor>&);
+ std::vector<mir::IODescriptor> convertSigmoid(const std::vector<mir::IODescriptor>&);
+
std::vector<mir::IODescriptor> convertSoftmax(const std::vector<mir::IODescriptor>&,
const ::caffe2::OperatorDef&);
maxPool,
mul,
relu,
+ sigmoid,
softmax,
spatialBN,
sum
case CaffeOpType::dropout:
outputs = _opCreator->convertDropout(layer, inputs);
break;
- case CaffeOpType ::tanh:
+ case CaffeOpType::tanh:
outputs = _opCreator->convertTanH(layer, inputs);
break;
- case CaffeOpType ::ELU:
+ case CaffeOpType::ELU:
outputs = _opCreator->convertELU(layer, inputs);
break;
- case CaffeOpType ::eltwise:
+ case CaffeOpType::eltwise:
outputs = _opCreator->convertEltwise(layer, inputs);
break;
case CaffeOpType::embed:
outputs = _opCreator->convertEmbed(layer, inputs);
break;
- case CaffeOpType ::deconvolution:
+ case CaffeOpType::deconvolution:
outputs = _opCreator->convertDeconvolution(layer, inputs);
break;
case CaffeOpType::split:
outputs = _opCreator->convertSplit(layer, inputs);
break;
+ case CaffeOpType::sigmoid:
+ outputs = _opCreator->convertSigmoid(layer, inputs);
+ break;
default:
assert(false && "All unsupported types should have been found before this pass.");
}
case CaffeOpType::eltwise:
case CaffeOpType::ELU:
case CaffeOpType::embed:
+ case CaffeOpType::sigmoid:
case CaffeOpType::tanh:
// No checks
break;
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/TransposeOp.h"
}
std::vector<mir::IODescriptor>
+CaffeOpCreator::convertSigmoid(const caffe::LayerParameter& layer,
+ const std::vector<mir::IODescriptor>& inputs) {
+ // Sigmoid layers carry no parameters; map directly to a MIR SigmoidOp.
+ auto result = createOp<ops::SigmoidOp>(layer.name(), inputs[0]);
+ return {result->getOutput(0)};
+}
+
+std::vector<mir::IODescriptor>
CaffeOpCreator::convertTanH(const caffe::LayerParameter& layer,
const std::vector<mir::IODescriptor>& inputs) {
auto tanh = createOp<ops::TanhOp>(layer.name(), inputs[0]);
const std::vector<mir::IODescriptor>& inputs);
std::vector<mir::IODescriptor>
+ convertSigmoid(const caffe::LayerParameter& layer,
+ const std::vector<mir::IODescriptor>& inputs);
+
+ std::vector<mir::IODescriptor>
convertTanH(const caffe::LayerParameter& layer,
const std::vector<mir::IODescriptor>& inputs);
* limitations under the License.
*/
-#include <cmath>
-#include <cassert>
-#include <vector>
-
-#include "pass/PassException.h"
-
#include "passes/interpreter/Interpreter.h"
#include "core/modelIR/operations/BatchNormOp.h"
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ResizeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
-#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/SqrtOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/TransposeOp.h"
#include "core/modelIR/operations/VariableOp.h"
#include "ops/Softmax.h"
#include "ops/Transpose.h"
+#include <vector>
+#include <cmath>
+#include <cassert>
+
namespace nnc {
using namespace nnc::mir;
op.getOutputShape(0), [&input](const Index &id) { return std::max(input.at(id), 0.0f); })();
}
+// Reference interpretation of SigmoidOp: applies the logistic function
+// 1 / (1 + exp(-x)) to every element of the single input tensor.
+void NNInterpreter::visit(ops::SigmoidOp& op) {
+ mapByName(&op);
+ auto operand = op.getPrevNodes()[0];
+ Tensor<float> input(var(operand.op->getId())[operand.index]);
+ var(op.getId()) = Fill<float>(op.getOutputShape(0), [&input](const Index& id) {
+ return 1.f / (1.f + std::exp(-input.at(id)));
+ })();
+}
+
void NNInterpreter::visit(ops::SoftmaxOp& op) {
mapByName(&op);
auto operand = op.getPrevNodes()[0];
case ONNXOpCode::opMul:
case ONNXOpCode::opRelu:
case ONNXOpCode::opReshape:
+ case ONNXOpCode::opSigmoid:
case ONNXOpCode::opScale:
case ONNXOpCode::opSoftmax:
case ONNXOpCode::opSum:
case ONNXOpCode::opRelu:
outputs = _opCreator.convertRelu(input_nodes);
break;
+ case ONNXOpCode::opSigmoid:
+ outputs = _opCreator.convertSigmoid(input_nodes);
+ break;
case ONNXOpCode::opSoftmax:
outputs = _opCreator.convertSoftmax(input_nodes, onnx_node);
break;
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
return createOp<ops::ReluOp>(inputs[0]->getOutput(0));
}
+// Converts an ONNX Sigmoid node (exactly one input) into a MIR SigmoidOp.
+std::vector<Operation*> ONNXOpCreator::convertSigmoid(InputOps& inputs) {
+ assert(inputs.size() == 1);
+ return createOp<ops::SigmoidOp>(inputs[0]->getOutput(0));
+}
+
std::vector<Operation*> ONNXOpCreator::convertElementwise(InputOps& inputs,
mir::ops::ElementwiseOp::OpType op_type) {
std::vector<IODescriptor> descriptors;
std::vector<mir::Operation*> convertSoftmax(InputOps& inputs, const onnx::NodeProto& onnx_node);
std::vector<mir::Operation*> convertReshape(mir::Operation* input_data, mir::Shape output_shape);
std::vector<mir::Operation*> convertRelu(InputOps& inputs);
+ std::vector<mir::Operation*> convertSigmoid(InputOps& inputs);
std::vector<mir::Operation*> convertElementwise(InputOps& inputs,
mir::ops::ElementwiseOp::OpType op_type);
std::vector<mir::Operation*> convertScale(InputOps& inputs, const onnx::NodeProto& node);
#include "cpp_depthwise_conv.generated.h"
#include "cpp_fully_connected.generated.h"
#include "cpp_pool.generated.h"
+#include "cpp_sigmoid.generated.h"
#include "cpp_sqrt.generated.h"
#include "cpp_relu.generated.h"
#include "cpp_reduce.generated.h"
out.write(cpp_conv, sizeof(cpp_conv));
out.write(cpp_depthwise_conv, sizeof(cpp_depthwise_conv));
out.write(cpp_fully_connected, sizeof(cpp_fully_connected));
+ out.write(cpp_sigmoid, sizeof(cpp_sigmoid));
out.write(cpp_pool, sizeof(cpp_pool));
out.write(cpp_relu, sizeof(cpp_relu));
out.write(cpp_reduce, sizeof(cpp_reduce));
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
-#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/SqrtOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/TransposeOp.h"
#include "core/modelIR/operations/VariableOp.h"
addOpDescr(&op, "gather");
}
+// Registers SigmoidOp under the "sigmoid" kernel name for the soft backend.
+void ModelAnalyzer::visit(mir::ops::SigmoidOp& op) {
+ addOpDescr(&op, "sigmoid");
+}
+
} // namespace nnc
void visit(mir::ops::ReshapeOp& op) override;
void visit(mir::ops::ResizeOp& op) override;
void visit(mir::ops::ScaleOp& op) override;
+ void visit(mir::ops::SigmoidOp& op) override;
void visit(mir::ops::SoftmaxOp& op) override;
- void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::SqrtOp& op) override;
+ void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::TanhOp& op) override;
void visit(mir::ops::TransposeOp& op) override;
void visit(mir::ops::VariableOp& op) override;
serializeShape(op.getOutputShape(0));
}
+// SigmoidOp has no parameters to serialize; only record the param start offset.
+void Serializer::visit(mir::ops::SigmoidOp& op) {
+ _curOp->_paramStartOffset = _buffer.size();
+}
+
} // namespace nnc
void visit(mir::ops::ReshapeOp& op) override;
void visit(mir::ops::ResizeOp& op) override;
void visit(mir::ops::ScaleOp& op) override;
+ void visit(mir::ops::SigmoidOp& op) override;
void visit(mir::ops::SoftmaxOp& op) override;
- void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::SqrtOp& op) override;
+ void visit(mir::ops::SqueezeOp& op) override;
void visit(mir::ops::TanhOp& op) override;
void visit(mir::ops::TransposeOp& op) override;
void visit(mir::ops::VariableOp& op) override;
Relu(input, input_d, out.getData(), input_d);
}
+// Element-wise sigmoid kernel: resizes 'out' to the input shape and
+// delegates the computation to the Logistic reference implementation.
+// 'params' is unused (sigmoid has no serialized parameters).
+void sigmoid(Tensor& out, const char* params, const Tensor& in) {
+ out.reShape(in.getShape());
+ Logistic(shapeToRuntimeShape(in.getShape()), in.getData(),
+ shapeToRuntimeShape(out.getShape()), out.getData());
+}
+
void elu(Tensor &out, const char* params, const Tensor& in) {
const float* input = in.getData();
const Dims<4> inp_d = shapeToDims(in.getShape());
--- /dev/null
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Computes the logistic (sigmoid) function 1 / (1 + exp(-x)) element-wise
+// over a flat buffer; input and output shapes must have matching flat sizes.
+inline void Logistic(const RuntimeShape& input_shape, const float* input_data,
+ const RuntimeShape& output_shape, float* output_data) {
+ const int flat_size = MatchingFlatSize(input_shape, output_shape);
+
+ for (int i = 0; i < flat_size; i++) {
+ float val = input_data[i];
+ float result = 1.f / (1.f + std::exp(-val));
+ output_data[i] = result;
+ }
+}
case BuiltinOperator_RESHAPE:
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
case BuiltinOperator_SQUEEZE:
+ case BuiltinOperator_LOGISTIC:
case BuiltinOperator_SQRT:
case BuiltinOperator_PAD:
case BuiltinOperator_ADD:
case BuiltinOperator_SQUEEZE:
outputs = _opCreator->createSqueeze(inputs, params, op->builtin_options_as<SqueezeOptions>());
break;
+ case BuiltinOperator_LOGISTIC:
+ outputs = _opCreator->createLogistic(inputs, params);
+ break;
case BuiltinOperator_SQRT:
outputs = _opCreator->createSqrt(inputs, params);
break;
#include "tflite_op_creator.h"
#include "schema_generated.h"
+#include "core/modelIR/operations/BiasAddOp.h"
+#include "core/modelIR/operations/CappedReluOp.h"
#include "core/modelIR/operations/ConcatOp.h"
#include "core/modelIR/operations/Conv2DOp.h"
+#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/PadOp.h"
+#include "core/modelIR/operations/PoolOp.h"
+#include "core/modelIR/operations/ReduceFOp.h"
#include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ResizeOp.h"
-#include "core/modelIR/operations/CappedReluOp.h"
-#include "core/modelIR/operations/TanhOp.h"
-#include "core/modelIR/operations/ElementwiseOp.h"
-#include "core/modelIR/operations/Deconv2DOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
#include "core/modelIR/operations/SoftmaxOp.h"
-#include "core/modelIR/operations/ReduceFOp.h"
-#include "core/modelIR/operations/PoolOp.h"
-#include "core/modelIR/operations/BiasAddOp.h"
-#include "core/modelIR/operations/ReshapeOp.h"
-#include "core/modelIR/operations/SqueezeOp.h"
-#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/SqrtOp.h"
+#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/TanhOp.h"
+
#include "core/modelIR/Tensor.h"
#include "core/modelIR/ShapeRange.h"
#include "pass/PassException.h"
return createOp<ops::SqrtOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
}
+// Maps the TFLite LOGISTIC operator onto a MIR SigmoidOp
+// (no fused activation, no options to parse).
+std::vector<mir::Operation*>
+TFLiteOpCreator::createLogistic(InputOps& inputs, InputParams&) {
+ return createOp<ops::SigmoidOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0));
+}
+
} // namespace nnc
std::vector<mir::Operation*> convertResizeNN(InputOps, InputParams,
const ::tflite::ResizeNearestNeighborOptions*);
- std::vector<mir::Operation*> createSqrt(InputOps, InputParams);
+ std::vector<mir::Operation*> createLogistic(InputOps& inputs, InputParams& params);
+
+ std::vector<mir::Operation*> createSqrt(InputOps& inputs, InputParams& params);
std::vector<mir::Operation*> createSqueeze(InputOps& inputs, InputParams& params,
const ::tflite::SqueezeOptions* opts);
#include "code_snippets/cpp_elu.def"
#include "code_snippets/cpp_fully_connected.def"
#include "code_snippets/cpp_gather.def"
+#include "code_snippets/cpp_sigmoid.def"
#include "code_snippets/cpp_pad.def"
#include "code_snippets/cpp_pool.def"
#include "code_snippets/cpp_reduce.def"
#include "SBSerializer.h"
// operations part
-#include "core/modelIR/operations/VariableOp.h"
-#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/BiasAddOp.h"
+#include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ConcatOp.h"
#include "core/modelIR/operations/Conv2DOp.h"
+#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
+#include "core/modelIR/operations/EluOp.h"
+#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
-#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
-#include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
-#include "core/modelIR/operations/ConcatOp.h"
-#include "core/modelIR/operations/BiasAddOp.h"
-#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/ScaleOp.h"
+#include "core/modelIR/operations/SigmoidOp.h"
+#include "core/modelIR/operations/SoftmaxOp.h"
#include "core/modelIR/operations/SqrtOp.h"
-#include "core/modelIR/operations/EluOp.h"
-#include "core/modelIR/operations/ElementwiseOp.h"
-#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/TransposeOp.h"
-#include "core/modelIR/operations/PadOp.h"
+#include "core/modelIR/operations/VariableOp.h"
// various headers
#include "core/modelIR/TensorVariant.h"
createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
}
+TEST(cpp_operations_test, sigmoid) {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto opGenerator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ return g.create<mir::ops::SigmoidOp>("y", inputs[0]);
+ };
+
+ // Run the generated C++ sigmoid kernel and compare against the interpreter.
+ createAndRunTestGraph(opGenerator, sigmoid, input_ntensors, input_atensor);
+}
+
TEST(cpp_operations_test, elu) {
// test prerequisites
vector<int> shape_data{2, 3, 4, 5};