#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GatherOp.h"
#include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
dotBuilder.updateWithOp(&op, node_info);
}
+void IrDotDumper::visit(mir::ops::LeakyReluOp& op) {
+ auto node_info = DotIrNodeInfo().withType("LeakyReluOp", op.getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op))
+ .withMisc("alpha", op.getAlpha());
+
+ dotBuilder.updateWithOp(&op, node_info);
+}
+
} // namespace mir
} // namespace nnc
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GatherOp.h"
#include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
void visit(ops::FullyConnectedOp& op) override;
void visit(ops::GatherOp& op) override;
void visit(ops::GemmOp& op) override;
+ void visit(ops::LeakyReluOp& op) override;
void visit(ops::PadOp& op) override;
void visit(ops::PoolOp& op) override;
void visit(ops::ReduceFOp& op) override;
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_LEAKY_RELU_H_
+#define _NNC_CORE_IR_MODEL_LEAKY_RELU_H_
+
+#include "core/modelIR/Operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
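+/**
+ * @brief Element-wise leaky ReLU activation: f(x) = x for x > 0, alpha * x otherwise.
+ */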
+class LeakyReluOp : public Operation {
+public:
+ explicit LeakyReluOp(const IODescriptor& arg, float alpha)
+ : Operation(Type::leakyReLU, {arg}), _alpha(alpha) {
+ // Infer output shape.
+ setOutputShape(0, getInputShape(0));
+ }
+
+ float getAlpha() const {
+ return _alpha;
+ }
+
+private:
+ float _alpha;
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_LEAKY_RELU_H_
HANDLE_OP(sqrt, SqrtOp)
HANDLE_OP(reduceF, ReduceFOp)
HANDLE_OP(transpose, TransposeOp)
+HANDLE_OP(leakyReLU, LeakyReluOp)
void visit(ops::EluOp& op) override;
void visit(ops::FullyConnectedOp& op) override;
void visit(ops::GatherOp& op) override;
+ void visit(ops::LeakyReluOp& op) override;
void visit(ops::PadOp& op) override;
void visit(ops::PoolOp& op) override;
void visit(ops::ReduceFOp& op) override;
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
#include "core/modelIR/operations/ReluOp.h"
genActivation(op, "LOGISTIC");
}
+void AclCppOpGenerator::visit(mir::ops::LeakyReluOp& op) {
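+ // Forward alpha as the activation's 'a' parameter, which ACL uses as the leaky slope.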
+ genActivation(op, "LEAKY_RELU", op.getAlpha());
+}
+
} // namespace nnc
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
void visit(mir::ops::GemmOp& op) override;
+ void visit(mir::ops::LeakyReluOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::PoolOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
#include "core/modelIR/operations/EluOp.h"
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GatherOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
void CaffeOpCreator::checkReLU(const ReLUParameter& opts,
std::set<std::string>& problems_op_set) {
- if (opts.has_negative_slope())
- problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
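+ // A negative_slope is now lowered to LeakyReluOp in convertReLU(), so nothing to check here.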
}
std::vector<mir::IODescriptor>
CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
const std::vector<mir::IODescriptor>& inputs) {
- auto relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
+ mir::Operation* relu;
+ if (layer.relu_param().has_negative_slope()) {
+ float alpha = layer.relu_param().negative_slope();
+ relu = createOp<ops::LeakyReluOp>(layer.name(), inputs[0], alpha);
+ } else {
+ relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
+ }
+
return {relu->getOutput(0)};
}
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GatherOp.h"
#include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
var(op.getId()) = Gather(data, indices, op)();
}
+void NNInterpreter::visit(ops::LeakyReluOp& op) {
+ auto operand = op.getPrevNodes()[0];
+ float alpha = op.getAlpha();
+ Tensor<float> input(var(operand.op->getId())[operand.index]);
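+ // Element-wise: keep positive values, scale negative ones by alpha.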
+ var(op.getId()) = Fill<float>(
+ op.getOutputShape(0), [&input, alpha](const Index& id) {
+ float val = input.at(id);
+ return val > 0.0f ? val : val * alpha;
+ })();
+
+ DUMP(op, false);
+}
+
} // namespace nnc
#include "cpp_sigmoid.generated.h"
#include "cpp_sqrt.generated.h"
#include "cpp_relu.generated.h"
+#include "cpp_leaky_relu.generated.h"
#include "cpp_reduce.generated.h"
#include "cpp_resize.generated.h"
#include "cpp_softmax.generated.h"
out.write(cpp_scale, sizeof(cpp_scale));
out.write(cpp_dropout, sizeof(cpp_dropout));
out.write(cpp_batchnorm, sizeof(cpp_batchnorm));
+ out.write(cpp_leaky_relu, sizeof(cpp_leaky_relu));
// gen NN constructor
out << className << "::" << className << "(const string &parametersPath)\n"
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GatherOp.h"
#include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
addOpDescr(&op, "sigmoid");
}
+void ModelAnalyzer::visit(mir::ops::LeakyReluOp& op) {
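+ // The descriptor name must match the leakyRelu() snippet in cpp_leaky_relu.def.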
+ addOpDescr(&op, "leakyRelu");
+}
+
} // namespace nnc
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
void visit(mir::ops::GemmOp& op) override;
+ void visit(mir::ops::LeakyReluOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::PoolOp& op) override;
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/GatherOp.h"
#include "core/modelIR/operations/GemmOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
_curOp->_paramStartOffset = _buffer.size();
}
+void Serializer::visit(mir::ops::LeakyReluOp& op) {
+ _curOp->_paramStartOffset = _buffer.size();
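+ // Serialized layout: alpha (float) followed by the output shape.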
+ serializeT<float>(op.getAlpha());
+ serializeShape(op.getOutputShape(0));
+}
+
} // namespace nnc
void visit(mir::ops::ElementwiseOp& op) override;
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
void visit(mir::ops::GemmOp& op) override;
+ void visit(mir::ops::LeakyReluOp& op) override;
void visit(mir::ops::PadOp& op) override;
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
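+// Element-wise leaky ReLU: out[i] = in[i] for positive values, alpha * in[i] otherwise.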
+void leakyRelu(Tensor& out, const char* params, const Tensor& in) {
+ const float* input = in.getData();
+ out.reShape(in.getShape());
+ float* output = out.getData();
+ const float alpha = deserializeT<float>(params);
+
+ size_t data_length = in.getShape().getNumElems();
+
+ for (size_t i = 0; i < data_length; ++i) {
+   float val = input[i];
+   output[i] = val > 0.0f ? val : val * alpha;
+ }
+}
case BuiltinOperator_RELU6:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_STRIDED_SLICE:
+ case BuiltinOperator_LEAKY_RELU:
// No checks
break;
default:
outputs = _opCreator->createStridedSlice(
inputs, params, op->builtin_options_as<StridedSliceOptions>());
break;
+ case BuiltinOperator_LEAKY_RELU:
+ outputs = _opCreator->createLeakyRelu(inputs, params,
+ op->builtin_options_as<LeakyReluOptions>());
+ break;
default:
assert(false && "All unsupported types should have been found before this pass.");
}
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/FullyConnectedOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
slice_outputs[0]->getOutput(0), squeeze_dims);
}
+std::vector<mir::Operation*>
+TFLiteOpCreator::createLeakyRelu(InputOps& inputs, const InputParams&,
+                                 const ::tflite::LeakyReluOptions* opts) {
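+ // LEAKY_RELU carries no fused activation, so pass NONE; alpha comes from the options.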
+ float alpha = opts->alpha();
+
+ return createOp<ops::LeakyReluOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), alpha);
+}
+
} // namespace nnc
std::vector<mir::Operation*> createStridedSlice(InputOps&, const InputParams&,
const ::tflite::StridedSliceOptions*);
+ /**
+ * @brief Create a leaky ReLU activation operation.
+ * @return Vector of MIR operations created for this layer.
+ */
+ std::vector<mir::Operation*> createLeakyRelu(InputOps&, const InputParams&,
+ const ::tflite::LeakyReluOptions*);
+
void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
void checkConcatenation(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
#include "code_snippets/cpp_operations.def"
#include "code_snippets/cpp_scale.def"
+#include "code_snippets/cpp_leaky_relu.def"
// soft backend part
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
#include "core/modelIR/operations/ReluOp.h"
+#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/ReshapeOp.h"
#include "core/modelIR/operations/ResizeOp.h"
#include "core/modelIR/operations/ScaleOp.h"
createAndRunTestGraph(op_generator, relu, input_ntensors, input_atensor);
}
+TEST(cpp_operations_test, leaky_relu) {
+ // test prerequisites
+ vector<int> shape_data{2, 3, 4, 5};
+ Tensor input_atensor;
+ vector<unique_ptr<mir::TensorVariant>> input_ntensors(1);
+ fillTensors(input_ntensors[0], input_atensor, shape_data, 1.0f);
+ auto op_generator = [](mir::Graph& g, const std::vector<mir::IODescriptor>& inputs) {
+ return g.create<mir::ops::LeakyReluOp>("y", inputs[0], 0.1f);
+ };
+
+ createAndRunTestGraph(op_generator, leakyRelu, input_ntensors, input_atensor);
+}
+
TEST(cpp_operations_test, sigmoid) {
// test prerequisites
vector<int> shape_data{2, 3, 4, 5};