{
outputs = opCreator.createSoftmax(inputs, params, lp.softmax_param());
}
+ else if (lp.has_scale_param())
+ {
+ outputs = opCreator.createScale(inputs, params, lp.scale_param());
+ }
+ else if (lp.has_batch_norm_param())
+ {
+ outputs = opCreator.createBatchNorm(inputs, params, lp.batch_norm_param());
+ }
+ else if (lp.has_dropout_param())
+ {
+ outputs = opCreator.createDropout(inputs, params,lp.dropout_param());
+ }
else
{
throw PluginException("Encountered unsupported Caffe layer type");
#include "core/modelIR/operations/bias_add_op.h"
#include "core/modelIR/operations/reshape_op.h"
#include "core/modelIR/operations/fully_connected_op.h"
+#include "core/modelIR/operations/scale_op.h"
+#include "core/modelIR/operations/batch_norm.h"
+#include "core/modelIR/operations/dropout_op.h"
#include "core/modelIR/Index.h"
#include "core/modelIR/ShapeRange.h"
return createOp<ops::ReluOp>(inputs);
}
+std::vector<INode::Ref> OpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter& opts)
+{
+  // A Caffe Scale layer is an elementwise multiply, optionally followed by a bias add.
+  auto scaled = createOp<ops::ScaleOp>(inputs, std::move(*params[0]));
+  // bias_term is an optional field and, per the Caffe proto, defaults to true when absent.
+  bool hasBias = !opts.has_bias_term() || opts.bias_term();
+  if (!hasBias)
+    return scaled;
+  return createOp<ops::BiasAddOp>(scaled, std::move(*params[1]));
+}
+
+std::vector<INode::Ref> OpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts)
+{
+  (void)params;
+  // Optional proto fields may be absent; defaults below are lifted from the Caffe docs.
+  const float MAFRAC_DEF = 0.999f;
+  const float EPS_DEF = 1e-5f;
+  float maFraction = opts.has_moving_average_fraction() ? opts.moving_average_fraction() : MAFRAC_DEF;
+  float epsilon = opts.has_eps() ? opts.eps() : EPS_DEF;
+  // Spatial normalization is a separate operation in Caffe, hence the trailing 'false'.
+  return createOp<ops::BatchNormOp>(inputs, maFraction, epsilon, false);
+}
+
+std::vector<INode::Ref> OpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts)
+{
+  (void)params;
+  // dropout_ratio is optional; when absent we use the default value from the Caffe docs.
+  const float DROPOUT_RATIO_DEF = 0.5f;
+  float dropout_ratio = (opts.has_dropout_ratio()) ? opts.dropout_ratio() : DROPOUT_RATIO_DEF;
+  return createOp<ops::DropoutOp>(inputs, dropout_ratio);
+}
+
void OpCreator::connectInputs(INode::Ref op, InputOps inputs)
{
// TODO: this part doesn't support the situation where an operator takes as input
std::vector<INode::Ref> createSoftmax(InputOps inputs, InputParams params, const SoftmaxParameter& opts);
std::vector<INode::Ref> createReshape(InputOps inputs, InputParams params, const ReshapeParameter& opts);
std::vector<INode::Ref> createRelu(InputOps inputs, InputParams params, const ReLUParameter& opts);
+ std::vector<INode::Ref> createScale(InputOps inputs, InputParams params, const ScaleParameter& opts);
+ std::vector<INode::Ref> createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts);
+ std::vector<INode::Ref> createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts);
private:
Graph* graph = nullptr;