int64 getFLOPSPerElement() const { return power == 1 ? 2 : 10; }
};
+struct ExpFunctor : public BaseFunctor
+{
+ typedef ExpLayer Layer;
+ float base, scale, shift;
+ float normScale, normShift;
+
+ ExpFunctor(float base_ = -1.f, float scale_ = 1.f, float shift_ = 0.f)
+ : base(base_), scale(scale_), shift(shift_)
+ {
+ // For base > 0:
+ // y = base^(scale * input + shift)
+ // ln(y) = ln(base) * (scale * input + shift)
+ // y = exp((ln(base) * scale) * input + (ln(base) * shift))
+ // y = exp(normScale * input + normShift)
+ // For the default base == -1, the natural base e is used, i.e. ln(base) is taken as 1.
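+ // Example: base = 2, scale = 3, shift = 1 gives y = 2^(3*x + 1),
+ // so normScale = 3*ln(2) ~= 2.0794 and normShift = ln(2) ~= 0.6931.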
+ CV_Check(base, base == -1.f || base > 0.f, "Unsupported 'base' value");
+ const float ln_base = (base == -1.f) ? 1.f : log(base);
+ normScale = scale * ln_base;
+ normShift = shift * ln_base;
+ }
+
+ bool supportBackend(int backendId, int targetId)
+ {
+ return backendId == DNN_BACKEND_OPENCV ||
+ backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+ }
+
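+ // CPU path: dst = exp(normScale * src + normShift), applied element-wise
+ // over the channel planes [cn0, cn1).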
+ void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
+ {
+ float a = normScale, b = normShift;
+ for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
+ {
+ for( int i = 0; i < len; i++ )
+ {
+ float x = srcptr[i];
+ dstptr[i] = exp(a*x + b);
+ }
+ }
+ }
+
+#ifdef HAVE_OPENCL
+ bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
+ {
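+ // OpenCL path: run the ExpForward kernel (ocl::dnn::activations_oclsrc) on each
+ // input/output UMat pair, passing normScale and normShift as kernel arguments.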
+ std::vector<UMat> inputs;
+ std::vector<UMat> outputs;
+
+ inps.getUMatVector(inputs);
+ outs.getUMatVector(outputs);
+ String buildopt = oclGetTMacro(inputs[0]);
+
+ for (size_t i = 0; i < inputs.size(); i++)
+ {
+ UMat& src = inputs[i];
+ UMat& dst = outputs[i];
+
+ ocl::Kernel kernel("ExpForward", ocl::dnn::activations_oclsrc, buildopt);
+ kernel.set(0, (int)src.total());
+ kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
+ kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
+ kernel.set(3, (float)normScale);
+ kernel.set(4, (float)normShift);
+
+ size_t gSize = src.total();
+ CV_Assert(kernel.run(1, &gSize, NULL, false));
+ }
+ return true;
+ }
+#endif
+
+#ifdef HAVE_HALIDE
+ void attachHalide(const Halide::Expr& input, Halide::Func& top)
+ {
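+ // Halide backend: define the output Func as exp(normScale * input + normShift).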
+ Halide::Var x("x"), y("y"), c("c"), n("n");
+ top(x, y, c, n) = exp(normScale * input + normShift);
+ }
+#endif // HAVE_HALIDE
+
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+ InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+ {
+ CV_Error(Error::StsNotImplemented, "");
+ }
+#endif // HAVE_DNN_IE_NN_BUILDER_2019
+
+#ifdef HAVE_DNN_NGRAPH
+ std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+ {
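+ // Build the nGraph subgraph Exp(x * normScale + normShift) from scalar
+ // constants broadcast (NUMPY rules) over the input node.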
+ auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ ngraph::Shape{1}, &normScale);
+ auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+ ngraph::Shape{1}, &normShift);
+ auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
+ auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+ return std::make_shared<ngraph::op::v0::Exp>(scale_shift);
+ }
+#endif // HAVE_DNN_NGRAPH
+
+ int64 getFLOPSPerElement() const { return 3; }
+};
+
struct ChannelsPReLUFunctor : public BaseFunctor
{
typedef ChannelsPReLULayer Layer;
return l;
}
+Ptr<ExpLayer> ExpLayer::create(const LayerParams& params)
+{
+ float base = params.get<float>("base", -1.0f);
+ float scale = params.get<float>("scale", 1.0f);
+ float shift = params.get<float>("shift", 0.0f);
+ Ptr<ExpLayer> l(new ElementWiseLayer<ExpFunctor>(ExpFunctor(base, scale, shift)));
+ l->setParamsFrom(params);
+ l->base = base;
+ l->scale = scale;
+ l->shift = shift;
+
+ return l;
+}
+
Ptr<Layer> ChannelsPReLULayer::create(const LayerParams& params)
{
CV_Assert(params.blobs.size() == 1);
dnnBackendsAndTargetsWithHalide()
));
+typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Exp;
+TEST_P(Exp, Accuracy)
+{
+ float base = get<0>(GetParam())[0];
+ float scale = get<0>(GetParam())[1];
+ float shift = get<0>(GetParam())[2];
+ Backend backendId = get<0>(get<1>(GetParam()));
+ Target targetId = get<1>(get<1>(GetParam()));
+
+ LayerParams lp;
+ lp.set("base", base);
+ lp.set("scale", scale);
+ lp.set("shift", shift);
+ lp.type = "Exp";
+ lp.name = "testLayer";
+ testInPlaceActivation(lp, backendId, targetId);
+}
+
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Exp, Combine(
+/*base, scale, shift*/ Values(Vec3f(0.9f, -1.0f, 1.1f), Vec3f(0.9f, 1.1f, -1.0f),
+ Vec3f(-1.0f, 0.9f, 1.1f), Vec3f(-1.0f, 1.1f, 0.9f),
+ Vec3f(1.1f, 0.9f, -1.0f), Vec3f(1.1f, -1.0f, 0.9f)),
+ dnnBackendsAndTargetsWithHalide()
+));
+
TEST_P(Test_Halide_layers, ChannelsPReLU)
{
LayerParams lp;
randu(scales, -1.0f, 1.0f);
activationParams.blobs.push_back(scales);
}
+ else if (activationParams.type == "Exp")
+ {
+ activationParams.set("base", -1.0f);
+ activationParams.set("scale", 0.3f);
+ activationParams.set("shift", 0.6f);
+ }
}
static void makeDefaultTestEltwiseLayer(LayerParams& eltwiseParams, const std::string& op, bool withCoefficients)
static testing::internal::ParamGenerator<std::string> activationLayersList()
{
// TODO: automate list generation
- return Values("ReLU", "ReLU6", "ChannelsPReLU", "TanH", "Swish", "Mish", "Sigmoid", "ELU", "AbsVal", "BNLL", "Power");
+ return Values("ReLU", "ReLU6", "ChannelsPReLU", "TanH", "Swish", "Mish", "Sigmoid", "ELU", "AbsVal", "BNLL", "Power", "Exp");
}
static testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsForFusionTests()