Added Exp layer
author     SamFC10 <njebastin10@gmail.com>
           Sat, 20 Feb 2021 16:46:00 +0000 (22:16 +0530)
committer  Alexander Alekhin <alexander.a.alekhin@gmail.com>
           Sun, 28 Feb 2021 19:59:40 +0000 (19:59 +0000)
backport of commit: 6111935835dfd557c6e5ada81f2acfd8a6ae32f9
partial backport of commit: dd5976162b58cc9f78a3abac42da283c0778162f

modules/dnn/include/opencv2/dnn/all_layers.hpp
modules/dnn/src/init.cpp
modules/dnn/src/layers/elementwise_layers.cpp
modules/dnn/src/opencl/activations.cl
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_halide_layers.cpp
modules/dnn/test/test_layers.cpp
modules/dnn/test/test_onnx_importer.cpp

diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp
index ffc2568..98d7671 100644
@@ -499,6 +499,14 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<PowerLayer> create(const LayerParams &params);
     };
 
+    class CV_EXPORTS ExpLayer : public ActivationLayer
+    {
+    public:
+        float base, scale, shift;
+
+        static Ptr<ExpLayer> create(const LayerParams &params);
+    };
+
     /* Layers used in semantic segmentation */
 
     class CV_EXPORTS CropLayer : public Layer
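
For reference, the new layer computes y = base^(scale*x + shift) elementwise,
with base == -1 selecting the natural base e. A minimal scalar sketch of that
contract (illustrative helper, not part of the patch):

    #include <cmath>

    // Reference semantics of ExpLayer; base == -1 selects the natural base e,
    // otherwise base must be positive.
    static inline float expLayerRef(float x, float base, float scale, float shift)
    {
        return (base == -1.f) ? std::exp(scale * x + shift)
                              : std::pow(base, scale * x + shift);
    }
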
diff --git a/modules/dnn/src/init.cpp b/modules/dnn/src/init.cpp
index be4e115..4c804dc 100644
@@ -110,6 +110,7 @@ void initializeLayerFactory()
     CV_DNN_REGISTER_LAYER_CLASS(BNLL,           BNLLLayer);
     CV_DNN_REGISTER_LAYER_CLASS(AbsVal,         AbsLayer);
     CV_DNN_REGISTER_LAYER_CLASS(Power,          PowerLayer);
+    CV_DNN_REGISTER_LAYER_CLASS(Exp,            ExpLayer);
     CV_DNN_REGISTER_LAYER_CLASS(BatchNorm,      BatchNormLayer);
     CV_DNN_REGISTER_LAYER_CLASS(MaxUnpool,      MaxUnpoolLayer);
     CV_DNN_REGISTER_LAYER_CLASS(Dropout,        BlankLayer);
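
Once registered, the layer can be constructed by its string name through the
layer factory, like any other activation. A minimal sketch (assumed usage,
not part of the patch):

    cv::dnn::LayerParams params;
    params.set("base", -1.0f);  // -1 selects the natural base e
    cv::Ptr<cv::dnn::Layer> layer =
        cv::dnn::LayerFactory::createLayerInstance("Exp", params);
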
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index d53393c..d47e088 100644
@@ -1202,6 +1202,105 @@ struct PowerFunctor : public BaseFunctor
     int64 getFLOPSPerElement() const { return power == 1 ? 2 : 10; }
 };
 
+struct ExpFunctor : public BaseFunctor
+{
+    typedef ExpLayer Layer;
+    float base, scale, shift;
+    float normScale, normShift;
+
+    ExpFunctor(float base_ = -1.f, float scale_ = 1.f, float shift_ = 0.f)
+        : base(base_), scale(scale_), shift(shift_)
+    {
+        // For base > 0 :
+        // y     = base^(scale * input + shift)
+        // ln(y) = ln(base)*(scale * input + shift)
+        // y     = exp((ln(base)*scale) * input + (ln(base)*shift))
+        // y     = exp(normalized_scale * input + normalized_shift)
+        CV_Check(base, base == -1.f || base > 0.f, "Unsupported 'base' value");
+        const float ln_base = (base == -1.f) ? 1.f : log(base);
+        normScale = scale * ln_base;
+        normShift = shift * ln_base;
+    }
+
+    bool supportBackend(int backendId, int targetId)
+    {
+        return backendId == DNN_BACKEND_OPENCV ||
+               backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+    }
+
+    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
+    {
+        float a = normScale, b = normShift;
+        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
+        {
+            for( int i = 0; i < len; i++ )
+            {
+                float x = srcptr[i];
+                dstptr[i] = exp(a*x + b);
+            }
+        }
+    }
+
+#ifdef HAVE_OPENCL
+    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
+    {
+        std::vector<UMat> inputs;
+        std::vector<UMat> outputs;
+
+        inps.getUMatVector(inputs);
+        outs.getUMatVector(outputs);
+        String buildopt = oclGetTMacro(inputs[0]);
+
+        for (size_t i = 0; i < inputs.size(); i++)
+        {
+            UMat& src = inputs[i];
+            UMat& dst = outputs[i];
+
+            ocl::Kernel kernel("ExpForward", ocl::dnn::activations_oclsrc, buildopt);
+            kernel.set(0, (int)src.total());
+            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
+            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
+            kernel.set(3, (float)normScale);
+            kernel.set(4, (float)normShift);
+
+            size_t gSize = src.total();
+            CV_Assert(kernel.run(1, &gSize, NULL, false));
+        }
+        return true;
+    }
+#endif
+
+#ifdef HAVE_HALIDE
+    void attachHalide(const Halide::Expr& input, Halide::Func& top)
+    {
+        Halide::Var x("x"), y("y"), c("c"), n("n");
+        top(x, y, c, n) = exp(normScale * input + normShift);
+    }
+#endif  // HAVE_HALIDE
+
+#ifdef HAVE_DNN_IE_NN_BUILDER_2019
+    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
+    {
+        CV_Error(Error::StsNotImplemented, "");
+    }
+#endif  // HAVE_DNN_IE_NN_BUILDER_2019
+
+#ifdef HAVE_DNN_NGRAPH
+    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
+    {
+        auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                 ngraph::Shape{1}, &normScale);
+        auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                                 ngraph::Shape{1}, &normShift);
+        auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
+        auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
+        return std::make_shared<ngraph::op::v0::Exp>(scale_shift);
+    }
+#endif  // HAVE_DNN_NGRAPH
+
+    int64 getFLOPSPerElement() const { return 3; }
+};
+
 struct ChannelsPReLUFunctor : public BaseFunctor
 {
     typedef ChannelsPReLULayer Layer;
@@ -1419,6 +1518,20 @@ Ptr<PowerLayer> PowerLayer::create(const LayerParams& params)
     return l;
 }
 
+Ptr<ExpLayer> ExpLayer::create(const LayerParams& params)
+{
+    float base = params.get<float>("base", -1.0f);
+    float scale = params.get<float>("scale", 1.0f);
+    float shift = params.get<float>("shift", 0.0f);
+    Ptr<ExpLayer> l(new ElementWiseLayer<ExpFunctor>(ExpFunctor(base, scale, shift)));
+    l->setParamsFrom(params);
+    l->base = base;
+    l->scale = scale;
+    l->shift = shift;
+
+    return l;
+}
+
 Ptr<Layer> ChannelsPReLULayer::create(const LayerParams& params)
 {
     CV_Assert(params.blobs.size() == 1);
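
A minimal end-to-end sketch of the CPU path (assumed usage; the net, input
values, and prints are illustrative, not part of the patch):

    #include <opencv2/dnn.hpp>
    #include <cmath>
    #include <cstdio>

    int main()
    {
        cv::dnn::LayerParams lp;
        lp.type = "Exp";
        lp.name = "exp1";
        lp.set("base", 2.0f);   // y = 2^(0.5*x + 1)
        lp.set("scale", 0.5f);
        lp.set("shift", 1.0f);

        cv::dnn::Net net;
        net.addLayerToPrev(lp.name, lp.type, lp);

        cv::Mat input = (cv::Mat_<float>(1, 4) << -1.f, 0.f, 1.f, 2.f);
        net.setInput(input);
        cv::Mat out = net.forward();

        for (int i = 0; i < 4; i++)  // compare against the closed form
            std::printf("%g (expected %g)\n", out.at<float>(i),
                        std::pow(2.f, 0.5f * input.at<float>(i) + 1.f));
        return 0;
    }
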
diff --git a/modules/dnn/src/opencl/activations.cl b/modules/dnn/src/opencl/activations.cl
index b900e6a..68f0dd7 100644
@@ -140,3 +140,14 @@ __kernel void ELUForward(const int n, __global const T* in, __global T* out)
     out[index] = (src >= 0.f) ? src : exp(src) - 1;
   }
 }
+
+__kernel void ExpForward(const int n, __global const T* in, __global T* out,
+                         const KERNEL_ARG_DTYPE normScale,
+                         const KERNEL_ARG_DTYPE normShift)
+{
+  int index = get_global_id(0);
+  if (index < n)
+  {
+    out[index] = exp(normShift + normScale * in[index]);
+  }
+}
diff --git a/modules/dnn/src/tensorflow/tf_importer.cpp b/modules/dnn/src/tensorflow/tf_importer.cpp
index cbe6418..c03ac8a 100644
@@ -2425,7 +2425,7 @@ void TFImporter::parseNode(const tensorflow::NodeDef& layer_)
             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, num_inputs);
         }
         else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
-                 type == "Relu" || type == "Elu" ||
+                 type == "Relu" || type == "Elu" || type == "Exp" ||
                  type == "Identity" || type == "Relu6")
         {
             CV_CheckGT(num_inputs, 0, "");
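
With this one-line change, TensorFlow nodes of op type "Exp" take the generic
single-input activation path and map to the dnn layer of the same name; the
default base of -1 (natural e) matches the semantics of tf.exp.
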
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 295d930..a1b13b7 100644
@@ -621,6 +621,31 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Power, Combine(
                         dnnBackendsAndTargetsWithHalide()
 ));
 
+typedef TestWithParam<tuple<Vec3f, tuple<Backend, Target> > > Exp;
+TEST_P(Exp, Accuracy)
+{
+    float base = get<0>(GetParam())[0];
+    float scale = get<0>(GetParam())[1];
+    float shift = get<0>(GetParam())[2];
+    Backend backendId = get<0>(get<1>(GetParam()));
+    Target targetId = get<1>(get<1>(GetParam()));
+
+    LayerParams lp;
+    lp.set("base", base);
+    lp.set("scale", scale);
+    lp.set("shift", shift);
+    lp.type = "Exp";
+    lp.name = "testLayer";
+    testInPlaceActivation(lp, backendId, targetId);
+}
+
+INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, Exp, Combine(
+/*base, scale, shift*/ Values(Vec3f(0.9f, -1.0f, 1.1f), Vec3f(0.9f, 1.1f, -1.0f),
+                              Vec3f(-1.0f, 0.9f, 1.1f), Vec3f(-1.0f, 1.1f, 0.9f),
+                              Vec3f(1.1f, 0.9f, -1.0f), Vec3f(1.1f, -1.0f, 0.9f)),
+                       dnnBackendsAndTargetsWithHalide()
+));
+
 TEST_P(Test_Halide_layers, ChannelsPReLU)
 {
     LayerParams lp;
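
As a usage note, the new cases can be run in isolation with the usual
GoogleTest filter, e.g. "opencv_test_dnn --gtest_filter=Layer_Test_Halide/Exp.*"
(assuming OpenCV's standard dnn test binary name).
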
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index 327f3e9..73491fc 100644
@@ -2107,6 +2107,12 @@ public:
             randu(scales, -1.0f, 1.0f);
             activationParams.blobs.push_back(scales);
         }
+        else if (activationParams.type == "Exp")
+        {
+            activationParams.set("base", -1.0f);
+            activationParams.set("scale", 0.3f);
+            activationParams.set("shift", 0.6f);
+        }
     }
 
     static void makeDefaultTestEltwiseLayer(LayerParams& eltwiseParams, const std::string& op, bool withCoefficients)
@@ -2178,7 +2184,7 @@ public:
     static testing::internal::ParamGenerator<std::string> activationLayersList()
     {
         // TODO: automate list generation
-        return Values("ReLU", "ReLU6", "ChannelsPReLU", "TanH", "Swish", "Mish", "Sigmoid", "ELU", "AbsVal", "BNLL", "Power");
+        return Values("ReLU", "ReLU6", "ChannelsPReLU", "TanH", "Swish", "Mish", "Sigmoid", "ELU", "AbsVal", "BNLL", "Power", "Exp");
     }
 
     static testing::internal::ParamGenerator<tuple<Backend, Target> > dnnBackendsAndTargetsForFusionTests()
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 9a1f28c..eb63aa0 100644
@@ -307,6 +307,13 @@ TEST_P(Test_ONNX_layers, Power)
     testONNXModels("pow2", npy, 0, 0, false, false);
 }
 
+TEST_P(Test_ONNX_layers, Exp)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    testONNXModels("exp");
+}
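
Note: testONNXModels("exp") assumes a matching exp.onnx model and reference
input/output blobs in the opencv_extra testdata tree (dnn/onnx), as for the
other ONNX layer tests.
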
+
 TEST_P(Test_ONNX_layers, Concatenation)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)