Merge pull request #9139 from Cartucho:improve_match_template_py
[platform/upstream/opencv.git] / modules / dnn / src / layers / max_unpooling_layer.cpp
1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
4
5 // Copyright (C) 2016, Intel Corporation, all rights reserved.
6 // Third party copyrights are property of their respective owners.
7
8 /*
Implementation of the Max Unpooling layer.
10 */
11
12 #include "../precomp.hpp"
13 #include "layers_common.hpp"
14 #include "op_halide.hpp"
15 #include <opencv2/dnn/shape_utils.hpp>
16
17 namespace cv
18 {
19 namespace dnn
20 {
21
22 class MaxUnpoolLayerImpl : public MaxUnpoolLayer
23 {
24 public:
25     MaxUnpoolLayerImpl(const LayerParams& params)
26     {
27         setParamsFrom(params);
28         poolKernel = Size(params.get<int>("pool_k_w"), params.get<int>("pool_k_h"));
29         poolPad = Size(params.get<int>("pool_pad_w"), params.get<int>("pool_pad_h"));
30         poolStride = Size(params.get<int>("pool_stride_w"), params.get<int>("pool_stride_h"));
31     }
32
33     virtual bool supportBackend(int backendId)
34     {
35         return backendId == DNN_BACKEND_DEFAULT ||
36                backendId == DNN_BACKEND_HALIDE && haveHalide() &&
37                !poolPad.width && !poolPad.height;
38     }
39
40     bool getMemoryShapes(const std::vector<MatShape> &inputs,
41                          const int requiredOutputs,
42                          std::vector<MatShape> &outputs,
43                          std::vector<MatShape> &internals) const
44     {
45         CV_Assert(inputs.size() == 2);
46         CV_Assert(total(inputs[0]) == total(inputs[1]));
47
48         MatShape outShape = inputs[0];
49         outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
50         outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
51
52         outputs.clear();
53         outputs.push_back(outShape);
54
55         return false;
56     }
57
58     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
59     {
60         CV_TRACE_FUNCTION();
61         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
62
63         CV_Assert(inputs.size() == 2);
64         Mat& input = *inputs[0];
65         Mat& indices = *inputs[1];
66
67         CV_Assert(input.total() == indices.total());
68         CV_Assert(input.size[0] == 1);
69         CV_Assert(input.isContinuous());
70
71         for(int i_n = 0; i_n < outputs.size(); i_n++)
72         {
73             Mat& outBlob = outputs[i_n];
74             outBlob.setTo(0);
75             CV_Assert(input.size[1] == outBlob.size[1]);
76             int outPlaneTotal = outBlob.size[2]*outBlob.size[3];
77
78             for (int i_c = 0; i_c < input.size[1]; i_c++)
79             {
80                 Mat outPlane = getPlane(outBlob, 0, i_c);
81                 int wh_area = input.size[2]*input.size[3];
82                 const float* inptr = input.ptr<float>(0, i_c);
83                 const float* idxptr = indices.ptr<float>(0, i_c);
84                 float* outptr = outPlane.ptr<float>();
85
86                 for(int i_wh = 0; i_wh < wh_area; i_wh++)
87                 {
88                     int index = idxptr[i_wh];
89                     CV_Assert(0 <= index && index < outPlaneTotal);
90                     outptr[index] = inptr[i_wh];
91                 }
92             }
93         }
94     }
95
96     virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
97     {
98 #ifdef HAVE_HALIDE
99         // Meaningless operation if false because if kernel > stride
100         // it is not deterministic and if kernel < stride we just
101         // skip a part of input data (you'd better change your model).
102         if (poolKernel.width != poolStride.width ||
103             poolKernel.height != poolStride.height)
104             CV_Error(cv::Error::StsNotImplemented,
105                      "Halide backend for maximum unpooling "
106                      "is not support cases when kernel != stride");
107
108         Halide::Var x("x"), y("y"), c("c"), n("n");
109         Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
110         Halide::Buffer<float> inputBuffer = halideBuffer(input[0]);
111         Halide::Buffer<float> indices = halideBuffer(input[1]);
112
113         Halide::Expr pooledX = x / poolKernel.width;
114         Halide::Expr pooledY = y / poolKernel.height;
115
116         const int outW = inputBuffer.width() * poolKernel.width;
117         top(x, y, c, n) = select(y * outW + x == indices(pooledX, pooledY, c, n),
118                                  inputBuffer(pooledX, pooledY, c, n), 0.0f);
119         return Ptr<BackendNode>(new HalideBackendNode(top));
120 #endif  // HAVE_HALIDE
121         return Ptr<BackendNode>();
122     }
123 };
124
125 Ptr<MaxUnpoolLayer> MaxUnpoolLayer::create(const LayerParams& params)
126 {
127     return Ptr<MaxUnpoolLayer>(new MaxUnpoolLayerImpl(params));
128 }
129
130 }
131 }