Merge pull request #10057 from LaurentBerger:ParaCalcError
[platform/upstream/opencv.git] / modules / dnn / src / layers / max_unpooling_layer.cpp
1 // This file is part of OpenCV project.
2 // It is subject to the license terms in the LICENSE file found in the top-level directory
3 // of this distribution and at http://opencv.org/license.html.
4
5 // Copyright (C) 2016, Intel Corporation, all rights reserved.
6 // Third party copyrights are property of their respective owners.
7
8 /*
9 Implementation of Max Unpooling layer.
10 */
11
12 #include "../precomp.hpp"
13 #include "layers_common.hpp"
14 #include "op_halide.hpp"
15 #include <opencv2/dnn/shape_utils.hpp>
16
17 namespace cv
18 {
19 namespace dnn
20 {
21
22 class MaxUnpoolLayerImpl : public MaxUnpoolLayer
23 {
24 public:
25     MaxUnpoolLayerImpl(const LayerParams& params)
26     {
27         setParamsFrom(params);
28         poolKernel = Size(params.get<int>("pool_k_w"), params.get<int>("pool_k_h"));
29         poolPad = Size(params.get<int>("pool_pad_w"), params.get<int>("pool_pad_h"));
30         poolStride = Size(params.get<int>("pool_stride_w"), params.get<int>("pool_stride_h"));
31     }
32
33     virtual bool supportBackend(int backendId)
34     {
35         return backendId == DNN_BACKEND_DEFAULT ||
36                backendId == DNN_BACKEND_HALIDE && haveHalide() &&
37                !poolPad.width && !poolPad.height;
38     }
39
40     bool getMemoryShapes(const std::vector<MatShape> &inputs,
41                          const int requiredOutputs,
42                          std::vector<MatShape> &outputs,
43                          std::vector<MatShape> &internals) const
44     {
45         CV_Assert(inputs.size() == 2);
46         CV_Assert(total(inputs[0]) == total(inputs[1]));
47
48         MatShape outShape = inputs[0];
49         outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
50         outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
51
52         outputs.clear();
53         outputs.push_back(outShape);
54
55         return false;
56     }
57
58     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
59     {
60         CV_TRACE_FUNCTION();
61         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
62
63         Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
64     }
65
66     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
67     {
68         CV_TRACE_FUNCTION();
69         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
70
71         CV_Assert(inputs.size() == 2);
72         Mat& input = *inputs[0];
73         Mat& indices = *inputs[1];
74
75         CV_Assert(input.total() == indices.total());
76         CV_Assert(input.size[0] == 1);
77         CV_Assert(input.isContinuous());
78
79         for(int i_n = 0; i_n < outputs.size(); i_n++)
80         {
81             Mat& outBlob = outputs[i_n];
82             outBlob.setTo(0);
83             CV_Assert(input.size[1] == outBlob.size[1]);
84             int outPlaneTotal = outBlob.size[2]*outBlob.size[3];
85
86             for (int i_c = 0; i_c < input.size[1]; i_c++)
87             {
88                 Mat outPlane = getPlane(outBlob, 0, i_c);
89                 int wh_area = input.size[2]*input.size[3];
90                 const float* inptr = input.ptr<float>(0, i_c);
91                 const float* idxptr = indices.ptr<float>(0, i_c);
92                 float* outptr = outPlane.ptr<float>();
93
94                 for(int i_wh = 0; i_wh < wh_area; i_wh++)
95                 {
96                     int index = idxptr[i_wh];
97                     CV_Assert(0 <= index && index < outPlaneTotal);
98                     outptr[index] = inptr[i_wh];
99                 }
100             }
101         }
102     }
103
104     virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input)
105     {
106 #ifdef HAVE_HALIDE
107         // Meaningless operation if false because if kernel > stride
108         // it is not deterministic and if kernel < stride we just
109         // skip a part of input data (you'd better change your model).
110         if (poolKernel.width != poolStride.width ||
111             poolKernel.height != poolStride.height)
112             CV_Error(cv::Error::StsNotImplemented,
113                      "Halide backend for maximum unpooling "
114                      "is not support cases when kernel != stride");
115
116         Halide::Var x("x"), y("y"), c("c"), n("n");
117         Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
118         Halide::Buffer<float> inputBuffer = halideBuffer(input[0]);
119         Halide::Buffer<float> indices = halideBuffer(input[1]);
120
121         Halide::Expr pooledX = x / poolKernel.width;
122         Halide::Expr pooledY = y / poolKernel.height;
123
124         const int outW = inputBuffer.width() * poolKernel.width;
125         top(x, y, c, n) = select(y * outW + x == indices(pooledX, pooledY, c, n),
126                                  inputBuffer(pooledX, pooledY, c, n), 0.0f);
127         return Ptr<BackendNode>(new HalideBackendNode(top));
128 #endif  // HAVE_HALIDE
129         return Ptr<BackendNode>();
130     }
131 };
132
133 Ptr<MaxUnpoolLayer> MaxUnpoolLayer::create(const LayerParams& params)
134 {
135     return Ptr<MaxUnpoolLayer>(new MaxUnpoolLayerImpl(params));
136 }
137
138 }
139 }