modules/dnn/src/layers/scale_layer.cpp
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Implementation of Scale layer.
*/

#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cv
{
namespace dnn
{

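// Scales the input blob by per-axis-range weights and optionally adds a bias.
// The weights come either from a trainable blob or, when the layer has two
// inputs, from the second input blob.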
class ScaleLayerImpl CV_FINAL : public ScaleLayer
{
public:
    ScaleLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        hasBias = params.get<bool>("bias_term", false);
        axis = params.get<int>("axis", 1);
        hasWeights = false;
    }

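    // Scale is element-wise, so the output shape always matches the first input.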
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        outputs.assign(1, inputs[0]);
        return true;
    }

    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<Mat> inputs;
        inputs_arr.getMatVector(inputs);
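        // Two trainable blobs mean both weights and bias; a single blob is
        // weights unless bias_term is set, in which case it is the bias.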
        hasWeights = blobs.size() == 2 || (blobs.size() == 1 && !hasBias);
        CV_Assert((inputs.size() == 2 && blobs.empty()) || blobs.size() == (int)hasWeights + (int)hasBias);
    }

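    // The Inference Engine ScaleShift primitive scales along the channel
    // axis only, hence the axis == 1 restriction.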
    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1);
    }

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        // FP16 blobs are stored as CV_16S; route them through the generic
        // fallback, which converts to FP32 and back.
        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert_N(outputs.size() == 1, !blobs.empty() || inputs.size() == 2);

        Mat &inpBlob = inputs[0];
        Mat &outBlob = outputs[0];
        // In the two-input mode the first blob is multiplied by the second
        // one instead of by trainable weights.
        Mat weights = blobs.empty() ? inputs[1] : (hasWeights ? blobs[0] : Mat());
        Mat bias = hasBias ? blobs.back().reshape(1, 1) : Mat();
        if (!weights.empty())
            weights = weights.reshape(1, 1);
        MatShape inpShape = shape(inpBlob);
        const int numWeights = !weights.empty() ? (int)weights.total() : (int)bias.total();
        CV_Assert(numWeights != 0);
        if (hasWeights && hasBias)
            CV_CheckEQ(weights.total(), bias.total(), "Incompatible weights/bias blobs");

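        // Find the axis range [axis, endAxis) whose total size matches the
        // number of weights; the weights then broadcast over the remaining
        // (spatial) axes.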
        int endAxis;
        for (endAxis = axis + 1; endAxis <= inpBlob.dims; ++endAxis)
        {
            if (total(inpShape, axis, endAxis) == numWeights)
                break;
        }
        CV_Assert(total(inpShape, axis, endAxis) == numWeights);
        CV_Assert(!hasBias || numWeights == (int)bias.total());
        CV_CheckTypeEQ(inpBlob.type(), CV_32FC1, "");
        CV_CheckTypeEQ(outBlob.type(), CV_32FC1, "");

        int numSlices = total(inpShape, 0, axis);
        float* inpData = (float*)inpBlob.data;
        float* outData = (float*)outBlob.data;

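        // With trailing spatial axes, apply the same (w, b) pair to every
        // element of each spatial slice via convertTo; otherwise each row of
        // numWeights elements is processed with vectorized multiply/add.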
        if (endAxis != inpBlob.dims)
        {
            float* weightsData = !weights.empty() ? (float*)weights.data : 0;
            float* biasesData = hasBias ? (float*)bias.data : 0;
            int spatialSize = total(inpShape, endAxis);  // spatialSize != 1
            for (int i = 0; i < numSlices; ++i)
            {
                for (int j = 0; j < numWeights; ++j)
                {
                    float w = weightsData ? weightsData[j] : 1;
                    float b = biasesData ? biasesData[j] : 0;
                    Mat inpSlice(1, spatialSize, CV_32F, inpData);
                    Mat outSlice(1, spatialSize, CV_32F, outData);
                    inpSlice.convertTo(outSlice, CV_32F, w, b);
                    inpData += spatialSize;
                    outData += spatialSize;
                }
            }
        }
        else
        {
            for (int i = 0; i < numSlices; ++i)
            {
                Mat inpSlice(1, numWeights, CV_32F, inpData);
                Mat outSlice(1, numWeights, CV_32F, outData);
                if (!weights.empty())
                {
                    multiply(inpSlice, weights, outSlice);
                    if (hasBias)
                        add(outSlice, bias, outSlice);
                }
                else if (hasBias)
                    add(inpSlice, bias, outSlice);
                inpData += numWeights;
                outData += numWeights;
            }
        }
    }

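    // Fuse this scale/shift into the preceding Halide node instead of
    // emitting a separate stage.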
    virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
    {
        switch (node->backendId)
        {
            case DNN_BACKEND_HALIDE:
            {
#ifdef HAVE_HALIDE
                auto base = node.dynamicCast<HalideBackendNode>();
                Halide::Func& input = base->funcs.back();
                Halide::Var x("x"), y("y"), c("c"), n("n");
                Halide::Func top = attachHalide(input(x, y, c, n));
                return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif  // HAVE_HALIDE
                break;
            }
        }
        return Ptr<BackendNode>();
    }

    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> input = halideBuffer(inputs[0]);
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = attachHalide(input(x, y, c, n));
        return Ptr<BackendNode>(new HalideBackendNode(top));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }

#ifdef HAVE_HALIDE
    // attachHalide works both with a Halide::Buffer and a Halide::Func input
    // expression; in the latter case the scale is fused into that function.
    Halide::Func attachHalide(const Halide::Expr& input)
    {
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Var x("x"), y("y"), c("c"), n("n");

        const int numChannels = (int)blobs[0].total();

        Halide::Expr topExpr = input;
        if (hasWeights)
        {
            auto weights = wrapToHalideBuffer(blobs[0], {numChannels});
            topExpr *= weights(c);
        }
        if (hasBias)
        {
            auto bias = wrapToHalideBuffer(blobs.back(), {numChannels});
            topExpr += bias(c);
        }
        top(x, y, c, n) = topExpr;
        return top;
    }
#endif  // HAVE_HALIDE

#ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);

        CV_Assert(!blobs.empty());
        const size_t numChannels = blobs[0].total();
        if (hasWeights)
        {
            addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
        }
        else
        {
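            // ScaleShift always expects a weights blob, so supply identity
            // (all-ones) weights when only the bias is trainable.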
            auto weights = InferenceEngine::make_shared_blob<float>({
                               InferenceEngine::Precision::FP32, {numChannels},
                               InferenceEngine::Layout::C
                           });
            weights->allocate();
            float* buf = weights->buffer().as<float*>();
            std::fill(buf, buf + numChannels, 1.0f);
            addConstantData("weights", weights, l);
        }
        if (hasBias)
            addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
        return Ptr<BackendNode>(new InfEngineBackendNode(l));
    }
#endif  // HAVE_INF_ENGINE

    void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
    {
        scale = hasWeights ? blobs[0] : Mat();
        shift = hasBias ? blobs.back() : Mat();
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_UNUSED(outputs); // suppress unused variable warning
        int64 flops = 0;
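        // One multiply and one add per element.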
        for (size_t i = 0; i < inputs.size(); i++)
        {
            flops += 2 * (int64)total(inputs[i]);
        }
        return flops;
    }

private:
    bool hasWeights;
};


Ptr<ScaleLayer> ScaleLayer::create(const LayerParams& params)
{
    return Ptr<ScaleLayer>(new ScaleLayerImpl(params));
}

Ptr<Layer> ShiftLayer::create(const LayerParams& params)
{
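    // Shift is implemented as a bias-only Scale layer that starts
    // broadcasting from axis 0.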
    LayerParams scaleParams;
    scaleParams.name = params.name;
    scaleParams.type = "Scale";
    scaleParams.blobs = params.blobs;
    scaleParams.set("bias_term", true);
    scaleParams.set("axis", 0);
    return Ptr<ScaleLayer>(new ScaleLayerImpl(scaleParams));
}

}  // namespace dnn
}  // namespace cv