1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
14 // Copyright (C) 2017, Intel Corporation, all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "../precomp.hpp"
44 #include "layers_common.hpp"
45 #include "../op_inf_engine.hpp"
47 namespace cv { namespace dnn {
// Lp-normalization layer (SSD-style "Normalize"/NormalizeBBox): scales each
// input sample so that the Lp norm over a configurable axis range
// [startAxis, endAxis] equals 1, optionally multiplying by a trained
// per-channel or scalar weight blob afterwards.
49 class NormalizeBBoxLayerImpl CV_FINAL : public NormalizeBBoxLayer
52 NormalizeBBoxLayerImpl(const LayerParams& params)
// Pull the layer's hyper-parameters out of the imported model definition.
54 setParamsFrom(params);
// Order of the norm; the default p = 2 gives the usual L2 normalization.
55 pnorm = params.get<float>("p", 2);
// Small constant added to the accumulated |x|^p sum (see forward) so the
// subsequent root/division is numerically safe.
56 epsilon = params.get<float>("eps", 1e-10f);
// across_spatial == true: one norm over the whole normalized volume per
// sample; false: an independent norm at every spatial position.
57 acrossSpatial = params.get<bool>("across_spatial", true);
58 startAxis = params.get<int>("start_axis", 1);
// "across_spatial" and "end_axis" are two alternative ways of bounding the
// normalized axis range, so a model may specify at most one of them.
59 CV_Assert(!params.has("across_spatial") || !params.has("end_axis"));
// -1 is a sentinel for "the last axis"; it is resolved to a concrete axis
// index in finalize() once the input rank is known.
60 endAxis = params.get<int>("end_axis", acrossSpatial ? -1 : startAxis);
// Reports which computational backends can run this layer with the current
// configuration. (NOTE(review): some lines of this method's body appear to
// be elided in this view — verify against the full file.)
64 virtual bool supportBackend(int backendId) CV_OVERRIDE
66 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
// IE restrictions: MYRIAD supports only the per-spatial-position mode
// (!acrossSpatial); other IE targets require the normalized range to start
// at the channel axis (startAxis == 1).
71 return preferableTarget == DNN_TARGET_MYRIAD ? !acrossSpatial : startAxis == 1;
// Anything else falls back to the reference OpenCV implementation.
73 return backendId == DNN_BACKEND_OPENCV;
// Declares output and scratch-buffer shapes. The output shape mirrors the
// single input (delegated to the base Layer implementation).
76 bool getMemoryShapes(const std::vector<MatShape> &inputs,
77 const int requiredOutputs,
78 std::vector<MatShape> &outputs,
79 std::vector<MatShape> &internals) const CV_OVERRIDE
// This layer accepts exactly one input blob.
81 CV_Assert(inputs.size() == 1);
82 Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
// One internal scratch buffer shaped like a single sample of the input;
// forward() reuses it for every sample in the batch, hence batch dim = 1.
83 internals.resize(1, inputs[0]);
84 internals[0][0] = 1; // Batch size.
// Resolves the -1 axis sentinels now that the concrete input rank is known,
// and recomputes acrossSpatial so it stays consistent with the resolved
// [startAxis, endAxis] range.
88 void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays) CV_OVERRIDE
90 std::vector<Mat> inputs;
91 inputs_arr.getMatVector(inputs);
92 CV_Assert(inputs.size() == 1);
// -1 means "last axis" for both bounds.
93 endAxis = endAxis == -1 ? (inputs[0].dims - 1) : endAxis;
94 startAxis = startAxis == -1 ? (inputs[0].dims - 1) : startAxis;
// acrossSpatial is equivalent to normalizing over all axes after the batch.
95 acrossSpatial = (startAxis == 1 && endAxis == inputs[0].dims - 1);
// OpenCL implementation of the forward pass. Mirrors the CPU path in
// forward() below using UMat arithmetic. Returns whether the OpenCL path
// handled the call (so the caller can fall back to CPU otherwise).
// (NOTE(review): several lines — braces, else-branches and local temporary
// declarations such as `abs_mat` and the column-norm `norm` UMat — appear to
// be elided in this view; verify against the full file.)
99 bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
101 std::vector<UMat> inputs;
102 std::vector<UMat> outputs;
103 std::vector<UMat> internals;
// CV_16S is used by the DNN module to tag packed half-precision blobs;
// presumably the elided body bails out to the CPU fallback — TODO confirm.
105 if (inputs_.depth() == CV_16S)
108 inputs_.getUMatVector(inputs);
109 outputs_.getUMatVector(outputs);
110 internals_.getUMatVector(internals);
112 CV_Assert(inputs.size() == 1 && outputs.size() == 1);
113 CV_Assert(inputs[0].total() == outputs[0].total());
115 const UMat& inp0 = inputs[0];
// Scratch buffer declared in getMemoryShapes (single-sample shape).
116 UMat& buffer = internals[0];
// Map possibly-negative axes into [0, dims).
117 startAxis = clamp(startAxis, inp0.dims);
118 endAxis = clamp(endAxis, inp0.dims);
// Factor the blob into:  num (outer dims before startAxis)
//                      x numPlanes (dims in the normalized range)
//                      x planeSize (remaining inner dims).
120 size_t num = total(shape(inp0.size), 0, startAxis);
121 size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
122 size_t planeSize = inp0.total() / (num * numPlanes);
// Flatten, then view as num rows so each row is one outer sample.
123 MatShape s = shape(1, inputs[0].total());
124 UMat inp = inputs[0].reshape(1, s.size(), &s[0]).reshape(1, num);
125 UMat out = outputs[0].reshape(1, s.size(), &s[0]).reshape(1, num);
126 for (size_t i = 0; i < num; ++i)
// Per-sample 2D views: numPlanes rows x planeSize columns.
128 s = shape(numPlanes, planeSize);
129 UMat src = inp.row(i).reshape(1, s.size(), &s[0]);
130 UMat dst = out.row(i).reshape(1, s.size(), &s[0]);
// buffer = |src|^p  (absdiff against zero computes the absolute value).
133 absdiff(src, cv::Scalar::all(0), abs_mat);
134 pow(abs_mat, pnorm, buffer);
// Across-spatial mode (presumably the branch guard is elided here): one
// scalar norm over the entire sample.
138 // add eps to avoid overflow
139 float absSum = sum(buffer)[0] + epsilon;
140 float norm = pow(absSum, 1.0f / pnorm);
141 multiply(src, 1.0f / norm, dst);
// Per-position mode: sum the |x|^p values down each column, giving one
// norm per spatial position (row vector of planeSize entries).
146 reduce(buffer, norm, 0, REDUCE_SUM);
149 // compute inverted norm to call multiply instead divide
150 cv::pow(norm, -1.0f / pnorm, norm);
// Broadcast the row of inverted norms over all numPlanes rows.
152 repeat(norm, numPlanes, 1, buffer);
153 multiply(src, buffer, dst);
// Optional trained scale blob: a single scalar, or one weight per plane
// (channel) broadcast across columns.
159 Mat scale = blobs[0];
160 if (scale.total() == 1)
163 multiply(dst, scale.at<float>(0, 0), dst);
167 // _scale: _channels x 1
168 CV_Assert(scale.total() == numPlanes);
169 repeat(scale, 1, dst.cols, buffer);
170 multiply(dst, buffer, dst);
// CPU reference forward pass. For each of `num` outer samples, views the
// data as a numPlanes x planeSize matrix, normalizes it by its Lp norm
// (either one scalar norm per sample, or one norm per column/spatial
// position), then applies the optional trained scale blob.
// (NOTE(review): braces, else-branches and the declaration of the local
// column-norm `norm` Mat appear to be elided in this view — verify against
// the full file.)
178 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
181 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
// Try the OpenCL path first when the target is an OpenCL device.
183 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
184 forward_ocl(inputs_arr, outputs_arr, internals_arr))
// Packed FP16 blobs (tagged CV_16S) are handled via the generic fallback,
// which converts to FP32 and re-enters this layer.
186 if (inputs_arr.depth() == CV_16S)
188 forward_fallback(inputs_arr, outputs_arr, internals_arr);
192 std::vector<Mat> inputs, outputs, internals;
193 inputs_arr.getMatVector(inputs);
194 outputs_arr.getMatVector(outputs);
195 internals_arr.getMatVector(internals);
197 CV_Assert(inputs.size() == 1 && outputs.size() == 1);
198 CV_Assert(inputs[0].total() == outputs[0].total());
200 const Mat& inp0 = inputs[0];
// Single-sample scratch buffer allocated via getMemoryShapes.
201 Mat& buffer = internals[0];
// Map possibly-negative axes into [0, dims).
202 startAxis = clamp(startAxis, inp0.dims);
203 endAxis = clamp(endAxis, inp0.dims);
205 const float* inpData = inp0.ptr<float>();
206 float* outData = outputs[0].ptr<float>();
// Factor the blob into num (outer) x numPlanes (normalized) x planeSize
// (inner) — see the identical decomposition in forward_ocl.
208 size_t num = total(shape(inp0.size), 0, startAxis);
209 size_t numPlanes = total(shape(inp0.size), startAxis, endAxis + 1);
// Guard the division below against a degenerate zero-sized factorization.
210 CV_Assert(num * numPlanes != 0);
211 size_t planeSize = inp0.total() / (num * numPlanes);
212 for (size_t n = 0; n < num; ++n)
// Zero-copy 2D views over the current sample's data.
214 Mat src = Mat(numPlanes, planeSize, CV_32F, (void*)inpData);
215 Mat dst = Mat(numPlanes, planeSize, CV_32F, (void*)outData);
// buffer = |src|^p
216 cv::pow(abs(src), pnorm, buffer);
// Across-spatial mode (branch guard presumably elided): a single scalar
// norm over the whole sample.
220 // add eps to avoid overflow
221 float absSum = sum(buffer)[0] + epsilon;
222 float norm = pow(absSum, 1.0f / pnorm);
223 multiply(src, 1.0f / norm, dst);
// Per-position mode: column-wise sum gives one norm per spatial position.
228 reduce(buffer, norm, 0, REDUCE_SUM);
231 // compute inverted norm to call multiply instead divide
232 cv::pow(norm, -1.0f / pnorm, norm);
// Broadcast the row of inverted norms over all numPlanes rows.
234 repeat(norm, numPlanes, 1, buffer);
235 multiply(src, buffer, dst);
// Optional trained scale: scalar, or one weight per plane (channel).
241 Mat scale = blobs[0];
242 if (scale.total() == 1)
245 dst *= scale.at<float>(0, 0);
249 // _scale: _channels x 1
250 CV_Assert(scale.total() == numPlanes);
251 repeat(scale, 1, dst.cols, buffer);
252 multiply(dst, buffer, dst);
// Advance raw pointers to the next sample's plane block.
255 inpData += numPlanes * planeSize;
256 outData += numPlanes * planeSize;
260 #ifdef HAVE_INF_ENGINE
// Builds the Inference Engine (IE Builder API) node for this layer:
// a Normalize layer for 4D inputs, a GRN layer otherwise.
// (NOTE(review): some brace/else lines appear to be elided in this view —
// verify against the full file.)
261 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
263 InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
264 std::vector<size_t> dims = input->getDims();
// 4D (NCHW) inputs map onto IE's Normalize primitive.
265 if (dims.size() == 4)
267 InferenceEngine::Builder::NormalizeLayer ieLayer(name);
269 ieLayer.setChannelShared(false);
270 ieLayer.setAcrossMaps(acrossSpatial);
271 ieLayer.setEpsilon(epsilon);
273 InferenceEngine::Builder::Layer l = ieLayer;
274 const int numChannels = dims[1];
275 InferenceEngine::Blob::Ptr weights;
// No trained scale blob (presumably the guard checking blobs.empty() is
// elided here): supply an all-ones per-channel weight vector so the IE
// primitive reduces to plain normalization.
278 weights = InferenceEngine::make_shared_blob<float>({
279 InferenceEngine::Precision::FP32,
280 {(size_t)numChannels}, InferenceEngine::Layout::C
284 Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
285 Mat(numChannels, 1, CV_32F, Scalar(1)).copyTo(weightsMat);
286 l.getParameters()["channel_shared"] = false;
// Trained scale present: it must hold either one weight per channel or a
// single shared scalar.
290 CV_Assert(numChannels == blobs[0].total());
291 weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
292 l.getParameters()["channel_shared"] = blobs[0].total() == 1;
294 addConstantData("weights", weights, l);
295 l.getParameters()["across_spatial"] = acrossSpatial;
296 return Ptr<BackendNode>(new InfEngineBackendNode(l));
// Non-4D inputs: use IE's Global Response Normalization layer instead.
300 InferenceEngine::Builder::GRNLayer ieLayer(name);
// NOTE(review): epsilon is passed as GRN's beta AND as its bias parameter
// below — confirm this matches the intended GRN formulation.
301 ieLayer.setBeta(epsilon);
303 InferenceEngine::Builder::Layer l = ieLayer;
304 l.getParameters()["bias"] = epsilon;
306 return Ptr<BackendNode>(new InfEngineBackendNode(l));
309 #endif  // HAVE_INF_ENGINE
// Bounds of the normalized axis range; -1 sentinels are resolved to concrete
// axis indices in finalize().
312 int startAxis, endAxis;
// Factory entry point used by the DNN layer registry.
// NOTE(review): "&params" appears mojibake-garbled as "¶ms" in this view —
// fix the file's encoding rather than the code.
316 Ptr<NormalizeBBoxLayer> NormalizeBBoxLayer::create(const LayerParams &params)
318 return Ptr<NormalizeBBoxLayer>(new NormalizeBBoxLayerImpl(params));