/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../op_vkcom.hpp"
#include "opencv2/core/hal/hal.hpp"
#include "opencv2/core/hal/intrin.hpp"

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
using namespace cv::dnn::ocl4dnn;
#endif

namespace cv
{
namespace dnn
{
class BaseConvolutionLayerImpl : public ConvolutionLayer
{
public:
    bool fusedWeights, fusedBias;
    std::vector<double> weightsMultipliers;

    BaseConvolutionLayerImpl(const LayerParams &params)
    {
        setParamsFrom(params);
        getConvolutionKernelParams(params, kernel_size, pads_begin, pads_end, strides, dilations, padMode);

        numOutput = params.get<int>("num_output");
        int ngroups = params.get<int>("group", 1);
        CV_Assert(numOutput % ngroups == 0);

        if (kernel_size.size() == 2) {
            kernel = Size(kernel_size[1], kernel_size[0]);
            stride = Size(strides[1], strides[0]);
            for (int i = 0; i < pads_begin.size(); i++) {
                if (pads_begin[i] != pads_end[i])
                    CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
            }
            pad = Size(pads_begin[1], pads_begin[0]);
            dilation = Size(dilations[1], dilations[0]);

            adjust_pads.push_back(params.get<int>("adj_h", 0));
            adjust_pads.push_back(params.get<int>("adj_w", 0));

            adjustPad.height = adjust_pads[0];
            adjustPad.width = adjust_pads[1];
            CV_Assert(adjustPad.width < stride.width &&
                      adjustPad.height < stride.height);
        }
        fusedWeights = false;
        fusedBias = false;
    }
    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
    {
        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert(inputs.size() > 0);

        CV_Assert(blobs.size() == 1 || blobs.size() == 2);
        CV_Assert(inputs[0].dims == outputs[0].dims);
        CV_Assert(blobs[0].dims == kernel_size.size() + 2);
        for (int i = 0; i < kernel_size.size(); i++) {
            CV_Assert(blobs[0].size[i + 2] == kernel_size[i]);
        }

        const Mat &input = inputs[0];
        CV_Assert((input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
        for (size_t i = 0; i < inputs.size(); i++)
        {
            CV_Assert(inputs[i].type() == input.type());
            CV_Assert((inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
            for (int j = 0; j < inputs[i].dims; j++) {
                CV_Assert(inputs[i].size[j] == input.size[j]);
            }
        }

        std::vector<int> inpShape;
        std::vector<int> outShape;
        for (int i = 2; i < inputs[0].dims; i++) {
            inpShape.push_back(inputs[0].size[i]);
            outShape.push_back(outputs[0].size[i]);
        }
        getConvPoolPaddings(inpShape, kernel_size, strides, padMode, pads_begin, pads_end);
        if (pads_begin.size() == 2) {
            for (int i = 0; i < pads_begin.size(); i++) {
                if (pads_begin[i] != pads_end[i])
                    CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
            }
            pad = Size(pads_begin[1], pads_begin[0]);
        }
        fusedWeights = false;
        fusedBias = false;
    }

    bool hasBias() const
    {
        return blobs.size() >= 2;
    }
    virtual MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const = 0;

    bool is1x1() const
    {
        return (kernel.height == 1 && kernel.width == 1) &&
               (stride.height == 1 && stride.width == 1) &&
               (dilation.height == 1 && dilation.width == 1);
    }
    virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
    {
        Mat w, b;
        top->getScaleShift(w, b);
        if (!w.empty() || !b.empty())
        {
            fuseWeights(w, b);
            fusedWeights = fusedWeights || !w.empty();
            fusedBias = fusedBias || (hasBias() && !w.empty()) || !b.empty();
            return true;
        }
        return false;
    }

    virtual void fuseWeights(const Mat& w_, const Mat& b_) = 0;
    virtual void applyHalideScheduler(Ptr<BackendNode>& node,
                                      const std::vector<Mat*> &inputs,
                                      const std::vector<Mat> &outputs,
                                      int targetId) const CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        if (targetId != DNN_TARGET_CPU)
        {
            Layer::applyHalideScheduler(node, inputs, outputs, targetId);
            return;
        }
        Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"), yi("yi"), yo("yo"), co("co"), ci("ci");
        Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
        Halide::Func& padded_input = node.dynamicCast<HalideBackendNode>()->funcs[0];

        int outW, outH, outC, outN;
        getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);

        if (outW == 1 || outH <= 2)
            return;

        if (is1x1() || outC <= 16)
            top.reorder(x, c, y)
               .split(y, yo, yi, 2)
               .fuse(yo, n, tile)
               .parallel(tile)
               .unroll(yi)
               .vectorize(x, outW >= 16 ? 16 : outW);
        else
            top.reorder(x, c, y)
               .split(y, yo, yi, 2)
               .split(c, co, ci, 16)
               .fuse(yo, co, tile).fuse(n, tile, tile)
               .parallel(tile)
               .unroll(yi)
               .vectorize(x, outW >= 16 ? 16 : outW);
        padded_input.compute_at(top, yi);
#endif  // HAVE_HALIDE
    }
};
#define IS_POWER_LAYER(layer) \
    (!layer.empty() && !layer->type.compare("Power"))
// TODO: perform convolution and bias addition simultaneously for cache optimization
class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
{
public:
    enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
    Mat weightsMat;
    std::vector<float> biasvec;
    std::vector<float> reluslope;
    Ptr<ActivationLayer> activ;

#ifdef HAVE_OPENCL
    Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
    std::vector<UMat> umat_blobs;
    bool newActiv;
    ocl4dnnFusedActiv_t activType;
    float power;
#endif
    ConvolutionLayerImpl(const LayerParams &params) : BaseConvolutionLayerImpl(params)
    {
#ifdef HAVE_OPENCL
        newActiv = false;
        activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
        power = 0.f;
#endif
    }

    MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
    {
        Size out(outShape[3], outShape[2]);
        int inpGroupCn = blobs[0].size[1];
        int ksize = inpGroupCn * kernel.height * kernel.width;
        return shape(out.area(), ksize);
    }
    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
        {
            if (kernel_size.size() == 3)
                return preferableTarget == DNN_TARGET_CPU;
            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4) ||
                   (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
        }
        else
#endif
        {
            if (kernel_size.size() != 2)
                return false;
            return backendId == DNN_BACKEND_OPENCV ||
                   backendId == DNN_BACKEND_HALIDE ||
                   (backendId == DNN_BACKEND_VKCOM && haveVulkan());
        }
    }
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_Assert(blobs.size() != 0);
        CV_Assert(!hasBias() || blobs[1].total() == (size_t)blobs[0].size[0]);
        CV_Assert(inputs.size() == (size_t)1);

        internals.clear();

        CV_Assert(inputs.size() != 0);
        std::vector<int> inpShape(inputs[0].begin() + 2, inputs[0].end());

        int outCn = blobs[0].size[0];
        std::vector<int> outShape;
        outShape.push_back(inputs[0][0]);
        outShape.push_back(outCn);

        int inpCn = inputs[0][1];
        if (padMode.empty())
        {
            for (int i = 0; i < inpShape.size(); i++)
                outShape.push_back((inpShape[i] + pads_begin[i] + pads_end[i] - dilations[i] * (kernel_size[i] - 1) - 1) / strides[i] + 1);
        }
        else
        {
            getConvPoolOutParams(inpShape, kernel_size, strides, padMode, dilations, outShape);
        }
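        // Worked example (added note, not in the original source): a 224x224
        // input with symmetric padding 1, a 3x3 kernel, stride 2 and dilation 1
        // yields (224 + 1 + 1 - 1*(3 - 1) - 1)/2 + 1 = 112 along each spatial axis.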
        int ngroups = inpCn / blobs[0].size[1];
        if (ngroups == 0 || ngroups * blobs[0].size[1] != inpCn)
            CV_Error(Error::StsError, format("Number of input channels should "
                     "be multiple of %d but got %d", blobs[0].size[1], inpCn));
        CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);

        outputs.resize(1, outShape);

        return false;
    }
    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
    {
        BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);

        CV_Assert(!blobs.empty());
        const int outCn = blobs[0].size[0];
        // prepare weightsMat, where each row is aligned and has enough zero padding on the right to
        // use vectorized (i.e. with intrinsics) loops without tail processing
        Mat wm = blobs[0].reshape(1, outCn);
        if( wm.step1() % VEC_ALIGN != 0 )
        {
            int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
            Mat wm_buffer = Mat(outCn, newcols, wm.type());
            Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
            wm_padding.setTo(Scalar::all(0.));
            Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
            wm.copyTo(wm_aligned);
            wm = wm_aligned;
        }
        weightsMat = wm;
        weightsMultipliers.assign(outCn, 1.0);
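        // Illustrative numbers for the alignment above (added note): a 3x3
        // kernel over 3 input channels gives rows of 27 floats, padded with
        // zeros up to alignSize(27, VEC_ALIGN=8) = 32 columns, so the SIMD
        // loops in ParallelConv never need a scalar tail over the weights.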
        Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
        biasvec.resize(outCn+2);
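        // The two extra slots (outCn+2) let ParallelConv read biases and ReLU
        // slopes for output channels in pairs (i, i+1) without a bounds check;
        // they are backfilled with a copy of the last real value.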
        if( biasMat.empty() )
        {
            for(int i = 0; i < outCn; i++ )
                biasvec[i] = 0.f;
        }
        else
        {
            for(int i = 0; i < outCn; i++ )
                biasvec[i] = biasMat.at<float>(i);
        }
#ifdef HAVE_OPENCL
        convolutionOp.release();
#endif
    }
    bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
    {
        if (!activ.empty() && !layer.empty())
            return false;

        activ = layer;
        if (activ.empty())
            reluslope.clear();
#ifdef HAVE_OPENCL
        newActiv = true;
        activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;

        if (IS_DNN_OPENCL_TARGET(preferableTarget))
        {
            Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
            if (!activ_power.empty())
            {
                if (activ_power->scale != 1.f || activ_power->shift != 0.f)
                {
                    const int outCh = blobs[0].size[0];
                    fuseWeights(Mat(1, outCh, CV_32F, Scalar(activ_power->scale)),
                                Mat(1, outCh, CV_32F, Scalar(activ_power->shift)));
                }
                power = activ_power->power;
                activType = OCL4DNN_CONV_FUSED_ACTIV_POWER;
            }
            Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
            if (!activ_tanh.empty())
            {
                activType = OCL4DNN_CONV_FUSED_ACTIV_TANH;
            }
        }
#endif
        return !activ.empty();
    }
    void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
    {
        // Convolution weights have OIHW data layout. Fusing parameters as in
        //     (conv(I) + b1) * w + b2
        // means scaling the convolution's weights by w and replacing its bias with b1*w + b2.
        const int outCn = weightsMat.size[0];
        Mat w = w_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(w_.at<float>(0))) : w_;
        Mat b = b_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(b_.at<float>(0))) : b_;
        CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
                    w.empty() || outCn == w.total(), b.empty() || outCn == b.total());

        if (!w.empty())
        {
            // Keep origin weights unchanged.
            if (weightsMat.data == blobs[0].data)
                weightsMat = weightsMat.clone();

            Mat originWeights = blobs[0].reshape(1, outCn);
            for (int i = 0; i < outCn; ++i)
            {
                double wi = w.at<float>(i);
                weightsMultipliers[i] *= wi;
                cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
                biasvec[i] *= wi;
            }
        }

        if (!b.empty())
        {
            for (int i = 0; i < outCn; ++i)
                biasvec[i] += b.at<float>(i);
        }

        biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
    }
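    // Worked example for the fusion above (illustrative): folding a scaling
    // node y = 2*x + 3 into the convolution turns each weights row W_i into
    // 2*W_i and each bias b_i into 2*b_i + 3, so 2*(conv(I) + b) + 3 becomes
    // a single convolution with the rescaled parameters.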
    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        int out_channel = blobs[0].size[0];
        bool has_bias = hasBias() || fusedBias;
        int filter_size[2] = {kernel.height, kernel.width};
        int pad_size[2] = {pad.height, pad.width};
        int stride_size[2] = {stride.height, stride.width};
        int dilation_size[2] = {dilation.height, dilation.width};
        int activation = 0;
        vkcom::Tensor input_tensor = VkComTensor(inputs[0]);
        int in_channel = input_tensor.dimSize(1);
        int group = in_channel / blobs[0].size[1];

        // TODO: support group > 1
        if (group != 1)
            return Ptr<BackendNode>();

        int padding_mode;
        if (padMode.empty())
        {
            padding_mode = vkcom::kPaddingModeCaffe;
        }
        else if (padMode == "VALID")
        {
            padding_mode = vkcom::kPaddingModeValid;
        }
        else if (padMode == "SAME")
        {
            padding_mode = vkcom::kPaddingModeSame;
        }
        else
            CV_Error(Error::StsError, "Unsupported padding mode " + padMode);

        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConv(out_channel, has_bias,
                    filter_size, pad_size,
                    stride_size, dilation_size,
                    activation, group,
                    padding_mode));

        std::vector<Ptr<BackendWrapper> > blobsWrapper;

        if (fusedWeights)
        {
            Mat wm;
            weightsMat.copyTo(wm); // to handle the case of isContinuous() == false
            wm = wm.reshape(1, blobs[0].dims, blobs[0].size);
            blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(wm)));
        }
        else
        {
            blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(blobs[0])));
        }

        if (has_bias)
        {
            Mat biasesMat({out_channel}, CV_32F, &biasvec[0]);
            blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(biasesMat)));
        }

        return Ptr<BackendNode>(new VkComBackendNode(inputs, op, blobsWrapper));
#endif  // HAVE_VULKAN
        return Ptr<BackendNode>();
    }
    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);

        const int inpCn = inputBuffer.channels();
        const int outCn = blobs[0].size[0];
        const int inpGroupCn = blobs[0].size[1];
        const int group = inpCn / inpGroupCn;
        const int outGroupCn = outCn / group;

        Halide::Buffer<float> weights = wrapToHalideBuffer(blobs[0]);

        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Func padded_input(name + "_constant_exterior");
        if (pad.width || pad.height)
        {
            Halide::Func bounded =
                Halide::BoundaryConditions::constant_exterior(inputBuffer, 0);
            padded_input(x, y, c, n) = bounded(x, y, c, n);
        }
        else
        {
            padded_input(x, y, c, n) = inputBuffer(x, y, c, n);
        }

        Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
        Halide::Expr kx = x * stride.width - pad.width + r.x * dilation.width;
        Halide::Expr ky = y * stride.height - pad.height + r.y * dilation.height;
        Halide::Expr kc = r.z;
        for (int i = 1; i < group; ++i)
        {
            kc = select(c < outGroupCn * i, kc, inpGroupCn * i + r.z);
        }
        Halide::Expr topExpr = sum(padded_input(kx, ky, kc, n) *
                                   weights(r.x, r.y, r.z, c));
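        // In words (added note): r ranges over kernel x, kernel y and the input
        // channels of one group, so topExpr is the plain dot product
        //   out(x, y, c, n) = sum_r in(x*stride - pad + r*dilation, ..., kc, n) * W(r, c),
        // with kc remapped per group by the select() chain above.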
        if (hasBias())
        {
            Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outCn});
            topExpr += bias(c);
        }
        top(x, y, c, n) = topExpr;
        return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
        CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);

        const int inpCn = input->dims[input->dims.size() - 2];  // NOTE: input->dims are reversed (WHIO or WHDIO)
        const int outCn = blobs[0].size[0];
        const int inpGroupCn = blobs[0].size[1];
        const int group = inpCn / inpGroupCn;

        InferenceEngine::Layout layout = (input->dims.size() == 4) ? InferenceEngine::Layout::OIHW :
                                                                     InferenceEngine::Layout::NCDHW;

        auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
        if (fusedWeights)
        {
            if (weightsMat.isContinuous())
            {
                Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
                ieWeights = wrapToInfEngineBlob(cvWeights, layout);
            }
            else
            {
                ieWeights = InferenceEngine::make_shared_blob<float>(
                                InferenceEngine::Precision::FP32, layout,
                                ieWeights->dims());
                ieWeights->allocate();

                Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
                Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
                cvWeights.copyTo(newWeights);
            }
        }
        InferenceEngine::Blob::Ptr ieBiases;
        if (hasBias() || fusedBias)
        {
            Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
            ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
        }

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
        InferenceEngine::Builder::ConvolutionLayer ieLayer(name);

        ieLayer.setKernel(kernel_size);
        ieLayer.setStrides(strides);
        ieLayer.setDilation(dilations);
        ieLayer.setPaddingsBegin(pads_begin);
        ieLayer.setPaddingsEnd(pads_end);
        ieLayer.setGroup((size_t)group);
        ieLayer.setOutDepth((size_t)outCn);

        InferenceEngine::Builder::Layer l = ieLayer;
        addConstantData("weights", ieWeights, l);
        if (ieBiases)
            addConstantData("biases", ieBiases, l);

        if (!padMode.empty())
            l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");

        return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "Convolution";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::ConvolutionLayer> ieLayer(new InferenceEngine::ConvolutionLayer(lp));

#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
        ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
        ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
        ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
        ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
        ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
        ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
        ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
        ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
        ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
        ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
        ieLayer->params["output"] = format("%d", outCn);
        ieLayer->params["kernel"] = format("%d,%d,%d,%d", outCn, inpGroupCn, kernel.height, kernel.width);
        ieLayer->params["pads_begin"] = format("%d,%d", pad.height, pad.width);
        ieLayer->params["pads_end"] = format("%d,%d", pad.height, pad.width);
        ieLayer->params["strides"] = format("%d,%d", stride.height, stride.width);
        ieLayer->params["dilations"] = format("%d,%d", dilation.height, dilation.width);
#else
        ieLayer->_kernel_x = kernel.width;
        ieLayer->_kernel_y = kernel.height;
        ieLayer->_stride_x = stride.width;
        ieLayer->_stride_y = stride.height;
        ieLayer->_padding_x = pad.width;
        ieLayer->_padding_y = pad.height;
        ieLayer->_dilation_x = dilation.width;
        ieLayer->_dilation_y = dilation.height;
#endif
        ieLayer->_out_depth = outCn;
        ieLayer->_group = group;

        ieLayer->_weights = ieWeights;
        if (ieBiases)
            ieLayer->_biases = ieBiases;
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }
    class ParallelConv : public cv::ParallelLoopBody
    {
    public:
        enum { BLK_SIZE = 32, BLK_SIZE_CN = 64 };

        const Mat* input_;
        const Mat* weights_;
        Mat* output_;
        int outShape[4];
        Size kernel_, pad_, stride_, dilation_;
        int ngroups_, nstripes_;
        std::vector<int> ofstab_;
        const std::vector<float>* biasvec_;
        const std::vector<float>* reluslope_;
        const ActivationLayer* activ_;
        bool is1x1_;
        bool useAVX;
        bool useAVX2;
        bool useAVX512;

        ParallelConv()
            : input_(0), weights_(0), output_(0), ngroups_(0), nstripes_(0),
              biasvec_(0), reluslope_(0), activ_(0), is1x1_(false), useAVX(false), useAVX2(false), useAVX512(false)
        {}
        static void run( const Mat& input, Mat& output, const Mat& weights,
                         const std::vector<float>& biasvec,
                         const std::vector<float>& reluslope,
                         Size kernel, Size pad, Size stride, Size dilation,
                         const ActivationLayer* activ, int ngroups, int nstripes )
        {
            CV_Assert_N(
                       input.dims == 4 && output.dims == 4,
                       input.size[0] == output.size[0],
                       weights.rows == output.size[1],
                       weights.cols == (input.size[1]/ngroups)*kernel.width*kernel.height,
                       input.type() == output.type(),
                       input.type() == weights.type(),
                       input.type() == CV_32FC1,
                       input.isContinuous(),
                       output.isContinuous(),
                       biasvec.size() == (size_t)output.size[1]+2);
            ParallelConv p;

            p.input_ = &input;
            p.weights_ = &weights;
            p.output_ = &output;
            for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i];
            p.outShape[1] /= ngroups;
            p.kernel_ = kernel; p.pad_ = pad; p.stride_ = stride; p.dilation_ = dilation;
            p.ngroups_ = ngroups;
            p.nstripes_ = nstripes;

            int inpCnAll = input.size[1], width = input.size[3], height = input.size[2];
            int inpCn = inpCnAll / ngroups;
            p.is1x1_ = kernel == Size(1,1) && pad == Size(0, 0);
            p.useAVX = checkHardwareSupport(CPU_AVX);
            p.useAVX2 = checkHardwareSupport(CPU_AVX2);
            p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;

            int ncn = std::min(inpCn, (int)BLK_SIZE_CN);
            p.ofstab_.resize(kernel.width*kernel.height*ncn);
            int* ofstab = &p.ofstab_[0];

            for( int k = 0; k < ncn; k++ )
                for( int k_r = 0; k_r < kernel.height; k_r++ )
                    for( int k_c = 0; k_c < kernel.width; k_c++ )
                        ofstab[(k*kernel.height + k_r)*kernel.width + k_c] =
                            (k*height + k_r*dilation.height)*width + k_c*dilation.width;
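            // Reading the offset table (added note): for a 3x3 kernel with
            // dilation 1, entry (k=0, k_r=1, k_c=2) equals width + 2, i.e. the
            // input element one row down and two pixels right of the aperture
            // origin; the im2row loops below then fetch patches by indexing
            // imgptr[ofstab[k]] instead of recomputing coordinates.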
            p.biasvec_ = &biasvec;
            p.reluslope_ = &reluslope;
            p.activ_ = p.reluslope_->empty() ? activ : 0;

            parallel_for_(Range(0, nstripes), p, nstripes);
        }
        virtual void operator ()(const Range &r0) const CV_OVERRIDE
        {
            const int valign = ConvolutionLayerImpl::VEC_ALIGN;
            int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
            int outW = output_->size[3], outH = output_->size[2], outCn = output_->size[1]/ngroups;
            int width = input_->size[3], height = input_->size[2], inpCn = input_->size[1]/ngroups;
            const int nstripes = nstripes_;
            int kernel_w = kernel_.width, kernel_h = kernel_.height;
            int pad_w = pad_.width, pad_h = pad_.height;
            int stride_w = stride_.width, stride_h = stride_.height;
            int dilation_w = dilation_.width, dilation_h = dilation_.height;
            int karea = kernel_w*kernel_h;
            int i, j, k;

            size_t inpPlaneSize = width*height;
            size_t outPlaneSize = outW*outH;
            bool is1x1 = is1x1_;

            int stripesPerSample;
            size_t stripeSize;
            Range r = r0;

            if( nstripes >= batchSize*2 )
            {
                stripesPerSample = nstripes/batchSize;
                stripeSize = alignSize((outPlaneSize + stripesPerSample - 1)/stripesPerSample, valign);
                stripeSize = std::min(stripeSize, outPlaneSize);
            }
            else
            {
                stripesPerSample = 1;
                int samplesPerStripe = std::max((batchSize + nstripes - 1)/nstripes, 1);
                r.start *= samplesPerStripe;
                r.end *= samplesPerStripe;
                stripeSize = outPlaneSize;
            }
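            // Illustrative split (added note): with batchSize = 2 and
            // nstripes = 8 the first branch is taken, giving 4 stripes per
            // sample, each covering about a quarter of one output plane
            // (rounded up to the SIMD alignment valign).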
            const float* data_inp0_ = input_->ptr<float>();
            const int* ofstab = &ofstab_[0];
            const float* wptr_orig_ = weights_->ptr<float>();
            size_t wstep = weights_->step1();
            const float* biasptr_ = &biasvec_->at(0);
            const float* reluptr_ = reluslope_->empty() ? 0 : &reluslope_->at(0);
            float* data_out0_ = output_->ptr<float>();
            size_t rowbufsz = (size_t)karea*BLK_SIZE_CN*BLK_SIZE;
            AutoBuffer<float> rowbuf0_(rowbufsz + valign);
            float* rowbuf0 = alignPtr(rowbuf0_.data(), (int)(valign*sizeof(float)));

            // we clear the buffer once; ultimately, it lets us avoid
            // tail processing after running the unrolled/vectorized loop.
            // the main idea is to make sure that the tail (a.k.a. padding) of each row
            // (i.e. the elements with indices between vsz=karea*ncn and vsz_a)
            // does not contain NaNs or Infs. Because the padding in the weights
            // matrix is explicitly initialized with 0's, we handle all other
            // cases nicely, i.e. we can skip explicit re-initialization
            // of the padding - we just retain elements from the previous iteration
            // of the loop over channels (cn0).
            memset(rowbuf0, 0, rowbufsz*sizeof(rowbuf0[0]) );
            for( int stripe = r.start; stripe < r.end; stripe++ )
            {
                int subsampleIdx = stripe/stripesPerSample;
                if( subsampleIdx >= batchSize )
                    break;
                int stripeStart = (int)((stripe - subsampleIdx*stripesPerSample)*stripeSize);
                int stripeEnd = (int)std::min(stripeStart + stripeSize, outPlaneSize);
                const float* data_inp0 = data_inp0_ + subsampleIdx*inpPlaneSize*inpCn;
                float* data_out0 = data_out0_ + subsampleIdx*outPlaneSize*outCn;
                int startOutCn = (subsampleIdx % ngroups)*outCn;
                const float* wptr_orig = wptr_orig_ + wstep*startOutCn;
                const float* biasptr = biasptr_ + startOutCn;

                for( int cn0 = 0; cn0 < inpCn; cn0 += BLK_SIZE_CN )
                {
                    int cn1 = std::min(cn0 + BLK_SIZE_CN, inpCn);
                    int ncn = cn1 - cn0, vsz = karea*ncn;
                    int vsz_a = (int)alignSize(vsz, valign);
                    const float* wptr = wptr_orig + cn0*karea;
                    // we apply [Channels][P]ReLU (if any) during the final pass only.
                    const float* relu = cn1 == inpCn && reluptr_ ? reluptr_ + startOutCn : 0;
                    for( int ofs0 = stripeStart; ofs0 < stripeEnd; ofs0 += BLK_SIZE )
                    {
                        int ofs, ofs1 = std::min(ofs0 + BLK_SIZE, stripeEnd);
                        int out_i = ofs0 / outW;
                        int out_j = ofs0 - out_i * outW;

                        // do im2row for a part of input tensor
                        float* rowbuf = rowbuf0;
                        for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
                        {
                            int delta = std::min(ofs1 - ofs, outW - out_j);
                            int out_j1 = out_j + delta;
                            int in_i = out_i * stride_h - pad_h;
                            int in_j = out_j * stride_w - pad_w;
                            const float* imgptr = data_inp0 + (cn0*height + in_i)*width + in_j;
                            ofs += delta;

                            if( is1x1 )
                            {
                                for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
                                {
                                    for( k = 0; k < vsz; k++ )
                                        rowbuf[k] = imgptr[k*inpPlaneSize];
                                }
                            }
                            else
                            {
                                bool ok_i = 0 <= in_i && in_i < height - (kernel_h-1)*dilation_h;
                                int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
                                int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);

                                for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
                                {
                                    // this condition should be true for most of the tensor elements, i.e.
                                    // most of the time the kernel aperture is inside the tensor X-Y plane.
                                    if( ok_i && out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
                                    {
                                        for( k = 0; k < vsz; k++ )
                                        {
                                            int k1 = ofstab[k];
                                            float v0 = imgptr[k1];
                                            float v1 = imgptr[k1 + stride_w];
                                            rowbuf[k] = v0;
                                            rowbuf[k+vsz_a] = v1;
                                        }
                                        out_j++;
                                        rowbuf += vsz_a;
                                        imgptr += stride_w;
                                        in_j += stride_w;
                                    }
                                    else
                                    {
                                        int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
                                        int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);

                                        // here some non-continuous sub-row of the row will not be
                                        // filled from the tensor; we need to make sure that the uncovered
                                        // elements are explicitly set to 0's. the easiest way is to
                                        // set all the elements to 0's before the loop.
                                        memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
                                        for( k = 0; k < ncn; k++ )
                                        {
                                            for( i = i0; i < i1; i++ )
                                            {
                                                for( j = j0; j < j1; j++ )
                                                {
                                                    int imgofs = k*(width*height) + i*(dilation_h*width) + j*dilation_w;
                                                    rowbuf[(k*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
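                        // Layout reminder (added note): each filled row of
                        // rowbuf now holds one unrolled kernel_h x kernel_w x ncn
                        // input patch, so the convolution below reduces to dot
                        // products between these rows and rows of the weights
                        // matrix, one row per output channel.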
                        // now compute dot product of the weights
                        // and im2row-transformed part of the tensor
                        int bsz = ofs1 - ofs0;
                    #if CV_TRY_AVX512_SKX
                        /* AVX512 convolution requires an alignment of 16, and ROI is only there for larger vector sizes */
                        if(useAVX512)
                            opt_AVX512_SKX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                                     outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                        else
                    #endif
                    #if CV_TRY_AVX2
                        if(useAVX2)
                            opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                               outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                        else
                    #endif
                    #if CV_TRY_AVX
                        if(useAVX)
                            opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                              outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                        else
                    #endif
                        for( int i = 0; i < outCn; i += 2 )
                        {
                            const float* wptr0 = wptr + i*wstep;
                            const float* wptr1 = wptr0 + wstep;
                            float* outptr0 = data_out0 + ofs0 + i*outPlaneSize;
                            float* outptr1 = outptr0 + outPlaneSize;
                            float bias0 = biasptr[i], bias1 = biasptr[i+1];
                            float r0 = 1.f, r1 = 1.f;

                            if( i+1 >= outCn )
                            {
                                wptr1 = wptr0;
                                outptr1 = outptr0;
                                bias1 = bias0;
                            }

                            if( relu )
                            {
                                r0 = relu[i]; r1 = relu[i+1];
                                if( i+1 >= outCn )
                                    r1 = r0;
                            }

                            int j = 0;
                        #if CV_SIMD128
                            v_float32x4 vr0 = v_setall_f32(r0), vr1 = v_setall_f32(r1), z = v_setzero_f32();

                            for( ; j <= bsz - 4; j += 4 )
                            {
                                const float* rptr = rowbuf0 + j*vsz_a;
                                v_float32x4 s0, s1;

                                if( cn0 == 0 )
                                {
                                    s0 = v_setall_f32(bias0);
                                    s1 = v_setall_f32(bias1);
                                }
                                else
                                {
                                    s0 = v_load(outptr0 + j);
                                    s1 = v_load(outptr1 + j);
                                }

                                v_float32x4 vs00 = v_setzero_f32(), vs01 = v_setzero_f32(),
                                            vs02 = v_setzero_f32(), vs03 = v_setzero_f32(),
                                            vs10 = v_setzero_f32(), vs11 = v_setzero_f32(),
                                            vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
                                for( k = 0; k < vsz; k += 4, rptr += 4 )
                                {
                                    v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k);
                                    v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a),
                                                r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3);

                                    vs00 += w0*r0;
                                    vs01 += w0*r1;
                                    vs02 += w0*r2;
                                    vs03 += w0*r3;
                                    vs10 += w1*r0;
                                    vs11 += w1*r1;
                                    vs12 += w1*r2;
                                    vs13 += w1*r3;
                                }
                                s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
                                s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
                                if( relu )
                                {
                                    s0 = v_select(s0 > z, s0, s0*vr0);
                                    s1 = v_select(s1 > z, s1, s1*vr1);
                                }

                                v_store(outptr0 + j, s0);
                                v_store(outptr1 + j, s1);
                            }
                        #endif
                            for( ; j < bsz; j++ )
                            {
                                const float* rptr = rowbuf0 + j*vsz_a;
                                float s00, s10;

                                if( cn0 == 0 )
                                {
                                    s00 = bias0;
                                    s10 = bias1;
                                }
                                else
                                {
                                    s00 = outptr0[j];
                                    s10 = outptr1[j];
                                }

                                for( k = 0; k < vsz; k++ )
                                {
                                    float r0 = rptr[k];
                                    s00 += wptr0[k]*r0;
                                    s10 += wptr1[k]*r0;
                                }
                                if( relu )
                                {
                                    s00 = s00 > 0.f ? s00 : s00*r0;
                                    s10 = s10 > 0.f ? s10 : s10*r1;
                                }

                                outptr0[j] = s00;
                                outptr1[j] = s10;
                            }
                        }
                    }
                }
                if( activ_ )
                    activ_->forwardSlice(data_out0 + stripeStart, data_out0 + stripeStart,
                                         (int)(stripeEnd - stripeStart),
                                         outPlaneSize, startOutCn, startOutCn + outCn);
            }
        }
    };
#ifdef HAVE_OPENCL
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        bool use_half = (inps.depth() == CV_16S);
        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        CV_Assert(outputs.size() == 1);
        for (int i = 0; i < inputs.size(); ++i)
            CV_Assert(inputs[i].u != outputs[0].u);

        if (umat_blobs.empty())
        {
            size_t n = blobs.size();
            umat_blobs.resize(n);
            for (size_t i = 0; i < n; i++)
            {
                blobs[i].copyTo(umat_blobs[i]);
            }
        }

        if (convolutionOp.empty())
        {
            OCL4DNNConvConfig config;
            config.in_shape = shape(inputs[0]);
            config.out_shape = shape(outputs[0]);
            config.kernel = kernel;
            config.pad = pad;
            config.stride = stride;
            config.dilation = dilation;
            config.group = inputs[0].size[1] / umat_blobs[0].size[1];
            config.bias_term = hasBias();
            config.use_half = use_half;

            convolutionOp = Ptr<OCL4DNNConvSpatial<float> >(new OCL4DNNConvSpatial<float>(config));
        }
        int outCn = umat_blobs[0].size[0];

        reluslope.clear();
        if( activ )
        {
            Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
            if( !activ_relu.empty() )
            {
                reluslope.assign(outCn+2, activ_relu->negativeSlope);
                activType = OCL4DNN_CONV_FUSED_ACTIV_RELU;
            }

            Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
            if( !activ_relu6.empty() )
            {
                reluslope.resize(2);
                reluslope[0] = activ_relu6->minValue;
                reluslope[1] = activ_relu6->maxValue;
                activType = OCL4DNN_CONV_FUSED_ACTIV_RELU6;
            }

            Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
            if( !activ_chprelu.empty() )
            {
                const Mat& m = activ_chprelu->blobs[0];
                CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
                const float* mdata = m.ptr<float>();
                reluslope.resize(outCn+2);
                std::copy(mdata, mdata + outCn, reluslope.begin());
                reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
                activType = OCL4DNN_CONV_FUSED_ACTIV_PRELU;
            }
        }

        if (fusedWeights)
        {
            weightsMat.copyTo(umat_blobs[0]);
            fusedWeights = false;
        }

        if (fusedBias)
        {
            if ( umat_blobs.size() < 2 )
                umat_blobs.resize(2);
            umat_blobs[1] = UMat(biasvec, true);
            convolutionOp->setBias(true);
            fusedBias = false;
        }

        if ( newActiv )
        {
            if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU )
            {
                CV_Assert(!reluslope.empty());
                convolutionOp->setActivReLU(true, reluslope[0]);
            }
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_PRELU )
            {
                CV_Assert(!reluslope.empty());
                convolutionOp->setActivPReLU(true, reluslope);
            }
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_POWER )
            {
                convolutionOp->setActivPower(true, power);
            }
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_TANH )
            {
                convolutionOp->setActivTanh(true);
            }
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU6 )
            {
                convolutionOp->setActivReLU6(true, reluslope[0], reluslope[1]);
            }
            else
            {
                convolutionOp->setActivReLU(false, 0);
                convolutionOp->setActivPReLU(false, reluslope);
                convolutionOp->setActivPower(false, 1.f);
                convolutionOp->setActivTanh(false);
                convolutionOp->setActivReLU6(false, 0, 0);
            }
            newActiv = false;
        }

        UMat& inpMat = inputs[0];
        UMat& outMat = outputs[0];
        int batch_size = inpMat.size[0];

        return convolutionOp->Forward(inpMat,
                                      inputs.size() == 2 ? inputs[1] : UMat(),
                                      umat_blobs[0],
                                      umat_blobs.size() > 1 ? umat_blobs[1] : UMat(),
                                      outMat,
                                      batch_size);
    }
#endif
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
               name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
               kernel.width, kernel.height, pad.width, pad.height,
               stride.width, stride.height, dilation.width, dilation.height);*/
        CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
                    outputs.size() == 1, inputs[0].data != outputs[0].data);

        if (inputs[0].dims == 5) {
            CV_Error(Error::StsNotImplemented, "Convolution3D layer is not supported on OCV backend");
        }

        int ngroups = inputs[0].size[1]/blobs[0].size[1];
        CV_Assert(outputs[0].size[1] % ngroups == 0);
        int outCn = blobs[0].size[0];
        reluslope.clear();
        if( activ )
        {
            Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
            if( !activ_relu.empty() )
            {
                reluslope.assign(outCn+2, activ_relu->negativeSlope);
            }

            Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
            if( !activ_chprelu.empty() )
            {
                const Mat& m = activ_chprelu->blobs[0];
                CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
                const float* mdata = m.ptr<float>();
                reluslope.resize(outCn+2);
                std::copy(mdata, mdata + outCn, reluslope.begin());
                reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
            }
        }

        int nstripes = std::max(getNumThreads(), 1);

        ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
                          kernel, pad, stride, dilation, activ.get(), ngroups, nstripes);
    }
    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_Assert(inputs.size() == outputs.size());

        int64 flops = 0;
        for (int i = 0; i < inputs.size(); i++)
        {
            flops += total(outputs[i])*(CV_BIG_INT(2)*kernel.area()*inputs[i][1] + 1);
        }

        return flops;
    }
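    // Sanity check of the estimate above (illustrative): a 3x3 convolution
    // over 64 input channels producing a 1x64x56x56 output costs about
    // 56*56*64 * (2*9*64 + 1) ~= 2.3e8 FLOPs, i.e. one multiply and one add
    // per weight plus one add for the bias.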
};

class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
{
public:
    Mat weightsMat, biasesMat;
    UMat umat_weights;
    UMat umat_biases;

    DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}

    MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
    {
        int inpCn = inpShape[1];
        int inpH = inpShape[2];
        int inpW = inpShape[3];
        int outCn = outShape[1];
        int ngroups = inpCn / blobs[0].size[0];
        int outGroupCn = outCn / ngroups;
        int ksize = outGroupCn * kernel.height * kernel.width;
        return shape(ksize, inpH * inpW);
    }
    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
        const int group = numOutput / outGroupCn;

        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
        {
            if (kernel_size.size() == 3)
                CV_Error(Error::StsNotImplemented, "Unsupported deconvolution3D layer");

            if (INF_ENGINE_RELEASE >= 2018050000 && (adjustPad.height || adjustPad.width))
            {
                if (padMode.empty())
                {
                    if (preferableTarget != DNN_TARGET_CPU && group != 1)
                    {
                        if ((adjustPad.height && pad.height) || (adjustPad.width && pad.width))
                            return false;
                    }
                    return pad.width >= adjustPad.width && pad.height >= adjustPad.height;
                }
                else if (padMode == "SAME")
                {
                    return kernel.width >= pad.width + 1 + adjustPad.width &&
                           kernel.height >= pad.height + 1 + adjustPad.height;
                }
                else if (padMode == "VALID")
                    return false;
            }

            if (group != 1)
            {
                return preferableTarget == DNN_TARGET_CPU;
            }
            if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
                return dilation.width == 1 && dilation.height == 1;
            return true;
        }
        else
#endif  // HAVE_INF_ENGINE
            return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
    }
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
        CV_Assert(inputs.size() != 0);

        int outCn = numOutput;
        std::vector<int> outShape;
        outShape.push_back(inputs[0][0]);  // batch
        outShape.push_back(outCn);
        if (padMode.empty())
        {
            for (int i = 0; i < kernel_size.size(); i++)
                outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] - pads_begin[i] - pads_end[i] + adjust_pads[i]);
        }
        else if (padMode == "VALID")
        {
            for (int i = 0; i < kernel_size.size(); i++)
                outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] + adjust_pads[i]);
        }
        else if (padMode == "SAME")
        {
            for (int i = 0; i < kernel_size.size(); i++)
                outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + 1 + adjust_pads[i]);
        }
        else
            CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
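        // Worked example for the padMode.empty() branch above (added note): a
        // 7x7 input with stride 2, kernel 4, symmetric padding 1 and no output
        // adjustment maps to 2*(7 - 1) + 4 - 1 - 1 = 14, inverting the forward
        // convolution's (14 + 2 - 4)/2 + 1 = 7.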
        CV_Assert(outCn % blobs[0].size[1] == 0);
        int ngroups = outCn / blobs[0].size[1];

        int inpCn = inputs[0][1];
        CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
        CV_Assert(blobs[0].size[0] == inpCn);

        outputs.resize(1, outShape);

        if (!is1x1())
            internals.push_back(computeColRowShape(inputs[0], outputs[0]));

        return false;
    }
    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
    {
        BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        std::vector<int> inpShape;
        std::vector<int> outShape;
        for (int i = 2; i < inputs[0].dims; i++) {
            inpShape.push_back(inputs[0].size[i]);
            outShape.push_back(outputs[0].size[i]);
        }
        getConvPoolPaddings(outShape, kernel_size, strides, padMode, pads_begin, pads_end);
        if (pads_begin.size() == 2) {
            for (int i = 0; i < pads_begin.size(); i++) {
                if (pads_begin[i] != pads_end[i])
                    CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in deconvolution layer");
            }
            pad = Size(pads_begin[1], pads_begin[0]);
        }

        weightsMultipliers.assign(numOutput, 1.0);
        if (weightsMat.empty())
        {
            transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
            biasesMat = hasBias() ? blobs[1].reshape(1, numOutput)
                                  : Mat::zeros(numOutput, 1, CV_32F);
        }
    }
    void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
    {
        Mat w = w_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(w_.at<float>(0))) : w_;
        Mat b = b_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(b_.at<float>(0))) : b_;

        CV_Assert_N(!weightsMat.empty(),
                    w.empty() || numOutput == w.total(),
                    b.empty() || numOutput == b.total());

        if (!w.empty())
        {
            transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
            weightsMat = weightsMat.reshape(1, numOutput);
            for (int i = 0; i < numOutput; ++i)
            {
                double wi = w.at<float>(i);
                weightsMultipliers[i] *= wi;
                cv::multiply(weightsMat.row(i), weightsMultipliers[i], weightsMat.row(i));
                biasesMat.at<float>(i) *= wi;
            }
            weightsMat = weightsMat.reshape(1, weightsMat.total() / blobs[0].size[0]);
        }

        if (!b.empty())
        {
            cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
        }
    }
    class MatMulInvoker : public ParallelLoopBody
    {
    public:
        MatMulInvoker(const Mat& a, const Mat& b, Mat& c, int nstripes)
        {
            a_ = &a;
            b_ = &b;
            c_ = &c;
            nstripes_ = nstripes;
            useAVX = checkHardwareSupport(CPU_AVX);
            useAVX2 = checkHardwareSupport(CPU_AVX2);
            useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
        }
        void operator()(const Range& range_) const CV_OVERRIDE
        {
            int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
            Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
            int mmax = a_->rows;
            int nmax = range.end - range.start;
            int kmax = a_->cols;
            int m, n, k;

            const float* aptr = a_->ptr<float>();
            const float* bptr = b_->ptr<float>() + range.start;
            float* cptr = c_->ptr<float>() + range.start;
            size_t astep = a_->step1();
            size_t bstep = b_->step1();
            size_t cstep = c_->step1();

        #if CV_TRY_AVX512_SKX
            if( useAVX512 )
                opt_AVX512_SKX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            else
        #endif
        #if CV_TRY_AVX2
            if( useAVX2 )
                opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            else
        #endif
        #if CV_TRY_AVX
            if( useAVX )
                opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            else
        #endif
            for( m = 0; m < mmax; m += 2 )
            {
                float* dst0 = cptr + cstep*m;
                float* dst1 = cptr + cstep*std::min(m+1, mmax-1);
                const float* aptr0 = aptr + astep*m;
                const float* aptr1 = aptr + astep*std::min(m+1, mmax-1);

                for( n = 0; n < nmax; n++ )
                {
                    dst0[n] = 0.f;
                    dst1[n] = 0.f;
                }

                for( k = 0; k < kmax; k += 4 )
                {
                    float alpha00 = aptr0[k];
                    float alpha01 = aptr1[k];
                    float alpha10 = 0.f, alpha11 = 0.f;
                    float alpha20 = 0.f, alpha21 = 0.f;
                    float alpha30 = 0.f, alpha31 = 0.f;
                    const float* bptr0 = bptr + k*bstep;
                    const float* bptr1 = bptr0;
                    const float* bptr2 = bptr0;
                    const float* bptr3 = bptr0;

                    if( k+1 < kmax )
                    {
                        alpha10 = aptr0[k+1];
                        alpha11 = aptr1[k+1];
                        bptr1 = bptr0 + bstep;
                        if( k+2 < kmax )
                        {
                            alpha20 = aptr0[k+2];
                            alpha21 = aptr1[k+2];
                            bptr2 = bptr1 + bstep;
                            if( k+3 < kmax )
                            {
                                alpha30 = aptr0[k+3];
                                alpha31 = aptr1[k+3];
                                bptr3 = bptr2 + bstep;
                            }
                        }
                    }
                    n = 0;
                #if CV_SIMD128
                    v_float32x4 a00 = v_setall_f32(alpha00);
                    v_float32x4 a01 = v_setall_f32(alpha01);
                    v_float32x4 a10 = v_setall_f32(alpha10);
                    v_float32x4 a11 = v_setall_f32(alpha11);
                    v_float32x4 a20 = v_setall_f32(alpha20);
                    v_float32x4 a21 = v_setall_f32(alpha21);
                    v_float32x4 a30 = v_setall_f32(alpha30);
                    v_float32x4 a31 = v_setall_f32(alpha31);

                    for( ; n <= nmax - 4; n += 4 )
                    {
                        v_float32x4 b0 = v_load(bptr0 + n);
                        v_float32x4 b1 = v_load(bptr1 + n);
                        v_float32x4 b2 = v_load(bptr2 + n);
                        v_float32x4 b3 = v_load(bptr3 + n);
                        v_float32x4 d0 = v_load(dst0 + n);
                        v_float32x4 d1 = v_load(dst1 + n);
                        d0 += b0*a00 + b1*a10 + b2*a20 + b3*a30;
                        d1 += b0*a01 + b1*a11 + b2*a21 + b3*a31;
                        v_store(dst0 + n, d0);
                        v_store(dst1 + n, d1);
                    }
                #endif
                    for( ; n < nmax; n++ )
                    {
                        float b0 = bptr0[n], b1 = bptr1[n];
                        float b2 = bptr2[n], b3 = bptr3[n];
                        float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
                        float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
                        dst0[n] = d0;
                        dst1[n] = d1;
                    }
                }
            }
        }

        const Mat* a_;
        const Mat* b_;
        Mat* c_;
        int nstripes_;
        bool useAVX;
        bool useAVX2;
        bool useAVX512;
    };
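    // Design note on MatMulInvoker (added): the scalar path uses a 2x4
    // blocking - two rows of A (dst0/dst1) against four rows of B per
    // iteration - keeping the eight broadcast alpha values and both
    // accumulators in registers; the std::min(m+1, mmax-1) trick makes an odd
    // final row compute into a duplicate destination instead of branching.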
    class Col2ImInvoker : public cv::ParallelLoopBody
    {
    public:
        const float* data_col;
        const float* biasvec;
        int channels, height, width;
        int kernel_h, kernel_w;
        int pad_h, pad_w;
        int stride_h, stride_w;
        float* data_im;
        int height_col, width_col;
        int nstripes;
        bool is1x1;

        Col2ImInvoker()
            : data_col(0), biasvec(0), channels(0), height(0), width(0),
              kernel_h(0), kernel_w(0), pad_h(0), pad_w(0), stride_h(0), stride_w(0), data_im(0),
              height_col(0), width_col(0), nstripes(0), is1x1(0)
        {}
        static void run(const float* data_col,
                        int channels, int height, int width,
                        int kernel_h, int kernel_w,
                        int pad_h, int pad_w,
                        int stride_h, int stride_w,
                        int height_col, int width_col,
                        float* data_im,
                        const float* biasvec,
                        bool is1x1)
        {
            const int nstripes = getNumThreads();

            Col2ImInvoker t;
            t.data_col = data_col;
            t.data_im = data_im;
            t.channels = channels; t.height = height; t.width = width;
            t.kernel_h = kernel_h; t.kernel_w = kernel_w;
            t.pad_h = pad_h; t.pad_w = pad_w;
            t.stride_h = stride_h; t.stride_w = stride_w;
            t.height_col = height_col;
            t.width_col = width_col;
            t.nstripes = nstripes;
            t.is1x1 = is1x1;
            t.biasvec = biasvec;

            parallel_for_(Range(0, nstripes), t, nstripes);
        }
        virtual void operator ()(const Range &r) const CV_OVERRIDE
        {
            const float* data_col_ = data_col;
            float* data_im_ = data_im;
            int coeff_h = (1 - stride_h * kernel_w * height_col) * width_col;
            int coeff_w = (1 - stride_w * height_col * width_col);
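            // Derivation of the coefficients above (added note): column
            // (h_col, w_col) of the im2col matrix stores input element
            // (c, kh, kw) at offset ((c*kernel_h + kh)*kernel_w + kw)*
            // plane_size_col + h_col*width_col + w_col. Advancing h_col by one
            // while tracking the same input pixel drops kh by stride_h, a net
            // change of (1 - stride_h*kernel_w*height_col)*width_col = coeff_h;
            // coeff_w follows the same way for w_col, so the loops below can
            // walk the matrix purely additively.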
            size_t total = (size_t)channels * height * width;
            size_t stripeSize = (total + nstripes - 1)/nstripes;
            size_t startIndex = r.start*stripeSize;
            size_t endIndex = std::min(r.end*stripeSize, total);
            int w = (int)(startIndex % width + pad_w);
            int h = (int)((startIndex / width) % height + pad_h);
            int c = (int)(startIndex / (width * height));
            int h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
            int h_col_end = std::min(h / stride_h + 1, height_col);
            int plane_size_col = height_col * width_col;
            int offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
            bool is1x1_ = is1x1;
            const float* biasvec_ = biasvec;

            for (size_t index = startIndex; index < endIndex; index++)
            {
                // compute the start and end of the output
                int w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
                int w_col_end = std::min(w / stride_w + 1, width_col);
                float val;

                if (is1x1_)
                    val = data_im_[index];
                else
                {
                    val = 0.f;
                    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
                        for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
                            val += data_col_[offset + h_col * coeff_h + w_col * coeff_w];
                        }
                    }
                }
                data_im_[index] = val + biasvec_[c];

                offset += plane_size_col;
                if( ++w >= width + pad_w )
                {
                    w = (int)((index + 1) % width + pad_w);
                    h = (int)(((index + 1) / width) % height + pad_h);
                    c = (int)((index + 1) / (width * height));
                    h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
                    h_col_end = std::min(h / stride_h + 1, height_col);
                    offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
                }
            }
        }
    };
#ifdef HAVE_OPENCL
    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        std::vector<UMat> internals;

        if (inputs_.depth() == CV_16S)
            return false;

        inputs_.getUMatVector(inputs);
        outputs_.getUMatVector(outputs);
        internals_.getUMatVector(internals);

        int outCn = numOutput;
        int inpCn = inputs[0].size[1];

        if (is1x1())
            return false;

        if (umat_weights.empty())
        {
            if (fusedWeights)
                weightsMat.copyTo(umat_weights);
            else
                transpose(blobs[0].reshape(1, inpCn), umat_weights);

            if (fusedBias)
                biasesMat.copyTo(umat_biases);
            else
            {
                if (hasBias())
                    blobs[1].reshape(1, outCn).copyTo(umat_biases);
                else
                    umat_biases = UMat::zeros(outCn, 1, CV_32F);
            }
        }
        String buildopt = format("-DT=%s ", ocl::typeToStr(inputs[0].type()));
        buildopt += format("-DPAD_H=%d -DPAD_W=%d -DKERNEL_H=%d -DKERNEL_W=%d -DSTRIDE_H=%d -DSTRIDE_W=%d ",
                           pad.height, pad.width, kernel.height, kernel.width, stride.height, stride.width);

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            int ngroups = outCn / blobs[0].size[1];
            int inpGroupCn = inpCn / ngroups;
            int outGroupCn = blobs[0].size[1];
            const UMat& inp = inputs[ii];
            UMat& out = outputs[ii];
            int numImg = inp.size[0];
            int inpH = inp.size[2], inpW = inp.size[3];
            int outH = out.size[2], outW = out.size[3];

            MatShape inpshape = shape(numImg*inpCn, inpH*inpW);
            MatShape outshape = shape(numImg*outCn, outH*outW);
            UMat convBlob = inputs[ii].reshape(1, inpshape.size(), &inpshape[0]);
            UMat decnBlob = out.reshape(1, outshape.size(), &outshape[0]);
            int rows = internals[0].rows / ngroups;
            for (int n = 0; n < numImg; n++)
            {
                for (int g = 0; g < ngroups; g++)
                {
                    UMat colMat = internals[0].rowRange(_Range(g * rows, rows));
                    UMat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
                    UMat wghtMat = umat_weights.colRange(_Range(g * inpGroupCn, inpGroupCn));
                    gemm(wghtMat, convMat, 1, noArray(), 0, colMat, 0);
                }

                for (int g = 0; g < ngroups; g++)
                {
                    int total = outGroupCn * decnBlob.cols;
                    int index = 0;
                    int height_col = inpH;
                    int width_col = inpW;
                    int coeff_h = (1 - stride.height * kernel.width * height_col) * width_col;
                    int coeff_w = (1 - stride.width * height_col * width_col);

                    ocl::Kernel k("col2im", ocl::dnn::col2im_oclsrc, buildopt);
                    k.set(index++, total);
                    k.set(index++, ocl::KernelArg::PtrReadOnly(internals[0]));
                    k.set(index++, (int)(g * rows * internals[0].cols));
                    k.set(index++, outGroupCn);
                    k.set(index++, outH);
                    k.set(index++, outW);
                    k.set(index++, height_col);
                    k.set(index++, width_col);
                    k.set(index++, coeff_h);
                    k.set(index++, coeff_w);
                    k.set(index++, ocl::KernelArg::PtrReadOnly(umat_biases));
                    k.set(index++, (int)(g * outGroupCn * umat_biases.cols));
                    k.set(index++, ocl::KernelArg::PtrWriteOnly(decnBlob));
                    k.set(index++, (int)((g + n * ngroups) * outGroupCn * decnBlob.cols));

                    size_t global[] = { (size_t)total };
                    bool ret = k.run(1, global, NULL, false);
                    if (!ret)
                        return false;
                }
            }
        }

        return true;
    }
#endif
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr));

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs, internals;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        internals_arr.getMatVector(internals);

        int outCn = numOutput;
        int inpCn = inputs[0].size[1];
        bool is1x1flag = is1x1();
        int nstripes = getNumThreads();

        if( weightsMat.empty() )
        {
            transpose(blobs[0].reshape(1, inpCn), weightsMat);
            biasesMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat::zeros(outCn, 1, CV_32F);
        }
        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            int ngroups = outCn / blobs[0].size[1];
            int inpGroupCn = inpCn / ngroups;
            int outGroupCn = blobs[0].size[1];
            const Mat& inp = inputs[ii];
            Mat& out = outputs[ii];
            int numImg = inp.size[0];
            int inpH = inp.size[2], inpW = inp.size[3];
            int outH = out.size[2], outW = out.size[3];

            Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
            Mat decnBlob = out.reshape(1, numImg*outCn);

            for (int n = 0; n < numImg; n++)
            {
                for (int g = 0; g < ngroups; g++)
                {
                    Mat dstMat = decnBlob.rowRange(_Range((g + n * ngroups) * outGroupCn, outGroupCn));
                    Mat &colMat = is1x1flag ? dstMat : internals[0];

                    Mat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
                    Mat wghtMat = weightsMat.colRange(_Range(g * inpGroupCn, inpGroupCn));
                    Mat curBiasMat = biasesMat.rowRange(_Range(g * outGroupCn, outGroupCn));

                    //gemm(wghtMat, convMat, 1, colMat, 0, colMat, 0);
                    MatMulInvoker mminvoker(wghtMat, convMat, colMat, nstripes);
                    parallel_for_(Range(0, nstripes), mminvoker, nstripes);

                    Col2ImInvoker::run(colMat.ptr<float>(), outGroupCn, outH, outW,
                                       kernel.height, kernel.width, pad.height, pad.width,
                                       stride.height, stride.width, inpH, inpW, dstMat.ptr<float>(),
                                       curBiasMat.ptr<float>(), is1x1flag);
                }
            }
        }
    }
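    // Pipeline note (added): forward() above implements deconvolution as the
    // transpose of im2col-based convolution: a parallel GEMM of the transposed
    // weights with the input produces the column matrix, and Col2ImInvoker
    // scatter-adds the columns back into the output image together with the
    // bias; for 1x1 kernels the GEMM writes straight into the output.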
    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);

        int inW, inH, inC, inN;
        getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
        const int outGroupCn = blobs[0].size[1];
        const int group = numOutput / outGroupCn;
        const int inpGroupCn = blobs[0].size[0] / group;

        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Func padded_input(name + "_constant_exterior");
        auto weights = wrapToHalideBuffer(blobs[0]);

        Halide::Func dilated_input("dilated_input");
        dilated_input(x, y, c, n) = 0.0f;
        Halide::RDom r1(0, inW, 0, inH);
        dilated_input(r1.x * stride.width, r1.y * stride.height, c, n) =
            inputBuffer(r1.x, r1.y, c, n);
        dilated_input.compute_root();

        Halide::Func bounded =
            Halide::BoundaryConditions::constant_exterior(dilated_input, 0,
                                                          0, (inW - 1) * stride.width + 1,
                                                          0, (inH - 1) * stride.height + 1,
                                                          0, inC, 0, inN);
        padded_input(x, y, c, n) = bounded(x, y, c, n);

        Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
        Halide::Expr kx = x + pad.width - r.x;
        Halide::Expr ky = y + pad.height - r.y;
        Halide::Expr kInC = r.z;
        Halide::Expr kOutC = c;
        for (int i = 1; i < group; ++i)
        {
            kInC = select(c < outGroupCn * i, kInC, inpGroupCn * i + r.z);
            kOutC = select(c < outGroupCn * i, kOutC, c - outGroupCn * i);
        }
        Halide::Expr topExpr = sum(padded_input(kx, ky, kInC, n) *
                                   weights(r.x, r.y, kOutC, kInC));
        if (hasBias())
        {
            auto bias = wrapToHalideBuffer(blobs[1], {numOutput});
            topExpr += bias(c);
        }
        top(x, y, c, n) = topExpr;
        return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
        if (fusedWeights)
        {
            ieWeights = InferenceEngine::make_shared_blob<float>(
                            InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW,
                            ieWeights->dims());
            ieWeights->allocate();

            int inpCn = blobs[0].size[0];
            Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
            transpose(weightsMat, newWeights);
        }

#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
        const int group = numOutput / outGroupCn;

        InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);

        ieLayer.setKernel(kernel_size);
        ieLayer.setStrides(strides);
        ieLayer.setDilation(dilations);
        ieLayer.setPaddingsBegin(pads_begin);

        if (padMode.empty())
        {
            ieLayer.setPaddingsEnd({pads_end[0] - adjust_pads[0], pads_end[1] - adjust_pads[1]});
        }
        else if (padMode == "SAME")
        {
            ieLayer.setPaddingsEnd({kernel_size[0] - pads_begin[0] - 1 - adjust_pads[0],
                                    kernel_size[1] - pads_begin[1] - 1 - adjust_pads[1]});
        }
        ieLayer.setGroup((size_t)group);
        ieLayer.setOutDepth((size_t)numOutput);

        InferenceEngine::Builder::Layer l = ieLayer;
        addConstantData("weights", ieWeights, l);
        if (hasBias())
            addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
        return Ptr<BackendNode>(new InfEngineBackendNode(l));
#else
        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
        const int group = numOutput / outGroupCn;

        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "Deconvolution";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));

#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
        ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
        ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
        ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
        ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
        ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
        ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
        ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
        ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
        ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
        ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
#else
        ieLayer->_kernel_x = kernel.width;
        ieLayer->_kernel_y = kernel.height;
        ieLayer->_stride_x = stride.width;
        ieLayer->_stride_y = stride.height;
        ieLayer->_padding_x = pad.width;
        ieLayer->_padding_y = pad.height;
        ieLayer->_dilation_x = dilation.width;
        ieLayer->_dilation_y = dilation.height;
#endif
        ieLayer->_out_depth = numOutput;
        ieLayer->_group = group;

        ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
        if (hasBias())
        {
            ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C);
        }
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }
    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_Assert(inputs.size() == outputs.size());

        int64 flops = 0;
        int outChannels = blobs[0].size[0];

        for (int i = 0; i < inputs.size(); i++)
        {
            flops += CV_BIG_INT(2)*outChannels*kernel.area()*total(inputs[i]);
        }

        return flops;
    }
};
Ptr<BaseConvolutionLayer> ConvolutionLayer::create(const LayerParams &params)
{
    Ptr<ConvolutionLayerImpl> l(new ConvolutionLayerImpl(params));
    return l;
}

Ptr<BaseConvolutionLayer> DeconvolutionLayer::create(const LayerParams &params)
{
    return Ptr<BaseConvolutionLayer>(new DeConvolutionLayerImpl(params));
}

}}  // namespace cv::dnn