1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
14 // Copyright (C) 2017, Intel Corporation, all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 //   * Redistributions of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 //   * Redistributions in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "../precomp.hpp"
44 #include "layers_common.hpp"
45 #include "../op_halide.hpp"
46 #include "../op_inf_engine.hpp"
47 #include "../op_vkcom.hpp"
48 #include "opencv2/core/hal/hal.hpp"
49 #include "opencv2/core/hal/intrin.hpp"
54 #include "opencl_kernels_dnn.hpp"
55 using namespace cv::dnn::ocl4dnn;
63 class BaseConvolutionLayerImpl : public ConvolutionLayer
66 bool fusedWeights, fusedBias;
67 std::vector<double> weightsMultipliers;
68 BaseConvolutionLayerImpl(const LayerParams &params)
70 setParamsFrom(params);
71 getConvolutionKernelParams(params, kernel_size, pads_begin, pads_end, strides, dilations, padMode, adjust_pads);
73 numOutput = params.get<int>("num_output");
74 int ngroups = params.get<int>("group", 1);
75 CV_Assert(numOutput % ngroups == 0);
77 if (kernel_size.size() == 2) {
78 kernel = Size(kernel_size[1], kernel_size[0]);
79 stride = Size(strides[1], strides[0]);
80 for (int i = 0; i < pads_begin.size(); i++) {
81 if (pads_begin[i] != pads_end[i])
82 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
84 pad = Size(pads_begin[1], pads_begin[0]);
85 dilation = Size(dilations[1], dilations[0]);
87 adjustPad.height = adjust_pads[0];
88 adjustPad.width = adjust_pads[1];
91 for (int i = 0; i < adjust_pads.size(); i++) {
92 CV_Assert(adjust_pads[i] < strides[i]);
99 virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
101 std::vector<Mat> inputs, outputs;
102 inputs_arr.getMatVector(inputs);
103 outputs_arr.getMatVector(outputs);
105 CV_Assert(inputs.size() > 0);
107 CV_Assert(blobs.size() == 1 || blobs.size() == 2);
108 CV_Assert(inputs[0].dims == outputs[0].dims);
109 CV_Assert(blobs[0].dims == kernel_size.size() + 2);
110 for (int i = 0; i < kernel_size.size(); i++) {
111 CV_Assert(blobs[0].size[i + 2] == kernel_size[i]);
114 const Mat &input = inputs[0];
115 CV_Assert((input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
116 for (size_t i = 0; i < inputs.size(); i++)
118 CV_Assert(inputs[i].type() == input.type());
119 CV_Assert((inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
120 for (int j = 0; j < inputs[i].dims; j++) {
121 CV_Assert(inputs[i].size[j] == input.size[j]);
125 std::vector<int> inpShape;
126 std::vector<int> outShape;
127 for (int i = 2; i < inputs[0].dims; i++) {
128 inpShape.push_back(inputs[0].size[i]);
129 outShape.push_back(outputs[0].size[i]);
131 getConvPoolPaddings(inpShape, kernel_size, strides, padMode, pads_begin, pads_end);
132 if (pads_begin.size() == 2) {
133 for (int i = 0; i < pads_begin.size(); i++) {
134 if (pads_begin[i] != pads_end[i])
135 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
137 pad = Size(pads_begin[1], pads_begin[0]);
139 fusedWeights = false;
145 return blobs.size() >= 2;
148 virtual MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const = 0;
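// is1x1(): true for a 1x1 kernel with unit stride and unit dilation. In that case each output
// pixel depends on exactly one input pixel, so the im2row/col2im machinery further down can take
// cheaper shortcuts (see the is1x1_ / is1x1flag branches below).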
151 return (kernel.height == 1 && kernel.width == 1) &&
152 (stride.height == 1 && stride.width == 1) &&
153 (dilation.height == 1 && dilation.width == 1);
156 virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
159 top->getScaleShift(w, b);
160 if (!w.empty() || !b.empty())
163 fusedWeights = fusedWeights || !w.empty();
164 fusedBias = fusedBias || (hasBias() && !w.empty()) || !b.empty();
170 virtual void fuseWeights(const Mat& w_, const Mat& b_) = 0;
172 virtual void applyHalideScheduler(Ptr<BackendNode>& node,
173 const std::vector<Mat*> &inputs,
174 const std::vector<Mat> &outputs,
175 int targetId) const CV_OVERRIDE
178 if (targetId != DNN_TARGET_CPU)
180 Layer::applyHalideScheduler(node, inputs, outputs, targetId);
183 Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"), yi("yi"), yo("yo"), co("co"), ci("ci");
184 Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
185 Halide::Func& padded_input = node.dynamicCast<HalideBackendNode>()->funcs[0];
187 int outW, outH, outC, outN;
188 getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
190 if (outW == 1 || outH <= 2)
193 if (is1x1() || outC <= 16)
199 .vectorize(x, outW >= 16 ? 16 : outW);
203 .split(c, co, ci, 16)
204 .fuse(yo, co, tile).fuse(n, tile, tile)
207 .vectorize(x, outW >= 16 ? 16 : outW);
208 padded_input.compute_at(top, yi);
209 #endif // HAVE_HALIDE
214 #define IS_POWER_LAYER(layer) \
215 (!layer.empty() && !layer->type.compare("Power"))
216 //TODO: perform convolution and bias addition simultaneously for cache optimization
217 class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
220 enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
222 std::vector<float> biasvec;
223 std::vector<float> reluslope;
224 Ptr<ActivationLayer> activ;
227 Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
228 std::vector<UMat> umat_blobs;
230 ocl4dnnFusedActiv_t activType;
233 ConvolutionLayerImpl(const LayerParams &params) : BaseConvolutionLayerImpl(params)
237 activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
242 MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
244 Size out(outShape[3], outShape[2]);
245 int inpGroupCn = blobs[0].size[1];
246 int ksize = inpGroupCn * kernel.height * kernel.width;
247 return shape(out.area(), ksize);
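// For example, a 3x3 kernel over 64 input channels per group writing a 56x56 output map gives an
// im2row buffer of out.area() x ksize = 3136 x 576 floats.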
250 virtual bool supportBackend(int backendId) CV_OVERRIDE
252 #ifdef HAVE_INF_ENGINE
253 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
255 if (kernel_size.size() == 3)
256 return preferableTarget == DNN_TARGET_CPU;
257 return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
262 if (kernel_size.size() == 3)
263 return (preferableTarget == DNN_TARGET_CPU && backendId == DNN_BACKEND_OPENCV);
264 else if (kernel_size.size() == 2)
265 return backendId == DNN_BACKEND_OPENCV ||
266 backendId == DNN_BACKEND_HALIDE ||
267 (backendId == DNN_BACKEND_VKCOM && haveVulkan());
273 bool getMemoryShapes(const std::vector<MatShape> &inputs,
274 const int requiredOutputs,
275 std::vector<MatShape> &outputs,
276 std::vector<MatShape> &internals) const CV_OVERRIDE
278 CV_Assert(blobs.size() != 0);
279 CV_Assert(!hasBias() || blobs[1].total() == (size_t)blobs[0].size[0]);
280 CV_Assert(inputs.size() == (size_t)1);
284 CV_Assert(inputs.size() != 0);
285 std::vector<int> inpShape(inputs[0].begin() + 2, inputs[0].end());
287 int outCn = blobs[0].size[0];
288 std::vector<int> outShape;
289 outShape.push_back(inputs[0][0]);
290 outShape.push_back(outCn);
292 int inpCn = inputs[0][1];
295 for (int i = 0; i < inpShape.size(); i++)
296 outShape.push_back((inpShape[i] + pads_begin[i] + pads_end[i] - dilations[i] * (kernel_size[i] - 1) - 1) / strides[i] + 1);
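// For example, a 3x3 kernel with pads_begin = pads_end = 1, stride 2 and dilation 1 maps a
// 224-pixel axis to (224 + 1 + 1 - 1*(3-1) - 1)/2 + 1 = 112 output pixels.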
300 getConvPoolOutParams(inpShape, kernel_size, strides, padMode, dilations, outShape);
303 int ngroups = inpCn / blobs[0].size[1];
304 if (ngroups == 0 || ngroups * blobs[0].size[1] != inpCn)
305 CV_Error(Error::StsError, format("Number of input channels should "
306 "be multiple of %d but got %d", blobs[0].size[1], inpCn));
307 CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);
309 outputs.resize(1, outShape);
314 virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
316 BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
318 CV_Assert(!blobs.empty());
319 const int outCn = blobs[0].size[0];
320 // prepare weightsMat where each row is aligned and has enough zero padding on the right to
321 // use vectorized (i.e. with intrinsics) loops without tail processing
322 Mat wm = blobs[0].reshape(1, outCn);
323 if( wm.step1() % VEC_ALIGN != 0 )
325 int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
326 Mat wm_buffer = Mat(outCn, newcols, wm.type());
327 Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
328 wm_padding.setTo(Scalar::all(0.));
329 Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
330 wm.copyTo(wm_aligned);
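// For example, 3x3 kernels over 3 input channels give rows of 27 floats; with VEC_ALIGN == 8 the
// buffer is widened to alignSize(27, 8) = 32 columns and the extra columns are zero-filled, so
// the vectorized dot-product loops never need a scalar tail.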
334 weightsMultipliers.assign(outCn, 1.0);
336 Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
337 biasvec.resize(outCn+2);
338 if( biasMat.empty() )
340 for(int i = 0; i < outCn; i++ )
345 for(int i = 0; i < outCn; i++ )
346 biasvec[i] = biasMat.at<float>(i);
349 convolutionOp.release();
353 bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
355 if (!activ.empty() && !layer.empty())
363 activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
365 if (IS_DNN_OPENCL_TARGET(preferableTarget))
367 Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
368 if (!activ_power.empty())
370 if (activ_power->scale != 1.f || activ_power->shift != 0.f)
372 const int outCh = blobs[0].size[0];
373 fuseWeights(Mat(1, outCh, CV_32F, Scalar(activ_power->scale)),
374 Mat(1, outCh, CV_32F, Scalar(activ_power->shift)));
377 power = activ_power->power;
378 activType = OCL4DNN_CONV_FUSED_ACTIV_POWER;
380 Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
381 if (!activ_tanh.empty())
383 activType = OCL4DNN_CONV_FUSED_ACTIV_TANH;
387 return !activ.empty();
390 void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
392 // Convolution weights have OIHW data layout. Fusing the affine transform
393 // (conv(I) + b1) * w + b2
394 // means scaling the convolution weights by w and replacing the bias with [b1*w + b2].
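// For example, a following layer that folds to w = 0.5 and b2 = 1.0, applied after a convolution
// with bias b1 = 2.0, turns each weight row W_i into 0.5*W_i and the bias into 0.5*2.0 + 1.0 = 2.0.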
395 const int outCn = weightsMat.size[0];
396 Mat w = w_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(w_.at<float>(0))) : w_;
397 Mat b = b_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(b_.at<float>(0))) : b_;
398 CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
399 w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
404 // Keep the original weights unchanged.
404 if (weightsMat.data == blobs[0].data)
405 weightsMat = weightsMat.clone();
407 Mat originWeights = blobs[0].reshape(1, outCn);
408 for (int i = 0; i < outCn; ++i)
410 double wi = w.at<float>(i);
411 weightsMultipliers[i] *= wi;
412 cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
419 for (int i = 0; i < outCn; ++i)
420 biasvec[i] += b.at<float>(i);
422 biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
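// The two extra entries replicate the last bias so that the kernels below, which handle a few
// output channels per iteration, can read one or two entries past the last channel without going
// out of bounds.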
425 virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
428 int out_channel = blobs[0].size[0];
429 bool has_bias = hasBias() || fusedBias;
430 int filter_size[2] = {kernel.height, kernel.width};
431 int pad_size[2] = {pad.height, pad.width};
432 int stride_size[2] = {stride.height, stride.width};
433 int dilation_size[2] = {dilation.height, dilation.width};
435 vkcom::Tensor input_tensor = VkComTensor(inputs[0]);
436 int in_channel = input_tensor.dimSize(1);
437 int group = in_channel / blobs[0].size[1];
439 // TODO: support group > 1
441 return Ptr<BackendNode>();
446 padding_mode = vkcom::kPaddingModeCaffe;
448 else if (padMode == "VALID")
450 padding_mode = vkcom::kPaddingModeValid;
452 else if (padMode == "SAME")
454 padding_mode = vkcom::kPaddingModeSame;
457 CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
459 std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConv(out_channel, has_bias,
460 filter_size, pad_size,
461 stride_size, dilation_size,
465 std::vector<Ptr<BackendWrapper> > blobsWrapper;
470 weightsMat.copyTo(wm); // to handle the case of isContinuous() == false
471 wm = wm.reshape(1, blobs[0].dims, blobs[0].size);
472 blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(wm)));
476 blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(blobs[0])));
481 Mat biasesMat({out_channel}, CV_32F, &biasvec[0]);
482 blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(biasesMat)));
485 return Ptr<BackendNode>(new VkComBackendNode(inputs, op, blobsWrapper));
486 #endif // HAVE_VULKAN
487 return Ptr<BackendNode>();
492 virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
495 Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
497 const int inpCn = inputBuffer.channels();
498 const int outCn = blobs[0].size[0];
499 const int inpGroupCn = blobs[0].size[1];
500 const int group = inpCn / inpGroupCn;
501 const int outGroupCn = outCn / group;
503 Halide::Buffer<float> weights = wrapToHalideBuffer(blobs[0]);
505 Halide::Var x("x"), y("y"), c("c"), n("n");
506 Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
507 Halide::Func padded_input(name + "_constant_exterior");
508 if (pad.width || pad.height)
510 Halide::Func bounded =
511 Halide::BoundaryConditions::constant_exterior(inputBuffer, 0);
512 padded_input(x, y, c, n) = bounded(x, y, c, n);
516 padded_input(x, y, c, n) = inputBuffer(x, y, c, n);
519 Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
520 Halide::Expr kx = x * stride.width - pad.width + r.x * dilation.width;
521 Halide::Expr ky = y * stride.height - pad.height + r.y * dilation.height;
522 Halide::Expr kc = r.z;
523 for (int i = 1; i < group; ++i)
525 kc = select(c < outGroupCn * i, kc, inpGroupCn * i + r.z);
527 Halide::Expr topExpr = sum(padded_input(kx, ky, kc, n) *
528 weights(r.x, r.y, r.z, c));
531 Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outCn});
534 top(x, y, c, n) = topExpr;
535 return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
536 #endif // HAVE_HALIDE
537 return Ptr<BackendNode>();
540 #ifdef HAVE_INF_ENGINE
541 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
543 InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
544 std::vector<size_t> dims = input->getDims();
545 CV_Assert(dims.size() == 4 || dims.size() == 5);
546 const int inpCn = dims[1];
547 const int outCn = blobs[0].size[0];
548 const int inpGroupCn = blobs[0].size[1];
549 const int group = inpCn / inpGroupCn;
550 InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
551 InferenceEngine::Layout::NCDHW;
553 auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
556 if (weightsMat.isContinuous())
558 Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
559 ieWeights = wrapToInfEngineBlob(cvWeights, layout);
563 ieWeights = InferenceEngine::make_shared_blob<float>({
564 InferenceEngine::Precision::FP32,
565 ieWeights->getTensorDesc().getDims(), layout
567 ieWeights->allocate();
569 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
570 Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
571 cvWeights.copyTo(newWeights);
574 InferenceEngine::Blob::Ptr ieBiases;
575 if (hasBias() || fusedBias)
577 Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
578 ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
581 InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
583 ieLayer.setKernel(kernel_size);
584 ieLayer.setStrides(strides);
585 ieLayer.setDilation(dilations);
586 ieLayer.setPaddingsBegin(pads_begin);
587 ieLayer.setPaddingsEnd(pads_end);
588 ieLayer.setGroup((size_t)group);
589 ieLayer.setOutDepth((size_t)outCn);
591 InferenceEngine::Builder::Layer l = ieLayer;
592 addConstantData("weights", ieWeights, l);
594 addConstantData("biases", ieBiases, l);
596 if (!padMode.empty())
597 l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
599 return Ptr<BackendNode>(new InfEngineBackendNode(l));
601 #endif // HAVE_INF_ENGINE
603 class ParallelConv : public cv::ParallelLoopBody
606 enum { BLK_SIZE = 32, BLK_SIZE_CN = 64 };
611 int outShape[4]; // used only for conv2d
612 std::vector<size_t> kernel_size, pads_begin, pads_end, strides, dilations;
613 int ngroups_, nstripes_;
614 std::vector<int> ofstab_;
615 const std::vector<float>* biasvec_;
616 const std::vector<float>* reluslope_;
617 const ActivationLayer* activ_;
624 : input_(0), weights_(0), output_(0), ngroups_(0), nstripes_(0),
625 biasvec_(0), reluslope_(0), activ_(0), is1x1_(false), useAVX(false), useAVX2(false), useAVX512(false)
628 static void run( const Mat& input, Mat& output, const Mat& weights,
629 const std::vector<float>& biasvec,
630 const std::vector<float>& reluslope,
631 const std::vector<size_t>& kernel_size, const std::vector<size_t>& strides,
632 const std::vector<size_t>& pads_begin, const std::vector<size_t>& pads_end,
633 const std::vector<size_t>& dilations,
634 const ActivationLayer* activ, int ngroups, int nstripes )
636 size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
637 1, std::multiplies<size_t>());
639 (input.dims == 4 || input.dims == 5) && (input.dims == output.dims),
640 input.size[0] == output.size[0],
641 weights.rows == output.size[1],
642 weights.cols == (input.size[1]/ngroups)*karea,
643 input.type() == output.type(),
644 input.type() == weights.type(),
645 input.type() == CV_32FC1,
646 input.isContinuous(),
647 output.isContinuous(),
648 biasvec.size() == (size_t)output.size[1]+2);
652 p.weights_ = &weights;
654 for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i];
655 p.outShape[1] /= ngroups;
657 p.kernel_size = kernel_size; p.strides = strides; p.dilations = dilations;
658 p.pads_begin = pads_begin; p.pads_end = pads_end;
660 p.ngroups_ = ngroups;
661 p.nstripes_ = nstripes;
663 int inpCnAll = input.size[1];
664 int depth = (input.dims == 5) ? input.size[2] : 1;
665 int width = input.size[input.dims - 1];
666 int height = input.size[input.dims - 2];
667 int inpCn = inpCnAll / ngroups;
669 bool isConv2D = kernel_size.size() == 2;
671 p.is1x1_ = isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 &&
672 pads_begin[0] == 0 && pads_begin[1] == 0;
674 p.useAVX = checkHardwareSupport(CPU_AVX) && isConv2D;
675 p.useAVX2 = checkHardwareSupport(CPU_AVX2) && isConv2D;
676 p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX && isConv2D;
678 int ncn = std::min(inpCn, (int)BLK_SIZE_CN);
680 int kernel_d = !isConv2D? kernel_size[0] : 1;
681 int kernel_h = kernel_size[kernel_size.size() - 2];
682 int kernel_w = kernel_size.back();
684 int dil_d = !isConv2D? dilations[0] : 1;
685 int dil_h = dilations[dilations.size() - 2];
686 int dil_w = dilations.back();
688 p.ofstab_.resize(karea * ncn);
689 int* ofstab = &p.ofstab_[0];
693 for( int k = 0; k < ncn; k++ )
694 for( int k_r = 0; k_r < kernel_h; k_r++ )
695 for( int k_c = 0; k_c < kernel_w; k_c++ )
696 ofstab[(k*kernel_h + k_r)*kernel_w + k_c] =
697 (k*height + k_r*dil_h)*width + k_c*dil_w;
701 for( int k = 0; k < ncn; k++ )
702 for (int k_d = 0; k_d < kernel_d; k_d++)
703 for( int k_r = 0; k_r < kernel_h; k_r++ )
704 for( int k_c = 0; k_c < kernel_w; k_c++ )
705 ofstab[(k*kernel_d*kernel_h + k_d*kernel_h + k_r)*kernel_w + k_c] =
706 (k*depth*height + k_d*dil_d*height + k_r*dil_h)*width + k_c*dil_w;
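// ofstab caches, for each (channel, kernel tap) pair, the flat offset of that tap inside the
// input tensor relative to the sliding-window origin; e.g. in 2D with dilation 1 the entry for
// (channel k, row r, col c) is k*height*width + r*width + c.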
709 p.biasvec_ = &biasvec;
710 p.reluslope_ = &reluslope;
711 p.activ_ = p.reluslope_->empty() ? activ : 0;
713 parallel_for_(Range(0, nstripes), p, nstripes);
716 virtual void operator ()(const Range &r0) const CV_OVERRIDE
718 const int valign = ConvolutionLayerImpl::VEC_ALIGN;
719 int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
720 bool isConv2D = input_->dims == 4;
722 int outW = output_->size[output_->dims - 1];
723 int outH = output_->size[output_->dims - 2];
724 int outCn = output_->size[1]/ngroups;
726 int depth = !isConv2D? input_->size[2] : 1;
727 int height = input_->size[input_->dims - 2];
728 int width = input_->size[input_->dims - 1];
729 int inpCn = input_->size[1]/ngroups;
731 const int nstripes = nstripes_;
733 int kernel_d = !isConv2D? kernel_size[0] : 1;
734 int kernel_h = kernel_size[kernel_size.size() - 2];
735 int kernel_w = kernel_size.back();
736 int karea = kernel_w*kernel_h*kernel_d;
738 int pad_d = !isConv2D? pads_begin[0] : 0;
739 int pad_t = pads_begin[pads_begin.size() - 2];
740 int pad_l = pads_begin.back();
742 int stride_d = !isConv2D? strides[0] : 0;
743 int stride_h = strides[strides.size() - 2];
744 int stride_w = strides.back();
746 int dilation_d = !isConv2D? dilations[0] : 1;
747 int dilation_h = dilations[dilations.size() - 2];
748 int dilation_w = dilations.back();
751 size_t inpPlaneSize = input_->total(2);
752 size_t outPlaneSize = output_->total(2);
755 int stripesPerSample;
759 if( nstripes >= batchSize*2 )
761 stripesPerSample = nstripes/batchSize;
762 stripeSize = alignSize((outPlaneSize + stripesPerSample - 1)/stripesPerSample, valign);
763 stripeSize = std::min(stripeSize, outPlaneSize);
767 stripesPerSample = 1;
768 int samplesPerStripe = std::max((batchSize + nstripes - 1)/nstripes, 1);
769 r.start *= samplesPerStripe;
770 r.end *= samplesPerStripe;
771 stripeSize = outPlaneSize;
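// Work partitioning: when there are at least twice as many stripes as (sample, group) planes,
// each output plane is split into several aligned stripes; otherwise every stripe processes one
// or more whole planes.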
774 const float* data_inp0_ = input_->ptr<float>();
775 const int* ofstab = &ofstab_[0];
776 const float* wptr_orig_ = weights_->ptr<float>();
777 size_t wstep = weights_->step1();
778 const float* biasptr_ = &biasvec_->at(0);
779 const float* reluptr_ = reluslope_->empty() ? 0 : &reluslope_->at(0);
780 float* data_out0_ = output_->ptr<float>();
781 size_t rowbufsz = (size_t)karea*BLK_SIZE_CN*BLK_SIZE;
782 AutoBuffer<float> rowbuf0_(rowbufsz + valign);
783 float* rowbuf0 = alignPtr(rowbuf0_.data(), (int)(valign*sizeof(float)));
785 // we clear the buffer once; ultimately, it lets us avoid
786 // tail processing after running the unrolled/vectorized loop.
787 // the main idea is to make sure that the tail (a.k.a. padding) of each row
788 // (i.e. the elements with indices between vsz=karea*ncn and vsz_a)
789 // does not contain NaNs or Infs. Because the padding in the weights
790 // matrix is explicitly initialized with 0's, we handle all other
791 // cases nicely, i.e. we can skip explicit re-initialization
792 // of the padding - we just retain elements from the previous iteration
793 // of the loop over channels (cn0).
794 memset(rowbuf0, 0, rowbufsz*sizeof(rowbuf0[0]) );
796 for( int stripe = r.start; stripe < r.end; stripe++ )
798 int subsampleIdx = stripe/stripesPerSample;
799 if( subsampleIdx >= batchSize )
801 int stripeStart = (int)((stripe - subsampleIdx*stripesPerSample)*stripeSize);
802 int stripeEnd = (int)std::min(stripeStart + stripeSize, outPlaneSize);
803 const float* data_inp0 = data_inp0_ + subsampleIdx*inpPlaneSize*inpCn;
804 float* data_out0 = data_out0_ + subsampleIdx*outPlaneSize*outCn;
805 int startOutCn = (subsampleIdx % ngroups)*outCn;
806 const float* wptr_orig = wptr_orig_ + wstep*startOutCn;
807 const float* biasptr = biasptr_ + startOutCn;
809 for( int cn0 = 0; cn0 < inpCn; cn0 += BLK_SIZE_CN )
811 int cn1 = std::min(cn0 + BLK_SIZE_CN, inpCn);
812 int ncn = cn1 - cn0, vsz = karea*ncn;
813 int vsz_a = (int)alignSize(vsz, valign);
814 const float* wptr = wptr_orig + cn0*karea;
815 // we apply [Channels][P]ReLU (if any) during the final pass only.
816 const float* relu = cn1 == inpCn && reluptr_ ? reluptr_ + startOutCn : 0;
818 for( int ofs0 = stripeStart; ofs0 < stripeEnd; ofs0 += BLK_SIZE )
820 int ofs, ofs1 = std::min(ofs0 + BLK_SIZE, stripeEnd);
822 int out_d = ofs0 / (outH * outW);
823 int out_i = (ofs0 - out_d * outH * outW) / outW;
824 int out_j = ofs0 % outW;
826 // do im2row for a part of input tensor
827 float* rowbuf = rowbuf0;
831 for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
833 int delta = std::min(ofs1 - ofs, outW - out_j);
834 int out_j1 = out_j + delta;
836 int in_i = out_i * stride_h - pad_t;
837 int in_j = out_j * stride_w - pad_l;
838 const float* imgptr = data_inp0 + (cn0*height + in_i)*width + in_j;
841 // do im2row for a part of input tensor
844 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
846 for( k = 0; k < vsz; k++ )
847 rowbuf[k] = imgptr[k*inpPlaneSize];
852 bool ok_i = 0 <= in_i && in_i < height - (kernel_h-1)*dilation_h;
853 int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
854 int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
856 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
858 // this condition should be true for most of the tensor elements, i.e.
859 // most of the time the kernel aperture is inside the tensor X-Y plane.
860 if( ok_i && out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
862 for( k = 0; k < vsz; k++ )
865 float v0 = imgptr[k1];
866 float v1 = imgptr[k1 + stride_w];
868 rowbuf[k+vsz_a] = v1;
877 int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
878 int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
880 // here some non-contiguous sub-row of the row will not be
881 // filled from the tensor; we need to make sure that the uncovered
882 // elements are explicitly set to 0's. the easiest way is to
883 // set all the elements to 0's before the loop.
884 memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
885 for( k = 0; k < ncn; k++ )
887 for( i = i0; i < i1; i++ )
889 for( j = j0; j < j1; j++ )
891 int imgofs = k*(width*height) + i*(dilation_h*width) + j*dilation_w;
892 rowbuf[(k*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
903 for( ofs = ofs0; ofs < ofs1; out_d += (out_i + 1) / outH, out_i = (out_i + 1) % outH, out_j = 0 )
905 int delta = std::min(ofs1 - ofs, outW - out_j);
906 int out_j1 = out_j + delta;
908 int in_d = out_d * stride_d - pad_d;
909 int in_i = out_i * stride_h - pad_t;
910 int in_j = out_j * stride_w - pad_l;
911 const float* imgptr = data_inp0 + (cn0*depth*height + in_d*height + in_i)*width + in_j;
914 int d0 = std::max(0, (-in_d + dilation_d - 1) / dilation_d);
915 int d1 = std::min(kernel_d, (depth - in_d + dilation_d - 1) / dilation_d);
917 int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
918 int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
920 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
922 int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
923 int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
925 // here some non-contiguous sub-row of the row will not be
926 // filled from the tensor; we need to make sure that the uncovered
927 // elements are explicitly set to 0's. the easiest way is to
928 // set all the elements to 0's before the loop.
929 memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
930 for( k = 0; k < ncn; k++ )
932 for ( d = d0; d < d1; d++)
934 for( i = i0; i < i1; i++ )
936 for( j = j0; j < j1; j++ )
938 int imgofs = k*(depth*width*height) + d*dilation_d*width*height + i*(dilation_h*width) + j*dilation_w;
939 rowbuf[(k*kernel_d*kernel_h + d*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
948 // now compute dot product of the weights
949 // and im2row-transformed part of the tensor
950 int bsz = ofs1 - ofs0;
951 #if CV_TRY_AVX512_SKX
952 /* AVX512 convolution requires an alignment of 16, and the speed-up only pays off for larger vector sizes */
954 opt_AVX512_SKX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
955 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
960 opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
961 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
966 opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
967 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
970 for( int i = 0; i < outCn; i += 2 )
972 const float* wptr0 = wptr + i*wstep;
973 const float* wptr1 = wptr0 + wstep;
974 float* outptr0 = data_out0 + ofs0 + i*outPlaneSize;
975 float* outptr1 = outptr0 + outPlaneSize;
976 float bias0 = biasptr[i], bias1 = biasptr[i+1];
977 float r0 = 1.f, r1 = 1.f;
988 r0 = relu[i]; r1 = relu[i+1];
995 v_float32x4 vr0 = v_setall_f32(r0), vr1 = v_setall_f32(r1), z = v_setzero_f32();
997 for( ; j <= bsz - 4; j += 4 )
999 const float* rptr = rowbuf0 + j*vsz_a;
1004 s0 = v_setall_f32(bias0);
1005 s1 = v_setall_f32(bias1);
1009 s0 = v_load(outptr0 + j);
1010 s1 = v_load(outptr1 + j);
1013 v_float32x4 vs00 = v_setzero_f32(), vs01 = v_setzero_f32(),
1014 vs02 = v_setzero_f32(), vs03 = v_setzero_f32(),
1015 vs10 = v_setzero_f32(), vs11 = v_setzero_f32(),
1016 vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
1017 for( k = 0; k < vsz; k += 4, rptr += 4 )
1019 v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k);
1020 v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a),
1021 r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3);
1033 s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
1034 s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
1037 s0 = v_select(s0 > z, s0, s0*vr0);
1038 s1 = v_select(s1 > z, s1, s1*vr1);
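// Vectorized leaky-ReLU: per lane, s = s > 0 ? s : s*r, where r is the negative slope
// (so r == 0 gives a plain ReLU).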
1041 v_store(outptr0 + j, s0);
1042 v_store(outptr1 + j, s1);
1045 for( ; j < bsz; j++ )
1047 const float* rptr = rowbuf0 + j*vsz_a;
1061 for( k = 0; k < vsz; k++ )
1069 s00 = s00 > 0.f ? s00 : s00*r0;
1070 s10 = s10 > 0.f ? s10 : s10*r1;
1081 activ_->forwardSlice(data_out0 + stripeStart, data_out0 + stripeStart,
1082 (int)(stripeEnd - stripeStart),
1083 outPlaneSize, startOutCn, startOutCn + outCn);
1089 bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
1091 std::vector<UMat> inputs;
1092 std::vector<UMat> outputs;
1094 bool use_half = (inps.depth() == CV_16S);
1095 inps.getUMatVector(inputs);
1096 outs.getUMatVector(outputs);
1098 CV_Assert(outputs.size() == 1);
1099 for (int i = 0; i < inputs.size(); ++i)
1100 CV_Assert(inputs[i].u != outputs[0].u);
1102 if (umat_blobs.empty())
1104 size_t n = blobs.size();
1105 umat_blobs.resize(n);
1106 for (size_t i = 0; i < n; i++)
1108 blobs[i].copyTo(umat_blobs[i]);
1112 if (convolutionOp.empty())
1114 OCL4DNNConvConfig config;
1115 config.in_shape = shape(inputs[0]);
1116 config.out_shape = shape(outputs[0]);
1117 config.kernel = kernel;
1119 config.stride = stride;
1120 config.dilation = dilation;
1121 config.group = inputs[0].size[1] / umat_blobs[0].size[1];
1122 config.bias_term = (hasBias()) ? true : false;
1123 config.use_half = use_half;
1125 convolutionOp = Ptr<OCL4DNNConvSpatial<float> >(new OCL4DNNConvSpatial<float>(config));
1128 int outCn = umat_blobs[0].size[0];
1133 Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1134 if( !activ_relu.empty() )
1136 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1137 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU;
1140 Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
1141 if( !activ_relu6.empty() )
1143 reluslope.resize(2);
1144 reluslope[0] = activ_relu6->minValue;
1145 reluslope[1] = activ_relu6->maxValue;
1146 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU6;
1149 Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1150 if( !activ_chprelu.empty() )
1152 const Mat& m = activ_chprelu->blobs[0];
1153 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1154 const float* mdata = m.ptr<float>();
1155 reluslope.resize(outCn+2);
1156 std::copy(mdata, mdata + outCn, reluslope.begin());
1157 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1158 activType = OCL4DNN_CONV_FUSED_ACTIV_PRELU;
1164 weightsMat.copyTo(umat_blobs[0]);
1165 fusedWeights = false;
1169 if ( umat_blobs.size() < 2 )
1170 umat_blobs.resize(2);
1171 umat_blobs[1] = UMat(biasvec, true);
1172 convolutionOp->setBias(true);
1178 if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU )
1180 CV_Assert(!reluslope.empty());
1181 convolutionOp->setActivReLU(true, reluslope[0]);
1183 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_PRELU)
1185 CV_Assert(!reluslope.empty());
1186 convolutionOp->setActivPReLU(true, reluslope);
1188 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_POWER)
1190 convolutionOp->setActivPower(true, power);
1192 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_TANH)
1194 convolutionOp->setActivTanh(true);
1196 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU6)
1198 convolutionOp->setActivReLU6(true, reluslope[0], reluslope[1]);
1202 convolutionOp->setActivReLU(false, 0);
1203 convolutionOp->setActivPReLU(false, reluslope);
1204 convolutionOp->setActivPower(false, 1.f);
1205 convolutionOp->setActivTanh(false);
1206 convolutionOp->setActivReLU6(false, 0, 0);
1211 UMat& inpMat = inputs[0];
1212 UMat& outMat = outputs[0];
1213 int batch_size = inpMat.size[0];
1215 return convolutionOp->Forward(inpMat,
1216 inputs.size() == 2 ? inputs[1] : UMat(),
1218 umat_blobs.size() > 1 ? umat_blobs[1] : UMat(),
1224 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1226 CV_TRACE_FUNCTION();
1227 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1229 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1230 forward_ocl(inputs_arr, outputs_arr, internals_arr))
1232 if (inputs_arr.depth() == CV_16S)
1234 forward_fallback(inputs_arr, outputs_arr, internals_arr);
1238 std::vector<Mat> inputs, outputs;
1239 inputs_arr.getMatVector(inputs);
1240 outputs_arr.getMatVector(outputs);
1242 /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
1243 name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
1244 kernel.width, kernel.height, pad.width, pad.height,
1245 stride.width, stride.height, dilation.width, dilation.height);*/
1246 CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
1247 outputs.size() == 1, inputs[0].data != outputs[0].data);
1249 int ngroups = inputs[0].size[1]/blobs[0].size[1];
1250 CV_Assert(outputs[0].size[1] % ngroups == 0);
1251 int outCn = blobs[0].size[0];
1256 Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1257 if( !activ_relu.empty() )
1259 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1262 Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1263 if( !activ_chprelu.empty() )
1265 const Mat& m = activ_chprelu->blobs[0];
1266 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1267 const float* mdata = m.ptr<float>();
1268 reluslope.resize(outCn+2);
1269 std::copy(mdata, mdata + outCn, reluslope.begin());
1270 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1274 int nstripes = std::max(getNumThreads(), 1);
1276 ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
1277 kernel_size, strides, pads_begin, pads_end, dilations, activ.get(), ngroups, nstripes);
1280 virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
1281 const std::vector<MatShape> &outputs) const CV_OVERRIDE
1283 CV_Assert(inputs.size() == outputs.size());
1286 int karea = std::accumulate(kernel_size.begin(), kernel_size.end(), 1, std::multiplies<size_t>());
1287 for (int i = 0; i < inputs.size(); i++)
1289 flops += total(outputs[i])*(CV_BIG_INT(2)*karea*inputs[i][1] + 1);
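// Each output element costs one multiply-accumulate per kernel tap and input channel (counted as
// 2 FLOPs) plus one addition for the bias, hence the 2*karea*inpCn + 1 factor per output element.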
1296 class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
1299 Mat weightsMat, biasesMat;
1303 DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}
1305 MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
1307 int inpCn = inpShape[1];
1308 int inpH = inpShape[2];
1309 int inpW = inpShape[3];
1310 int outCn = outShape[1];
1311 int ngroups = inpCn / blobs[0].size[0];
1312 int outGroupCn = outCn / ngroups;
1313 int ksize = outGroupCn * kernel.height * kernel.width;
1314 return shape(ksize, inpH * inpW);
1317 virtual bool supportBackend(int backendId) CV_OVERRIDE
1319 #ifdef HAVE_INF_ENGINE
1320 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or IODHW layout
1321 const int group = numOutput / outGroupCn;
1323 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
1325 if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
1329 if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
1331 if (padMode.empty())
1333 if (preferableTarget != DNN_TARGET_CPU && group != 1)
1335 for (int i = 0; i < adjust_pads.size(); i++) {
1336 if (adjust_pads[i] && pads_begin[i])
1340 for (int i = 0; i < adjust_pads.size(); i++) {
1341 if (pads_end[i] < adjust_pads[i])
1346 else if (padMode == "SAME")
1348 for (int i = 0; i < adjust_pads.size(); i++) {
1349 if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
1354 else if (padMode == "VALID")
1360 return preferableTarget == DNN_TARGET_CPU;
1362 if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
1363 return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
1367 #endif // HAVE_INF_ENGINE
1368 return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
1371 bool getMemoryShapes(const std::vector<MatShape> &inputs,
1372 const int requiredOutputs,
1373 std::vector<MatShape> &outputs,
1374 std::vector<MatShape> &internals) const CV_OVERRIDE
1376 CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
1377 CV_Assert(inputs.size() != 0);
1379 int outCn = numOutput;
1380 std::vector<int> outShape;
1381 outShape.push_back(inputs[0][0]); // batch
1382 outShape.push_back(outCn);
1383 if (padMode.empty())
1385 for (int i = 0; i < kernel_size.size(); i++)
1386 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] - pads_begin[i] - pads_end[i] + adjust_pads[i]);
1388 else if (padMode == "VALID")
1390 for (int i = 0; i < kernel_size.size(); i++)
1391 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] + adjust_pads[i]);
1393 else if (padMode == "SAME")
1395 for (int i = 0; i < kernel_size.size(); i++)
1396 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + 1 + adjust_pads[i]);
1399 CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
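// For example, with the explicit-padding formula above, a 4x4 deconvolution with stride 2,
// pads_begin = pads_end = 1 and no output adjustment maps a 56-pixel axis to
// 2*(56 - 1) + 4 - 1 - 1 = 112 pixels, i.e. it inverts the matching stride-2 convolution.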
1401 CV_Assert(outCn % blobs[0].size[1] == 0);
1402 int ngroups = outCn / blobs[0].size[1];
1404 int inpCn = inputs[0][1];
1405 CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
1406 CV_Assert(blobs[0].size[0] == inpCn);
1408 outputs.resize(1, outShape);
1411 internals.push_back(computeColRowShape(inputs[0], outputs[0]));
1416 void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
1418 BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
1420 std::vector<Mat> inputs, outputs;
1421 inputs_arr.getMatVector(inputs);
1422 outputs_arr.getMatVector(outputs);
1424 std::vector<int> inpShape;
1425 std::vector<int> outShape;
1426 for (int i = 2; i < inputs[0].dims; i++) {
1427 inpShape.push_back(inputs[0].size[i]);
1428 outShape.push_back(outputs[0].size[i]);
1430 getConvPoolPaddings(outShape, kernel_size, strides, padMode, pads_begin, pads_end);
1431 if (pads_begin.size() == 2) {
1432 for (int i = 0; i < pads_begin.size(); i++) {
1433 if (pads_begin[i] != pads_end[i])
1434 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in deconvolution layer");
1436 pad = Size(pads_begin[1], pads_begin[0]);
1439 weightsMultipliers.assign(numOutput, 1.0);
1440 if (weightsMat.empty())
1442 transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
1443 biasesMat = hasBias() ? blobs[1].reshape(1, numOutput)
1444 : Mat::zeros(numOutput, 1, CV_32F);
1448 void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
1450 Mat w = w_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(w_.at<float>(0))) : w_;
1451 Mat b = b_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(b_.at<float>(0))) : b_;
1453 CV_Assert_N(!weightsMat.empty(),
1454 w.empty() || numOutput == w.total(),
1455 b.empty() || numOutput == b.total());
1459 transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
1460 weightsMat = weightsMat.reshape(1, numOutput);
1461 for (int i = 0; i < numOutput; ++i)
1463 double wi = w.at<float>(i);
1464 weightsMultipliers[i] *= wi;
1465 cv::multiply(weightsMat.row(i), weightsMultipliers[i], weightsMat.row(i));
1466 biasesMat.at<float>(i) *= wi;
1468 weightsMat = weightsMat.reshape(1, weightsMat.total() / blobs[0].size[0]);
1473 cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
1477 class MatMulInvoker : public ParallelLoopBody
1480 MatMulInvoker(const Mat& a, const Mat& b, Mat& c, int nstripes)
1485 nstripes_ = nstripes;
1486 useAVX = checkHardwareSupport(CPU_AVX);
1487 useAVX2 = checkHardwareSupport(CPU_AVX2);
1488 useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
1491 void operator()(const Range& range_) const CV_OVERRIDE
1493 int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
1494 Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
1495 int mmax = a_->rows;
1496 int nmax = range.end - range.start;
1497 int kmax = a_->cols;
1499 const float* aptr = a_->ptr<float>();
1500 const float* bptr = b_->ptr<float>() + range.start;
1501 float* cptr = c_->ptr<float>() + range.start;
1502 size_t astep = a_->step1();
1503 size_t bstep = b_->step1();
1504 size_t cstep = c_->step1();
1506 #if CV_TRY_AVX512_SKX
1508 opt_AVX512_SKX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1513 opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1518 opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1521 for( m = 0; m < mmax; m += 2 )
1523 float* dst0 = cptr + cstep*m;
1524 float* dst1 = cptr + cstep*std::min(m+1, mmax-1);
1525 const float* aptr0 = aptr + astep*m;
1526 const float* aptr1 = aptr + astep*std::min(m+1, mmax-1);
1528 for( n = 0; n < nmax; n++ )
1534 for( k = 0; k < kmax; k += 4 )
1536 float alpha00 = aptr0[k];
1537 float alpha01 = aptr1[k];
1538 float alpha10 = 0.f, alpha11 = 0.f;
1539 float alpha20 = 0.f, alpha21 = 0.f;
1540 float alpha30 = 0.f, alpha31 = 0.f;
1541 const float* bptr0 = bptr + k*bstep;
1542 const float* bptr1 = bptr0;
1543 const float* bptr2 = bptr0;
1544 const float* bptr3 = bptr0;
1548 alpha10 = aptr0[k+1];
1549 alpha11 = aptr1[k+1];
1550 bptr1 = bptr0 + bstep;
1553 alpha20 = aptr0[k+2];
1554 alpha21 = aptr1[k+2];
1555 bptr2 = bptr1 + bstep;
1558 alpha30 = aptr0[k+3];
1559 alpha31 = aptr1[k+3];
1560 bptr3 = bptr2 + bstep;
1567 v_float32x4 a00 = v_setall_f32(alpha00);
1568 v_float32x4 a01 = v_setall_f32(alpha01);
1569 v_float32x4 a10 = v_setall_f32(alpha10);
1570 v_float32x4 a11 = v_setall_f32(alpha11);
1571 v_float32x4 a20 = v_setall_f32(alpha20);
1572 v_float32x4 a21 = v_setall_f32(alpha21);
1573 v_float32x4 a30 = v_setall_f32(alpha30);
1574 v_float32x4 a31 = v_setall_f32(alpha31);
1576 for( ; n <= nmax - 4; n += 4 )
1578 v_float32x4 b0 = v_load(bptr0 + n);
1579 v_float32x4 b1 = v_load(bptr1 + n);
1580 v_float32x4 b2 = v_load(bptr2 + n);
1581 v_float32x4 b3 = v_load(bptr3 + n);
1582 v_float32x4 d0 = v_load(dst0 + n);
1583 v_float32x4 d1 = v_load(dst1 + n);
1592 v_store(dst0 + n, d0);
1593 v_store(dst1 + n, d1);
1597 for( ; n < nmax; n++ )
1599 float b0 = bptr0[n], b1 = bptr1[n];
1600 float b2 = bptr2[n], b3 = bptr3[n];
1601 float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
1602 float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
1618 class Col2ImInvoker : public cv::ParallelLoopBody
1621 const float* data_col;
1622 const float* biasvec;
1623 int channels, height, width;
1624 int kernel_h, kernel_w;
1626 int stride_h, stride_w;
1628 int height_col, width_col;
1633 : data_col(0), biasvec(0), channels(0), height(0), width(0),
1634 kernel_h(0), kernel_w(0), pad_h(0), pad_w(0), stride_h(0), stride_w(0), data_im(0),
1635 height_col(0), width_col(0), nstripes(0), is1x1(0)
1638 static void run(const float* data_col,
1639 int channels, int height, int width,
1640 int kernel_h, int kernel_w,
1641 int pad_h, int pad_w,
1642 int stride_h, int stride_w,
1643 int height_col, int width_col,
1645 const float* biasvec,
1648 const int nstripes = getNumThreads();
1651 t.data_col = data_col;
1652 t.data_im = data_im;
1653 t.channels = channels; t.height = height; t.width = width;
1654 t.kernel_h = kernel_h; t.kernel_w = kernel_w;
1655 t.pad_h = pad_h; t.pad_w = pad_w;
1656 t.stride_h = stride_h; t.stride_w = stride_w;
1657 t.height_col = height_col;
1658 t.width_col = width_col;
1659 t.nstripes = nstripes;
1661 t.biasvec = biasvec;
1663 parallel_for_(Range(0, nstripes), t, nstripes);
1666 virtual void operator ()(const Range &r) const CV_OVERRIDE
1668 const float* data_col_ = data_col;
1669 float* data_im_ = data_im;
1670 int coeff_h = (1 - stride_h * kernel_w * height_col) * width_col;
1671 int coeff_w = (1 - stride_w * height_col * width_col);
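// coeff_h and coeff_w fold the im2col index arithmetic into a linear form: for an output pixel
// (c, h, w), the column-buffer element produced by window position (h_col, w_col) sits at
// offset + h_col*coeff_h + w_col*coeff_w, where 'offset' (computed below) encodes (c, h, w).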
1672 size_t total = (size_t)channels * height * width;
1673 size_t stripeSize = (total + nstripes - 1)/nstripes;
1674 size_t startIndex = r.start*stripeSize;
1675 size_t endIndex = std::min(r.end*stripeSize, total);
1676 int w = (int)(startIndex % width + pad_w);
1677 int h = (int)((startIndex / width) % height + pad_h);
1678 int c = (int)(startIndex / (width * height));
1679 int h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
1680 int h_col_end = std::min(h / stride_h + 1, height_col);
1681 int plane_size_col = height_col * width_col;
1682 int offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
1683 bool is1x1_ = is1x1;
1684 const float* biasvec_ = biasvec;
1686 for (size_t index = startIndex; index < endIndex; index++)
1688 // compute the start and end of the output
1689 int w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
1690 int w_col_end = std::min(w / stride_w + 1, width_col);
1694 val = data_im_[index];
1698 for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
1699 for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
1700 val += data_col_[offset + h_col * coeff_h + w_col * coeff_w];
1704 data_im_[index] = val + biasvec_[c];
1706 offset += plane_size_col;
1707 if( ++w >= width + pad_w )
1709 w = (int)((index + 1)% width + pad_w);
1710 h = (int)(((index + 1) / width) % height + pad_h);
1711 c = (int)((index + 1) / (width * height));
1712 h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
1713 h_col_end = std::min(h / stride_h + 1, height_col);
1714 offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
1721 bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
1723 std::vector<UMat> inputs;
1724 std::vector<UMat> outputs;
1725 std::vector<UMat> internals;
1727 if (inputs_.depth() == CV_16S)
1730 inputs_.getUMatVector(inputs);
1731 outputs_.getUMatVector(outputs);
1732 internals_.getUMatVector(internals);
1734 int outCn = numOutput;
1735 int inpCn = inputs[0].size[1];
1740 if (umat_weights.empty())
1743 weightsMat.copyTo(umat_weights);
1745 transpose(blobs[0].reshape(1, inpCn), umat_weights);
1748 biasesMat.copyTo(umat_biases);
1752 blobs[1].reshape(1, outCn).copyTo(umat_biases);
1754 umat_biases = UMat::zeros(outCn, 1, CV_32F);
1758 String buildopt = format("-DT=%s ", ocl::typeToStr(inputs[0].type()));
1759 buildopt += format("-DPAD_H=%d -DPAD_W=%d -DKERNEL_H=%d -DKERNEL_W=%d -DSTRIDE_H=%d -DSTRIDE_W=%d ",
1760 pad.height, pad.width, kernel.height, kernel.width, stride.height, stride.width);
1762 for (size_t ii = 0; ii < outputs.size(); ii++)
1764 int ngroups = outCn / blobs[0].size[1];
1765 int inpGroupCn = inpCn / ngroups;
1766 int outGroupCn = blobs[0].size[1];
1767 const UMat& inp = inputs[ii];
1768 UMat& out = outputs[ii];
1769 int numImg = inp.size[0];
1770 int inpH = inp.size[2], inpW = inp.size[3];
1771 int outH = out.size[2], outW = out.size[3];
1773 MatShape inpshape = shape(numImg*inpCn, inpH*inpW);
1774 MatShape outshape = shape(numImg*outCn, outH*outW);
1775 UMat convBlob = inputs[ii].reshape(1, inpshape.size(), &inpshape[0]);
1776 UMat decnBlob = out.reshape(1, outshape.size(), &outshape[0]);
1777 int rows = internals[0].rows / ngroups;
1779 for (int n = 0; n < numImg; n++)
1781 for (int g = 0; g < ngroups; g++)
1783 UMat colMat = internals[0].rowRange(_Range(g * rows, rows));
1784 UMat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
1785 UMat wghtMat = umat_weights.colRange(_Range(g * inpGroupCn, inpGroupCn));
1786 gemm(wghtMat, convMat, 1, noArray(), 0, colMat, 0);
1789 for (int g = 0; g < ngroups; g++)
1791 int total = outGroupCn * decnBlob.cols;
1793 int height_col = inpH;
1794 int width_col = inpW;
1795 int coeff_h = (1 - stride.height * kernel.width * height_col) * width_col;
1796 int coeff_w = (1 - stride.width * height_col * width_col);
1798 ocl::Kernel k("col2im", ocl::dnn::col2im_oclsrc, buildopt);
1799 k.set(index++, total);
1800 k.set(index++, ocl::KernelArg::PtrReadOnly(internals[0]));
1801 k.set(index++, (int)(g * rows * internals[0].cols));
1802 k.set(index++, outGroupCn);
1803 k.set(index++, outH);
1804 k.set(index++, outW);
1805 k.set(index++, height_col);
1806 k.set(index++, width_col);
1807 k.set(index++, coeff_h);
1808 k.set(index++, coeff_w);
1809 k.set(index++, ocl::KernelArg::PtrReadOnly(umat_biases));
1810 k.set(index++, (int)(g * outGroupCn * umat_biases.cols));
1811 k.set(index++, ocl::KernelArg::PtrWriteOnly(decnBlob));
1812 k.set(index++, (int)((g + n * ngroups) * outGroupCn * decnBlob.cols));
1814 size_t global[] = { (size_t)total };
1815 bool ret = k.run(1, global, NULL, false);
1826 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1828 CV_TRACE_FUNCTION();
1829 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1831 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1832 forward_ocl(inputs_arr, outputs_arr, internals_arr));
1834 if (inputs_arr.depth() == CV_16S)
1836 forward_fallback(inputs_arr, outputs_arr, internals_arr);
1840 std::vector<Mat> inputs, outputs, internals;
1841 inputs_arr.getMatVector(inputs);
1842 outputs_arr.getMatVector(outputs);
1843 internals_arr.getMatVector(internals);
1845 int outCn = numOutput;
1846 int inpCn = inputs[0].size[1];
1847 bool is1x1flag = is1x1();
1848 int nstripes = getNumThreads();
1850 if( weightsMat.empty() )
1852 transpose(blobs[0].reshape(1, inpCn), weightsMat);
1853 biasesMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat::zeros(outCn, 1, CV_32F);
1856 for (size_t ii = 0; ii < outputs.size(); ii++)
1858 int ngroups = outCn / blobs[0].size[1];
1859 int inpGroupCn = inpCn / ngroups;
1860 int outGroupCn = blobs[0].size[1];
1861 const Mat& inp = inputs[ii];
1862 Mat& out = outputs[ii];
1863 int numImg = inp.size[0];
1864 int inpH = inp.size[2], inpW = inp.size[3];
1865 int outH = out.size[2], outW = out.size[3];
1867 Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
1868 Mat decnBlob = out.reshape(1, numImg*outCn);
1870 for (int n = 0; n < numImg; n++)
1872 for (int g = 0; g < ngroups; g++)
1874 Mat dstMat = decnBlob.rowRange(_Range((g + n * ngroups) * outGroupCn, outGroupCn));
1875 Mat &colMat = is1x1flag ? dstMat : internals[0];
1877 Mat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
1878 Mat wghtMat = weightsMat.colRange(_Range(g * inpGroupCn, inpGroupCn));
1879 Mat curBiasMat = biasesMat.rowRange(_Range(g * outGroupCn, outGroupCn));
1881 //gemm(wghtMat, convMat, 1, colMat, 0, colMat, 0);
1882 MatMulInvoker mminvoker(wghtMat, convMat, colMat, nstripes);
1883 parallel_for_(Range(0, nstripes), mminvoker, nstripes);
1885 Col2ImInvoker::run(colMat.ptr<float>(), outGroupCn, outH, outW,
1886 kernel.height, kernel.width, pad.height, pad.width,
1887 stride.height, stride.width, inpH, inpW, dstMat.ptr<float>(),
1888 curBiasMat.ptr<float>(), is1x1flag);
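// Deconvolution is thus computed as a dense GEMM (transposed weights times the input columns)
// followed by col2im, which scatters the overlapping column blocks back into the output image
// and adds the per-channel bias.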
1894 virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
1897 Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
1899 int inW, inH, inC, inN;
1900 getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
1901 const int outGroupCn = blobs[0].size[1];
1902 const int group = numOutput / outGroupCn;
1903 const int inpGroupCn = blobs[0].size[0] / group;
1905 Halide::Var x("x"), y("y"), c("c"), n("n");
1906 Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
1907 Halide::Func padded_input(name + "_constant_exterior");
1908 auto weights = wrapToHalideBuffer(blobs[0]);
1910 Halide::Func dilated_input("dilated_input");
1911 dilated_input(x, y, c, n) = 0.0f;
1912 Halide::RDom r1(0, inW, 0, inH);
1913 dilated_input(r1.x * stride.width, r1.y * stride.height, c, n) =
1914 inputBuffer(r1.x, r1.y, c, n);
1915 dilated_input.compute_root();
1917 Halide::Func bounded =
1918 Halide::BoundaryConditions::constant_exterior(dilated_input, 0,
1919 0, (inW - 1) * stride.width + 1,
1920 0, (inH - 1) * stride.height + 1,
1922 padded_input(x, y, c, n) = bounded(x, y, c, n);
1924 Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
1925 Halide::Expr kx = x + pad.width - r.x;
1926 Halide::Expr ky = y + pad.height - r.y;
1927 Halide::Expr kInC = r.z;
1928 Halide::Expr kOutC = c;
1929 for (int i = 1; i < group; ++i)
1931 kInC = select(c < outGroupCn * i, kInC, inpGroupCn * i + r.z);
1932 kOutC = select(c < outGroupCn * i, kOutC, c - outGroupCn * i);
1934 Halide::Expr topExpr = sum(padded_input(kx, ky, kInC, n) *
1935 weights(r.x, r.y, kOutC, kInC));
1938 auto bias = wrapToHalideBuffer(blobs[1], {numOutput});
1941 top(x, y, c, n) = topExpr;
1942 return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
1943 #endif // HAVE_HALIDE
1944 return Ptr<BackendNode>();
1947 #ifdef HAVE_INF_ENGINE
1948 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
1950 InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
1951 InferenceEngine::Layout::OIHW;
1953 auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
1956 ieWeights = InferenceEngine::make_shared_blob<float>({
1957 InferenceEngine::Precision::FP32,
1958 ieWeights->getTensorDesc().getDims(), layout
1960 ieWeights->allocate();
1962 int inpCn = blobs[0].size[0];
1963 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
1964 transpose(weightsMat, newWeights);
1967 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or OIDHW layout
1968 const int group = numOutput / outGroupCn;
1970 InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
1972 ieLayer.setKernel(kernel_size);
1973 ieLayer.setStrides(strides);
1974 ieLayer.setDilation(dilations);
1975 ieLayer.setPaddingsBegin(pads_begin);
1977 if (padMode.empty())
1979 std::vector<size_t> paddings_end;
1980 for (int i = 0; i < pads_end.size(); i++) {
1981 paddings_end.push_back(pads_end[i] - adjust_pads[i]);
1983 ieLayer.setPaddingsEnd(paddings_end);
1985 else if (padMode == "SAME")
1987 std::vector<size_t> paddings_end;
1988 for (int i = 0; i < pads_begin.size(); i++) {
1989 paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
1991 ieLayer.setPaddingsEnd(paddings_end);
1993 ieLayer.setGroup((size_t)group);
1994 ieLayer.setOutDepth((size_t)numOutput);
1996 InferenceEngine::Builder::Layer l = ieLayer;
1997 addConstantData("weights", ieWeights, l);
1999 addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
2000 return Ptr<BackendNode>(new InfEngineBackendNode(l));
2002 #endif // HAVE_INF_ENGINE
2004 virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
2005 const std::vector<MatShape> &outputs) const CV_OVERRIDE
2007 CV_Assert(inputs.size() == outputs.size());
2010 int outChannels = blobs[0].size[0];
2011 size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
2012 1, std::multiplies<size_t>());
2014 for (int i = 0; i < inputs.size(); i++)
2016 flops += CV_BIG_INT(2)*outChannels*karea*total(inputs[i]);
2023 Ptr<BaseConvolutionLayer> ConvolutionLayer::create(const LayerParams &params)
2025 Ptr<ConvolutionLayerImpl> l(new ConvolutionLayerImpl(params));
2029 Ptr<BaseConvolutionLayer> DeconvolutionLayer::create(const LayerParams &params)
2031 return Ptr<BaseConvolutionLayer>(new DeConvolutionLayerImpl(params));