1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
14 // Copyright (C) 2017, Intel Corporation, all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "../precomp.hpp"
44 #include "layers_common.hpp"
45 #include "../op_halide.hpp"
46 #include "../op_inf_engine.hpp"
47 #include "opencv2/core/hal/hal.hpp"
48 #include "opencv2/core/hal/intrin.hpp"
53 #include "opencl_kernels_dnn.hpp"
54 using namespace cv::dnn::ocl4dnn;
62 class BaseConvolutionLayerImpl : public ConvolutionLayer
65 bool fusedWeights, fusedBias;
66 std::vector<double> weightsMultipliers;
67 BaseConvolutionLayerImpl(const LayerParams &params)
69 setParamsFrom(params);
70 getConvolutionKernelParams(params, kernel_size, pads_begin, pads_end, strides, dilations, padMode, adjust_pads);
72 numOutput = params.get<int>("num_output");
73 int ngroups = params.get<int>("group", 1);
74 CV_Assert(numOutput % ngroups == 0);
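// For 2D convolutions the generic size vectors are mirrored into the legacy Size fields
// (kernel, stride, pad, dilation); the vectors are stored as (height, width), so index 1
// is the width component.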
76 if (kernel_size.size() == 2) {
77 kernel = Size(kernel_size[1], kernel_size[0]);
78 stride = Size(strides[1], strides[0]);
79 for (int i = 0; i < pads_begin.size(); i++) {
80 if (pads_begin[i] != pads_end[i])
81 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
83 pad = Size(pads_begin[1], pads_begin[0]);
84 dilation = Size(dilations[1], dilations[0]);
86 adjustPad.height = adjust_pads[0];
87 adjustPad.width = adjust_pads[1];
90 for (int i = 0; i < adjust_pads.size(); i++) {
91 CV_Assert(adjust_pads[i] < strides[i]);
98 virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
100 std::vector<Mat> inputs, outputs;
101 inputs_arr.getMatVector(inputs);
102 outputs_arr.getMatVector(outputs);
104 CV_Assert(inputs.size() > 0);
106 CV_Assert(blobs.size() == 1 || blobs.size() == 2);
107 CV_Assert(inputs[0].dims == outputs[0].dims);
108 CV_Assert(blobs[0].dims == kernel_size.size() + 2);
109 for (int i = 0; i < kernel_size.size(); i++) {
110 CV_Assert(blobs[0].size[i + 2] == kernel_size[i]);
113 const Mat &input = inputs[0];
114 CV_Assert((input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
115 for (size_t i = 0; i < inputs.size(); i++)
117 CV_Assert(inputs[i].type() == input.type());
118 CV_Assert((inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
119 for (int j = 0; j < inputs[i].dims; j++) {
120 CV_Assert(inputs[i].size[j] == input.size[j]);
124 std::vector<int> inpShape;
125 std::vector<int> outShape;
126 for (int i = 2; i < inputs[0].dims; i++) {
127 inpShape.push_back(inputs[0].size[i]);
128 outShape.push_back(outputs[0].size[i]);
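// Re-derive the actual paddings from the input spatial sizes; for SAME/VALID pad modes
// this updates the pads_begin/pads_end values parsed at construction time.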
130 getConvPoolPaddings(inpShape, kernel_size, strides, padMode, pads_begin, pads_end);
131 if (pads_begin.size() == 2) {
132 for (int i = 0; i < pads_begin.size(); i++) {
133 if (pads_begin[i] != pads_end[i])
134 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
136 pad = Size(pads_begin[1], pads_begin[0]);
138 fusedWeights = false;
144 return blobs.size() >= 2;
147 virtual MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const = 0;
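// A 1x1 kernel with unit stride and dilation degenerates to a per-pixel matrix product,
// so callers can skip the im2row step.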
150 return (kernel.height == 1 && kernel.width == 1) &&
151 (stride.height == 1 && stride.width == 1) &&
152 (dilation.height == 1 && dilation.width == 1);
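// Fold a following per-channel scale/shift layer (e.g. BatchNorm or Scale) into the
// convolution's own weights and bias.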
155 virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
158 top->getScaleShift(w, b);
159 if (!w.empty() || !b.empty())
162 fusedWeights = fusedWeights || !w.empty();
163 fusedBias = fusedBias || (hasBias() && !w.empty()) || !b.empty();
169 virtual void fuseWeights(const Mat& w_, const Mat& b_) = 0;
171 virtual void applyHalideScheduler(Ptr<BackendNode>& node,
172 const std::vector<Mat*> &inputs,
173 const std::vector<Mat> &outputs,
174 int targetId) const CV_OVERRIDE
177 if (targetId != DNN_TARGET_CPU)
179 Layer::applyHalideScheduler(node, inputs, outputs, targetId);
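// Hand-written CPU schedule: split output rows, process output channels in blocks of 16
// and vectorize along x; very small or 1x1 outputs fall back to simpler schedules.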
182 Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"), yi("yi"), yo("yo"), co("co"), ci("ci");
183 Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
184 Halide::Func& padded_input = node.dynamicCast<HalideBackendNode>()->funcs[0];
186 int outW, outH, outC, outN;
187 getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
189 if (outW == 1 || outH <= 2)
192 if (is1x1() || outC <= 16)
198 .vectorize(x, outW >= 16 ? 16 : outW);
202 .split(c, co, ci, 16)
203 .fuse(yo, co, tile).fuse(n, tile, tile)
206 .vectorize(x, outW >= 16 ? 16 : outW);
207 padded_input.compute_at(top, yi);
208 #endif // HAVE_HALIDE
213 #define IS_POWER_LAYER(layer) \
214 (!layer.empty() && !layer->type.compare("Power"))
215 // TODO: perform convolution and bias addition simultaneously for better cache utilization
216 class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
219 enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
221 std::vector<float> biasvec;
222 std::vector<float> reluslope;
223 Ptr<ActivationLayer> activ;
226 Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
227 std::vector<UMat> umat_blobs;
229 ocl4dnnFusedActiv_t activType;
232 ConvolutionLayerImpl(const LayerParams &params) : BaseConvolutionLayerImpl(params)
236 activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
241 MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
243 Size out(outShape[3], outShape[2]);
244 int inpGroupCn = blobs[0].size[1];
245 int ksize = inpGroupCn * kernel.height * kernel.width;
246 return shape(out.area(), ksize);
249 virtual bool supportBackend(int backendId) CV_OVERRIDE
251 #ifdef HAVE_INF_ENGINE
252 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
254 if (kernel_size.size() == 3)
255 return preferableTarget == DNN_TARGET_CPU;
256 return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
260 return (kernel_size.size() == 3 && preferableTarget == DNN_TARGET_CPU && backendId == DNN_BACKEND_OPENCV) ||
261 (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE));
264 bool getMemoryShapes(const std::vector<MatShape> &inputs,
265 const int requiredOutputs,
266 std::vector<MatShape> &outputs,
267 std::vector<MatShape> &internals) const CV_OVERRIDE
269 CV_Assert(blobs.size() != 0);
270 CV_Assert(!hasBias() || blobs[1].total() == (size_t)blobs[0].size[0]);
271 CV_Assert(inputs.size() == (size_t)1);
275 CV_Assert(inputs.size() != 0);
276 std::vector<int> inpShape(inputs[0].begin() + 2, inputs[0].end());
278 int outCn = blobs[0].size[0];
279 std::vector<int> outShape;
280 outShape.push_back(inputs[0][0]);
281 outShape.push_back(outCn);
283 int inpCn = inputs[0][1];
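// Spatial output size with explicit padding:
//   out = (in + pad_begin + pad_end - dilation*(kernel - 1) - 1) / stride + 1
// SAME/VALID pad modes delegate to getConvPoolOutParams() instead.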
286 for (int i = 0; i < inpShape.size(); i++)
287 outShape.push_back((inpShape[i] + pads_begin[i] + pads_end[i] - dilations[i] * (kernel_size[i] - 1) - 1) / strides[i] + 1);
291 getConvPoolOutParams(inpShape, kernel_size, strides, padMode, dilations, outShape);
294 int ngroups = inpCn / blobs[0].size[1];
295 if (ngroups == 0 || ngroups * blobs[0].size[1] != inpCn)
296 CV_Error(Error::StsError, format("Number of input channels should "
297 "be multiple of %d but got %d", blobs[0].size[1], inpCn));
298 CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);
300 outputs.resize(1, outShape);
305 virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
307 BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
309 CV_Assert(!blobs.empty());
310 const int outCn = blobs[0].size[0];
311 // prepare weightsMat where each row is aligned and has enough zero padding on the right to
312 // use vectorized (i.e. with intrinsics) loops without tail processing
313 Mat wm = blobs[0].reshape(1, outCn);
314 if( wm.step1() % VEC_ALIGN != 0 )
316 int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
317 Mat wm_buffer = Mat(outCn, newcols, wm.type());
318 Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
319 wm_padding.setTo(Scalar::all(0.));
320 Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
321 wm.copyTo(wm_aligned);
325 weightsMultipliers.assign(outCn, 1.0);
327 Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
328 biasvec.resize(outCn+2);
329 if( biasMat.empty() )
331 for(int i = 0; i < outCn; i++ )
336 for(int i = 0; i < outCn; i++ )
337 biasvec[i] = biasMat.at<float>(i);
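// Drop any previously created OCL4DNN convolution so the next OpenCL forward pass
// re-creates it with the new shapes and (possibly fused) parameters.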
340 convolutionOp.release();
344 bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
346 if (!activ.empty() && !layer.empty())
354 activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
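// On OpenCL targets supported activations are fused into the OCL4DNN kernel below;
// other targets apply the activation after the convolution in ParallelConv.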
356 if (IS_DNN_OPENCL_TARGET(preferableTarget))
358 Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
359 if (!activ_power.empty())
361 if (activ_power->scale != 1.f || activ_power->shift != 0.f)
363 const int outCh = blobs[0].size[0];
364 fuseWeights(Mat(1, outCh, CV_32F, Scalar(activ_power->scale)),
365 Mat(1, outCh, CV_32F, Scalar(activ_power->shift)));
368 power = activ_power->power;
369 activType = OCL4DNN_CONV_FUSED_ACTIV_POWER;
371 Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
372 if (!activ_tanh.empty())
374 activType = OCL4DNN_CONV_FUSED_ACTIV_TANH;
378 return !activ.empty();
381 void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
383 // Convolution weights have OIHW data layout. Fusing the parameters of
384 // (conv(I) + b1) * w + b2
385 // means replacing the convolution weights with [w*weights] and the bias with [b1*w + b2].
386 const int outCn = weightsMat.size[0];
387 Mat w = w_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(w_.at<float>(0))) : w_;
388 Mat b = b_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(b_.at<float>(0))) : b_;
389 CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
390 w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
394 // Keep the original weights unchanged.
395 if (weightsMat.data == blobs[0].data)
396 weightsMat = weightsMat.clone();
398 Mat originWeights = blobs[0].reshape(1, outCn);
399 for (int i = 0; i < outCn; ++i)
401 double wi = w.at<float>(i);
402 weightsMultipliers[i] *= wi;
403 cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
410 for (int i = 0; i < outCn; ++i)
411 biasvec[i] += b.at<float>(i);
413 biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
416 virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
419 Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
421 const int inpCn = inputBuffer.channels();
422 const int outCn = blobs[0].size[0];
423 const int inpGroupCn = blobs[0].size[1];
424 const int group = inpCn / inpGroupCn;
425 const int outGroupCn = outCn / group;
427 Halide::Buffer<float> weights = wrapToHalideBuffer(blobs[0]);
429 Halide::Var x("x"), y("y"), c("c"), n("n");
430 Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
431 Halide::Func padded_input(name + "_constant_exterior");
432 if (pad.width || pad.height)
434 Halide::Func bounded =
435 Halide::BoundaryConditions::constant_exterior(inputBuffer, 0);
436 padded_input(x, y, c, n) = bounded(x, y, c, n);
440 padded_input(x, y, c, n) = inputBuffer(x, y, c, n);
443 Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
444 Halide::Expr kx = x * stride.width - pad.width + r.x * dilation.width;
445 Halide::Expr ky = y * stride.height - pad.height + r.y * dilation.height;
446 Halide::Expr kc = r.z;
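// For grouped convolution, select the input-channel base of the group that output
// channel c belongs to.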
447 for (int i = 1; i < group; ++i)
449 kc = select(c < outGroupCn * i, kc, inpGroupCn * i + r.z);
451 Halide::Expr topExpr = sum(padded_input(kx, ky, kc, n) *
452 weights(r.x, r.y, r.z, c));
455 Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outCn});
458 top(x, y, c, n) = topExpr;
459 return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
460 #endif // HAVE_HALIDE
461 return Ptr<BackendNode>();
464 #ifdef HAVE_INF_ENGINE
465 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
467 InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
468 std::vector<size_t> dims = input->getDims();
469 CV_Assert(dims.size() == 4 || dims.size() == 5);
470 const int inpCn = dims[1];
471 const int outCn = blobs[0].size[0];
472 const int inpGroupCn = blobs[0].size[1];
473 const int group = inpCn / inpGroupCn;
474 InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
475 InferenceEngine::Layout::NCDHW;
477 auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
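// If scale factors were fused into the weights, export the updated weightsMat
// rather than the original blob.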
480 if (weightsMat.isContinuous())
482 Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
483 ieWeights = wrapToInfEngineBlob(cvWeights, layout);
487 ieWeights = InferenceEngine::make_shared_blob<float>({
488 InferenceEngine::Precision::FP32,
489 ieWeights->getTensorDesc().getDims(), layout
491 ieWeights->allocate();
493 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
494 Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
495 cvWeights.copyTo(newWeights);
498 InferenceEngine::Blob::Ptr ieBiases;
499 if (hasBias() || fusedBias)
501 Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
502 ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
505 InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
507 ieLayer.setKernel(kernel_size);
508 ieLayer.setStrides(strides);
509 ieLayer.setDilation(dilations);
510 ieLayer.setPaddingsBegin(pads_begin);
511 ieLayer.setPaddingsEnd(pads_end);
512 ieLayer.setGroup((size_t)group);
513 ieLayer.setOutDepth((size_t)outCn);
515 InferenceEngine::Builder::Layer l = ieLayer;
516 addConstantData("weights", ieWeights, l);
518 addConstantData("biases", ieBiases, l);
520 if (!padMode.empty())
521 l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
523 return Ptr<BackendNode>(new InfEngineBackendNode(l));
525 #endif // HAVE_INF_ENGINE
527 class ParallelConv : public cv::ParallelLoopBody
530 enum { BLK_SIZE = 32, BLK_SIZE_CN = 64 };
535 int outShape[4]; // used only for conv2d
536 std::vector<size_t> kernel_size, pads_begin, pads_end, strides, dilations;
537 int ngroups_, nstripes_;
538 std::vector<int> ofstab_;
539 const std::vector<float>* biasvec_;
540 const std::vector<float>* reluslope_;
541 const ActivationLayer* activ_;
548 : input_(0), weights_(0), output_(0), ngroups_(0), nstripes_(0),
549 biasvec_(0), reluslope_(0), activ_(0), is1x1_(false), useAVX(false), useAVX2(false), useAVX512(false)
552 static void run( const Mat& input, Mat& output, const Mat& weights,
553 const std::vector<float>& biasvec,
554 const std::vector<float>& reluslope,
555 const std::vector<size_t>& kernel_size, const std::vector<size_t>& strides,
556 const std::vector<size_t>& pads_begin, const std::vector<size_t>& pads_end,
557 const std::vector<size_t>& dilations,
558 const ActivationLayer* activ, int ngroups, int nstripes )
560 size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
561 1, std::multiplies<size_t>());
563 (input.dims == 4 || input.dims == 5) && (input.dims == output.dims),
564 input.size[0] == output.size[0],
565 weights.rows == output.size[1],
566 weights.cols == (input.size[1]/ngroups)*karea,
567 input.type() == output.type(),
568 input.type() == weights.type(),
569 input.type() == CV_32FC1,
570 input.isContinuous(),
571 output.isContinuous(),
572 biasvec.size() == (size_t)output.size[1]+2);
576 p.weights_ = &weights;
578 for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i];
579 p.outShape[1] /= ngroups;
581 p.kernel_size = kernel_size; p.strides = strides; p.dilations = dilations;
582 p.pads_begin = pads_begin; p.pads_end = pads_end;
584 p.ngroups_ = ngroups;
585 p.nstripes_ = nstripes;
587 int inpCnAll = input.size[1];
588 int depth = (input.dims == 5) ? input.size[2] : 1;
589 int width = input.size[input.dims - 1];
590 int height = input.size[input.dims - 2];
591 int inpCn = inpCnAll / ngroups;
593 bool isConv2D = kernel_size.size() == 2;
595 p.is1x1_ = isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 &&
596 pads_begin[0] == 0 && pads_begin[1] == 0;
598 p.useAVX = checkHardwareSupport(CPU_AVX) && isConv2D;
599 p.useAVX2 = checkHardwareSupport(CPU_AVX2) && isConv2D;
600 p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX && isConv2D;
602 int ncn = std::min(inpCn, (int)BLK_SIZE_CN);
604 int kernel_d = !isConv2D? kernel_size[0] : 1;
605 int kernel_h = kernel_size[kernel_size.size() - 2];
606 int kernel_w = kernel_size.back();
608 int dil_d = !isConv2D? dilations[0] : 1;
609 int dil_h = dilations[dilations.size() - 2];
610 int dil_w = dilations.back();
612 p.ofstab_.resize(karea * ncn);
613 int* ofstab = &p.ofstab_[0];
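// Precompute, for every (channel, kernel row, kernel column) tap, its flat offset into
// the input relative to the anchor of the receptive field; the convolution loops can use
// this table instead of recomputing offsets per pixel.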
617 for( int k = 0; k < ncn; k++ )
618 for( int k_r = 0; k_r < kernel_h; k_r++ )
619 for( int k_c = 0; k_c < kernel_w; k_c++ )
620 ofstab[(k*kernel_h + k_r)*kernel_w + k_c] =
621 (k*height + k_r*dil_h)*width + k_c*dil_w;
625 for( int k = 0; k < ncn; k++ )
626 for (int k_d = 0; k_d < kernel_d; k_d++)
627 for( int k_r = 0; k_r < kernel_h; k_r++ )
628 for( int k_c = 0; k_c < kernel_w; k_c++ )
629 ofstab[(k*kernel_d*kernel_h + k_d*kernel_h + k_r)*kernel_w + k_c] =
630 (k*depth*height + k_d*dil_d*height + k_r*dil_h)*width + k_c*dil_w;
633 p.biasvec_ = &biasvec;
634 p.reluslope_ = &reluslope;
635 p.activ_ = p.reluslope_->empty() ? activ : 0;
637 parallel_for_(Range(0, nstripes), p, nstripes);
640 virtual void operator ()(const Range &r0) const CV_OVERRIDE
642 const int valign = ConvolutionLayerImpl::VEC_ALIGN;
643 int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
644 bool isConv2D = input_->dims == 4;
646 int outW = output_->size[output_->dims - 1];
647 int outH = output_->size[output_->dims - 2];
648 int outCn = output_->size[1]/ngroups;
650 int depth = !isConv2D? input_->size[2] : 1;
651 int height = input_->size[input_->dims - 2];
652 int width = input_->size[input_->dims - 1];
653 int inpCn = input_->size[1]/ngroups;
655 const int nstripes = nstripes_;
657 int kernel_d = !isConv2D? kernel_size[0] : 1;
658 int kernel_h = kernel_size[kernel_size.size() - 2];
659 int kernel_w = kernel_size.back();
660 int karea = kernel_w*kernel_h*kernel_d;
662 int pad_d = !isConv2D? pads_begin[0] : 0;
663 int pad_t = pads_begin[pads_begin.size() - 2];
664 int pad_l = pads_begin.back();
666 int stride_d = !isConv2D? strides[0] : 0;
667 int stride_h = strides[strides.size() - 2];
668 int stride_w = strides.back();
670 int dilation_d = !isConv2D? dilations[0] : 1;
671 int dilation_h = dilations[dilations.size() - 2];
672 int dilation_w = dilations.back();
675 size_t inpPlaneSize = input_->total(2);
676 size_t outPlaneSize = output_->total(2);
679 int stripesPerSample;
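// Partition the work into stripes: with enough stripes each sample's output plane is
// split across several of them, otherwise a stripe covers one or more whole samples.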
683 if( nstripes >= batchSize*2 )
685 stripesPerSample = nstripes/batchSize;
686 stripeSize = alignSize((outPlaneSize + stripesPerSample - 1)/stripesPerSample, valign);
687 stripeSize = std::min(stripeSize, outPlaneSize);
691 stripesPerSample = 1;
692 int samplesPerStripe = std::max((batchSize + nstripes - 1)/nstripes, 1);
693 r.start *= samplesPerStripe;
694 r.end *= samplesPerStripe;
695 stripeSize = outPlaneSize;
698 const float* data_inp0_ = input_->ptr<float>();
699 const int* ofstab = &ofstab_[0];
700 const float* wptr_orig_ = weights_->ptr<float>();
701 size_t wstep = weights_->step1();
702 const float* biasptr_ = &biasvec_->at(0);
703 const float* reluptr_ = reluslope_->empty() ? 0 : &reluslope_->at(0);
704 float* data_out0_ = output_->ptr<float>();
705 size_t rowbufsz = (size_t)karea*BLK_SIZE_CN*BLK_SIZE;
706 AutoBuffer<float> rowbuf0_(rowbufsz + valign);
707 float* rowbuf0 = alignPtr(rowbuf0_.data(), (int)(valign*sizeof(float)));
709 // we clear the buffer once; ultimately, it lets us avoid
710 // tail processing after running the unrolled/vectorized loop.
711 // the main idea is to make sure that the tail (a.k.a. padding) of each row
712 // (i.e. the elements with indices between vsz=karea*ncn and vsz_a)
713 // does not contain NaNs or Infs. Because the padding in the weights
714 // matrix is explicitly initialized with 0's, we handle all other
715 // cases nicely, i.e. we can skip explicit re-initialization
716 // of the padding - we just retain elements from the previous iteration
717 // of the loop over channels (cn0).
718 memset(rowbuf0, 0, rowbufsz*sizeof(rowbuf0[0]) );
720 for( int stripe = r.start; stripe < r.end; stripe++ )
722 int subsampleIdx = stripe/stripesPerSample;
723 if( subsampleIdx >= batchSize )
725 int stripeStart = (int)((stripe - subsampleIdx*stripesPerSample)*stripeSize);
726 int stripeEnd = (int)std::min(stripeStart + stripeSize, outPlaneSize);
727 const float* data_inp0 = data_inp0_ + subsampleIdx*inpPlaneSize*inpCn;
728 float* data_out0 = data_out0_ + subsampleIdx*outPlaneSize*outCn;
729 int startOutCn = (subsampleIdx % ngroups)*outCn;
730 const float* wptr_orig = wptr_orig_ + wstep*startOutCn;
731 const float* biasptr = biasptr_ + startOutCn;
733 for( int cn0 = 0; cn0 < inpCn; cn0 += BLK_SIZE_CN )
735 int cn1 = std::min(cn0 + BLK_SIZE_CN, inpCn);
736 int ncn = cn1 - cn0, vsz = karea*ncn;
737 int vsz_a = (int)alignSize(vsz, valign);
738 const float* wptr = wptr_orig + cn0*karea;
739 // we apply [Channels][P]ReLU (if any) during the final pass only.
740 const float* relu = cn1 == inpCn && reluptr_ ? reluptr_ + startOutCn : 0;
742 for( int ofs0 = stripeStart; ofs0 < stripeEnd; ofs0 += BLK_SIZE )
744 int ofs, ofs1 = std::min(ofs0 + BLK_SIZE, stripeEnd);
746 int out_d = ofs0 / (outH * outW);
747 int out_i = (ofs0 - out_d * outH * outW) / outW;
748 int out_j = ofs0 % outW;
750 // do im2row for a part of input tensor
751 float* rowbuf = rowbuf0;
755 for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
757 int delta = std::min(ofs1 - ofs, outW - out_j);
758 int out_j1 = out_j + delta;
760 int in_i = out_i * stride_h - pad_t;
761 int in_j = out_j * stride_w - pad_l;
762 const float* imgptr = data_inp0 + (cn0*height + in_i)*width + in_j;
765 // do im2row for a part of input tensor
768 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
770 for( k = 0; k < vsz; k++ )
771 rowbuf[k] = imgptr[k*inpPlaneSize];
776 bool ok_i = 0 <= in_i && in_i < height - (kernel_h-1)*dilation_h;
777 int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
778 int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
780 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
782 // this condition should be true for most of the tensor elements, i.e.
783 // most of the time the kernel aperture is inside the tensor X-Y plane.
784 if( ok_i && out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
786 for( k = 0; k < vsz; k++ )
789 float v0 = imgptr[k1];
790 float v1 = imgptr[k1 + stride_w];
792 rowbuf[k+vsz_a] = v1;
801 int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
802 int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
804 // here some non-contiguous sub-row of the row will not be
805 // filled from the tensor; we need to make sure that the uncovered
806 // elements are explicitly set to 0's. The easiest way is to
807 // set all the elements to 0's before the loop.
808 memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
809 for( k = 0; k < ncn; k++ )
811 for( i = i0; i < i1; i++ )
813 for( j = j0; j < j1; j++ )
815 int imgofs = k*(width*height) + i*(dilation_h*width) + j*dilation_w;
816 rowbuf[(k*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
827 for( ofs = ofs0; ofs < ofs1; out_d += (out_i + 1) / outH, out_i = (out_i + 1) % outH, out_j = 0 )
829 int delta = std::min(ofs1 - ofs, outW - out_j);
830 int out_j1 = out_j + delta;
832 int in_d = out_d * stride_d - pad_d;
833 int in_i = out_i * stride_h - pad_t;
834 int in_j = out_j * stride_w - pad_l;
835 const float* imgptr = data_inp0 + (cn0*depth*height + in_d*height + in_i)*width + in_j;
838 int d0 = std::max(0, (-in_d + dilation_d - 1) / dilation_d);
839 int d1 = std::min(kernel_d, (depth - in_d + dilation_d - 1) / dilation_d);
841 int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
842 int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
844 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
846 int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
847 int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
849 // here some non-contiguous sub-row of the row will not be
850 // filled from the tensor; we need to make sure that the uncovered
851 // elements are explicitly set to 0's. The easiest way is to
852 // set all the elements to 0's before the loop.
853 memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
854 for( k = 0; k < ncn; k++ )
856 for ( d = d0; d < d1; d++)
858 for( i = i0; i < i1; i++ )
860 for( j = j0; j < j1; j++ )
862 int imgofs = k*(depth*width*height) + d*dilation_d*width*height + i*(dilation_h*width) + j*dilation_w;
863 rowbuf[(k*kernel_d*kernel_h + d*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
872 // now compute dot product of the weights
873 // and im2row-transformed part of the tensor
874 int bsz = ofs1 - ofs0;
875 #if CV_TRY_AVX512_SKX
876 /* AVX512 convolution requires an alignment of 16, and ROI is only there for larger vector sizes */
878 opt_AVX512_SKX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
879 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
884 opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
885 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
890 opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
891 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
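// Baseline (non-AVX) path: two output channels are produced per iteration; the SIMD
// branch below additionally processes four output positions at a time.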
894 for( int i = 0; i < outCn; i += 2 )
896 const float* wptr0 = wptr + i*wstep;
897 const float* wptr1 = wptr0 + wstep;
898 float* outptr0 = data_out0 + ofs0 + i*outPlaneSize;
899 float* outptr1 = outptr0 + outPlaneSize;
900 float bias0 = biasptr[i], bias1 = biasptr[i+1];
901 float r0 = 1.f, r1 = 1.f;
912 r0 = relu[i]; r1 = relu[i+1];
919 v_float32x4 vr0 = v_setall_f32(r0), vr1 = v_setall_f32(r1), z = v_setzero_f32();
921 for( ; j <= bsz - 4; j += 4 )
923 const float* rptr = rowbuf0 + j*vsz_a;
928 s0 = v_setall_f32(bias0);
929 s1 = v_setall_f32(bias1);
933 s0 = v_load(outptr0 + j);
934 s1 = v_load(outptr1 + j);
937 v_float32x4 vs00 = v_setzero_f32(), vs01 = v_setzero_f32(),
938 vs02 = v_setzero_f32(), vs03 = v_setzero_f32(),
939 vs10 = v_setzero_f32(), vs11 = v_setzero_f32(),
940 vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
941 for( k = 0; k < vsz; k += 4, rptr += 4 )
943 v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k);
944 v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a),
945 r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3);
957 s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
958 s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
961 s0 = v_select(s0 > z, s0, s0*vr0);
962 s1 = v_select(s1 > z, s1, s1*vr1);
965 v_store(outptr0 + j, s0);
966 v_store(outptr1 + j, s1);
969 for( ; j < bsz; j++ )
971 const float* rptr = rowbuf0 + j*vsz_a;
985 for( k = 0; k < vsz; k++ )
993 s00 = s00 > 0.f ? s00 : s00*r0;
994 s10 = s10 > 0.f ? s10 : s10*r1;
1005 activ_->forwardSlice(data_out0 + stripeStart, data_out0 + stripeStart,
1006 (int)(stripeEnd - stripeStart),
1007 outPlaneSize, startOutCn, startOutCn + outCn);
1013 bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
1015 std::vector<UMat> inputs;
1016 std::vector<UMat> outputs;
1018 bool use_half = (inps.depth() == CV_16S);
1019 inps.getUMatVector(inputs);
1020 outs.getUMatVector(outputs);
1022 CV_Assert(outputs.size() == 1);
1023 for (int i = 0; i < inputs.size(); ++i)
1024 CV_Assert(inputs[i].u != outputs[0].u);
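// Lazily upload the weights (and bias) blobs to UMat on the first OpenCL forward pass.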
1026 if (umat_blobs.empty())
1028 size_t n = blobs.size();
1029 umat_blobs.resize(n);
1030 for (size_t i = 0; i < n; i++)
1032 blobs[i].copyTo(umat_blobs[i]);
1036 if (convolutionOp.empty())
1038 OCL4DNNConvConfig config;
1039 config.in_shape = shape(inputs[0]);
1040 config.out_shape = shape(outputs[0]);
1041 config.kernel = kernel;
1043 config.stride = stride;
1044 config.dilation = dilation;
1045 config.group = inputs[0].size[1] / umat_blobs[0].size[1];
1046 config.bias_term = hasBias();
1047 config.use_half = use_half;
1049 convolutionOp = Ptr<OCL4DNNConvSpatial<float> >(new OCL4DNNConvSpatial<float>(config));
1052 int outCn = umat_blobs[0].size[0];
1057 Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1058 if( !activ_relu.empty() )
1060 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1061 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU;
1064 Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
1065 if( !activ_relu6.empty() )
1067 reluslope.resize(2);
1068 reluslope[0] = activ_relu6->minValue;
1069 reluslope[1] = activ_relu6->maxValue;
1070 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU6;
1073 Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1074 if( !activ_chprelu.empty() )
1076 const Mat& m = activ_chprelu->blobs[0];
1077 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1078 const float* mdata = m.ptr<float>();
1079 reluslope.resize(outCn+2);
1080 std::copy(mdata, mdata + outCn, reluslope.begin());
1081 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1082 activType = OCL4DNN_CONV_FUSED_ACTIV_PRELU;
1088 weightsMat.copyTo(umat_blobs[0]);
1089 fusedWeights = false;
1093 if ( umat_blobs.size() < 2 )
1094 umat_blobs.resize(2);
1095 umat_blobs[1] = UMat(biasvec, true);
1096 convolutionOp->setBias(true);
1102 if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU )
1104 CV_Assert(!reluslope.empty());
1105 convolutionOp->setActivReLU(true, reluslope[0]);
1107 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_PRELU)
1109 CV_Assert(!reluslope.empty());
1110 convolutionOp->setActivPReLU(true, reluslope);
1112 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_POWER)
1114 convolutionOp->setActivPower(true, power);
1116 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_TANH)
1118 convolutionOp->setActivTanh(true);
1120 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU6)
1122 convolutionOp->setActivReLU6(true, reluslope[0], reluslope[1]);
1126 convolutionOp->setActivReLU(false, 0);
1127 convolutionOp->setActivPReLU(false, reluslope);
1128 convolutionOp->setActivPower(false, 1.f);
1129 convolutionOp->setActivTanh(false);
1130 convolutionOp->setActivReLU6(false, 0, 0);
1135 UMat& inpMat = inputs[0];
1136 UMat& outMat = outputs[0];
1137 int batch_size = inpMat.size[0];
1139 return convolutionOp->Forward(inpMat,
1140 inputs.size() == 2 ? inputs[1] : UMat(),
1142 umat_blobs.size() > 1 ? umat_blobs[1] : UMat(),
1148 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1150 CV_TRACE_FUNCTION();
1151 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1153 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1154 forward_ocl(inputs_arr, outputs_arr, internals_arr))
1156 if (inputs_arr.depth() == CV_16S)
1158 forward_fallback(inputs_arr, outputs_arr, internals_arr);
1162 std::vector<Mat> inputs, outputs;
1163 inputs_arr.getMatVector(inputs);
1164 outputs_arr.getMatVector(outputs);
1166 /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
1167 name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
1168 kernel.width, kernel.height, pad.width, pad.height,
1169 stride.width, stride.height, dilation.width, dilation.height);*/
1170 CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
1171 outputs.size() == 1, inputs[0].data != outputs[0].data);
1173 int ngroups = inputs[0].size[1]/blobs[0].size[1];
1174 CV_Assert(outputs[0].size[1] % ngroups == 0);
1175 int outCn = blobs[0].size[0];
1180 Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1181 if( !activ_relu.empty() )
1183 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1186 Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1187 if( !activ_chprelu.empty() )
1189 const Mat& m = activ_chprelu->blobs[0];
1190 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1191 const float* mdata = m.ptr<float>();
1192 reluslope.resize(outCn+2);
1193 std::copy(mdata, mdata + outCn, reluslope.begin());
1194 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1198 int nstripes = std::max(getNumThreads(), 1);
1200 ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
1201 kernel_size, strides, pads_begin, pads_end, dilations, activ.get(), ngroups, nstripes);
1204 virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
1205 const std::vector<MatShape> &outputs) const CV_OVERRIDE
1207 CV_Assert(inputs.size() == outputs.size());
1210 int karea = std::accumulate(kernel_size.begin(), kernel_size.end(), 1, std::multiplies<size_t>());
1211 for (int i = 0; i < inputs.size(); i++)
1213 flops += total(outputs[i])*(CV_BIG_INT(2)*karea*inputs[i][1] + 1);
1220 class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
1223 Mat weightsMat, biasesMat;
1227 DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}
1229 MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
1231 int inpCn = inpShape[1];
1232 int inpH = inpShape[2];
1233 int inpW = inpShape[3];
1234 int outCn = outShape[1];
1235 int ngroups = inpCn / blobs[0].size[0];
1236 int outGroupCn = outCn / ngroups;
1237 int ksize = outGroupCn * kernel.height * kernel.width;
1238 return shape(ksize, inpH * inpW);
1241 virtual bool supportBackend(int backendId) CV_OVERRIDE
1243 #ifdef HAVE_INF_ENGINE
1244 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or IODHW layout
1245 const int group = numOutput / outGroupCn;
1247 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
1249 if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
1253 if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
1255 if (padMode.empty())
1257 if (preferableTarget != DNN_TARGET_CPU && group != 1)
1259 for (int i = 0; i < adjust_pads.size(); i++) {
1260 if (adjust_pads[i] && pads_begin[i])
1264 for (int i = 0; i < adjust_pads.size(); i++) {
1265 if (pads_end[i] < adjust_pads[i])
1270 else if (padMode == "SAME")
1272 for (int i = 0; i < adjust_pads.size(); i++) {
1273 if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
1278 else if (padMode == "VALID")
1284 return preferableTarget == DNN_TARGET_CPU;
1286 if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
1287 return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
1291 #endif // HAVE_INF_ENGINE
1292 return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
1295 bool getMemoryShapes(const std::vector<MatShape> &inputs,
1296 const int requiredOutputs,
1297 std::vector<MatShape> &outputs,
1298 std::vector<MatShape> &internals) const CV_OVERRIDE
1300 CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
1301 CV_Assert(inputs.size() != 0);
1303 int outCn = numOutput;
1304 std::vector<int> outShape;
1305 outShape.push_back(inputs[0][0]); // batch
1306 outShape.push_back(outCn);
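// Transposed-convolution output size; with explicit padding:
//   out = stride*(in - 1) + kernel - pad_begin - pad_end + adjust_pad
// VALID keeps the full kernel extent, SAME keeps stride*(in - 1) + 1.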
1307 if (padMode.empty())
1309 for (int i = 0; i < kernel_size.size(); i++)
1310 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] - pads_begin[i] - pads_end[i] + adjust_pads[i]);
1312 else if (padMode == "VALID")
1314 for (int i = 0; i < kernel_size.size(); i++)
1315 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] + adjust_pads[i]);
1317 else if (padMode == "SAME")
1319 for (int i = 0; i < kernel_size.size(); i++)
1320 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + 1 + adjust_pads[i]);
1323 CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
1325 CV_Assert(outCn % blobs[0].size[1] == 0);
1326 int ngroups = outCn / blobs[0].size[1];
1328 int inpCn = inputs[0][1];
1329 CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
1330 CV_Assert(blobs[0].size[0] == inpCn);
1332 outputs.resize(1, outShape);
1335 internals.push_back(computeColRowShape(inputs[0], outputs[0]));
1340 void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
1342 BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
1344 std::vector<Mat> inputs, outputs;
1345 inputs_arr.getMatVector(inputs);
1346 outputs_arr.getMatVector(outputs);
1348 std::vector<int> inpShape;
1349 std::vector<int> outShape;
1350 for (int i = 2; i < inputs[0].dims; i++) {
1351 inpShape.push_back(inputs[0].size[i]);
1352 outShape.push_back(outputs[0].size[i]);
1354 getConvPoolPaddings(outShape, kernel_size, strides, padMode, pads_begin, pads_end);
1355 if (pads_begin.size() == 2) {
1356 for (int i = 0; i < pads_begin.size(); i++) {
1357 if (pads_begin[i] != pads_end[i])
1358 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in deconvolution layer");
1360 pad = Size(pads_begin[1], pads_begin[0]);
1363 weightsMultipliers.assign(numOutput, 1.0);
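// Transpose the [inpCn x outGroupCn*kh*kw] weight matrix once so that each group's
// slice can be fed directly into the GEMM in forward().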
1364 if (weightsMat.empty())
1366 transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
1367 biasesMat = hasBias() ? blobs[1].reshape(1, numOutput)
1368 : Mat::zeros(numOutput, 1, CV_32F);
1372 void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
1374 Mat w = w_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(w_.at<float>(0))) : w_;
1375 Mat b = b_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(b_.at<float>(0))) : b_;
1377 CV_Assert_N(!weightsMat.empty(),
1378 w.empty() || numOutput == w.total(),
1379 b.empty() || numOutput == b.total());
1383 transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
1384 weightsMat = weightsMat.reshape(1, numOutput);
1385 for (int i = 0; i < numOutput; ++i)
1387 double wi = w.at<float>(i);
1388 weightsMultipliers[i] *= wi;
1389 cv::multiply(weightsMat.row(i), weightsMultipliers[i], weightsMat.row(i));
1390 biasesMat.at<float>(i) *= wi;
1392 weightsMat = weightsMat.reshape(1, weightsMat.total() / blobs[0].size[0]);
1397 cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
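// Parallel GEMM helper: the columns of B are split into stripes and each stripe of
// C = A*B is computed independently, with AVX/AVX2/AVX-512 fast paths when available.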
1401 class MatMulInvoker : public ParallelLoopBody
1404 MatMulInvoker(const Mat& a, const Mat& b, Mat& c, int nstripes)
1409 nstripes_ = nstripes;
1410 useAVX = checkHardwareSupport(CPU_AVX);
1411 useAVX2 = checkHardwareSupport(CPU_AVX2);
1412 useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
1415 void operator()(const Range& range_) const CV_OVERRIDE
1417 int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
1418 Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
1419 int mmax = a_->rows;
1420 int nmax = range.end - range.start;
1421 int kmax = a_->cols;
1423 const float* aptr = a_->ptr<float>();
1424 const float* bptr = b_->ptr<float>() + range.start;
1425 float* cptr = c_->ptr<float>() + range.start;
1426 size_t astep = a_->step1();
1427 size_t bstep = b_->step1();
1428 size_t cstep = c_->step1();
1430 #if CV_TRY_AVX512_SKX
1432 opt_AVX512_SKX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1437 opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1442 opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1445 for( m = 0; m < mmax; m += 2 )
1447 float* dst0 = cptr + cstep*m;
1448 float* dst1 = cptr + cstep*std::min(m+1, mmax-1);
1449 const float* aptr0 = aptr + astep*m;
1450 const float* aptr1 = aptr + astep*std::min(m+1, mmax-1);
1452 for( n = 0; n < nmax; n++ )
1458 for( k = 0; k < kmax; k += 4 )
1460 float alpha00 = aptr0[k];
1461 float alpha01 = aptr1[k];
1462 float alpha10 = 0.f, alpha11 = 0.f;
1463 float alpha20 = 0.f, alpha21 = 0.f;
1464 float alpha30 = 0.f, alpha31 = 0.f;
1465 const float* bptr0 = bptr + k*bstep;
1466 const float* bptr1 = bptr0;
1467 const float* bptr2 = bptr0;
1468 const float* bptr3 = bptr0;
1472 alpha10 = aptr0[k+1];
1473 alpha11 = aptr1[k+1];
1474 bptr1 = bptr0 + bstep;
1477 alpha20 = aptr0[k+2];
1478 alpha21 = aptr1[k+2];
1479 bptr2 = bptr1 + bstep;
1482 alpha30 = aptr0[k+3];
1483 alpha31 = aptr1[k+3];
1484 bptr3 = bptr2 + bstep;
1491 v_float32x4 a00 = v_setall_f32(alpha00);
1492 v_float32x4 a01 = v_setall_f32(alpha01);
1493 v_float32x4 a10 = v_setall_f32(alpha10);
1494 v_float32x4 a11 = v_setall_f32(alpha11);
1495 v_float32x4 a20 = v_setall_f32(alpha20);
1496 v_float32x4 a21 = v_setall_f32(alpha21);
1497 v_float32x4 a30 = v_setall_f32(alpha30);
1498 v_float32x4 a31 = v_setall_f32(alpha31);
1500 for( ; n <= nmax - 4; n += 4 )
1502 v_float32x4 b0 = v_load(bptr0 + n);
1503 v_float32x4 b1 = v_load(bptr1 + n);
1504 v_float32x4 b2 = v_load(bptr2 + n);
1505 v_float32x4 b3 = v_load(bptr3 + n);
1506 v_float32x4 d0 = v_load(dst0 + n);
1507 v_float32x4 d1 = v_load(dst1 + n);
1516 v_store(dst0 + n, d0);
1517 v_store(dst1 + n, d1);
1521 for( ; n < nmax; n++ )
1523 float b0 = bptr0[n], b1 = bptr1[n];
1524 float b2 = bptr2[n], b3 = bptr3[n];
1525 float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
1526 float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
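// col2im helper: scatters the column buffer produced by the GEMM back into the output
// image, accumulating overlapping contributions and adding the per-channel bias.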
1542 class Col2ImInvoker : public cv::ParallelLoopBody
1545 const float* data_col;
1546 const float* biasvec;
1547 int channels, height, width;
1548 int kernel_h, kernel_w;
1550 int stride_h, stride_w;
1552 int height_col, width_col;
1557 : data_col(0), biasvec(0), channels(0), height(0), width(0),
1558 kernel_h(0), kernel_w(0), pad_h(0), pad_w(0), stride_h(0), stride_w(0), data_im(0),
1559 height_col(0), width_col(0), nstripes(0), is1x1(0)
1562 static void run(const float* data_col,
1563 int channels, int height, int width,
1564 int kernel_h, int kernel_w,
1565 int pad_h, int pad_w,
1566 int stride_h, int stride_w,
1567 int height_col, int width_col,
1569 const float* biasvec,
1572 const int nstripes = getNumThreads();
1575 t.data_col = data_col;
1576 t.data_im = data_im;
1577 t.channels = channels; t.height = height; t.width = width;
1578 t.kernel_h = kernel_h; t.kernel_w = kernel_w;
1579 t.pad_h = pad_h; t.pad_w = pad_w;
1580 t.stride_h = stride_h; t.stride_w = stride_w;
1581 t.height_col = height_col;
1582 t.width_col = width_col;
1583 t.nstripes = nstripes;
1585 t.biasvec = biasvec;
1587 parallel_for_(Range(0, nstripes), t, nstripes);
1590 virtual void operator ()(const Range &r) const CV_OVERRIDE
1592 const float* data_col_ = data_col;
1593 float* data_im_ = data_im;
1594 int coeff_h = (1 - stride_h * kernel_w * height_col) * width_col;
1595 int coeff_w = (1 - stride_w * height_col * width_col);
1596 size_t total = (size_t)channels * height * width;
1597 size_t stripeSize = (total + nstripes - 1)/nstripes;
1598 size_t startIndex = r.start*stripeSize;
1599 size_t endIndex = std::min(r.end*stripeSize, total);
1600 int w = (int)(startIndex % width + pad_w);
1601 int h = (int)((startIndex / width) % height + pad_h);
1602 int c = (int)(startIndex / (width * height));
1603 int h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
1604 int h_col_end = std::min(h / stride_h + 1, height_col);
1605 int plane_size_col = height_col * width_col;
1606 int offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
1607 bool is1x1_ = is1x1;
1608 const float* biasvec_ = biasvec;
1610 for (size_t index = startIndex; index < endIndex; index++)
1612 // compute the start and end of the output
1613 int w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
1614 int w_col_end = std::min(w / stride_w + 1, width_col);
1618 val = data_im_[index];
1622 for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
1623 for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
1624 val += data_col_[offset + h_col * coeff_h + w_col * coeff_w];
1628 data_im_[index] = val + biasvec_[c];
1630 offset += plane_size_col;
1631 if( ++w >= width + pad_w )
1633 w = (int)((index + 1)% width + pad_w);
1634 h = (int)(((index + 1) / width) % height + pad_h);
1635 c = (int)((index + 1) / (width * height));
1636 h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
1637 h_col_end = std::min(h / stride_h + 1, height_col);
1638 offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
1645 bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
1647 std::vector<UMat> inputs;
1648 std::vector<UMat> outputs;
1649 std::vector<UMat> internals;
1651 if (inputs_.depth() == CV_16S)
1654 inputs_.getUMatVector(inputs);
1655 outputs_.getUMatVector(outputs);
1656 internals_.getUMatVector(internals);
1658 int outCn = numOutput;
1659 int inpCn = inputs[0].size[1];
1664 if (umat_weights.empty())
1667 weightsMat.copyTo(umat_weights);
1669 transpose(blobs[0].reshape(1, inpCn), umat_weights);
1672 biasesMat.copyTo(umat_biases);
1676 blobs[1].reshape(1, outCn).copyTo(umat_biases);
1678 umat_biases = UMat::zeros(outCn, 1, CV_32F);
1682 String buildopt = format("-DT=%s ", ocl::typeToStr(inputs[0].type()));
1683 buildopt += format("-DPAD_H=%d -DPAD_W=%d -DKERNEL_H=%d -DKERNEL_W=%d -DSTRIDE_H=%d -DSTRIDE_W=%d ",
1684 pad.height, pad.width, kernel.height, kernel.width, stride.height, stride.width);
1686 for (size_t ii = 0; ii < outputs.size(); ii++)
1688 int ngroups = outCn / blobs[0].size[1];
1689 int inpGroupCn = inpCn / ngroups;
1690 int outGroupCn = blobs[0].size[1];
1691 const UMat& inp = inputs[ii];
1692 UMat& out = outputs[ii];
1693 int numImg = inp.size[0];
1694 int inpH = inp.size[2], inpW = inp.size[3];
1695 int outH = out.size[2], outW = out.size[3];
1697 MatShape inpshape = shape(numImg*inpCn, inpH*inpW);
1698 MatShape outshape = shape(numImg*outCn, outH*outW);
1699 UMat convBlob = inputs[ii].reshape(1, inpshape.size(), &inpshape[0]);
1700 UMat decnBlob = out.reshape(1, outshape.size(), &outshape[0]);
1701 int rows = internals[0].rows / ngroups;
1703 for (int n = 0; n < numImg; n++)
1705 for (int g = 0; g < ngroups; g++)
1707 UMat colMat = internals[0].rowRange(_Range(g * rows, rows));
1708 UMat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
1709 UMat wghtMat = umat_weights.colRange(_Range(g * inpGroupCn, inpGroupCn));
1710 gemm(wghtMat, convMat, 1, noArray(), 0, colMat, 0);
1713 for (int g = 0; g < ngroups; g++)
1715 int total = outGroupCn * decnBlob.cols;
1717 int height_col = inpH;
1718 int width_col = inpW;
1719 int coeff_h = (1 - stride.height * kernel.width * height_col) * width_col;
1720 int coeff_w = (1 - stride.width * height_col * width_col);
1722 ocl::Kernel k("col2im", ocl::dnn::col2im_oclsrc, buildopt);
1723 k.set(index++, total);
1724 k.set(index++, ocl::KernelArg::PtrReadOnly(internals[0]));
1725 k.set(index++, (int)(g * rows * internals[0].cols));
1726 k.set(index++, outGroupCn);
1727 k.set(index++, outH);
1728 k.set(index++, outW);
1729 k.set(index++, height_col);
1730 k.set(index++, width_col);
1731 k.set(index++, coeff_h);
1732 k.set(index++, coeff_w);
1733 k.set(index++, ocl::KernelArg::PtrReadOnly(umat_biases));
1734 k.set(index++, (int)(g * outGroupCn * umat_biases.cols));
1735 k.set(index++, ocl::KernelArg::PtrWriteOnly(decnBlob));
1736 k.set(index++, (int)((g + n * ngroups) * outGroupCn * decnBlob.cols));
1738 size_t global[] = { (size_t)total };
1739 bool ret = k.run(1, global, NULL, false);
1750 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1752 CV_TRACE_FUNCTION();
1753 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1755 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1756 forward_ocl(inputs_arr, outputs_arr, internals_arr));
1758 if (inputs_arr.depth() == CV_16S)
1760 forward_fallback(inputs_arr, outputs_arr, internals_arr);
1764 std::vector<Mat> inputs, outputs, internals;
1765 inputs_arr.getMatVector(inputs);
1766 outputs_arr.getMatVector(outputs);
1767 internals_arr.getMatVector(internals);
1769 int outCn = numOutput;
1770 int inpCn = inputs[0].size[1];
1771 bool is1x1flag = is1x1();
1772 int nstripes = getNumThreads();
1774 if( weightsMat.empty() )
1776 transpose(blobs[0].reshape(1, inpCn), weightsMat);
1777 biasesMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat::zeros(outCn, 1, CV_32F);
1780 for (size_t ii = 0; ii < outputs.size(); ii++)
1782 int ngroups = outCn / blobs[0].size[1];
1783 int inpGroupCn = inpCn / ngroups;
1784 int outGroupCn = blobs[0].size[1];
1785 const Mat& inp = inputs[ii];
1786 Mat& out = outputs[ii];
1787 int numImg = inp.size[0];
1788 int inpH = inp.size[2], inpW = inp.size[3];
1789 int outH = out.size[2], outW = out.size[3];
1791 Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
1792 Mat decnBlob = out.reshape(1, numImg*outCn);
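// Per image and per group, deconvolution is a GEMM of the transposed weights with the
// input channels followed by col2im back into the output tensor.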
1794 for (int n = 0; n < numImg; n++)
1796 for (int g = 0; g < ngroups; g++)
1798 Mat dstMat = decnBlob.rowRange(_Range((g + n * ngroups) * outGroupCn, outGroupCn));
1799 Mat &colMat = is1x1flag ? dstMat : internals[0];
1801 Mat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
1802 Mat wghtMat = weightsMat.colRange(_Range(g * inpGroupCn, inpGroupCn));
1803 Mat curBiasMat = biasesMat.rowRange(_Range(g * outGroupCn, outGroupCn));
1805 //gemm(wghtMat, convMat, 1, colMat, 0, colMat, 0);
1806 MatMulInvoker mminvoker(wghtMat, convMat, colMat, nstripes);
1807 parallel_for_(Range(0, nstripes), mminvoker, nstripes);
1809 Col2ImInvoker::run(colMat.ptr<float>(), outGroupCn, outH, outW,
1810 kernel.height, kernel.width, pad.height, pad.width,
1811 stride.height, stride.width, inpH, inpW, dstMat.ptr<float>(),
1812 curBiasMat.ptr<float>(), is1x1flag);
1818 virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
1821 Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
1823 int inW, inH, inC, inN;
1824 getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
1825 const int outGroupCn = blobs[0].size[1];
1826 const int group = numOutput / outGroupCn;
1827 const int inpGroupCn = blobs[0].size[0] / group;
1829 Halide::Var x("x"), y("y"), c("c"), n("n");
1830 Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
1831 Halide::Func padded_input(name + "_constant_exterior");
1832 auto weights = wrapToHalideBuffer(blobs[0]);
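// The transposed convolution is expressed as an ordinary convolution over the input
// zero-stuffed by the stride (dilated_input), with the kernel indexed in reverse.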
1834 Halide::Func dilated_input("dilated_input");
1835 dilated_input(x, y, c, n) = 0.0f;
1836 Halide::RDom r1(0, inW, 0, inH);
1837 dilated_input(r1.x * stride.width, r1.y * stride.height, c, n) =
1838 inputBuffer(r1.x, r1.y, c, n);
1839 dilated_input.compute_root();
1841 Halide::Func bounded =
1842 Halide::BoundaryConditions::constant_exterior(dilated_input, 0,
1843 0, (inW - 1) * stride.width + 1,
1844 0, (inH - 1) * stride.height + 1,
1846 padded_input(x, y, c, n) = bounded(x, y, c, n);
1848 Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
1849 Halide::Expr kx = x + pad.width - r.x;
1850 Halide::Expr ky = y + pad.height - r.y;
1851 Halide::Expr kInC = r.z;
1852 Halide::Expr kOutC = c;
1853 for (int i = 1; i < group; ++i)
1855 kInC = select(c < outGroupCn * i, kInC, inpGroupCn * i + r.z);
1856 kOutC = select(c < outGroupCn * i, kOutC, c - outGroupCn * i);
1858 Halide::Expr topExpr = sum(padded_input(kx, ky, kInC, n) *
1859 weights(r.x, r.y, kOutC, kInC));
1862 auto bias = wrapToHalideBuffer(blobs[1], {numOutput});
1865 top(x, y, c, n) = topExpr;
1866 return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
1867 #endif // HAVE_HALIDE
1868 return Ptr<BackendNode>();
1871 #ifdef HAVE_INF_ENGINE
1872 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
1874 InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
1875 InferenceEngine::Layout::OIHW;
1877 auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
1880 ieWeights = InferenceEngine::make_shared_blob<float>({
1881 InferenceEngine::Precision::FP32,
1882 ieWeights->getTensorDesc().getDims(), layout
1884 ieWeights->allocate();
1886 int inpCn = blobs[0].size[0];
1887 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
1888 transpose(weightsMat, newWeights);
1891 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or IODHW layout
1892 const int group = numOutput / outGroupCn;
1894 InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
1896 ieLayer.setKernel(kernel_size);
1897 ieLayer.setStrides(strides);
1898 ieLayer.setDilation(dilations);
1899 ieLayer.setPaddingsBegin(pads_begin);
1901 if (padMode.empty())
1903 std::vector<size_t> paddings_end;
1904 for (int i = 0; i < pads_end.size(); i++) {
1905 paddings_end.push_back(pads_end[i] - adjust_pads[i]);
1907 ieLayer.setPaddingsEnd(paddings_end);
1909 else if (padMode == "SAME")
1911 std::vector<size_t> paddings_end;
1912 for (int i = 0; i < pads_begin.size(); i++) {
1913 paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
1915 ieLayer.setPaddingsEnd(paddings_end);
1917 ieLayer.setGroup((size_t)group);
1918 ieLayer.setOutDepth((size_t)numOutput);
1920 InferenceEngine::Builder::Layer l = ieLayer;
1921 addConstantData("weights", ieWeights, l);
1923 addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
1924 return Ptr<BackendNode>(new InfEngineBackendNode(l));
1926 #endif // HAVE_INF_ENGINE
1928 virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
1929 const std::vector<MatShape> &outputs) const CV_OVERRIDE
1931 CV_Assert(inputs.size() == outputs.size());
1934 int outChannels = blobs[0].size[0];
1935 size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
1936 1, std::multiplies<size_t>());
1938 for (int i = 0; i < inputs.size(); i++)
1940 flops += CV_BIG_INT(2)*outChannels*karea*total(inputs[i]);
1947 Ptr<BaseConvolutionLayer> ConvolutionLayer::create(const LayerParams &params)
1949 Ptr<ConvolutionLayerImpl> l(new ConvolutionLayerImpl(params));
1953 Ptr<BaseConvolutionLayer> DeconvolutionLayer::create(const LayerParams &params)
1955 return Ptr<BaseConvolutionLayer>(new DeConvolutionLayerImpl(params));
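// A minimal usage sketch (illustrative only, not part of the library): layers in this file
// are normally instantiated by the model importers from LayerParams. The parameter names
// below ("num_output", "group", "kernel_size", "stride", "pad") are the ones read here and
// by getConvolutionKernelParams() in layers_common.cpp; the blob layout assumptions
// (OIHW weights, optional 1-D bias) follow the asserts in finalize()/getMemoryShapes() above.
//
//     LayerParams lp;
//     lp.set("num_output", 64);        // output channels
//     lp.set("group", 1);
//     lp.set("kernel_size", 3);
//     lp.set("stride", 1);
//     lp.set("pad", 1);
//     lp.blobs.push_back(weights);     // CV_32F, shape 64 x inpCn x 3 x 3 (OIHW)
//     lp.blobs.push_back(bias);        // optional, 64 elements
//     Ptr<BaseConvolutionLayer> conv = ConvolutionLayer::create(lp);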