/*M///////////////////////////////////////////////////////////////////////////////////////
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
// For Open Source Computer Vision Library
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "opencv2/core/hal/hal.hpp"
#include "opencv2/core/hal/intrin.hpp"

#include "opencl_kernels_dnn.hpp"
using namespace cv::dnn::ocl4dnn;
class BaseConvolutionLayerImpl : public ConvolutionLayer
    bool fusedWeights, fusedBias;
    std::vector<double> weightsMultipliers;
    BaseConvolutionLayerImpl(const LayerParams &params)
        setParamsFrom(params);
        getConvolutionKernelParams(params, kernel_size, pads_begin, pads_end, strides, dilations, padMode);
        numOutput = params.get<int>("num_output");
        int ngroups = params.get<int>("group", 1);
        CV_Assert(numOutput % ngroups == 0);
        if (kernel_size.size() == 2) {
            kernel = Size(kernel_size[1], kernel_size[0]);
            stride = Size(strides[1], strides[0]);
            for (int i = 0; i < pads_begin.size(); i++) {
                if (pads_begin[i] != pads_end[i])
                    CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
            pad = Size(pads_begin[1], pads_begin[0]);
            dilation = Size(dilations[1], dilations[0]);
            adjust_pads.push_back(params.get<int>("adj_h", 0));
            adjust_pads.push_back(params.get<int>("adj_w", 0));
            adjustPad.height = adjust_pads[0];
            adjustPad.width = adjust_pads[1];
            CV_Assert(adjustPad.width < stride.width &&
                      adjustPad.height < stride.height);
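            // Note: adj_h/adj_w ("output padding") only come into play when the deconvolution
            // output size is computed later on; they are required to stay strictly below the
            // stride (see the assert above), since larger values would presumably make the
            // requested output shape ambiguous.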
    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        CV_Assert(inputs.size() > 0);
        CV_Assert(blobs.size() == 1 || blobs.size() == 2);
        CV_Assert(inputs[0].dims == outputs[0].dims);
        CV_Assert(blobs[0].dims == kernel_size.size() + 2);
        for (int i = 0; i < kernel_size.size(); i++) {
            CV_Assert(blobs[0].size[i + 2] == kernel_size[i]);
        const Mat &input = inputs[0];
        CV_Assert((input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
        for (size_t i = 0; i < inputs.size(); i++)
            CV_Assert(inputs[i].type() == input.type());
            CV_Assert((inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
            for (int j = 0; j < inputs[i].dims; j++) {
                CV_Assert(inputs[i].size[j] == input.size[j]);
        std::vector<int> inpShape;
        std::vector<int> outShape;
        for (int i = 2; i < inputs[0].dims; i++) {
            inpShape.push_back(inputs[0].size[i]);
            outShape.push_back(outputs[0].size[i]);
        getConvPoolPaddings(inpShape, kernel_size, strides, padMode, pads_begin, pads_end);
        if (pads_begin.size() == 2) {
            for (int i = 0; i < pads_begin.size(); i++) {
                if (pads_begin[i] != pads_end[i])
                    CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
            pad = Size(pads_begin[1], pads_begin[0]);
        fusedWeights = false;
        return blobs.size() >= 2;
    virtual MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const = 0;
        return (kernel.height == 1 && kernel.width == 1) &&
               (stride.height == 1 && stride.width == 1) &&
               (dilation.height == 1 && dilation.width == 1);
    virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
        top->getScaleShift(w, b);
        if (!w.empty() || !b.empty())
            fusedWeights = fusedWeights || !w.empty();
            fusedBias = fusedBias || (hasBias() && !w.empty()) || !b.empty();
    virtual void fuseWeights(const Mat& w_, const Mat& b_) = 0;
    virtual void applyHalideScheduler(Ptr<BackendNode>& node,
                                      const std::vector<Mat*> &inputs,
                                      const std::vector<Mat> &outputs,
                                      int targetId) const CV_OVERRIDE
        if (targetId != DNN_TARGET_CPU)
            Layer::applyHalideScheduler(node, inputs, outputs, targetId);
        Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"), yi("yi"), yo("yo"), co("co"), ci("ci");
        Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
        Halide::Func& padded_input = node.dynamicCast<HalideBackendNode>()->funcs[0];
        int outW, outH, outC, outN;
        getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
        if (outW == 1 || outH <= 2)
        if (is1x1() || outC <= 16)
               .vectorize(x, outW >= 16 ? 16 : outW);
               .split(c, co, ci, 16)
               .fuse(yo, co, tile).fuse(n, tile, tile)
               .vectorize(x, outW >= 16 ? 16 : outW);
        padded_input.compute_at(top, yi);
#endif  // HAVE_HALIDE
#define IS_POWER_LAYER(layer) \
    (!layer.empty() && !layer->type.compare("Power"))
// TODO: simultaneous convolution and bias addition for cache optimization
class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
    enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
    std::vector<float> biasvec;
    std::vector<float> reluslope;
    Ptr<ActivationLayer> activ;
    Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
    std::vector<UMat> umat_blobs;
    ocl4dnnFusedActiv_t activType;
    ConvolutionLayerImpl(const LayerParams &params) : BaseConvolutionLayerImpl(params)
        activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
    MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
        Size out(outShape[3], outShape[2]);
        int inpGroupCn = blobs[0].size[1];
        int ksize = inpGroupCn * kernel.height * kernel.width;
        return shape(out.area(), ksize);
    virtual bool supportBackend(int backendId) CV_OVERRIDE
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
            if (kernel_size.size() == 3)
                return preferableTarget == DNN_TARGET_CPU;
            return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
            return (kernel_size.size() == 2) && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
        CV_Assert(blobs.size() != 0);
        CV_Assert(!hasBias() || blobs[1].total() == (size_t)blobs[0].size[0]);
        CV_Assert(inputs.size() == (size_t)1);
        CV_Assert(inputs.size() != 0);
        std::vector<int> inpShape(inputs[0].begin() + 2, inputs[0].end());
        int outCn = blobs[0].size[0];
        std::vector<int> outShape;
        outShape.push_back(inputs[0][0]);
        outShape.push_back(outCn);
        int inpCn = inputs[0][1];
            for (int i = 0; i < inpShape.size(); i++)
                outShape.push_back((inpShape[i] + pads_begin[i] + pads_end[i] - dilations[i] * (kernel_size[i] - 1) - 1) / strides[i] + 1);
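                // i.e. the standard convolution output size:
                //   out = (in + pad_begin + pad_end - dilation*(kernel - 1) - 1) / stride + 1
                // e.g. in = 224, pads = 3 + 3, kernel = 7, dilation = 1, stride = 2
                //      -> (224 + 6 - 6 - 1) / 2 + 1 = 112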
            getConvPoolOutParams(inpShape, kernel_size, strides, padMode, dilations, outShape);
        int ngroups = inpCn / blobs[0].size[1];
        if (ngroups == 0 || ngroups * blobs[0].size[1] != inpCn)
            CV_Error(Error::StsError, format("Number of input channels should "
                      "be multiple of %d but got %d", blobs[0].size[1], inpCn));
        CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);
        outputs.resize(1, outShape);
    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
        BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
        CV_Assert(!blobs.empty());
        const int outCn = blobs[0].size[0];
        // prepare weightsMat where each row is aligned and has enough zero padding on the right to
        // use vectorized (i.e. with intrinsics) loops without tail processing
        Mat wm = blobs[0].reshape(1, outCn);
        if( wm.step1() % VEC_ALIGN != 0 )
            int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
            Mat wm_buffer = Mat(outCn, newcols, wm.type());
            Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
            wm_padding.setTo(Scalar::all(0.));
            Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
            wm.copyTo(wm_aligned);
        weightsMultipliers.assign(outCn, 1.0);
        Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
        biasvec.resize(outCn+2);
        if( biasMat.empty() )
            for(int i = 0; i < outCn; i++ )
            for(int i = 0; i < outCn; i++ )
                biasvec[i] = biasMat.at<float>(i);
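        // biasvec is sized outCn+2 rather than outCn: the CPU kernels below walk the output
        // channels two at a time, and the two extra (duplicated) entries appear to be there so
        // that the last channel pair can still read a valid value without a special tail case.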
        convolutionOp.release();
    bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
        if (!activ.empty() && !layer.empty())
        activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
        if (IS_DNN_OPENCL_TARGET(preferableTarget))
            Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
            if (!activ_power.empty())
                if (activ_power->scale != 1.f || activ_power->shift != 0.f)
                    const int outCh = blobs[0].size[0];
                    fuseWeights(Mat(1, outCh, CV_32F, Scalar(activ_power->scale)),
                                Mat(1, outCh, CV_32F, Scalar(activ_power->shift)));
                power = activ_power->power;
                activType = OCL4DNN_CONV_FUSED_ACTIV_POWER;
            Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
            if (!activ_tanh.empty())
                activType = OCL4DNN_CONV_FUSED_ACTIV_TANH;
        return !activ.empty();
    void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
        // Convolution weights have OIHW data layout. Fusing the parameters in the case of
        // (conv(I) + b1) * w + b2
        // means scaling the convolution weights by w and replacing the bias with [b1*w + b2].
        const int outCn = weightsMat.size[0];
        Mat w = w_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(w_.at<float>(0))) : w_;
        Mat b = b_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(b_.at<float>(0))) : b_;
        CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
                    w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
            // Keep the original weights unchanged.
            if (weightsMat.data == blobs[0].data)
                weightsMat = weightsMat.clone();
            Mat originWeights = blobs[0].reshape(1, outCn);
            for (int i = 0; i < outCn; ++i)
                double wi = w.at<float>(i);
                weightsMultipliers[i] *= wi;
                cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
            for (int i = 0; i < outCn; ++i)
                biasvec[i] += b.at<float>(i);
        biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
        const int inpCn = inputBuffer.channels();
        const int outCn = blobs[0].size[0];
        const int inpGroupCn = blobs[0].size[1];
        const int group = inpCn / inpGroupCn;
        const int outGroupCn = outCn / group;
        Halide::Buffer<float> weights = wrapToHalideBuffer(blobs[0]);
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Func padded_input(name + "_constant_exterior");
        if (pad.width || pad.height)
            Halide::Func bounded =
                Halide::BoundaryConditions::constant_exterior(inputBuffer, 0);
            padded_input(x, y, c, n) = bounded(x, y, c, n);
            padded_input(x, y, c, n) = inputBuffer(x, y, c, n);
        Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
        Halide::Expr kx = x * stride.width - pad.width + r.x * dilation.width;
        Halide::Expr ky = y * stride.height - pad.height + r.y * dilation.height;
        Halide::Expr kc = r.z;
        for (int i = 1; i < group; ++i)
            kc = select(c < outGroupCn * i, kc, inpGroupCn * i + r.z);
        Halide::Expr topExpr = sum(padded_input(kx, ky, kc, n) *
                                   weights(r.x, r.y, r.z, c));
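        // The reduction domain r runs over the kernel window (r.x, r.y) and one group's input
        // channels (r.z); the select() chain above remaps r.z into the channel range of the
        // group that output channel c belongs to, which is how grouped convolution is expressed.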
            Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outCn});
        top(x, y, c, n) = topExpr;
        return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
#ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
        CV_Assert(input->dims.size() == 4 || input->dims.size() == 5);
        const int inpCn = input->dims[input->dims.size() - 2];  // NOTE: input->dims are reversed (WHIO or WHDIO)
        const int outCn = blobs[0].size[0];
        const int inpGroupCn = blobs[0].size[1];
        const int group = inpCn / inpGroupCn;
        InferenceEngine::Layout layout = (input->dims.size() == 4) ? InferenceEngine::Layout::OIHW :
                                                                     InferenceEngine::Layout::NCDHW;
        auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
            if (weightsMat.isContinuous())
                Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
                ieWeights = wrapToInfEngineBlob(cvWeights, layout);
                ieWeights = InferenceEngine::make_shared_blob<float>(
                                    InferenceEngine::Precision::FP32, layout,
                ieWeights->allocate();
                Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
                Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
                cvWeights.copyTo(newWeights);
        InferenceEngine::Blob::Ptr ieBiases;
        if (hasBias() || fusedBias)
            Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
            ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
        InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
        ieLayer.setKernel(kernel_size);
        ieLayer.setStrides(strides);
        ieLayer.setDilation(dilations);
        ieLayer.setPaddingsBegin(pads_begin);
        ieLayer.setPaddingsEnd(pads_end);
        ieLayer.setGroup((size_t)group);
        ieLayer.setOutDepth((size_t)outCn);
        InferenceEngine::Builder::Layer l = ieLayer;
        addConstantData("weights", ieWeights, l);
            addConstantData("biases", ieBiases, l);
        if (!padMode.empty())
            l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
        return Ptr<BackendNode>(new InfEngineBackendNode(l));
#endif  // HAVE_INF_ENGINE
    class ParallelConv : public cv::ParallelLoopBody
        enum { BLK_SIZE = 32, BLK_SIZE_CN = 64 };
        Size kernel_, pad_, stride_, dilation_;
        int ngroups_, nstripes_;
        std::vector<int> ofstab_;
        const std::vector<float>* biasvec_;
        const std::vector<float>* reluslope_;
        const ActivationLayer* activ_;
            : input_(0), weights_(0), output_(0), ngroups_(0), nstripes_(0),
              biasvec_(0), reluslope_(0), activ_(0), is1x1_(false), useAVX(false), useAVX2(false), useAVX512(false)
        static void run( const Mat& input, Mat& output, const Mat& weights,
                         const std::vector<float>& biasvec,
                         const std::vector<float>& reluslope,
                         Size kernel, Size pad, Size stride, Size dilation,
                         const ActivationLayer* activ, int ngroups, int nstripes )
                        input.dims == 4 && output.dims == 4,
                        input.size[0] == output.size[0],
                        weights.rows == output.size[1],
                        weights.cols == (input.size[1]/ngroups)*kernel.width*kernel.height,
                        input.type() == output.type(),
                        input.type() == weights.type(),
                        input.type() == CV_32FC1,
                        input.isContinuous(),
                        output.isContinuous(),
                        biasvec.size() == (size_t)output.size[1]+2);
            p.weights_ = &weights;
            for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i];
            p.outShape[1] /= ngroups;
            p.kernel_ = kernel; p.pad_ = pad; p.stride_ = stride; p.dilation_ = dilation;
            p.ngroups_ = ngroups;
            p.nstripes_ = nstripes;
            int inpCnAll = input.size[1], width = input.size[3], height = input.size[2];
            int inpCn = inpCnAll / ngroups;
            p.is1x1_ = kernel == Size(1,1) && pad == Size(0, 0);
            p.useAVX = checkHardwareSupport(CPU_AVX);
            p.useAVX2 = checkHardwareSupport(CPU_AVX2);
            p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
            int ncn = std::min(inpCn, (int)BLK_SIZE_CN);
            p.ofstab_.resize(kernel.width*kernel.height*ncn);
            int* ofstab = &p.ofstab_[0];
            for( int k = 0; k < ncn; k++ )
                for( int k_r = 0; k_r < kernel.height; k_r++ )
                    for( int k_c = 0; k_c < kernel.width; k_c++ )
                        ofstab[(k*kernel.height + k_r)*kernel.width + k_c] =
                            (k*height + k_r*dilation.height)*width + k_c*dilation.width;
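            // The table built above caches, for every (channel k, kernel tap k_r, k_c), the
            // input offset of that tap relative to the top-left corner of the kernel aperture,
            // so the im2row loops can gather inputs with a single table lookup per element.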
            p.biasvec_ = &biasvec;
            p.reluslope_ = &reluslope;
            p.activ_ = p.reluslope_->empty() ? activ : 0;
            parallel_for_(Range(0, nstripes), p, nstripes);
        virtual void operator ()(const Range &r0) const CV_OVERRIDE
            const int valign = ConvolutionLayerImpl::VEC_ALIGN;
            int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
            int outW = output_->size[3], outH = output_->size[2], outCn = output_->size[1]/ngroups;
            int width = input_->size[3], height = input_->size[2], inpCn = input_->size[1]/ngroups;
            const int nstripes = nstripes_;
            int kernel_w = kernel_.width, kernel_h = kernel_.height;
            int pad_w = pad_.width, pad_h = pad_.height;
            int stride_w = stride_.width, stride_h = stride_.height;
            int dilation_w = dilation_.width, dilation_h = dilation_.height;
            int karea = kernel_w*kernel_h;
            size_t inpPlaneSize = width*height;
            size_t outPlaneSize = outW*outH;
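            // Work partitioning (implemented just below): if there are at least twice as many
            // stripes as samples, each sample's output plane is split across several stripes;
            // otherwise every stripe processes one or more whole samples.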
            int stripesPerSample;
            if( nstripes >= batchSize*2 )
                stripesPerSample = nstripes/batchSize;
                stripeSize = alignSize((outPlaneSize + stripesPerSample - 1)/stripesPerSample, valign);
                stripeSize = std::min(stripeSize, outPlaneSize);
                stripesPerSample = 1;
                int samplesPerStripe = std::max((batchSize + nstripes - 1)/nstripes, 1);
                r.start *= samplesPerStripe;
                r.end *= samplesPerStripe;
                stripeSize = outPlaneSize;
            const float* data_inp0_ = input_->ptr<float>();
            const int* ofstab = &ofstab_[0];
            const float* wptr_orig_ = weights_->ptr<float>();
            size_t wstep = weights_->step1();
            const float* biasptr_ = &biasvec_->at(0);
            const float* reluptr_ = reluslope_->empty() ? 0 : &reluslope_->at(0);
            float* data_out0_ = output_->ptr<float>();
            size_t rowbufsz = (size_t)karea*BLK_SIZE_CN*BLK_SIZE;
            AutoBuffer<float> rowbuf0_(rowbufsz + valign);
            float* rowbuf0 = alignPtr(rowbuf0_.data(), (int)(valign*sizeof(float)));
            // we clear the buffer once; ultimately, it lets us avoid
            // tail processing after running the unrolled/vectorized loop.
            // the main idea is to make sure that the tail (a.k.a. padding) of each row
            // (i.e. the elements with indices between vsz=karea*ncn and vsz_a)
            // does not contain NaNs or Infs. Because the padding in the weights
            // matrix is explicitly initialized with 0's, we handle all other
            // cases nicely, i.e. we can skip explicit re-initialization
            // of the padding - we just retain elements from the previous iteration
            // of the loop over channels (cn0).
            memset(rowbuf0, 0, rowbufsz*sizeof(rowbuf0[0]) );
            for( int stripe = r.start; stripe < r.end; stripe++ )
                int subsampleIdx = stripe/stripesPerSample;
                if( subsampleIdx >= batchSize )
                int stripeStart = (int)((stripe - subsampleIdx*stripesPerSample)*stripeSize);
                int stripeEnd = (int)std::min(stripeStart + stripeSize, outPlaneSize);
                const float* data_inp0 = data_inp0_ + subsampleIdx*inpPlaneSize*inpCn;
                float* data_out0 = data_out0_ + subsampleIdx*outPlaneSize*outCn;
                int startOutCn = (subsampleIdx % ngroups)*outCn;
                const float* wptr_orig = wptr_orig_ + wstep*startOutCn;
                const float* biasptr = biasptr_ + startOutCn;
                for( int cn0 = 0; cn0 < inpCn; cn0 += BLK_SIZE_CN )
                    int cn1 = std::min(cn0 + BLK_SIZE_CN, inpCn);
                    int ncn = cn1 - cn0, vsz = karea*ncn;
                    int vsz_a = (int)alignSize(vsz, valign);
                    const float* wptr = wptr_orig + cn0*karea;
                    // we apply [Channels][P]ReLU (if any) during the final pass only.
                    const float* relu = cn1 == inpCn && reluptr_ ? reluptr_ + startOutCn : 0;
                    for( int ofs0 = stripeStart; ofs0 < stripeEnd; ofs0 += BLK_SIZE )
                        int ofs, ofs1 = std::min(ofs0 + BLK_SIZE, stripeEnd);
                        int out_i = ofs0 / outW;
                        int out_j = ofs0 - out_i * outW;
                        // do im2row for a part of input tensor
                        float* rowbuf = rowbuf0;
                        for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
                            int delta = std::min(ofs1 - ofs, outW - out_j);
                            int out_j1 = out_j + delta;
                            int in_i = out_i * stride_h - pad_h;
                            int in_j = out_j * stride_w - pad_w;
                            const float* imgptr = data_inp0 + (cn0*height + in_i)*width + in_j;
                            // 1x1 convolution: just copy the input pixels into the row buffer
                                for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
                                    for( k = 0; k < vsz; k++ )
                                        rowbuf[k] = imgptr[k*inpPlaneSize];
                                bool ok_i = 0 <= in_i && in_i < height - (kernel_h-1)*dilation_h;
                                int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
                                int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
                                for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
                                    // this condition should be true for most of the tensor elements, i.e.
                                    // most of the time the kernel aperture is inside the tensor X-Y plane.
                                    if( ok_i && out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
                                        for( k = 0; k < vsz; k++ )
                                            float v0 = imgptr[k1];
                                            float v1 = imgptr[k1 + stride_w];
                                            rowbuf[k+vsz_a] = v1;
                                        int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
                                        int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
                                        // here some non-continuous sub-row of the row will not be
                                        // filled from the tensor; we need to make sure that the uncovered
                                        // elements are explicitly set to 0's. the easiest way is to
                                        // set all the elements to 0's before the loop.
                                        memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
                                        for( k = 0; k < ncn; k++ )
                                            for( i = i0; i < i1; i++ )
                                                for( j = j0; j < j1; j++ )
                                                    int imgofs = k*(width*height) + i*(dilation_h*width) + j*dilation_w;
                                                    rowbuf[(k*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
                        // now compute dot product of the weights
                        // and im2row-transformed part of the tensor
                        int bsz = ofs1 - ofs0;
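                        // In effect a small GEMM: each of the outCn filter rows (vsz weights) is
                        // dotted with each of the bsz im2row rows, dispatched to the AVX512/AVX2/AVX
                        // kernels when available and otherwise to the universal-intrinsics loop below.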
#if CV_TRY_AVX512_SKX
                        /* AVX512 convolution requires an alignment of 16, and ROI is only there for larger vector sizes */
                            opt_AVX512_SKX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                                     outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                            opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                               outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                            opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
                                              outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
                        for( int i = 0; i < outCn; i += 2 )
                            const float* wptr0 = wptr + i*wstep;
                            const float* wptr1 = wptr0 + wstep;
                            float* outptr0 = data_out0 + ofs0 + i*outPlaneSize;
                            float* outptr1 = outptr0 + outPlaneSize;
                            float bias0 = biasptr[i], bias1 = biasptr[i+1];
                            float r0 = 1.f, r1 = 1.f;
                                r0 = relu[i]; r1 = relu[i+1];
                                v_float32x4 vr0 = v_setall_f32(r0), vr1 = v_setall_f32(r1), z = v_setzero_f32();
                            for( ; j <= bsz - 4; j += 4 )
                                const float* rptr = rowbuf0 + j*vsz_a;
                                    s0 = v_setall_f32(bias0);
                                    s1 = v_setall_f32(bias1);
                                    s0 = v_load(outptr0 + j);
                                    s1 = v_load(outptr1 + j);
                                v_float32x4 vs00 = v_setzero_f32(), vs01 = v_setzero_f32(),
                                            vs02 = v_setzero_f32(), vs03 = v_setzero_f32(),
                                            vs10 = v_setzero_f32(), vs11 = v_setzero_f32(),
                                            vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
                                for( k = 0; k < vsz; k += 4, rptr += 4 )
                                    v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k);
                                    v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a),
                                                r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3);
                                s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
                                s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
                                    s0 = v_select(s0 > z, s0, s0*vr0);
                                    s1 = v_select(s1 > z, s1, s1*vr1);
                                v_store(outptr0 + j, s0);
                                v_store(outptr1 + j, s1);
                            for( ; j < bsz; j++ )
                                const float* rptr = rowbuf0 + j*vsz_a;
                                for( k = 0; k < vsz; k++ )
                                    s00 = s00 > 0.f ? s00 : s00*r0;
                                    s10 = s10 > 0.f ? s10 : s10*r1;
                activ_->forwardSlice(data_out0 + stripeStart, data_out0 + stripeStart,
                                     (int)(stripeEnd - stripeStart),
                                     outPlaneSize, startOutCn, startOutCn + outCn);
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        bool use_half = (inps.depth() == CV_16S);
        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
        CV_Assert(outputs.size() == 1);
        for (int i = 0; i < inputs.size(); ++i)
            CV_Assert(inputs[i].u != outputs[0].u);
        if (umat_blobs.empty())
            size_t n = blobs.size();
            umat_blobs.resize(n);
            for (size_t i = 0; i < n; i++)
                blobs[i].copyTo(umat_blobs[i]);
        if (convolutionOp.empty())
            OCL4DNNConvConfig config;
            config.in_shape = shape(inputs[0]);
            config.out_shape = shape(outputs[0]);
            config.kernel = kernel;
            config.stride = stride;
            config.dilation = dilation;
            config.group = inputs[0].size[1] / umat_blobs[0].size[1];
            config.bias_term = (hasBias()) ? true : false;
            config.use_half = use_half;
            convolutionOp = Ptr<OCL4DNNConvSpatial<float> >(new OCL4DNNConvSpatial<float>(config));
        int outCn = umat_blobs[0].size[0];
            Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
            if( !activ_relu.empty() )
                reluslope.assign(outCn+2, activ_relu->negativeSlope);
                activType = OCL4DNN_CONV_FUSED_ACTIV_RELU;
            Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
            if( !activ_relu6.empty() )
                reluslope[0] = activ_relu6->minValue;
                reluslope[1] = activ_relu6->maxValue;
                activType = OCL4DNN_CONV_FUSED_ACTIV_RELU6;
            Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
            if( !activ_chprelu.empty() )
                const Mat& m = activ_chprelu->blobs[0];
                CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
                const float* mdata = m.ptr<float>();
                reluslope.resize(outCn+2);
                std::copy(mdata, mdata + outCn, reluslope.begin());
                reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
                activType = OCL4DNN_CONV_FUSED_ACTIV_PRELU;
            weightsMat.copyTo(umat_blobs[0]);
            fusedWeights = false;
            if ( umat_blobs.size() < 2 )
                umat_blobs.resize(2);
            umat_blobs[1] = UMat(biasvec, true);
            convolutionOp->setBias(true);
            if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU )
                CV_Assert(!reluslope.empty());
                convolutionOp->setActivReLU(true, reluslope[0]);
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_PRELU)
                CV_Assert(!reluslope.empty());
                convolutionOp->setActivPReLU(true, reluslope);
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_POWER)
                convolutionOp->setActivPower(true, power);
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_TANH)
                convolutionOp->setActivTanh(true);
            else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU6)
                convolutionOp->setActivReLU6(true, reluslope[0], reluslope[1]);
                convolutionOp->setActivReLU(false, 0);
                convolutionOp->setActivPReLU(false, reluslope);
                convolutionOp->setActivPower(false, 1.f);
                convolutionOp->setActivTanh(false);
                convolutionOp->setActivReLU6(false, 0, 0);
        UMat& inpMat = inputs[0];
        UMat& outMat = outputs[0];
        int batch_size = inpMat.size[0];
        return convolutionOp->Forward(inpMat,
                                      inputs.size() == 2 ? inputs[1] : UMat(),
                                      umat_blobs.size() > 1 ? umat_blobs[1] : UMat(),
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))
        if (inputs_arr.depth() == CV_16S)
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
               name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
               kernel.width, kernel.height, pad.width, pad.height,
               stride.width, stride.height, dilation.width, dilation.height);*/
        CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
                    outputs.size() == 1, inputs[0].data != outputs[0].data);
        if (inputs[0].dims == 5) {
            CV_Error(Error::StsNotImplemented, "Convolution3D layer is not supported on OCV backend");
        int ngroups = inputs[0].size[1]/blobs[0].size[1];
        CV_Assert(outputs[0].size[1] % ngroups == 0);
        int outCn = blobs[0].size[0];
            Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
            if( !activ_relu.empty() )
                reluslope.assign(outCn+2, activ_relu->negativeSlope);
            Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
            if( !activ_chprelu.empty() )
                const Mat& m = activ_chprelu->blobs[0];
                CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
                const float* mdata = m.ptr<float>();
                reluslope.resize(outCn+2);
                std::copy(mdata, mdata + outCn, reluslope.begin());
                reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
        int nstripes = std::max(getNumThreads(), 1);
        ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
                          kernel, pad, stride, dilation, activ.get(), ngroups, nstripes);
    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
        CV_Assert(inputs.size() == outputs.size());
        for (int i = 0; i < inputs.size(); i++)
            flops += total(outputs[i])*(CV_BIG_INT(2)*kernel.area()*inputs[i][1] + 1);
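            // i.e. per output element: 2*kernel.area()*inputChannels FLOPs for the
            // multiply-adds plus one addition for the bias.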
class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
    Mat weightsMat, biasesMat;
    DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}
    MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
        int inpCn = inpShape[1];
        int inpH = inpShape[2];
        int inpW = inpShape[3];
        int outCn = outShape[1];
        int ngroups = inpCn / blobs[0].size[0];
        int outGroupCn = outCn / ngroups;
        int ksize = outGroupCn * kernel.height * kernel.width;
        return shape(ksize, inpH * inpW);
    virtual bool supportBackend(int backendId) CV_OVERRIDE
#ifdef HAVE_INF_ENGINE
        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
        const int group = numOutput / outGroupCn;
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
            if (kernel_size.size() == 3)
                CV_Error(Error::StsNotImplemented, "Unsupported deconvolution3D layer");
            if (adjustPad.height || adjustPad.width)
                if (padMode.empty())
                    if (preferableTarget != DNN_TARGET_CPU && group != 1)
                    if ((adjustPad.height && pad.height) || (adjustPad.width && pad.width))
                    return pad.width >= adjustPad.width && pad.height >= adjustPad.height;
                else if (padMode == "SAME")
                    return kernel.width >= pad.width + 1 + adjustPad.width &&
                           kernel.height >= pad.height + 1 + adjustPad.height;
                else if (padMode == "VALID")
                return preferableTarget == DNN_TARGET_CPU;
            if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
                return dilation.width == 1 && dilation.height == 1;
#endif  // HAVE_INF_ENGINE
        return kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE);
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
        CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
        CV_Assert(inputs.size() != 0);
        int outCn = numOutput;
        std::vector<int> outShape;
        outShape.push_back(inputs[0][0]);  // batch
        outShape.push_back(outCn);
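        // Deconvolution (transposed convolution) output size, depending on the padding mode:
        //   explicit pads: out = stride*(in - 1) + kernel - pad_begin - pad_end + adjust
        //   VALID:         out = stride*(in - 1) + kernel + adjust
        //   SAME:          out = stride*(in - 1) + 1 + adjust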
        if (padMode.empty())
            for (int i = 0; i < kernel_size.size(); i++)
                outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] - pads_begin[i] - pads_end[i] + adjust_pads[i]);
        else if (padMode == "VALID")
            for (int i = 0; i < kernel_size.size(); i++)
                outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] + adjust_pads[i]);
        else if (padMode == "SAME")
            for (int i = 0; i < kernel_size.size(); i++)
                outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + 1 + adjust_pads[i]);
            CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
        CV_Assert(outCn % blobs[0].size[1] == 0);
        int ngroups = outCn / blobs[0].size[1];
        int inpCn = inputs[0][1];
        CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
        CV_Assert(blobs[0].size[0] == inpCn);
        outputs.resize(1, outShape);
            internals.push_back(computeColRowShape(inputs[0], outputs[0]));
    void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
        BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        std::vector<int> inpShape;
        std::vector<int> outShape;
        for (int i = 2; i < inputs[0].dims; i++) {
            inpShape.push_back(inputs[0].size[i]);
            outShape.push_back(outputs[0].size[i]);
        getConvPoolPaddings(outShape, kernel_size, strides, padMode, pads_begin, pads_end);
        if (pads_begin.size() == 2) {
            for (int i = 0; i < pads_begin.size(); i++) {
                if (pads_begin[i] != pads_end[i])
                    CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in deconvolution layer");
            pad = Size(pads_begin[1], pads_begin[0]);
        weightsMultipliers.assign(numOutput, 1.0);
        if (weightsMat.empty())
            transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
            biasesMat = hasBias() ? blobs[1].reshape(1, numOutput)
                                  : Mat::zeros(numOutput, 1, CV_32F);
    void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
        Mat w = w_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(w_.at<float>(0))) : w_;
        Mat b = b_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(b_.at<float>(0))) : b_;
        CV_Assert_N(!weightsMat.empty(),
                    w.empty() || numOutput == w.total(),
                    b.empty() || numOutput == b.total());
            transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
            weightsMat = weightsMat.reshape(1, numOutput);
            for (int i = 0; i < numOutput; ++i)
                double wi = w.at<float>(i);
                weightsMultipliers[i] *= wi;
                cv::multiply(weightsMat.row(i), weightsMultipliers[i], weightsMat.row(i));
                biasesMat.at<float>(i) *= wi;
            weightsMat = weightsMat.reshape(1, weightsMat.total() / blobs[0].size[0]);
            cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
    class MatMulInvoker : public ParallelLoopBody
        MatMulInvoker(const Mat& a, const Mat& b, Mat& c, int nstripes)
            nstripes_ = nstripes;
            useAVX = checkHardwareSupport(CPU_AVX);
            useAVX2 = checkHardwareSupport(CPU_AVX2);
            useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
        void operator()(const Range& range_) const CV_OVERRIDE
            int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
            Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
            int mmax = a_->rows;
            int nmax = range.end - range.start;
            int kmax = a_->cols;
            const float* aptr = a_->ptr<float>();
            const float* bptr = b_->ptr<float>() + range.start;
            float* cptr = c_->ptr<float>() + range.start;
            size_t astep = a_->step1();
            size_t bstep = b_->step1();
            size_t cstep = c_->step1();
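            // Each stripe owns a contiguous block of B's columns; within it the code below
            // computes two rows of C at a time and unrolls the inner (k) dimension by four,
            // with SIMD over the n dimension where available.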
#if CV_TRY_AVX512_SKX
                opt_AVX512_SKX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
                opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
                opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            for( m = 0; m < mmax; m += 2 )
                float* dst0 = cptr + cstep*m;
                float* dst1 = cptr + cstep*std::min(m+1, mmax-1);
                const float* aptr0 = aptr + astep*m;
                const float* aptr1 = aptr + astep*std::min(m+1, mmax-1);
                for( n = 0; n < nmax; n++ )
                for( k = 0; k < kmax; k += 4 )
                    float alpha00 = aptr0[k];
                    float alpha01 = aptr1[k];
                    float alpha10 = 0.f, alpha11 = 0.f;
                    float alpha20 = 0.f, alpha21 = 0.f;
                    float alpha30 = 0.f, alpha31 = 0.f;
                    const float* bptr0 = bptr + k*bstep;
                    const float* bptr1 = bptr0;
                    const float* bptr2 = bptr0;
                    const float* bptr3 = bptr0;
                        alpha10 = aptr0[k+1];
                        alpha11 = aptr1[k+1];
                        bptr1 = bptr0 + bstep;
                            alpha20 = aptr0[k+2];
                            alpha21 = aptr1[k+2];
                            bptr2 = bptr1 + bstep;
                                alpha30 = aptr0[k+3];
                                alpha31 = aptr1[k+3];
                                bptr3 = bptr2 + bstep;
                    v_float32x4 a00 = v_setall_f32(alpha00);
                    v_float32x4 a01 = v_setall_f32(alpha01);
                    v_float32x4 a10 = v_setall_f32(alpha10);
                    v_float32x4 a11 = v_setall_f32(alpha11);
                    v_float32x4 a20 = v_setall_f32(alpha20);
                    v_float32x4 a21 = v_setall_f32(alpha21);
                    v_float32x4 a30 = v_setall_f32(alpha30);
                    v_float32x4 a31 = v_setall_f32(alpha31);
                    for( ; n <= nmax - 4; n += 4 )
                        v_float32x4 b0 = v_load(bptr0 + n);
                        v_float32x4 b1 = v_load(bptr1 + n);
                        v_float32x4 b2 = v_load(bptr2 + n);
                        v_float32x4 b3 = v_load(bptr3 + n);
                        v_float32x4 d0 = v_load(dst0 + n);
                        v_float32x4 d1 = v_load(dst1 + n);
                        v_store(dst0 + n, d0);
                        v_store(dst1 + n, d1);
                    for( ; n < nmax; n++ )
                        float b0 = bptr0[n], b1 = bptr1[n];
                        float b2 = bptr2[n], b3 = bptr3[n];
                        float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
                        float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
    class Col2ImInvoker : public cv::ParallelLoopBody
        const float* data_col;
        const float* biasvec;
        int channels, height, width;
        int kernel_h, kernel_w;
        int stride_h, stride_w;
        int height_col, width_col;
            : data_col(0), biasvec(0), channels(0), height(0), width(0),
              kernel_h(0), kernel_w(0), pad_h(0), pad_w(0), stride_h(0), stride_w(0), data_im(0),
              height_col(0), width_col(0), nstripes(0), is1x1(0)
        static void run(const float* data_col,
                        int channels, int height, int width,
                        int kernel_h, int kernel_w,
                        int pad_h, int pad_w,
                        int stride_h, int stride_w,
                        int height_col, int width_col,
                        const float* biasvec,
            const int nstripes = getNumThreads();
            t.data_col = data_col;
            t.data_im = data_im;
            t.channels = channels; t.height = height; t.width = width;
            t.kernel_h = kernel_h; t.kernel_w = kernel_w;
            t.pad_h = pad_h; t.pad_w = pad_w;
            t.stride_h = stride_h; t.stride_w = stride_w;
            t.height_col = height_col;
            t.width_col = width_col;
            t.nstripes = nstripes;
            t.biasvec = biasvec;
            parallel_for_(Range(0, nstripes), t, nstripes);
        virtual void operator ()(const Range &r) const CV_OVERRIDE
            const float* data_col_ = data_col;
            float* data_im_ = data_im;
            int coeff_h = (1 - stride_h * kernel_w * height_col) * width_col;
            int coeff_w = (1 - stride_w * height_col * width_col);
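            // coeff_h/coeff_w are precomputed constants that fold the column-buffer stride
            // arithmetic into two terms, so the accumulation loop below can address data_col as
            // offset + h_col*coeff_h + w_col*coeff_w (a best-effort reading of the index math).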
            size_t total = (size_t)channels * height * width;
            size_t stripeSize = (total + nstripes - 1)/nstripes;
            size_t startIndex = r.start*stripeSize;
            size_t endIndex = std::min(r.end*stripeSize, total);
            int w = (int)(startIndex % width + pad_w);
            int h = (int)((startIndex / width) % height + pad_h);
            int c = (int)(startIndex / (width * height));
            int h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
            int h_col_end = std::min(h / stride_h + 1, height_col);
            int plane_size_col = height_col * width_col;
            int offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
            bool is1x1_ = is1x1;
            const float* biasvec_ = biasvec;
            for (size_t index = startIndex; index < endIndex; index++)
                // compute the start and end of the output
                int w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
                int w_col_end = std::min(w / stride_w + 1, width_col);
                    val = data_im_[index];
                    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
                        for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
                            val += data_col_[offset + h_col * coeff_h + w_col * coeff_w];
                data_im_[index] = val + biasvec_[c];
                offset += plane_size_col;
                if( ++w >= width + pad_w )
                    w = (int)((index + 1) % width + pad_w);
                    h = (int)(((index + 1) / width) % height + pad_h);
                    c = (int)((index + 1) / (width * height));
                    h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
                    h_col_end = std::min(h / stride_h + 1, height_col);
                    offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        std::vector<UMat> internals;
        if (inputs_.depth() == CV_16S)
        inputs_.getUMatVector(inputs);
        outputs_.getUMatVector(outputs);
        internals_.getUMatVector(internals);
        int outCn = numOutput;
        int inpCn = inputs[0].size[1];
        if (umat_weights.empty())
                weightsMat.copyTo(umat_weights);
                transpose(blobs[0].reshape(1, inpCn), umat_weights);
                biasesMat.copyTo(umat_biases);
                    blobs[1].reshape(1, outCn).copyTo(umat_biases);
                    umat_biases = UMat::zeros(outCn, 1, CV_32F);
        String buildopt = format("-DT=%s ", ocl::typeToStr(inputs[0].type()));
        buildopt += format("-DPAD_H=%d -DPAD_W=%d -DKERNEL_H=%d -DKERNEL_W=%d -DSTRIDE_H=%d -DSTRIDE_W=%d ",
                           pad.height, pad.width, kernel.height, kernel.width, stride.height, stride.width);
        for (size_t ii = 0; ii < outputs.size(); ii++)
            int ngroups = outCn / blobs[0].size[1];
            int inpGroupCn = inpCn / ngroups;
            int outGroupCn = blobs[0].size[1];
            const UMat& inp = inputs[ii];
            UMat& out = outputs[ii];
            int numImg = inp.size[0];
            int inpH = inp.size[2], inpW = inp.size[3];
            int outH = out.size[2], outW = out.size[3];
            MatShape inpshape = shape(numImg*inpCn, inpH*inpW);
            MatShape outshape = shape(numImg*outCn, outH*outW);
            UMat convBlob = inputs[ii].reshape(1, inpshape.size(), &inpshape[0]);
            UMat decnBlob = out.reshape(1, outshape.size(), &outshape[0]);
            int rows = internals[0].rows / ngroups;
            for (int n = 0; n < numImg; n++)
                for (int g = 0; g < ngroups; g++)
                    UMat colMat = internals[0].rowRange(_Range(g * rows, rows));
                    UMat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
                    UMat wghtMat = umat_weights.colRange(_Range(g * inpGroupCn, inpGroupCn));
                    gemm(wghtMat, convMat, 1, noArray(), 0, colMat, 0);
                for (int g = 0; g < ngroups; g++)
                    int total = outGroupCn * decnBlob.cols;
                    int height_col = inpH;
                    int width_col = inpW;
                    int coeff_h = (1 - stride.height * kernel.width * height_col) * width_col;
                    int coeff_w = (1 - stride.width * height_col * width_col);
                    ocl::Kernel k("col2im", ocl::dnn::col2im_oclsrc, buildopt);
                    k.set(index++, total);
                    k.set(index++, ocl::KernelArg::PtrReadOnly(internals[0]));
                    k.set(index++, (int)(g * rows * internals[0].cols));
                    k.set(index++, outGroupCn);
                    k.set(index++, outH);
                    k.set(index++, outW);
                    k.set(index++, height_col);
                    k.set(index++, width_col);
                    k.set(index++, coeff_h);
                    k.set(index++, coeff_w);
                    k.set(index++, ocl::KernelArg::PtrReadOnly(umat_biases));
                    k.set(index++, (int)(g * outGroupCn * umat_biases.cols));
                    k.set(index++, ocl::KernelArg::PtrWriteOnly(decnBlob));
                    k.set(index++, (int)((g + n * ngroups) * outGroupCn * decnBlob.cols));
                    size_t global[] = { (size_t)total };
                    bool ret = k.run(1, global, NULL, false);
    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());
        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr));
        if (inputs_arr.depth() == CV_16S)
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
        std::vector<Mat> inputs, outputs, internals;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        internals_arr.getMatVector(internals);
        int outCn = numOutput;
        int inpCn = inputs[0].size[1];
        bool is1x1flag = is1x1();
        int nstripes = getNumThreads();
        if( weightsMat.empty() )
            transpose(blobs[0].reshape(1, inpCn), weightsMat);
            biasesMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat::zeros(outCn, 1, CV_32F);
        for (size_t ii = 0; ii < outputs.size(); ii++)
            int ngroups = outCn / blobs[0].size[1];
            int inpGroupCn = inpCn / ngroups;
            int outGroupCn = blobs[0].size[1];
            const Mat& inp = inputs[ii];
            Mat& out = outputs[ii];
            int numImg = inp.size[0];
            int inpH = inp.size[2], inpW = inp.size[3];
            int outH = out.size[2], outW = out.size[3];
            Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
            Mat decnBlob = out.reshape(1, numImg*outCn);
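            // Deconvolution is computed here as a GEMM with the transposed weights (filling the
            // column buffer) followed by col2im, which scatters/accumulates the columns back into
            // the output image and adds the bias; the 1x1 case writes into the output directly.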
            for (int n = 0; n < numImg; n++)
                for (int g = 0; g < ngroups; g++)
                    Mat dstMat = decnBlob.rowRange(_Range((g + n * ngroups) * outGroupCn, outGroupCn));
                    Mat &colMat = is1x1flag ? dstMat : internals[0];
                    Mat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
                    Mat wghtMat = weightsMat.colRange(_Range(g * inpGroupCn, inpGroupCn));
                    Mat curBiasMat = biasesMat.rowRange(_Range(g * outGroupCn, outGroupCn));
                    //gemm(wghtMat, convMat, 1, colMat, 0, colMat, 0);
                    MatMulInvoker mminvoker(wghtMat, convMat, colMat, nstripes);
                    parallel_for_(Range(0, nstripes), mminvoker, nstripes);
                    Col2ImInvoker::run(colMat.ptr<float>(), outGroupCn, outH, outW,
                                       kernel.height, kernel.width, pad.height, pad.width,
                                       stride.height, stride.width, inpH, inpW, dstMat.ptr<float>(),
                                       curBiasMat.ptr<float>(), is1x1flag);
    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
        int inW, inH, inC, inN;
        getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
        const int outGroupCn = blobs[0].size[1];
        const int group = numOutput / outGroupCn;
        const int inpGroupCn = blobs[0].size[0] / group;
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Func padded_input(name + "_constant_exterior");
        auto weights = wrapToHalideBuffer(blobs[0]);
        Halide::Func dilated_input("dilated_input");
        dilated_input(x, y, c, n) = 0.0f;
        Halide::RDom r1(0, inW, 0, inH);
        dilated_input(r1.x * stride.width, r1.y * stride.height, c, n) =
            inputBuffer(r1.x, r1.y, c, n);
        dilated_input.compute_root();
        Halide::Func bounded =
            Halide::BoundaryConditions::constant_exterior(dilated_input, 0,
                                                          0, (inW - 1) * stride.width + 1,
                                                          0, (inH - 1) * stride.height + 1,
        padded_input(x, y, c, n) = bounded(x, y, c, n);
        Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
        Halide::Expr kx = x + pad.width - r.x;
        Halide::Expr ky = y + pad.height - r.y;
        Halide::Expr kInC = r.z;
        Halide::Expr kOutC = c;
        for (int i = 1; i < group; ++i)
            kInC = select(c < outGroupCn * i, kInC, inpGroupCn * i + r.z);
            kOutC = select(c < outGroupCn * i, kOutC, c - outGroupCn * i);
        Halide::Expr topExpr = sum(padded_input(kx, ky, kInC, n) *
                                   weights(r.x, r.y, kOutC, kInC));
            auto bias = wrapToHalideBuffer(blobs[1], {numOutput});
        top(x, y, c, n) = topExpr;
        return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
#ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
        auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
            ieWeights = InferenceEngine::make_shared_blob<float>(
                                InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW,
            ieWeights->allocate();
            int inpCn = blobs[0].size[0];
            Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
            transpose(weightsMat, newWeights);
        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
        const int group = numOutput / outGroupCn;
        InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
        ieLayer.setKernel(kernel_size);
        ieLayer.setStrides(strides);
        ieLayer.setDilation(dilations);
        ieLayer.setPaddingsBegin(pads_begin);
        if (padMode.empty())
            ieLayer.setPaddingsEnd({pads_end[0] - adjust_pads[0], pads_end[1] - adjust_pads[1]});
        else if (padMode == "SAME")
            ieLayer.setPaddingsEnd({kernel_size[0] - pads_begin[0] - 1 - adjust_pads[0],
                                    kernel_size[1] - pads_begin[1] - 1 - adjust_pads[1]});
        ieLayer.setGroup((size_t)group);
        ieLayer.setOutDepth((size_t)numOutput);
        InferenceEngine::Builder::Layer l = ieLayer;
        addConstantData("weights", ieWeights, l);
            addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
        return Ptr<BackendNode>(new InfEngineBackendNode(l));
#endif  // HAVE_INF_ENGINE
    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
        CV_Assert(inputs.size() == outputs.size());
        int outChannels = blobs[0].size[0];
        for (int i = 0; i < inputs.size(); i++)
            flops += CV_BIG_INT(2)*outChannels*kernel.area()*total(inputs[i]);
Ptr<BaseConvolutionLayer> ConvolutionLayer::create(const LayerParams &params)
    Ptr<ConvolutionLayerImpl> l(new ConvolutionLayerImpl(params));
Ptr<BaseConvolutionLayer> DeconvolutionLayer::create(const LayerParams &params)
    return Ptr<BaseConvolutionLayer>(new DeConvolutionLayerImpl(params));