1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
14 // Copyright (C) 2017, Intel Corporation, all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 #include "../precomp.hpp"
44 #include "layers_common.hpp"
45 #include "../op_halide.hpp"
46 #include "../op_inf_engine.hpp"
47 #include "opencv2/core/hal/hal.hpp"
48 #include "opencv2/core/hal/intrin.hpp"
52 #include "opencl_kernels_dnn.hpp"
53 using namespace cv::dnn::ocl4dnn;
61 class BaseConvolutionLayerImpl : public ConvolutionLayer
64 bool newWeightAndBias;
65 std::vector<double> weightsMultipliers;
66 BaseConvolutionLayerImpl(const LayerParams ¶ms)
68 setParamsFrom(params);
69 int pad_t = 0, pad_l = 0, pad_r = 0, pad_b = 0;
70 getConvolutionKernelParams(params, kernel.height, kernel.width, pad_t,
71 pad_l, pad_b, pad_r, stride.height, stride.width, dilation.height,
72 dilation.width, padMode);
74 if (pad_t != pad_b || pad_l != pad_r)
75 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
80 numOutput = params.get<int>("num_output");
81 int ngroups = params.get<int>("group", 1);
83 adjustPad.height = params.get<int>("adj_h", 0);
84 adjustPad.width = params.get<int>("adj_w", 0);
86 CV_Assert(numOutput % ngroups == 0);
87 CV_Assert(adjustPad.width < stride.width &&
88 adjustPad.height < stride.height);
90 newWeightAndBias = false;
93 void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
95 std::vector<Mat> inputs, outputs;
96 inputs_arr.getMatVector(inputs);
97 outputs_arr.getMatVector(outputs);
99 CV_Assert(inputs.size() > 0);
101 CV_Assert(blobs.size() >= 1 && blobs.size() <= 2);
102 CV_Assert(blobs[0].dims == 4 && blobs[0].size[3] == kernel.width && blobs[0].size[2] == kernel.height);
104 const Mat &input = inputs[0];
105 CV_Assert(input.dims == 4 && (input.type() == CV_32F || input.type() == CV_64F || input.type() == CV_16S));
106 for (size_t i = 0; i < inputs.size(); i++)
108 CV_Assert(inputs[i].type() == input.type());
109 CV_Assert(inputs[i].dims == 4 && inputs[i].size[1] == input.size[1]);
110 CV_Assert(inputs[i].size[2] == input.size[2] && inputs[i].size[3] == input.size[3]);
113 Size outSize = Size(outputs[0].size[3], outputs[0].size[2]);
115 int pad_t = pad.height, pad_l = pad.width, pad_b = pad.height, pad_r = pad.width;
117 getConvPoolPaddings(Size(input.size[3], input.size[2]), outSize,
118 kernel, stride, padMode, dilation, pad_t, pad_l, pad_b, pad_r);
121 if (pad_t != pad_b || pad_l != pad_r)
122 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
130 return blobs.size() >= 2;
133 virtual MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const = 0;
136 return (kernel.height == 1 && kernel.width == 1) &&
137 (stride.height == 1 && stride.width == 1) &&
138 (dilation.height == 1 && dilation.width == 1);
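    // Note: a 1x1 kernel with unit stride and no dilation reduces the convolution to a
    // plain matrix multiplication over the channel dimension, so the im2row step can be skipped.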
141 virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
144 top->getScaleShift(w, b);
145 if (!w.empty() || !b.empty())
153 virtual void fuseWeights(const Mat& w_, const Mat& b_) = 0;
155 virtual void applyHalideScheduler(Ptr<BackendNode>& node,
156 const std::vector<Mat*> &inputs,
157 const std::vector<Mat> &outputs,
158 int targetId) const CV_OVERRIDE
161 if (targetId != DNN_TARGET_CPU)
163 Layer::applyHalideScheduler(node, inputs, outputs, targetId);
166 Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"), yi("yi"), yo("yo"), co("co"), ci("ci");
167 Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
168 Halide::Func& padded_input = node.dynamicCast<HalideBackendNode>()->funcs[0];
170 int outW, outH, outC, outN;
171 getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
173 if (outW == 1 || outH <= 2)
176 if (is1x1() || outC <= 16)
182 .vectorize(x, outW >= 16 ? 16 : outW);
186 .split(c, co, ci, 16)
187 .fuse(yo, co, tile).fuse(n, tile, tile)
190 .vectorize(x, outW >= 16 ? 16 : outW);
191 padded_input.compute_at(top, yi);
192 #endif // HAVE_HALIDE
#define IS_POWER_LAYER(layer) \
    (!layer.empty() && layer->type == "Power")
//TODO: perform convolution and bias addition simultaneously for cache optimization
200 class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
203 enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
205 std::vector<float> biasvec;
206 std::vector<float> reluslope;
207 Ptr<ActivationLayer> activ;
211 Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
212 std::vector<UMat> umat_blobs;
214 ocl4dnnFusedActiv_t activType;
217 ConvolutionLayerImpl(const LayerParams ¶ms) : BaseConvolutionLayerImpl(params)
222 activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
227 MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
229 Size out(outShape[3], outShape[2]);
230 int inpGroupCn = blobs[0].size[1];
231 int ksize = inpGroupCn * kernel.height * kernel.width;
232 return shape(out.area(), ksize);
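        // The im2row buffer has one row per output location; each row holds the
        // inpGroupCn * kernel.height * kernel.width input values covered by the kernel
        // aperture at that location, so the convolution reduces to a GEMM with the weights.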
235 virtual bool supportBackend(int backendId) CV_OVERRIDE
237 #ifdef HAVE_INF_ENGINE
238 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
240 return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4) ||
241 (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height);
245 return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
248 bool getMemoryShapes(const std::vector<MatShape> &inputs,
249 const int requiredOutputs,
250 std::vector<MatShape> &outputs,
251 std::vector<MatShape> &internals) const CV_OVERRIDE
253 CV_Assert(blobs.size() != 0);
254 CV_Assert(!hasBias() || blobs[1].total() == (size_t)blobs[0].size[0]);
255 CV_Assert(inputs.size() == (size_t)1);
259 int inpCn = inputs[0][1];
260 int inpH = inputs[0][2];
261 int inpW = inputs[0][3];
263 int outCn = blobs[0].size[0];
268 out.height = (inpH + 2 * pad.height - (dilation.height * (kernel.height - 1) + 1)) / stride.height + 1;
269 out.width = (inpW + 2 * pad.width - (dilation.width * (kernel.width - 1) + 1)) / stride.width + 1;
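            // Standard convolution output size; e.g. inpH = 224, kernel = 3, pad = 1,
            // stride = 1, dilation = 1 gives (224 + 2*1 - (1*(3-1) + 1))/1 + 1 = 224.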
273 getConvPoolOutParams(Size(inpW, inpH), kernel, stride, padMode, dilation, out);
276 int ngroups = inpCn / blobs[0].size[1];
277 if (ngroups == 0 || ngroups * blobs[0].size[1] != inpCn)
            CV_Error(Error::StsError, format("Number of input channels should "
                                             "be a multiple of %d but got %d", blobs[0].size[1], inpCn));
280 CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);
282 int dims[] = {inputs[0][0], outCn, out.height, out.width};
283 outputs.resize(inputs.size(), shape(dims, 4));
288 virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
290 BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
292 CV_Assert(!blobs.empty());
293 const int outCn = blobs[0].size[0];
294 // prepare weightsMat where each row is aligned and has enough zero padding on the right to
295 // use vectorized (i.e. with intrinsics) loops without tail processing
296 Mat wm = blobs[0].reshape(1, outCn);
297 if( wm.step1() % VEC_ALIGN != 0 )
299 int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
300 Mat wm_buffer = Mat(outCn, newcols, wm.type());
301 Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
302 wm_padding.setTo(Scalar::all(0.));
303 Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
304 wm.copyTo(wm_aligned);
308 weightsMultipliers.assign(outCn, 1.0);
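        // weightsMultipliers accumulates the per-output-channel scale applied by fuseWeights(),
        // so the fused weights can always be recomputed from the original rows of blobs[0].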
310 Mat biasMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat();
311 biasvec.resize(outCn+2);
312 if( biasMat.empty() )
314 for(int i = 0; i < outCn; i++ )
319 for(int i = 0; i < outCn; i++ )
320 biasvec[i] = biasMat.at<float>(i);
323 convolutionOp.release();
327 bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
329 if (!activ.empty() && !layer.empty())
337 activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
339 if (IS_DNN_OPENCL_TARGET(preferableTarget))
341 Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
342 if (!activ_power.empty())
344 if (activ_power->scale != 1.f || activ_power->shift != 0.f)
346 const int outCh = blobs[0].size[0];
347 fuseWeights(Mat(1, outCh, CV_32F, Scalar(activ_power->scale)),
348 Mat(1, outCh, CV_32F, Scalar(activ_power->shift)));
351 power = activ_power->power;
352 activType = OCL4DNN_CONV_FUSED_ACTIV_POWER;
354 Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
355 if (!activ_tanh.empty())
357 activType = OCL4DNN_CONV_FUSED_ACTIV_TANH;
361 return !activ.empty();
364 void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
        // Convolution weights have OIHW data layout. Fusing the parameters of
        //     (conv(I) + b1) * w + b2
        // means scaling each output channel's weights by w and replacing the bias with b1*w + b2.
369 const int outCn = weightsMat.size[0];
370 Mat w = w_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(w_.at<float>(0))) : w_;
371 Mat b = b_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(b_.at<float>(0))) : b_;
372 CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
373 w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
            // Keep the original weights unchanged.
378 if (weightsMat.data == blobs[0].data)
379 weightsMat = weightsMat.clone();
381 Mat originWeights = blobs[0].reshape(1, outCn);
382 for (int i = 0; i < outCn; ++i)
384 double wi = w.at<float>(i);
385 weightsMultipliers[i] *= wi;
386 cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
393 for (int i = 0; i < outCn; ++i)
394 biasvec[i] += b.at<float>(i);
397 newWeightAndBias = !w.empty() || !b.empty();
398 fusedBias = hasBias() || !b.empty();
399 biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
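        // Duplicate the last real bias into the two padding slots of biasvec (sized outCn+2)
        // so the vectorized output loop, which handles two output channels per iteration,
        // can safely read biasptr[i+1] when outCn is odd.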
402 virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
405 Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
407 const int inpCn = inputBuffer.channels();
408 const int outCn = blobs[0].size[0];
409 const int inpGroupCn = blobs[0].size[1];
410 const int group = inpCn / inpGroupCn;
411 const int outGroupCn = outCn / group;
413 Halide::Buffer<float> weights = wrapToHalideBuffer(blobs[0]);
415 Halide::Var x("x"), y("y"), c("c"), n("n");
416 Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
417 Halide::Func padded_input(name + "_constant_exterior");
418 if (pad.width || pad.height)
420 Halide::Func bounded =
421 Halide::BoundaryConditions::constant_exterior(inputBuffer, 0);
422 padded_input(x, y, c, n) = bounded(x, y, c, n);
426 padded_input(x, y, c, n) = inputBuffer(x, y, c, n);
429 Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
430 Halide::Expr kx = x * stride.width - pad.width + r.x * dilation.width;
431 Halide::Expr ky = y * stride.height - pad.height + r.y * dilation.height;
432 Halide::Expr kc = r.z;
433 for (int i = 1; i < group; ++i)
435 kc = select(c < outGroupCn * i, kc, inpGroupCn * i + r.z);
437 Halide::Expr topExpr = sum(padded_input(kx, ky, kc, n) *
438 weights(r.x, r.y, r.z, c));
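        // The reduction domain r runs over the kernel window and the input channels of one
        // group; the select() chain above remaps each output channel c to the matching group
        // of input channels, so grouped convolution is expressed as a single Halide Func.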
441 Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outCn});
444 top(x, y, c, n) = topExpr;
445 return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
446 #endif // HAVE_HALIDE
447 return Ptr<BackendNode>();
450 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
452 #ifdef HAVE_INF_ENGINE
453 InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
454 CV_Assert(input->dims.size() == 4);
456 const int inpCn = input->dims[2]; // NOTE: input->dims are reversed (whcn)
457 const int outCn = blobs[0].size[0];
458 const int inpGroupCn = blobs[0].size[1];
459 const int group = inpCn / inpGroupCn;
461 auto ieWeights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
462 if (newWeightAndBias)
464 if (weightsMat.isContinuous())
466 Mat fusedWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
467 ieWeights = wrapToInfEngineBlob(fusedWeights, InferenceEngine::Layout::OIHW);
471 ieWeights = InferenceEngine::make_shared_blob<float>(
472 InferenceEngine::Precision::FP32, InferenceEngine::Layout::OIHW,
474 ieWeights->allocate();
476 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
477 Mat fusedWeights = weightsMat.colRange(0, newWeights.cols);
478 fusedWeights.copyTo(newWeights);
481 InferenceEngine::Blob::Ptr ieBiases;
482 if (hasBias() || fusedBias)
484 Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
485 ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
488 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
489 InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
491 ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
492 ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
493 ieLayer.setDilation({(size_t)dilation.height, (size_t)dilation.width});
494 ieLayer.setPaddingsBegin({(size_t)pad.height, (size_t)pad.width});
495 ieLayer.setPaddingsEnd({(size_t)pad.height, (size_t)pad.width});
496 ieLayer.setGroup((size_t)group);
497 ieLayer.setOutDepth((size_t)outCn);
499 InferenceEngine::Builder::Layer l = ieLayer;
500 addConstantData("weights", ieWeights, l);
502 addConstantData("biases", ieBiases, l);
504 if (!padMode.empty())
505 l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
507 return Ptr<BackendNode>(new InfEngineBackendNode(l));
509 InferenceEngine::LayerParams lp;
511 lp.type = "Convolution";
512 lp.precision = InferenceEngine::Precision::FP32;
513 std::shared_ptr<InferenceEngine::ConvolutionLayer> ieLayer(new InferenceEngine::ConvolutionLayer(lp));
515 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
516 ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
517 ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
518 ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
519 ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
520 ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
521 ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
522 ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
523 ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
524 ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
525 ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
526 ieLayer->params["output"] = format("%d", outCn);
527 ieLayer->params["kernel"] = format("%d,%d,%d,%d", outCn, inpGroupCn, kernel.height, kernel.width);
528 ieLayer->params["pads_begin"] = format("%d,%d", pad.height, pad.width);
529 ieLayer->params["pads_end"] = format("%d,%d", pad.height, pad.width);
530 ieLayer->params["strides"] = format("%d,%d", stride.height, stride.width);
531 ieLayer->params["dilations"] = format("%d,%d", dilation.height, dilation.width);
533 ieLayer->_kernel_x = kernel.width;
534 ieLayer->_kernel_y = kernel.height;
535 ieLayer->_stride_x = stride.width;
536 ieLayer->_stride_y = stride.height;
537 ieLayer->_padding_x = pad.width;
538 ieLayer->_padding_y = pad.height;
539 ieLayer->_dilation_x = dilation.width;
540 ieLayer->_dilation_y = dilation.height;
542 ieLayer->_out_depth = outCn;
543 ieLayer->_group = group;
545 ieLayer->_weights = ieWeights;
547 ieLayer->_biases = ieBiases;
548 return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
550 #endif // HAVE_INF_ENGINE
551 return Ptr<BackendNode>();
554 class ParallelConv : public cv::ParallelLoopBody
557 enum { BLK_SIZE = 32, BLK_SIZE_CN = 64 };
563 Size kernel_, pad_, stride_, dilation_;
564 int ngroups_, nstripes_;
565 std::vector<int> ofstab_;
566 const std::vector<float>* biasvec_;
567 const std::vector<float>* reluslope_;
568 const ActivationLayer* activ_;
575 : input_(0), weights_(0), output_(0), ngroups_(0), nstripes_(0),
576 biasvec_(0), reluslope_(0), activ_(0), is1x1_(false), useAVX(false), useAVX2(false), useAVX512(false)
579 static void run( const Mat& input, Mat& output, const Mat& weights,
580 const std::vector<float>& biasvec,
581 const std::vector<float>& reluslope,
582 Size kernel, Size pad, Size stride, Size dilation,
583 const ActivationLayer* activ, int ngroups, int nstripes )
586 input.dims == 4 && output.dims == 4,
587 input.size[0] == output.size[0],
588 weights.rows == output.size[1],
589 weights.cols == (input.size[1]/ngroups)*kernel.width*kernel.height,
590 input.type() == output.type(),
591 input.type() == weights.type(),
592 input.type() == CV_32FC1,
593 input.isContinuous(),
594 output.isContinuous(),
595 biasvec.size() == (size_t)output.size[1]+2);
599 p.weights_ = &weights;
601 for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i];
602 p.outShape[1] /= ngroups;
603 p.kernel_ = kernel; p.pad_ = pad; p.stride_ = stride; p.dilation_ = dilation;
604 p.ngroups_ = ngroups;
605 p.nstripes_ = nstripes;
607 int inpCnAll = input.size[1], width = input.size[3], height = input.size[2];
608 int inpCn = inpCnAll / ngroups;
609 p.is1x1_ = kernel == Size(1,1) && pad == Size(0, 0);
610 p.useAVX = checkHardwareSupport(CPU_AVX);
611 p.useAVX2 = checkHardwareSupport(CPU_AVX2);
612 p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
614 int ncn = std::min(inpCn, (int)BLK_SIZE_CN);
615 p.ofstab_.resize(kernel.width*kernel.height*ncn);
616 int* ofstab = &p.ofstab_[0];
618 for( int k = 0; k < ncn; k++ )
619 for( int k_r = 0; k_r < kernel.height; k_r++ )
620 for( int k_c = 0; k_c < kernel.width; k_c++ )
621 ofstab[(k*kernel.height + k_r)*kernel.width + k_c] =
622 (k*height + k_r*dilation.height)*width + k_c*dilation.width;
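        // ofstab[] caches, for every (channel, kernel row, kernel column) tap of a channel
        // block, the offset of that tap inside the input plane, so the im2row fast path can
        // gather a pixel's receptive field with simple indexed loads.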
624 p.biasvec_ = &biasvec;
625 p.reluslope_ = &reluslope;
626 p.activ_ = p.reluslope_->empty() ? activ : 0;
628 parallel_for_(Range(0, nstripes), p, nstripes);
631 virtual void operator ()(const Range &r0) const CV_OVERRIDE
633 const int valign = ConvolutionLayerImpl::VEC_ALIGN;
634 int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
635 int outW = output_->size[3], outH = output_->size[2], outCn = output_->size[1]/ngroups;
636 int width = input_->size[3], height = input_->size[2], inpCn = input_->size[1]/ngroups;
637 const int nstripes = nstripes_;
638 int kernel_w = kernel_.width, kernel_h = kernel_.height;
639 int pad_w = pad_.width, pad_h = pad_.height;
640 int stride_w = stride_.width, stride_h = stride_.height;
641 int dilation_w = dilation_.width, dilation_h = dilation_.height;
642 int karea = kernel_w*kernel_h;
644 size_t inpPlaneSize = width*height;
645 size_t outPlaneSize = outW*outH;
648 int stripesPerSample;
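        // Work partitioning: with many threads each sample's output plane is cut into
        // stripesPerSample pieces; with few threads each stripe instead covers one or more
        // whole samples (see the two branches below).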
652 if( nstripes >= batchSize*2 )
654 stripesPerSample = nstripes/batchSize;
655 stripeSize = alignSize((outPlaneSize + stripesPerSample - 1)/stripesPerSample, valign);
656 stripeSize = std::min(stripeSize, outPlaneSize);
660 stripesPerSample = 1;
661 int samplesPerStripe = std::max((batchSize + nstripes - 1)/nstripes, 1);
662 r.start *= samplesPerStripe;
663 r.end *= samplesPerStripe;
664 stripeSize = outPlaneSize;
667 const float* data_inp0_ = input_->ptr<float>();
668 const int* ofstab = &ofstab_[0];
669 const float* wptr_orig_ = weights_->ptr<float>();
670 size_t wstep = weights_->step1();
671 const float* biasptr_ = &biasvec_->at(0);
672 const float* reluptr_ = reluslope_->empty() ? 0 : &reluslope_->at(0);
673 float* data_out0_ = output_->ptr<float>();
674 size_t rowbufsz = (size_t)karea*BLK_SIZE_CN*BLK_SIZE;
675 AutoBuffer<float> rowbuf0_(rowbufsz + valign);
676 float* rowbuf0 = alignPtr(rowbuf0_.data(), (int)(valign*sizeof(float)));
        // We clear the buffer once; ultimately, this lets us avoid tail processing after
        // running the unrolled/vectorized loop. The main idea is to make sure that the
        // tail (a.k.a. padding) of each row (i.e. the elements with indices between
        // vsz = karea*ncn and vsz_a) does not contain NaNs or Infs. Because the padding
        // in the weights matrix is explicitly initialized with 0's, we handle all other
        // cases nicely, i.e. we can skip explicit re-initialization of the padding -
        // we just retain the elements from the previous iteration of the loop over
        // channels (cn0).
687 memset(rowbuf0, 0, rowbufsz*sizeof(rowbuf0[0]) );
689 for( int stripe = r.start; stripe < r.end; stripe++ )
691 int subsampleIdx = stripe/stripesPerSample;
692 if( subsampleIdx >= batchSize )
694 int stripeStart = (int)((stripe - subsampleIdx*stripesPerSample)*stripeSize);
695 int stripeEnd = (int)std::min(stripeStart + stripeSize, outPlaneSize);
696 const float* data_inp0 = data_inp0_ + subsampleIdx*inpPlaneSize*inpCn;
697 float* data_out0 = data_out0_ + subsampleIdx*outPlaneSize*outCn;
698 int startOutCn = (subsampleIdx % ngroups)*outCn;
699 const float* wptr_orig = wptr_orig_ + wstep*startOutCn;
700 const float* biasptr = biasptr_ + startOutCn;
702 for( int cn0 = 0; cn0 < inpCn; cn0 += BLK_SIZE_CN )
704 int cn1 = std::min(cn0 + BLK_SIZE_CN, inpCn);
705 int ncn = cn1 - cn0, vsz = karea*ncn;
706 int vsz_a = (int)alignSize(vsz, valign);
707 const float* wptr = wptr_orig + cn0*karea;
708 // we apply [Channels][P]ReLU (if any) during the final pass only.
709 const float* relu = cn1 == inpCn && reluptr_ ? reluptr_ + startOutCn : 0;
711 for( int ofs0 = stripeStart; ofs0 < stripeEnd; ofs0 += BLK_SIZE )
713 int ofs, ofs1 = std::min(ofs0 + BLK_SIZE, stripeEnd);
714 int out_i = ofs0 / outW;
715 int out_j = ofs0 - out_i * outW;
717 // do im2row for a part of input tensor
718 float* rowbuf = rowbuf0;
719 for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
721 int delta = std::min(ofs1 - ofs, outW - out_j);
722 int out_j1 = out_j + delta;
723 int in_i = out_i * stride_h - pad_h;
724 int in_j = out_j * stride_w - pad_w;
725 const float* imgptr = data_inp0 + (cn0*height + in_i)*width + in_j;
                        // 1x1 convolution: just copy the pixels of the current channel block into the row buffer
731 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
733 for( k = 0; k < vsz; k++ )
734 rowbuf[k] = imgptr[k*inpPlaneSize];
739 bool ok_i = 0 <= in_i && in_i < height - (kernel_h-1)*dilation_h;
740 int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
741 int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
743 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
745 // this condition should be true for most of the tensor elements, i.e.
746 // most of the time the kernel aperture is inside the tensor X-Y plane.
747 if( ok_i && out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
749 for( k = 0; k < vsz; k++ )
752 float v0 = imgptr[k1];
753 float v1 = imgptr[k1 + stride_w];
755 rowbuf[k+vsz_a] = v1;
764 int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
765 int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
                            // Here some non-contiguous parts of the row will not be filled
                            // from the tensor; we need to make sure that the uncovered
                            // elements are explicitly set to 0's. The easiest way is to
                            // set all the elements to 0's before the loop.
771 memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
772 for( k = 0; k < ncn; k++ )
774 for( i = i0; i < i1; i++ )
776 for( j = j0; j < j1; j++ )
778 int imgofs = k*(width*height) + i*(dilation_h*width) + j*dilation_w;
779 rowbuf[(k*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
788 // now compute dot product of the weights
789 // and im2row-transformed part of the tensor
790 int bsz = ofs1 - ofs0;
791 #if CV_TRY_AVX512_SKX
792 /* AVX512 convolution requires an alignment of 16, and ROI is only there for larger vector sizes */
794 opt_AVX512_SKX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
795 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
800 opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
801 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
806 opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
807 outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
810 for( int i = 0; i < outCn; i += 2 )
812 const float* wptr0 = wptr + i*wstep;
813 const float* wptr1 = wptr0 + wstep;
814 float* outptr0 = data_out0 + ofs0 + i*outPlaneSize;
815 float* outptr1 = outptr0 + outPlaneSize;
816 float bias0 = biasptr[i], bias1 = biasptr[i+1];
817 float r0 = 1.f, r1 = 1.f;
828 r0 = relu[i]; r1 = relu[i+1];
835 v_float32x4 vr0 = v_setall_f32(r0), vr1 = v_setall_f32(r1), z = v_setzero_f32();
837 for( ; j <= bsz - 4; j += 4 )
839 const float* rptr = rowbuf0 + j*vsz_a;
844 s0 = v_setall_f32(bias0);
845 s1 = v_setall_f32(bias1);
849 s0 = v_load(outptr0 + j);
850 s1 = v_load(outptr1 + j);
853 v_float32x4 vs00 = v_setzero_f32(), vs01 = v_setzero_f32(),
854 vs02 = v_setzero_f32(), vs03 = v_setzero_f32(),
855 vs10 = v_setzero_f32(), vs11 = v_setzero_f32(),
856 vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
857 for( k = 0; k < vsz; k += 4, rptr += 4 )
859 v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k);
860 v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a),
861 r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3);
873 s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
874 s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
877 s0 = v_select(s0 > z, s0, s0*vr0);
878 s1 = v_select(s1 > z, s1, s1*vr1);
881 v_store(outptr0 + j, s0);
882 v_store(outptr1 + j, s1);
885 for( ; j < bsz; j++ )
887 const float* rptr = rowbuf0 + j*vsz_a;
901 for( k = 0; k < vsz; k++ )
909 s00 = s00 > 0.f ? s00 : s00*r0;
910 s10 = s10 > 0.f ? s10 : s10*r1;
921 activ_->forwardSlice(data_out0 + stripeStart, data_out0 + stripeStart,
922 (int)(stripeEnd - stripeStart),
923 outPlaneSize, startOutCn, startOutCn + outCn);
929 bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
931 std::vector<UMat> inputs;
932 std::vector<UMat> outputs;
934 bool use_half = (inps.depth() == CV_16S);
935 inps.getUMatVector(inputs);
936 outs.getUMatVector(outputs);
938 CV_Assert(outputs.size() == 1);
939 for (int i = 0; i < inputs.size(); ++i)
940 CV_Assert(inputs[i].u != outputs[0].u);
942 if (umat_blobs.empty())
944 size_t n = blobs.size();
945 umat_blobs.resize(n);
946 for (size_t i = 0; i < n; i++)
948 blobs[i].copyTo(umat_blobs[i]);
952 if (convolutionOp.empty())
954 OCL4DNNConvConfig config;
955 config.in_shape = shape(inputs[0]);
956 config.out_shape = shape(outputs[0]);
957 config.kernel = kernel;
959 config.stride = stride;
960 config.dilation = dilation;
961 config.group = inputs[0].size[1] / umat_blobs[0].size[1];
            config.bias_term = hasBias();
963 config.use_half = use_half;
965 convolutionOp = Ptr<OCL4DNNConvSpatial<float> >(new OCL4DNNConvSpatial<float>(config));
968 int outCn = umat_blobs[0].size[0];
973 Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
974 if( !activ_relu.empty() )
976 reluslope.assign(outCn+2, activ_relu->negativeSlope);
977 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU;
980 Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
981 if( !activ_relu6.empty() )
984 reluslope[0] = activ_relu6->minValue;
985 reluslope[1] = activ_relu6->maxValue;
986 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU6;
989 Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
990 if( !activ_chprelu.empty() )
992 const Mat& m = activ_chprelu->blobs[0];
993 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
994 const float* mdata = m.ptr<float>();
995 reluslope.resize(outCn+2);
996 std::copy(mdata, mdata + outCn, reluslope.begin());
997 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
998 activType = OCL4DNN_CONV_FUSED_ACTIV_PRELU;
1002 if ( newWeightAndBias )
1004 weightsMat.copyTo(umat_blobs[0]);
1007 if ( umat_blobs.size() < 2 )
1008 umat_blobs.resize(2);
1009 umat_blobs[1] = UMat(biasvec, true);
1011 convolutionOp->setBias(fusedBias || hasBias());
1012 newWeightAndBias = false;
1017 if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU )
1019 CV_Assert(!reluslope.empty());
1020 convolutionOp->setActivReLU(true, reluslope[0]);
1022 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_PRELU)
1024 CV_Assert(!reluslope.empty());
1025 convolutionOp->setActivPReLU(true, reluslope);
1027 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_POWER)
1029 convolutionOp->setActivPower(true, power);
1031 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_TANH)
1033 convolutionOp->setActivTanh(true);
1035 else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU6)
1037 convolutionOp->setActivReLU6(true, reluslope[0], reluslope[1]);
1041 convolutionOp->setActivReLU(false, 0);
1042 convolutionOp->setActivPReLU(false, reluslope);
1043 convolutionOp->setActivPower(false, 1.f);
1044 convolutionOp->setActivTanh(false);
1045 convolutionOp->setActivReLU6(false, 0, 0);
1050 UMat& inpMat = inputs[0];
1051 UMat& outMat = outputs[0];
1052 int batch_size = inpMat.size[0];
1054 return convolutionOp->Forward(inpMat,
1055 inputs.size() == 2 ? inputs[1] : UMat(),
1057 (hasBias() || fusedBias) ? umat_blobs[1] : UMat(),
1063 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1065 CV_TRACE_FUNCTION();
1066 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1068 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1069 forward_ocl(inputs_arr, outputs_arr, internals_arr))
1071 if (inputs_arr.depth() == CV_16S)
1073 forward_fallback(inputs_arr, outputs_arr, internals_arr);
1077 std::vector<Mat> inputs, outputs;
1078 inputs_arr.getMatVector(inputs);
1079 outputs_arr.getMatVector(outputs);
1081 /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
1082 name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
1083 kernel.width, kernel.height, pad.width, pad.height,
1084 stride.width, stride.height, dilation.width, dilation.height);*/
1085 CV_Assert_N(inputs.size() == (size_t)1, inputs[0].size[1] % blobs[0].size[1] == 0,
1086 outputs.size() == 1, inputs[0].data != outputs[0].data);
1088 int ngroups = inputs[0].size[1]/blobs[0].size[1];
1089 CV_Assert(outputs[0].size[1] % ngroups == 0);
1090 int outCn = blobs[0].size[0];
1095 Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1096 if( !activ_relu.empty() )
1098 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1101 Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1102 if( !activ_chprelu.empty() )
1104 const Mat& m = activ_chprelu->blobs[0];
1105 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1106 const float* mdata = m.ptr<float>();
1107 reluslope.resize(outCn+2);
1108 std::copy(mdata, mdata + outCn, reluslope.begin());
1109 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1113 int nstripes = std::max(getNumThreads(), 1);
1115 ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
1116 kernel, pad, stride, dilation, activ.get(), ngroups, nstripes);
1119 virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
1120 const std::vector<MatShape> &outputs) const CV_OVERRIDE
1122 CV_Assert(inputs.size() == outputs.size());
1125 for (int i = 0; i < inputs.size(); i++)
1127 flops += total(outputs[i])*(CV_BIG_INT(2)*kernel.area()*inputs[i][1] + 1);
1134 class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
1137 Mat weightsMat, biasesMat;
1141 DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}
1143 MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
1145 int inpCn = inpShape[1];
1146 int inpH = inpShape[2];
1147 int inpW = inpShape[3];
1148 int outCn = outShape[1];
1149 int ngroups = inpCn / blobs[0].size[0];
1150 int outGroupCn = outCn / ngroups;
1151 int ksize = outGroupCn * kernel.height * kernel.width;
1152 return shape(ksize, inpH * inpW);
1155 virtual bool supportBackend(int backendId) CV_OVERRIDE
1157 #ifdef HAVE_INF_ENGINE
1158 if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
1160 if (INF_ENGINE_RELEASE >= 2018050000 && (adjustPad.height || adjustPad.width))
1163 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
1164 const int group = numOutput / outGroupCn;
1167 return preferableTarget == DNN_TARGET_CPU;
1169 if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
1170 return dilation.width == 1 && dilation.height == 1;
1174 #endif // HAVE_INF_ENGINE
1175 return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
1178 bool getMemoryShapes(const std::vector<MatShape> &inputs,
1179 const int requiredOutputs,
1180 std::vector<MatShape> &outputs,
1181 std::vector<MatShape> &internals) const CV_OVERRIDE
1183 CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
1184 CV_Assert(inputs.size() != 0);
1186 int inpCn = inputs[0][1];
1187 int inpH = inputs[0][2];
1188 int inpW = inputs[0][3];
1190 int outH = -1, outW = -1;
1191 if (padMode.empty())
1193 outH = stride.height * (inpH - 1) + kernel.height - 2 * pad.height + adjustPad.height;
1194 outW = stride.width * (inpW - 1) + kernel.width - 2 * pad.width + adjustPad.width;
1196 else if (padMode == "VALID")
1198 outH = stride.height * (inpH - 1) + kernel.height + adjustPad.height;
1199 outW = stride.width * (inpW - 1) + kernel.width + adjustPad.width;
1201 else if (padMode == "SAME")
1203 outH = stride.height * (inpH - 1) + 1 + adjustPad.height;
1204 outW = stride.width * (inpW - 1) + 1 + adjustPad.width;
1207 CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
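        // The formulas above invert the forward output-size computation; e.g. inpH = 7,
        // stride = 2, kernel = 3, pad = 1, adjustPad = 1 gives 2*(7 - 1) + 3 - 2*1 + 1 = 14.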
1209 int outCn = numOutput;
1211 CV_Assert(outCn % blobs[0].size[1] == 0);
1212 int ngroups = outCn / blobs[0].size[1];
1214 CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
1215 CV_Assert(blobs[0].size[0] == inpCn);
1217 int dims[] = {inputs[0][0], outCn, outH, outW};
1218 outputs.resize(inputs.size(), shape(dims, 4));
1221 internals.push_back(computeColRowShape(inputs[0], outputs[0]));
1226 void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
1228 BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
1230 std::vector<Mat> inputs, outputs;
1231 inputs_arr.getMatVector(inputs);
1232 outputs_arr.getMatVector(outputs);
1234 int pad_t = pad.height, pad_l = pad.width, pad_b = pad.height, pad_r = pad.width;
1235 getConvPoolPaddings(Size(outputs[0].size[3], outputs[0].size[2]),
1236 Size(inputs[0].size[3], inputs[0].size[2]),
1237 kernel, stride, padMode, dilation, pad_t, pad_l, pad_b, pad_r);
1239 if (pad_t != pad_b || pad_l != pad_r)
1240 CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
1245 weightsMultipliers.assign(numOutput, 1.0);
1246 if (weightsMat.empty())
1248 transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
1249 biasesMat = hasBias() ? blobs[1].reshape(1, numOutput)
1250 : Mat::zeros(numOutput, 1, CV_32F);
1254 void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
1256 Mat w = w_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(w_.at<float>(0))) : w_;
1257 Mat b = b_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(b_.at<float>(0))) : b_;
1259 CV_Assert_N(!weightsMat.empty(),
1260 w.empty() || numOutput == w.total(),
1261 b.empty() || numOutput == b.total());
1265 transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
1266 weightsMat = weightsMat.reshape(1, numOutput);
1267 for (int i = 0; i < numOutput; ++i)
1269 double wi = w.at<float>(i);
1270 weightsMultipliers[i] *= wi;
1271 cv::multiply(weightsMat.row(i), weightsMultipliers[i], weightsMat.row(i));
1272 biasesMat.at<float>(i) *= wi;
1274 weightsMat = weightsMat.reshape(1, weightsMat.total() / blobs[0].size[0]);
1279 cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
1282 newWeightAndBias = !w.empty() || !b.empty();
1285 class MatMulInvoker : public ParallelLoopBody
1288 MatMulInvoker(const Mat& a, const Mat& b, Mat& c, int nstripes)
1293 nstripes_ = nstripes;
1294 useAVX = checkHardwareSupport(CPU_AVX);
1295 useAVX2 = checkHardwareSupport(CPU_AVX2);
1296 useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
1299 void operator()(const Range& range_) const CV_OVERRIDE
1301 int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
1302 Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
1303 int mmax = a_->rows;
1304 int nmax = range.end - range.start;
1305 int kmax = a_->cols;
1307 const float* aptr = a_->ptr<float>();
1308 const float* bptr = b_->ptr<float>() + range.start;
1309 float* cptr = c_->ptr<float>() + range.start;
1310 size_t astep = a_->step1();
1311 size_t bstep = b_->step1();
1312 size_t cstep = c_->step1();
1314 #if CV_TRY_AVX512_SKX
1316 opt_AVX512_SKX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1321 opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1326 opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
1329 for( m = 0; m < mmax; m += 2 )
1331 float* dst0 = cptr + cstep*m;
1332 float* dst1 = cptr + cstep*std::min(m+1, mmax-1);
1333 const float* aptr0 = aptr + astep*m;
1334 const float* aptr1 = aptr + astep*std::min(m+1, mmax-1);
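                // Micro-kernel: two rows of A are processed together; the k loop below handles
                // four A values per iteration, broadcasting each and accumulating into the two
                // destination rows with 4-wide vector multiply-adds.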
1336 for( n = 0; n < nmax; n++ )
1342 for( k = 0; k < kmax; k += 4 )
1344 float alpha00 = aptr0[k];
1345 float alpha01 = aptr1[k];
1346 float alpha10 = 0.f, alpha11 = 0.f;
1347 float alpha20 = 0.f, alpha21 = 0.f;
1348 float alpha30 = 0.f, alpha31 = 0.f;
1349 const float* bptr0 = bptr + k*bstep;
1350 const float* bptr1 = bptr0;
1351 const float* bptr2 = bptr0;
1352 const float* bptr3 = bptr0;
1356 alpha10 = aptr0[k+1];
1357 alpha11 = aptr1[k+1];
1358 bptr1 = bptr0 + bstep;
1361 alpha20 = aptr0[k+2];
1362 alpha21 = aptr1[k+2];
1363 bptr2 = bptr1 + bstep;
1366 alpha30 = aptr0[k+3];
1367 alpha31 = aptr1[k+3];
1368 bptr3 = bptr2 + bstep;
1375 v_float32x4 a00 = v_setall_f32(alpha00);
1376 v_float32x4 a01 = v_setall_f32(alpha01);
1377 v_float32x4 a10 = v_setall_f32(alpha10);
1378 v_float32x4 a11 = v_setall_f32(alpha11);
1379 v_float32x4 a20 = v_setall_f32(alpha20);
1380 v_float32x4 a21 = v_setall_f32(alpha21);
1381 v_float32x4 a30 = v_setall_f32(alpha30);
1382 v_float32x4 a31 = v_setall_f32(alpha31);
1384 for( ; n <= nmax - 4; n += 4 )
1386 v_float32x4 b0 = v_load(bptr0 + n);
1387 v_float32x4 b1 = v_load(bptr1 + n);
1388 v_float32x4 b2 = v_load(bptr2 + n);
1389 v_float32x4 b3 = v_load(bptr3 + n);
1390 v_float32x4 d0 = v_load(dst0 + n);
1391 v_float32x4 d1 = v_load(dst1 + n);
1400 v_store(dst0 + n, d0);
1401 v_store(dst1 + n, d1);
1405 for( ; n < nmax; n++ )
1407 float b0 = bptr0[n], b1 = bptr1[n];
1408 float b2 = bptr2[n], b3 = bptr3[n];
1409 float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
1410 float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
1426 class Col2ImInvoker : public cv::ParallelLoopBody
1429 const float* data_col;
1430 const float* biasvec;
1431 int channels, height, width;
1432 int kernel_h, kernel_w;
1434 int stride_h, stride_w;
1436 int height_col, width_col;
1441 : data_col(0), biasvec(0), channels(0), height(0), width(0),
1442 kernel_h(0), kernel_w(0), pad_h(0), pad_w(0), stride_h(0), stride_w(0), data_im(0),
1443 height_col(0), width_col(0), nstripes(0), is1x1(0)
1446 static void run(const float* data_col,
1447 int channels, int height, int width,
1448 int kernel_h, int kernel_w,
1449 int pad_h, int pad_w,
1450 int stride_h, int stride_w,
1451 int height_col, int width_col,
1453 const float* biasvec,
1456 const int nstripes = getNumThreads();
1459 t.data_col = data_col;
1460 t.data_im = data_im;
1461 t.channels = channels; t.height = height; t.width = width;
1462 t.kernel_h = kernel_h; t.kernel_w = kernel_w;
1463 t.pad_h = pad_h; t.pad_w = pad_w;
1464 t.stride_h = stride_h; t.stride_w = stride_w;
1465 t.height_col = height_col;
1466 t.width_col = width_col;
1467 t.nstripes = nstripes;
1469 t.biasvec = biasvec;
1471 parallel_for_(Range(0, nstripes), t, nstripes);
1474 virtual void operator ()(const Range &r) const CV_OVERRIDE
1476 const float* data_col_ = data_col;
1477 float* data_im_ = data_im;
1478 int coeff_h = (1 - stride_h * kernel_w * height_col) * width_col;
1479 int coeff_w = (1 - stride_w * height_col * width_col);
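        // coeff_h / coeff_w convert a step of one output row / column (h_col / w_col) into the
        // corresponding step inside the flattened im2col buffer, so the inner loops below can
        // walk the buffer with simple additions instead of recomputing indices.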
1480 size_t total = (size_t)channels * height * width;
1481 size_t stripeSize = (total + nstripes - 1)/nstripes;
1482 size_t startIndex = r.start*stripeSize;
1483 size_t endIndex = std::min(r.end*stripeSize, total);
1484 int w = (int)(startIndex % width + pad_w);
1485 int h = (int)((startIndex / width) % height + pad_h);
1486 int c = (int)(startIndex / (width * height));
1487 int h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
1488 int h_col_end = std::min(h / stride_h + 1, height_col);
1489 int plane_size_col = height_col * width_col;
1490 int offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
1491 bool is1x1_ = is1x1;
1492 const float* biasvec_ = biasvec;
1494 for (size_t index = startIndex; index < endIndex; index++)
1496 // compute the start and end of the output
1497 int w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
1498 int w_col_end = std::min(w / stride_w + 1, width_col);
1502 val = data_im_[index];
1506 for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
1507 for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
1508 val += data_col_[offset + h_col * coeff_h + w_col * coeff_w];
1512 data_im_[index] = val + biasvec_[c];
1514 offset += plane_size_col;
1515 if( ++w >= width + pad_w )
1517 w = (int)((index + 1)% width + pad_w);
1518 h = (int)(((index + 1) / width) % height + pad_h);
1519 c = (int)((index + 1) / (width * height));
1520 h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
1521 h_col_end = std::min(h / stride_h + 1, height_col);
1522 offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
1529 bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
1531 std::vector<UMat> inputs;
1532 std::vector<UMat> outputs;
1533 std::vector<UMat> internals;
1535 if (inputs_.depth() == CV_16S)
1538 inputs_.getUMatVector(inputs);
1539 outputs_.getUMatVector(outputs);
1540 internals_.getUMatVector(internals);
1542 int outCn = numOutput;
1543 int inpCn = inputs[0].size[1];
1548 if (umat_weights.empty())
1550 if (newWeightAndBias)
1552 weightsMat.copyTo(umat_weights);
1553 biasesMat.copyTo(umat_biases);
1557 transpose(blobs[0].reshape(1, inpCn), umat_weights);
1559 blobs[1].reshape(1, outCn).copyTo(umat_biases);
1561 umat_biases = UMat::zeros(outCn, 1, CV_32F);
1565 String buildopt = format("-DT=%s ", ocl::typeToStr(inputs[0].type()));
1566 buildopt += format("-DPAD_H=%d -DPAD_W=%d -DKERNEL_H=%d -DKERNEL_W=%d -DSTRIDE_H=%d -DSTRIDE_W=%d ",
1567 pad.height, pad.width, kernel.height, kernel.width, stride.height, stride.width);
1569 for (size_t ii = 0; ii < outputs.size(); ii++)
1571 int ngroups = outCn / blobs[0].size[1];
1572 int inpGroupCn = inpCn / ngroups;
1573 int outGroupCn = blobs[0].size[1];
1574 const UMat& inp = inputs[ii];
1575 UMat& out = outputs[ii];
1576 int numImg = inp.size[0];
1577 int inpH = inp.size[2], inpW = inp.size[3];
1578 int outH = out.size[2], outW = out.size[3];
1580 MatShape inpshape = shape(numImg*inpCn, inpH*inpW);
1581 MatShape outshape = shape(numImg*outCn, outH*outW);
1582 UMat convBlob = inputs[ii].reshape(1, inpshape.size(), &inpshape[0]);
1583 UMat decnBlob = out.reshape(1, outshape.size(), &outshape[0]);
1584 int rows = internals[0].rows / ngroups;
1586 for (int n = 0; n < numImg; n++)
1588 for (int g = 0; g < ngroups; g++)
1590 UMat colMat = internals[0].rowRange(_Range(g * rows, rows));
1591 UMat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
1592 UMat wghtMat = umat_weights.colRange(_Range(g * inpGroupCn, inpGroupCn));
1593 gemm(wghtMat, convMat, 1, noArray(), 0, colMat, 0);
1596 for (int g = 0; g < ngroups; g++)
1598 int total = outGroupCn * decnBlob.cols;
1600 int height_col = inpH;
1601 int width_col = inpW;
1602 int coeff_h = (1 - stride.height * kernel.width * height_col) * width_col;
1603 int coeff_w = (1 - stride.width * height_col * width_col);
1605 ocl::Kernel k("col2im", ocl::dnn::col2im_oclsrc, buildopt);
1606 k.set(index++, total);
1607 k.set(index++, ocl::KernelArg::PtrReadOnly(internals[0]));
1608 k.set(index++, (int)(g * rows * internals[0].cols));
1609 k.set(index++, outGroupCn);
1610 k.set(index++, outH);
1611 k.set(index++, outW);
1612 k.set(index++, height_col);
1613 k.set(index++, width_col);
1614 k.set(index++, coeff_h);
1615 k.set(index++, coeff_w);
1616 k.set(index++, ocl::KernelArg::PtrReadOnly(umat_biases));
1617 k.set(index++, (int)(g * outGroupCn * umat_biases.cols));
1618 k.set(index++, ocl::KernelArg::PtrWriteOnly(decnBlob));
1619 k.set(index++, (int)((g + n * ngroups) * outGroupCn * decnBlob.cols));
1621 size_t global[] = { (size_t)total };
1622 bool ret = k.run(1, global, NULL, false);
1633 void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1635 CV_TRACE_FUNCTION();
1636 CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1638 CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1639 forward_ocl(inputs_arr, outputs_arr, internals_arr));
1641 if (inputs_arr.depth() == CV_16S)
1643 forward_fallback(inputs_arr, outputs_arr, internals_arr);
1647 std::vector<Mat> inputs, outputs, internals;
1648 inputs_arr.getMatVector(inputs);
1649 outputs_arr.getMatVector(outputs);
1650 internals_arr.getMatVector(internals);
1652 int outCn = numOutput;
1653 int inpCn = inputs[0].size[1];
1654 bool is1x1flag = is1x1();
1655 int nstripes = getNumThreads();
1657 if( weightsMat.empty() )
1659 transpose(blobs[0].reshape(1, inpCn), weightsMat);
1660 biasesMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat::zeros(outCn, 1, CV_32F);
1663 for (size_t ii = 0; ii < outputs.size(); ii++)
1665 int ngroups = outCn / blobs[0].size[1];
1666 int inpGroupCn = inpCn / ngroups;
1667 int outGroupCn = blobs[0].size[1];
1668 const Mat& inp = inputs[ii];
1669 Mat& out = outputs[ii];
1670 int numImg = inp.size[0];
1671 int inpH = inp.size[2], inpW = inp.size[3];
1672 int outH = out.size[2], outW = out.size[3];
1674 Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
1675 Mat decnBlob = out.reshape(1, numImg*outCn);
1677 for (int n = 0; n < numImg; n++)
1679 for (int g = 0; g < ngroups; g++)
1681 Mat dstMat = decnBlob.rowRange(_Range((g + n * ngroups) * outGroupCn, outGroupCn));
1682 Mat &colMat = is1x1flag ? dstMat : internals[0];
1684 Mat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
1685 Mat wghtMat = weightsMat.colRange(_Range(g * inpGroupCn, inpGroupCn));
1686 Mat curBiasMat = biasesMat.rowRange(_Range(g * outGroupCn, outGroupCn));
1688 //gemm(wghtMat, convMat, 1, colMat, 0, colMat, 0);
1689 MatMulInvoker mminvoker(wghtMat, convMat, colMat, nstripes);
1690 parallel_for_(Range(0, nstripes), mminvoker, nstripes);
1692 Col2ImInvoker::run(colMat.ptr<float>(), outGroupCn, outH, outW,
1693 kernel.height, kernel.width, pad.height, pad.width,
1694 stride.height, stride.width, inpH, inpW, dstMat.ptr<float>(),
1695 curBiasMat.ptr<float>(), is1x1flag);
1701 virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
1704 Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
1706 int inW, inH, inC, inN;
1707 getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
1708 const int outGroupCn = blobs[0].size[1];
1709 const int group = numOutput / outGroupCn;
1710 const int inpGroupCn = blobs[0].size[0] / group;
1712 Halide::Var x("x"), y("y"), c("c"), n("n");
1713 Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
1714 Halide::Func padded_input(name + "_constant_exterior");
1715 auto weights = wrapToHalideBuffer(blobs[0]);
1717 Halide::Func dilated_input("dilated_input");
1718 dilated_input(x, y, c, n) = 0.0f;
1719 Halide::RDom r1(0, inW, 0, inH);
1720 dilated_input(r1.x * stride.width, r1.y * stride.height, c, n) =
1721 inputBuffer(r1.x, r1.y, c, n);
1722 dilated_input.compute_root();
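        // Deconvolution is computed as an ordinary convolution over a zero-stuffed input:
        // dilated_input places the source pixels stride.width / stride.height apart with
        // zeros in between, and the kernel is then applied over the padded result.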
1724 Halide::Func bounded =
1725 Halide::BoundaryConditions::constant_exterior(dilated_input, 0,
1726 0, (inW - 1) * stride.width + 1,
1727 0, (inH - 1) * stride.height + 1,
1729 padded_input(x, y, c, n) = bounded(x, y, c, n);
1731 Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
1732 Halide::Expr kx = x + pad.width - r.x;
1733 Halide::Expr ky = y + pad.height - r.y;
1734 Halide::Expr kInC = r.z;
1735 Halide::Expr kOutC = c;
1736 for (int i = 1; i < group; ++i)
1738 kInC = select(c < outGroupCn * i, kInC, inpGroupCn * i + r.z);
1739 kOutC = select(c < outGroupCn * i, kOutC, c - outGroupCn * i);
1741 Halide::Expr topExpr = sum(padded_input(kx, ky, kInC, n) *
1742 weights(r.x, r.y, kOutC, kInC));
1745 auto bias = wrapToHalideBuffer(blobs[1], {numOutput});
1748 top(x, y, c, n) = topExpr;
1749 return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
1750 #endif // HAVE_HALIDE
1751 return Ptr<BackendNode>();
1754 virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
1756 #ifdef HAVE_INF_ENGINE
1757 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
1758 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
1759 const int group = numOutput / outGroupCn;
1761 InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
1763 ieLayer.setKernel({(size_t)kernel.height, (size_t)kernel.width});
1764 ieLayer.setStrides({(size_t)stride.height, (size_t)stride.width});
1765 ieLayer.setDilation({(size_t)dilation.height, (size_t)dilation.width});
1766 ieLayer.setPaddingsBegin({(size_t)pad.height, (size_t)pad.width});
1767 ieLayer.setPaddingsEnd({(size_t)pad.height, (size_t)pad.width});
1768 ieLayer.setGroup((size_t)group);
1769 ieLayer.setOutDepth((size_t)numOutput);
1771 InferenceEngine::Builder::Layer l = ieLayer;
1772 addConstantData("weights", wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW), l);
1774 addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C), l);
1775 return Ptr<BackendNode>(new InfEngineBackendNode(l));
1777 const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW layout
1778 const int group = numOutput / outGroupCn;
1780 InferenceEngine::LayerParams lp;
1782 lp.type = "Deconvolution";
1783 lp.precision = InferenceEngine::Precision::FP32;
1784 std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));
1786 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
1787 ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
1788 ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
1789 ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
1790 ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
1791 ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
1792 ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
1793 ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
1794 ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
1795 ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
1796 ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
1798 ieLayer->_kernel_x = kernel.width;
1799 ieLayer->_kernel_y = kernel.height;
1800 ieLayer->_stride_x = stride.width;
1801 ieLayer->_stride_y = stride.height;
1802 ieLayer->_padding_x = pad.width;
1803 ieLayer->_padding_y = pad.height;
1804 ieLayer->_dilation_x = dilation.width;
1805 ieLayer->_dilation_y = dilation.height;
1807 ieLayer->_out_depth = numOutput;
1808 ieLayer->_group = group;
1810 ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
1813 ieLayer->_biases = wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C);
1815 return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
1817 #endif // HAVE_INF_ENGINE
1818 return Ptr<BackendNode>();
1821 virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
1822 const std::vector<MatShape> &outputs) const CV_OVERRIDE
1824 CV_Assert(inputs.size() == outputs.size());
1827 int outChannels = blobs[0].size[0];
1829 for (int i = 0; i < inputs.size(); i++)
1831 flops += CV_BIG_INT(2)*outChannels*kernel.area()*total(inputs[i]);
1838 Ptr<BaseConvolutionLayer> ConvolutionLayer::create(const LayerParams ¶ms)
1840 Ptr<ConvolutionLayerImpl> l(new ConvolutionLayerImpl(params));
1844 Ptr<BaseConvolutionLayer> DeconvolutionLayer::create(const LayerParams ¶ms)
1846 return Ptr<BaseConvolutionLayer>(new DeConvolutionLayerImpl(params));