+#ifdef HAVE_CUDA
+ /**
+  * Creates the cuda4dnn ConvolutionOp backend node for this convolution layer.
+  *
+  * @param context_ opaque backend context; actually a csl::CSLContext* whose
+  *                 CUDA stream and cuDNN handle are moved into the new node
+  * @param inputs   exactly one CUDA-wrapped input tensor
+  * @param outputs  exactly one CUDA-wrapped output tensor
+  * @return a BackendNode wrapping a cuda4dnn::ConvolutionOp
+  */
+ Ptr<BackendNode> initCUDA(
+ void *context_,
+ const std::vector<Ptr<BackendWrapper>>& inputs,
+ const std::vector<Ptr<BackendWrapper>>& outputs
+ ) override
+ {
+ auto context = reinterpret_cast<csl::CSLContext*>(context_);
+
+ // Convolution takes a single input and produces a single output blob.
+ CV_Assert(inputs.size() == 1);
+ auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
+ auto input_shape = input_wrapper->getShape();
+
+ CV_Assert(outputs.size() == 1);
+ auto output_wrapper = outputs[0].dynamicCast<CUDABackendWrapper>();
+ auto output_shape = output_wrapper->getShape();
+
+ // blobs[0] is the filter tensor; per its use here it is laid out as
+ // [output channels, input channels per group, ...], so the group count
+ // is the ratio of total input channels to per-group input channels.
+ const auto output_feature_maps = blobs[0].size[0];
+ const auto input_feature_maps = input_shape[1];
+ const auto input_feature_maps_per_group = blobs[0].size[1];
+ const auto groups = input_feature_maps / input_feature_maps_per_group;
+
+ ConvolutionConfiguration config;
+ config.kernel_size.assign(std::begin(kernel_size), std::end(kernel_size));
+ config.dilations.assign(std::begin(dilations), std::end(dilations));
+ config.strides.assign(std::begin(strides), std::end(strides));
+
+ // Empty padMode means explicit per-dimension pads were supplied; otherwise
+ // only the "VALID" / "SAME" keywords are accepted.
+ if (padMode.empty())
+ {
+ config.padMode = ConvolutionConfiguration::PaddingMode::MANUAL;
+ config.pads_begin.assign(std::begin(pads_begin), std::end(pads_begin));
+ config.pads_end.assign(std::begin(pads_end), std::end(pads_end));
+ }
+ else if (padMode == "VALID")
+ {
+ config.padMode = ConvolutionConfiguration::PaddingMode::VALID;
+ }
+ else if (padMode == "SAME")
+ {
+ config.padMode = ConvolutionConfiguration::PaddingMode::SAME;
+ }
+ else
+ {
+ CV_Error(Error::StsNotImplemented, padMode + " padding mode not supported by ConvolutionLayer");
+ }
+
+ config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
+ config.output_shape.assign(std::begin(output_shape), std::end(output_shape));
+ config.groups = groups;
+
+ // Use the fused weights when prior fusion rewrote them; otherwise the raw blob.
+ Mat filtersMat = fusedWeights ? weightsMat : blobs[0];
+ // NOTE(review): this Mat constructor wraps biasvec's storage without copying,
+ // so biasvec must outlive any use of biasMat -- presumably the node copies it;
+ // confirm against cuda4dnn::ConvolutionOp.
+ Mat biasMat = (hasBias() || fusedBias) ? Mat(output_feature_maps, 1, CV_32F, biasvec.data()) : Mat();
+ // An all-zero bias contributes nothing; pass an empty Mat so the op can skip
+ // the bias term. (countNonZero on an empty Mat returns 0, so the no-bias
+ // branch above stays empty here as well.)
+ if (countNonZero(biasMat) == 0)
+ biasMat = Mat();
+
+ // The stream and cuDNN handle are moved out of the shared context; presumably
+ // these are shared/ref-counted handles -- verify against csl::CSLContext.
+ return make_cuda_node<cuda4dnn::ConvolutionOp>(
+ preferableTarget, std::move(context->stream), std::move(context->cudnn_handle), config, filtersMat, biasMat);
+ }
+#endif
+