// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

#ifndef OPENCV_DNN_VERSION_HPP
#define OPENCV_DNN_VERSION_HPP

/// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20201117
+#define OPENCV_DNN_API_VERSION 20210205

#if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
#define CV__DNN_INLINE_NS_BEGIN namespace CV__DNN_INLINE_NS {
#define CV__DNN_INLINE_NS_END }
namespace cv { namespace dnn { namespace CV__DNN_INLINE_NS { } using namespace CV__DNN_INLINE_NS; }}
#else
#define CV__DNN_INLINE_NS_BEGIN
#define CV__DNN_INLINE_NS_END
#endif

#endif // OPENCV_DNN_VERSION_HPP
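The block above is OpenCV's ABI-versioning pattern: everything in cv::dnn is compiled into a nested namespace named dnn4_v<OPENCV_DNN_API_VERSION>, and the `using namespace` makes it reachable as plain cv::dnn. Bumping the version therefore changes the mangled symbol names, so binaries built against different dnn API versions fail to link instead of misbehaving at runtime. A minimal standalone sketch of the same pattern (all names here are hypothetical, not OpenCV's):

#define SKETCH_CAT_(a, b) a##b
#define SKETCH_CAT(a, b) SKETCH_CAT_(a, b)
#define SKETCH_API_VERSION 20210205
#define SKETCH_NS SKETCH_CAT(dnn4_v, SKETCH_API_VERSION)

namespace cv { namespace dnn {
namespace SKETCH_NS { inline int apiVersion() { return SKETCH_API_VERSION; } }
using namespace SKETCH_NS;   // lets callers write cv::dnn::apiVersion()
}} // namespace cv::dnn

int main()
{
    // Both spellings name the same function; the versioned namespace is what
    // ends up in the mangled symbol, so a version bump breaks cross-version links.
    return cv::dnn::apiVersion() == cv::dnn::dnn4_v20210205::apiVersion() ? 0 : 1;
}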
}
}
#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto concat_axis = clamp(axis, input_wrapper->getRank());
+        auto concat_axis = normalize_axis(axis, input_wrapper->getRank());
        return make_cuda_node<cuda4dnn::ConcatOp>(preferableTarget, std::move(context->stream), concat_axis, padding);
    }
#endif

    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        vkcom::Tensor in = VkComTensor(input[0]);
-        int cAxis = clamp(axis, in.dimNum());
+        int cAxis = normalize_axis(axis, in.dimNum());
        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConcat(cAxis));
        return Ptr<BackendNode>(new VkComBackendNode(input, op));
#endif // HAVE_VULKAN
        return Ptr<BackendNode>();
    }

virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
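Across these hunks the helper formerly called clamp() is renamed to normalize_axis(), which better matches what it does: it wraps a possibly negative axis index into [0, rank) rather than clamping out-of-range values. A standalone sketch of the behavior relied on at these call sites (an assumption inferred from usage, not the actual shape_utils.hpp code):

#include <cassert>

// Assumed semantics: axis in [-dims, dims) maps to the canonical range
// [0, dims); negative values count from the end, Python-style.
static inline int normalize_axis(int axis, int dims)
{
    assert(axis >= -dims && axis < dims);
    return axis < 0 ? axis + dims : axis;
}

int main()
{
    assert(normalize_axis(1, 4) == 1);   // already canonical
    assert(normalize_axis(-1, 4) == 3);  // last axis of a 4-D tensor
    return 0;
}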
}
}
#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();

-        auto flatten_start_axis = clamp(axis, input_wrapper->getRank());
+        auto flatten_start_axis = normalize_axis(axis, input_wrapper->getRank());

        auto biasMat_ = bias ? biasMat : Mat();
        return make_cuda_node<cuda4dnn::InnerProductOp>(preferableTarget, std::move(context->stream), std::move(context->cublas_handle), flatten_start_axis, weightsMat, biasMat_);
    }
#endif

virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
}
#endif // HAVE_DNN_NGRAPH

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        if(pnorm != 1 && pnorm != 2)
            CV_Error(Error::StsNotImplemented, "Unsupported normalization mode");

        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
        auto input_shape = input_wrapper->getShape();

        NormalizeConfiguration<float> config;
        config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
-        config.axis_start = clamp(startAxis, input_shape.size());
-        config.axis_end = clamp(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
+        config.axis_start = normalize_axis(startAxis, input_shape.size());
+        config.axis_end = normalize_axis(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
        config.norm = pnorm;
        config.eps = epsilon;

        const auto& weightsMat = blobs.empty() ? Mat() : blobs[0];
        return make_cuda_node<cuda4dnn::NormalizeOp>(preferableTarget, std::move(context->stream), weightsMat, config);
    }
#endif

private:
int startAxis, endAxis;
};
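Note the `+ 1` on axis_end above: NormalizeOp takes a half-open [start, end) axis range, while the layer's endAxis is inclusive. A quick check of that bookkeeping with assumed values (a rank-4 NCHW input and the common startAxis = 1, endAxis = -1 configuration):

#include <cassert>

// Same wrapping semantics assumed as in the sketch earlier in this section.
static inline int normalize_axis(int axis, int dims) { return axis < 0 ? axis + dims : axis; }

int main()
{
    const int rank = 4;               // e.g. an NCHW tensor
    int startAxis = 1, endAxis = -1;  // normalize across C, H and W
    int axis_start = normalize_axis(startAxis, rank);    // 1
    int axis_end   = normalize_axis(endAxis, rank) + 1;  // 3 + 1 = 4, exclusive
    assert(axis_start == 1 && axis_end == 4);
    return 0;
}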
}
}
#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-        auto channel_axis = clamp(axisRaw, input_wrapper->getRank());
+        auto channel_axis = normalize_axis(axisRaw, input_wrapper->getRank());
        return make_cuda_node<cuda4dnn::SoftmaxOp>(preferableTarget, std::move(context->cudnn_handle), channel_axis, logSoftMax);
    }
#endif

    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        vkcom::Tensor in = VkComTensor(inputs[0]);
-        int cAxis = clamp(axisRaw, in.dimNum());
+        int cAxis = normalize_axis(axisRaw, in.dimNum());
        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpSoftmax(cAxis, logSoftMax));
        return Ptr<BackendNode>(new VkComBackendNode(inputs, op));
#endif // HAVE_VULKAN
        return Ptr<BackendNode>();
    }

virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
{
#ifdef HAVE_HALIDE
bad_count++;
}
- ASSERT_GT(tl_rect_count, 0);
- ASSERT_GT(br_rect_count, 0);
- ASSERT_EQ(bad_count, 0);
+ EXPECT_GT(tl_rect_count, 0);
+ EXPECT_GT(br_rect_count, 0);
+ EXPECT_EQ(bad_count, 0);
}
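The switch from ASSERT_* to EXPECT_* is deliberate: in GoogleTest, ASSERT_* returns from the test body at the first failure, whereas EXPECT_* records the failure and keeps executing, so all three counters get reported in a single run. A minimal illustration (hypothetical test, not from this suite):

#include <gtest/gtest.h>

TEST(Example, ExpectContinuesAfterFailure)
{
    int tl_rect_count = 1, br_rect_count = 1, bad_count = 0;
    // If any check below failed, EXPECT_* would log it and still evaluate the
    // remaining ones; ASSERT_* would abandon the test body immediately.
    EXPECT_GT(tl_rect_count, 0);
    EXPECT_GT(br_rect_count, 0);
    EXPECT_EQ(bad_count, 0);
}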
- #endif // HAVE_OPENCV_XFEATURES2D
+ #endif // HAVE_OPENCV_XFEATURES2D && OPENCV_ENABLE_NONFREE
TEST(ParallelFeaturesFinder, IsSameWithSerial)
{