Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author     Alexander Alekhin <alexander.a.alekhin@gmail.com>
           Sat, 6 Feb 2021 00:43:06 +0000 (00:43 +0000)
committer  Alexander Alekhin <alexander.a.alekhin@gmail.com>
           Sat, 6 Feb 2021 00:44:11 +0000 (00:44 +0000)
20 files changed:
modules/dnn/include/opencv2/dnn/shape_utils.hpp
modules/dnn/include/opencv2/dnn/version.hpp
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/concat_layer.cpp
modules/dnn/src/layers/flatten_layer.cpp
modules/dnn/src/layers/fully_connected_layer.cpp
modules/dnn/src/layers/normalize_bbox_layer.cpp
modules/dnn/src/layers/reshape_layer.cpp
modules/dnn/src/layers/scale_layer.cpp
modules/dnn/src/layers/slice_layer.cpp
modules/dnn/src/layers/softmax_layer.cpp
modules/dnn/src/onnx/onnx_graph_simplifier.cpp
modules/dnn/src/onnx/onnx_importer.cpp
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_onnx_importer.cpp
modules/dnn/test/test_tf_importer.cpp
modules/objdetect/src/qrcode.cpp
modules/python/src2/cv2.cpp
modules/python/test/test_misc.py
modules/stitching/test/test_matchers.cpp

diff --cc modules/dnn/include/opencv2/dnn/version.hpp
index 62ecadb,0000000..5ba9ac6
mode 100644,000000..100644
--- /dev/null
+++ b/modules/dnn/include/opencv2/dnn/version.hpp
@@@ -1,21 -1,0 +1,21 @@@
- #define OPENCV_DNN_API_VERSION 20201117
 +// This file is part of OpenCV project.
 +// It is subject to the license terms in the LICENSE file found in the top-level directory
 +// of this distribution and at http://opencv.org/license.html.
 +
 +#ifndef OPENCV_DNN_VERSION_HPP
 +#define OPENCV_DNN_VERSION_HPP
 +
 +/// Use with major OpenCV version only.
++#define OPENCV_DNN_API_VERSION 20210205
 +
 +#if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
 +#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
 +#define CV__DNN_INLINE_NS_BEGIN namespace CV__DNN_INLINE_NS {
 +#define CV__DNN_INLINE_NS_END }
 +namespace cv { namespace dnn { namespace CV__DNN_INLINE_NS { } using namespace CV__DNN_INLINE_NS; }}
 +#else
 +#define CV__DNN_INLINE_NS_BEGIN
 +#define CV__DNN_INLINE_NS_END
 +#endif
 +
 +#endif  // OPENCV_DNN_VERSION_HPP
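
The version bump above keeps the dnn module's inline-namespace versioning in step with the merged sources: every public dnn symbol lives inside a namespace derived from OPENCV_DNN_API_VERSION, so binaries built against different dnn API revisions cannot silently link against each other. With the value defined above, the macros expand roughly as in this simplified sketch (illustration only, not the literal preprocessor output):

    // CV__DNN_INLINE_NS expands to dnn4_v20210205
    namespace cv { namespace dnn {
        namespace dnn4_v20210205 { /* Net, Layer, readNet(), ... */ }
        using namespace dnn4_v20210205;  // callers keep writing cv::dnn::Net
    }}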
Simple merge
diff --cc modules/dnn/src/layers/concat_layer.cpp
@@@ -286,32 -277,6 +286,32 @@@ public
          }
      }
  
-         auto concat_axis = clamp(axis, input_wrapper->getRank());
 +#ifdef HAVE_CUDA
 +    Ptr<BackendNode> initCUDA(
 +        void *context_,
 +        const std::vector<Ptr<BackendWrapper>>& inputs,
 +        const std::vector<Ptr<BackendWrapper>>& outputs
 +    ) override
 +    {
 +        auto context = reinterpret_cast<csl::CSLContext*>(context_);
 +
 +        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-         int cAxis = clamp(axis, in.dimNum());
++        auto concat_axis = normalize_axis(axis, input_wrapper->getRank());
 +        return make_cuda_node<cuda4dnn::ConcatOp>(preferableTarget, std::move(context->stream), concat_axis, padding);
 +    }
 +#endif
 +
 +    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
 +    {
 +#ifdef HAVE_VULKAN
 +        vkcom::Tensor in = VkComTensor(input[0]);
++        int cAxis = normalize_axis(axis, in.dimNum());
 +        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConcat(cAxis));
 +        return Ptr<BackendNode>(new VkComBackendNode(input, op));
 +#endif // HAVE_VULKAN
 +        return Ptr<BackendNode>();
 +    }
 +
      virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
      {
  #ifdef HAVE_HALIDE
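
Across these layer changes, the 3.4-side helper clamp(axis, ndims) is replaced by normalize_axis(axis, ndims) from shape_utils.hpp. Both resolve a possibly negative, ONNX/NumPy-style axis index into an absolute one before it is handed to the CUDA or Vulkan backend. A minimal sketch of the intended semantics, assuming the helper keeps the historical clamp() behaviour (hypothetical reconstruction; see shape_utils.hpp for the real definition):

    static inline int normalize_axis(int axis, int dims)
    {
        CV_Assert(-dims <= axis && axis < dims);  // reject out-of-range axes
        return (axis < 0) ? axis + dims : axis;   // e.g. (-1, 4) -> 3
    }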
diff --cc modules/dnn/src/layers/fully_connected_layer.cpp
@@@ -514,24 -510,6 +514,24 @@@ public
          }
      }
  
-         auto flatten_start_axis = clamp(axis, input_wrapper->getRank());
 +#ifdef HAVE_CUDA
 +    Ptr<BackendNode> initCUDA(
 +        void *context_,
 +        const std::vector<Ptr<BackendWrapper>>& inputs,
 +        const std::vector<Ptr<BackendWrapper>>& outputs
 +    ) override
 +    {
 +        auto context = reinterpret_cast<csl::CSLContext*>(context_);
 +
 +        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
 +
++        auto flatten_start_axis = normalize_axis(axis, input_wrapper->getRank());
 +
 +        auto biasMat_ = bias ? biasMat : Mat();
 +        return make_cuda_node<cuda4dnn::InnerProductOp>(preferableTarget, std::move(context->stream), std::move(context->cublas_handle), flatten_start_axis, weightsMat, biasMat_);
 +    }
 +#endif
 +
      virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
      {
  #ifdef HAVE_HALIDE
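
For the fully connected (InnerProduct) layer, the normalized axis tells cuda4dnn where to flatten the input before the matrix multiply: dimensions in front of flatten_start_axis form the batch, and everything from it onward is collapsed and multiplied by weightsMat (plus biasMat when bias is set). A rough worked example, assuming the layer's usual axis = 1 on a 4-D blob (values not taken from this diff):

    // input shape          : [N, C, H, W]
    // flatten_start_axis   : normalize_axis(1, 4) == 1
    // effective computation: [N, C*H*W] x weights^T (+ bias)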
diff --cc modules/dnn/src/layers/normalize_bbox_layer.cpp
@@@ -360,35 -350,6 +360,35 @@@ public
      }
  #endif  // HAVE_DNN_NGRAPH
  
-         config.axis_start = clamp(startAxis, input_shape.size());
-         config.axis_end = clamp(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
 +
 +#ifdef HAVE_CUDA
 +    Ptr<BackendNode> initCUDA(
 +        void *context_,
 +        const std::vector<Ptr<BackendWrapper>>& inputs,
 +        const std::vector<Ptr<BackendWrapper>>& outputs
 +    ) override
 +    {
 +        auto context = reinterpret_cast<csl::CSLContext*>(context_);
 +
 +        if(pnorm != 1 && pnorm != 2)
 +            CV_Error(Error::StsNotImplemented, "Unsupported normalization mode");
 +
 +        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
 +        auto input_shape = input_wrapper->getShape();
 +
 +        NormalizeConfiguration<float> config;
 +        config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
++        config.axis_start = normalize_axis(startAxis, input_shape.size());
++        config.axis_end = normalize_axis(endAxis, input_shape.size()) + 1; /* +1 because NormalizeOp follows [start, end) convention */
 +        config.norm = pnorm;
 +        config.eps = epsilon;
 +
 +        const auto& weightsMat = blobs.empty() ? Mat() : blobs[0];
 +        return make_cuda_node<cuda4dnn::NormalizeOp>(preferableTarget, std::move(context->stream), weightsMat, config);
 +    }
 +#endif
 +
 +
  private:
      int startAxis, endAxis;
  };
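
The +1 on config.axis_end converts the layer's inclusive endAxis into the half-open [start, end) range that cuda4dnn::NormalizeOp expects. An illustrative case, assuming the common startAxis = 1, endAxis = -1 setup on a 4-D blob (hypothetical values, not from this diff):

    // axis_start = normalize_axis( 1, 4)     == 1
    // axis_end   = normalize_axis(-1, 4) + 1 == 4
    // => the Lp norm is computed over axes 1..3 (C, H, W) for each sample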
diff --cc modules/dnn/src/layers/softmax_layer.cpp
@@@ -296,33 -287,6 +296,33 @@@ public
          }
      }
  
-         auto channel_axis = clamp(axisRaw, input_wrapper->getRank());
 +#ifdef HAVE_CUDA
 +    Ptr<BackendNode> initCUDA(
 +        void *context_,
 +        const std::vector<Ptr<BackendWrapper>>& inputs,
 +        const std::vector<Ptr<BackendWrapper>>& outputs
 +    ) override
 +    {
 +        auto context = reinterpret_cast<csl::CSLContext*>(context_);
 +
 +        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
-         int cAxis = clamp(axisRaw, in.dimNum());
++        auto channel_axis = normalize_axis(axisRaw, input_wrapper->getRank());
 +        return make_cuda_node<cuda4dnn::SoftmaxOp>(preferableTarget, std::move(context->cudnn_handle), channel_axis, logSoftMax);
 +    }
 +#endif
 +
 +    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
 +    {
 +#ifdef HAVE_VULKAN
 +        vkcom::Tensor in = VkComTensor(inputs[0]);
++        int cAxis = normalize_axis(axisRaw, in.dimNum());
 +        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpSoftmax(cAxis, logSoftMax));
 +        return Ptr<BackendNode>(new VkComBackendNode(inputs, op));
 +#endif  // HAVE_VULKAN
 +        return Ptr<BackendNode>();
 +    }
 +
 +
      virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
      {
  #ifdef HAVE_HALIDE
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc modules/stitching/test/test_matchers.cpp
@@@ -75,12 -69,12 +75,12 @@@ TEST(SurfFeaturesFinder, CanFindInROIs
              bad_count++;
      }
  
 -    ASSERT_GT(tl_rect_count, 0);
 -    ASSERT_GT(br_rect_count, 0);
 -    ASSERT_EQ(bad_count, 0);
 +    EXPECT_GT(tl_rect_count, 0);
 +    EXPECT_GT(br_rect_count, 0);
 +    EXPECT_EQ(bad_count, 0);
  }
  
- #endif // HAVE_OPENCV_XFEATURES2D
+ #endif // HAVE_OPENCV_XFEATURES2D && OPENCV_ENABLE_NONFREE
  
  TEST(ParallelFeaturesFinder, IsSameWithSerial)
  {
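
The switch from ASSERT_* to EXPECT_* in the SURF ROI test is meaningful in GoogleTest: an ASSERT_* failure returns from the test body immediately, so a failing tl_rect_count check would hide the results of the remaining checks, whereas EXPECT_* records the failure and keeps going. A minimal illustration (hypothetical failure scenario, not output from this test):

    EXPECT_GT(tl_rect_count, 0);  // may fail, but execution continues
    EXPECT_GT(br_rect_count, 0);  // still evaluated and reported
    EXPECT_EQ(bad_count, 0);      // all three verdicts appear in the test log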