Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author Alexander Alekhin <alexander.alekhin@intel.com>
Wed, 26 Feb 2020 17:09:03 +0000 (20:09 +0300)
committer Alexander Alekhin <alexander.alekhin@intel.com>
Wed, 26 Feb 2020 17:09:03 +0000 (20:09 +0300)
25 files changed:
doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown
modules/core/include/opencv2/core/matx.hpp
modules/core/src/check.cpp
modules/core/src/matrix_wrap.cpp
modules/core/src/ocl.cpp
modules/core/src/parallel.cpp
modules/core/test/test_mat.cpp
modules/dnn/include/opencv2/dnn/dnn.hpp
modules/dnn/include/opencv2/dnn/shape_utils.hpp
modules/dnn/src/caffe/caffe_importer.cpp
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/const_layer.cpp
modules/dnn/test/test_backends.cpp
modules/dnn/test/test_caffe_importer.cpp
modules/dnn/test/test_darknet_importer.cpp
modules/dnn/test/test_misc.cpp
modules/dnn/test/test_onnx_importer.cpp
modules/dnn/test/test_tf_importer.cpp
modules/dnn/test/test_torch_importer.cpp
modules/features2d/src/fast.cpp
modules/imgproc/misc/java/test/ImgprocTest.java
modules/imgproc/src/pyramids.cpp
modules/imgproc/test/test_convhull.cpp
modules/imgproc/test/test_histograms.cpp
platforms/scripts/valgrind.supp

Simple merge
Simple merge
@@@ -4698,13 -4702,14 +4698,15 @@@ public
          {
              CV_Assert(u->origdata != 0);
              Context& ctx = Context::getDefault();
 -            int createFlags = 0, flags0 = 0;
 +            int createFlags = 0;
 +            UMatData::MemoryFlag flags0 = static_cast<UMatData::MemoryFlag>(0);
              getBestFlags(ctx, accessFlags, usageFlags, createFlags, flags0);
  
+             bool copyOnMap = (flags0 & UMatData::COPY_ON_MAP) != 0;
              cl_context ctx_handle = (cl_context)ctx.ptr();
              int allocatorFlags = 0;
 -            int tempUMatFlags = 0;
 +            UMatData::MemoryFlag tempUMatFlags = static_cast<UMatData::MemoryFlag>(0);
              void* handle = NULL;
              cl_int retval = CL_SUCCESS;
  
              u->handle = handle;
              u->prevAllocator = u->currAllocator;
              u->currAllocator = this;
-             u->flags |= tempUMatFlags;
+             u->flags |= tempUMatFlags | flags0;
              u->allocatorFlags_ = allocatorFlags;
          }
 -        if(accessFlags & ACCESS_WRITE)
 +        if (!!(accessFlags & ACCESS_WRITE))
              u->markHostCopyObsolete(true);
          opencl_allocator_stats.onAllocate(u->size);
          return true;
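
A note on the hunk above: it replaces plain `int` flag variables with the typed `UMatData::MemoryFlag` and spells the truth-test as `!!(accessFlags & ACCESS_WRITE)`. The following is a minimal standalone sketch of that bitmask-enum pattern, not OpenCV's actual definitions; the enum name and values here are illustrative only.

    #include <cassert>

    enum MemFlag { COPY_ON_MAP = 1, TEMP_UMAT = 2 };   // illustrative names/values only

    inline MemFlag operator|(MemFlag a, MemFlag b) { return MemFlag(int(a) | int(b)); }
    inline MemFlag operator&(MemFlag a, MemFlag b) { return MemFlag(int(a) & int(b)); }
    inline MemFlag& operator|=(MemFlag& a, MemFlag b) { return a = a | b; }
    inline bool operator!(MemFlag a) { return int(a) == 0; }

    int main()
    {
        MemFlag flags = MemFlag(0);                  // typed "empty" value, as in the hunk
        flags |= COPY_ON_MAP;
        // bitwise ops return the enum type, so the updated code converts to bool
        // explicitly: either "!= 0" or the double-negation "!!" idiom.
        bool copyOnMap = (flags & COPY_ON_MAP) != 0;
        bool isTemp    = !!(flags & TEMP_UMAT);
        assert(copyOnMap && !isTemp);
        return 0;
    }
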
Simple merge
Simple merge
Simple merge
@@@ -7,8 -7,8 +7,9 @@@
  
  #include "../precomp.hpp"
  #include "../op_inf_engine.hpp"
 +#include "../op_cuda.hpp"
  #include "layers_common.hpp"
+ #include "../ie_ngraph.hpp"
  
  #ifdef HAVE_OPENCL
  #include "opencl_kernels_dnn.hpp"
@@@ -34,7 -29,7 +35,8 @@@ public
      {
          return backendId == DNN_BACKEND_OPENCV ||
                 backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
 -               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
++               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
 +               backendId == DNN_BACKEND_CUDA;
      }
  
      virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
      }
  #endif  // HAVE_INF_ENGINE
  
 -
+ #ifdef HAVE_DNN_NGRAPH
+     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
+                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+     {
+         auto node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
+                                                            getShape<size_t>(blobs[0]),
+                                                            blobs[0].data);
+         return Ptr<BackendNode>(new InfEngineNgraphNode(node));
+     }
+ #endif  // HAVE_DNN_NGRAPH
++
 +#ifdef HAVE_CUDA
 +    Ptr<BackendNode> initCUDA(
 +        void *context_,
 +        const std::vector<Ptr<BackendWrapper>>& inputs,
 +        const std::vector<Ptr<BackendWrapper>>& outputs
 +    ) override
 +    {
 +        auto context = reinterpret_cast<csl::CSLContext*>(context_);
 +
 +        CV_Assert(blobs.size() == 1);
 +        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blobs[0]);
 +    }
 +#endif
 +
  };
  
  Ptr<Layer> ConstLayer::create(const LayerParams& params)
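
For context, a hypothetical usage sketch (model and image paths are placeholders, not part of this commit): once ConstLayer reports support for DNN_BACKEND_CUDA and DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, the new initCUDA()/initNgraph() hooks are reached by selecting the corresponding backend and target on a network.

    #include <opencv2/dnn.hpp>
    #include <opencv2/imgcodecs.hpp>

    int main()
    {
        using namespace cv;
        using namespace cv::dnn;

        Net net = readNet("model.onnx");                 // placeholder model file
        net.setPreferableBackend(DNN_BACKEND_CUDA);      // layers supporting it go through initCUDA()
        net.setPreferableTarget(DNN_TARGET_CUDA_FP16);   // or DNN_TARGET_CUDA

        Mat img = imread("input.jpg");                   // placeholder image
        net.setInput(blobFromImage(img, 1.0, Size(224, 224), Scalar(), true, false));
        Mat out = net.forward();
        return 0;
    }
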
@@@ -260,19 -234,10 +260,19 @@@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1
  
      Mat sample = imread(findDataFile("dnn/street.png"));
      Mat inp = blobFromImage(sample, 1.0f, Size(300, 560), Scalar(), false);
 -    float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.013 : 0.0;
 -    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.06 : 0.0;
 +    float scoreDiff = 0.0, iouDiff = 0.0;
 +    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
 +    {
-         scoreDiff = 0.012;
++        scoreDiff = 0.013;
 +        iouDiff = 0.06;
 +    }
 +    else if (target == DNN_TARGET_CUDA_FP16)
 +    {
 +        scoreDiff = 0.007;
 +        iouDiff = 0.06;
 +    }
      processNet("dnn/ssd_mobilenet_v1_coco_2017_11_17.pb", "dnn/ssd_mobilenet_v1_coco_2017_11_17.pbtxt",
 -               inp, "detection_out", "", l1, lInf);
 +               inp, "detection_out", "", scoreDiff, iouDiff);
      expectNoFallbacksFromIE(net);
  }
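
The hunk above replaces a single l1/lInf pair with per-target scoreDiff/iouDiff thresholds. A hypothetical helper, not part of the OpenCV test suite, showing the same tolerance-selection pattern factored out (values taken from the hunk):

    #include <opencv2/dnn.hpp>

    struct Tolerances { float scoreDiff, iouDiff; };

    Tolerances detectionTolerances(int target)
    {
        using namespace cv::dnn;
        if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
            return {0.013f, 0.06f};        // FP16 / VPU targets run with relaxed thresholds
        if (target == DNN_TARGET_CUDA_FP16)
            return {0.007f, 0.06f};
        return {0.0f, 0.0f};               // exact match expected on other targets
    }
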
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge