Merge remote-tracking branch 'upstream/3.4' into merge-3.4
author Alexander Alekhin <alexander.alekhin@intel.com>
Thu, 12 Dec 2019 10:02:19 +0000 (13:02 +0300)
committer Alexander Alekhin <alexander.alekhin@intel.com>
Thu, 12 Dec 2019 10:02:19 +0000 (13:02 +0300)
23 files changed:
modules/core/include/opencv2/core/cvdef.h
modules/core/src/parallel_impl.cpp
modules/core/src/system.cpp
modules/dnn/CMakeLists.txt
modules/dnn/include/opencv2/dnn/all_layers.hpp
modules/dnn/misc/python/test/test_dnn.py
modules/dnn/src/dnn.cpp
modules/dnn/src/layers/convolution_layer.cpp
modules/dnn/src/layers/detection_output_layer.cpp
modules/dnn/src/layers/elementwise_layers.cpp
modules/dnn/src/layers/eltwise_layer.cpp
modules/dnn/src/layers/prior_box_layer.cpp
modules/dnn/src/layers/slice_layer.cpp
modules/dnn/src/op_inf_engine.cpp
modules/dnn/test/test_layers.cpp
modules/imgproc/include/opencv2/imgproc.hpp
modules/imgproc/src/histogram.cpp
modules/imgproc/src/thresh.cpp
modules/imgproc/test/test_approxpoly.cpp
modules/imgproc/test/test_histograms.cpp
modules/python/package/setup.py
modules/ts/src/ts_func.cpp
samples/_winpack_run_python_sample.cmd

Simple merge
Simple merge
Simple merge
Simple merge
@@@ -658,8 -579,7 +658,8 @@@ struct SwishFunctor
      bool supportBackend(int backendId, int)
      {
          return backendId == DNN_BACKEND_OPENCV ||
-                backendId == DNN_BACKEND_HALIDE;
 +               backendId == DNN_BACKEND_CUDA ||
+                backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
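
Note: the Swish activation handled by this functor is commonly defined as swish(x) = x * sigmoid(x). Below is a minimal scalar sketch of that per-element computation; the helper name and plain loop are illustrative only and are not the OpenCV kernel, which operates on the (srcptr, dstptr, len, planeSize, cn0, cn1) layout shown above.

#include <cmath>
#include <cstddef>

// Illustrative scalar Swish: dst[i] = x * sigmoid(x) = x / (1 + exp(-x)).
static void swish_apply_sketch(const float* src, float* dst, std::size_t len)
{
    for (std::size_t i = 0; i < len; ++i)
    {
        const float x = src[i];
        dst[i] = x / (1.0f + std::exp(-x));
    }
}
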
@@@ -754,8 -660,7 +755,8 @@@ struct MishFunctor
      bool supportBackend(int backendId, int)
      {
          return backendId == DNN_BACKEND_OPENCV ||
-                backendId == DNN_BACKEND_HALIDE;
 +               backendId == DNN_BACKEND_CUDA ||
+                backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
      }
  
      void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
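
Note: Mish is commonly defined as mish(x) = x * tanh(softplus(x)), with softplus(x) = ln(1 + e^x). A minimal scalar sketch follows; the helper name is hypothetical, and a production kernel would typically use a numerically safer softplus for large x.

#include <cmath>
#include <cstddef>

// Illustrative scalar Mish: dst[i] = x * tanh(ln(1 + exp(x))).
static void mish_apply_sketch(const float* src, float* dst, std::size_t len)
{
    for (std::size_t i = 0; i < len; ++i)
    {
        const float x = src[i];
        dst[i] = x * std::tanh(std::log1p(std::exp(x)));
    }
}
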
@@@ -108,10 -150,9 +156,10 @@@ public
      virtual bool supportBackend(int backendId) CV_OVERRIDE
      {
          return backendId == DNN_BACKEND_OPENCV ||
 -               backendId == DNN_BACKEND_HALIDE ||
 +               backendId == DNN_BACKEND_CUDA ||
 +               (backendId == DNN_BACKEND_HALIDE && op != DIV) ||  // TODO: not implemented, see PR #15811
                 ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
-                 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !variableChannels));
+                 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && channelsMode == ELTWISE_CHANNNELS_SAME));
      }
  
      bool getMemoryShapes(const std::vector<MatShape> &inputs,
  
          CV_Assert(outputs.size() == 1);
          const int nstripes = getNumThreads();
-         EltwiseInvoker::run(&inputs[0], (int)inputs.size(), outputs[0],
-                             coeffs, op, activ.get(), nstripes);
+         EltwiseInvoker::run(*this,
+                             &inputs[0], (int)inputs.size(), outputs[0],
+                             nstripes);
      }
  
 +#ifdef HAVE_CUDA
 +    Ptr<BackendNode> initCUDA(
 +        void *context_,
 +        const std::vector<Ptr<BackendWrapper>>& inputs,
 +        const std::vector<Ptr<BackendWrapper>>& outputs
 +    ) override
 +    {
 +        auto context = reinterpret_cast<csl::CSLContext*>(context_);
 +
 +        auto op_ = [this] {
 +            switch (op) {
 +            case MAX: return cuda4dnn::EltwiseOpType::MAX;
 +            case SUM: return cuda4dnn::EltwiseOpType::SUM;
 +            case PROD: return cuda4dnn::EltwiseOpType::PRODUCT;
 +            case DIV: return cuda4dnn::EltwiseOpType::DIV;
 +            }
 +            return cuda4dnn::EltwiseOpType::SUM;
 +        }();
 +
 +        return make_cuda_node<cuda4dnn::EltwiseOp>(preferableTarget, std::move(context->stream), op_, coeffs);
 +    }
 +#endif
 +
      virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &input) CV_OVERRIDE
      {
  #ifdef HAVE_HALIDE
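
Note: the initCUDA block above maps the layer's op enum onto cuda4dnn::EltwiseOpType through an immediately-invoked lambda. For reference, the elementwise semantics of the four ops on two equally-shaped inputs can be sketched as below; the helper and enum names are hypothetical (not OpenCV API), and the per-input coefficients are assumed to matter only for SUM, matching the coeffs argument forwarded above.

#include <algorithm>
#include <cstddef>
#include <vector>

enum class EltOpSketch { MAX, SUM, PROD, DIV };

// Illustrative two-input elementwise combine; ca/cb are SUM coefficients.
static std::vector<float> eltwise_combine_sketch(const std::vector<float>& a,
                                                 const std::vector<float>& b,
                                                 EltOpSketch op,
                                                 float ca = 1.f, float cb = 1.f)
{
    std::vector<float> out(a.size());
    for (std::size_t i = 0; i < a.size(); ++i)
    {
        switch (op)
        {
        case EltOpSketch::MAX:  out[i] = std::max(a[i], b[i]);   break;
        case EltOpSketch::SUM:  out[i] = ca * a[i] + cb * b[i];  break;
        case EltOpSketch::PROD: out[i] = a[i] * b[i];            break;
        case EltOpSketch::DIV:  out[i] = a[i] / b[i];            break;
        }
    }
    return out;
}
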
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge