From: Alexander Alekhin
Date: Fri, 14 Feb 2020 16:25:45 +0000 (+0300)
Subject: Merge remote-tracking branch 'upstream/3.4' into merge-3.4
X-Git-Tag: submit/tizen/20210224.033012~2^2~307
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f3237fdc6e7eb3a0260e0942af27b9fadc9a11ef;p=platform%2Fupstream%2Fopencv.git

Merge remote-tracking branch 'upstream/3.4' into merge-3.4
---

f3237fdc6e7eb3a0260e0942af27b9fadc9a11ef
diff --cc modules/dnn/src/dnn.cpp
index dfac30e,be3ce43..0fba157
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@@ -62,12 -58,12 +62,14 @@@
  #include
  #include
 +#include
 +
  namespace cv {
  namespace dnn {
 -CV__DNN_EXPERIMENTAL_NS_BEGIN
 +CV__DNN_INLINE_NS_BEGIN
 +
 +static size_t DNN_NETWORK_DUMP = utils::getConfigurationParameterSizeT("OPENCV_DNN_NETWORK_DUMP", 0);
 +
  // this option is useful to run valgrind memory errors detection
  static bool DNN_DISABLE_MEMORY_OPTIMIZATIONS = utils::getConfigurationParameterBool("OPENCV_DNN_DISABLE_MEMORY_OPTIMIZATIONS", false);
@@@ -3832,37 -3633,43 +3893,45 @@@ string Net::Impl::dump(
              prevNode = itBackend->second;
          }
      }
 -    String colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462", "#ff4848"};
 -    String backend;
 -    switch (prefBackend) {
 -    string colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462"};
++    string colors[] = {"#ffffb3", "#fccde5", "#8dd3c7", "#bebada", "#80b1d3", "#fdb462", "#ff4848", "#b35151"};
 +    string backend;
 +    switch (prefBackend)
 +    {
          case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
          case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
          case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
          case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
          case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
          case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
++        case DNN_BACKEND_VKCOM: backend = "VULKAN/"; break;
 +        case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
 +        // don't use default:
      }
 -    out << "digraph G {" << '\n';
 +    out << "digraph G {\n";
      // Add nodes
 -    for (std::map<int, LayerData>::iterator it = map.begin(); it != map.end(); ++it)
 +    for (std::map<int, LayerData>::const_iterator it = map.begin(); it != map.end(); ++it)
      {
 -        String name = it->second.params.name;
 -        if (allLayers[it->first] == -1 && !name.empty()) {
 -            out << " " << "\"" << name << "\"" << " [label=\"";
 -            skipId.clear();
 -            skipId.push_back(it->first);
 +        const LayerData& ld = it->second;
 +        string name = ld.params.name;
 +        std::vector<int> clusterIds(1, it->first);
 +        if (allLayers[it->first] == -1 && !name.empty())
 +        {
 +            out << "\t\"" << name << "\" [label=\"";
          }
          else if (name.empty() || it->first != skippedLayers[allLayers[it->first]][0])
 +        {
              continue;
 -        else { // first node in cluster : it->first == skippedLayers[allLayers[it->first]][0]
 +        }
 +        else // first node in cluster : it->first == skippedLayers[allLayers[it->first]][0]
 +        {
              int cluster = allLayers[it->first];
 -            out << " " << "\"" << "cluster_" << cluster << "\"" << " [label=\"{";
 -            skipId = skippedLayers[allLayers[it->first]]; // vertices in current cluster
 +            out << "\t\"" << "cluster_" << cluster << "\" [label=\"{";
 +            clusterIds = skippedLayers[allLayers[it->first]]; // vertices in current cluster
          }
 -        for (int i = 0; i < skipId.size(); i++)
 +        for (int i = 0; i < clusterIds.size(); i++)
          {
 -            LayerParams& lp = map[skipId[i]].params;
 +            CV_DbgAssert(map.find(clusterIds[i]) != map.end());
 +            const LayerParams& lp = map.find(clusterIds[i])->second.params;
              if (!lp.name.empty()) {
                  if (i > 0) {
                      out << " | ";
@@@ -3895,56 -3709,81 +3971,84 @@@ if (lp.has("pad")) { 
                  DictValue pad = lp.get("pad");
                  out << "pad ";
 -                switch (pad.size()) {
 -                    case 1: out << ": " << pad << "\\l"; break;
 -                    case 2: out << "(HxW): (" << pad.get(0) << " x " << pad.get(1) << ")" << "\\l"; break;
 -                    case 4: out << "(HxW): (" << pad.get(0) << ", " << pad.get(2) << ") x (" << pad.get(1) << ", " << pad.get(3) << ")" << "\\l"; break;
 -                    case 6: out << "(DxHxW): (" << pad.get(0) << ", " << pad.get(3) << ") x (" << pad.get(1) << ", " << pad.get(4)
 -                            << ") x (" << pad.get(2) << ", " << pad.get(5) << ")" << "\\l"; break;
 +                switch (pad.size())
 +                {
 +                    case 1: out << ": " << pad; break;
 +                    case 2:
 +                        out << "(HxW): (" << pad.get(0) << " x " << pad.get(1) << ")";
 +                        break;
 +                    case 4:
 +                        out << "(HxW): (" << pad.get(0) << ", " << pad.get(2)
 +                            << ") x (" << pad.get(1) << ", " << pad.get(3) << ")";
 +                        break;
 +                    case 6:
 +                        out << "(DxHxW): (" << pad.get(0) << ", " << pad.get(3)
 +                            << ") x (" << pad.get(1) << ", " << pad.get(4)
 +                            << ") x (" << pad.get(2) << ", " << pad.get(5) << ")";
 +                        break;
                      default: CV_Error(Error::StsNotImplemented, format("Unsupported pad size = %d", pad.size()));
                  }
 -            } else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) {
 -                DictValue l = lp.get("pad_l");
 -                DictValue t = lp.get("pad_t");
 -                DictValue r = lp.get("pad_r");
 -                DictValue b = lp.get("pad_b");
 -                out << "pad (HxW): (" << t << ", " << b << ") x (" << l << ", " << r << ")" << "\\l";
 -            }
 -            else if (lp.has("pooled_w") || lp.has("pooled_h")) {
 -                DictValue h = lp.get("pooled_h");
 -                DictValue w = lp.get("pooled_w");
 -                out << "pad (HxW): " << h << " x " << w << "\\l";
 -            }
 -            if (lp.has("pool")) {
 -                out << "pool: " << lp.get("pool") << "\\l";
 -            }
 -            if (lp.has("global_pooling")) {
 -                out << "global_pooling: " << lp.get("global_pooling") << "\\l";
 -            }
 -            if (lp.has("group")) {
 -                out << "group: " << lp.get("group") << "\\l";
 -            }
 -        }
 -    }
 -    if (!it->second.outputBlobs.empty())
 -        out << "output: " << it->second.outputBlobs[0].size << "\\l";
 -
 -    Ptr<BackendNode> layerBackend = it->second.backendNodes[prefBackend];
 -    out << (!layerBackend.empty() ? backend : "OCV/");
 -    int colorId = 0;
 -    switch (it->second.layerInstance->preferableTarget) {
 -        case DNN_TARGET_CPU: out << "CPU\\n"; colorId = layerBackend.empty() ? 0 : 5; break;
 -        case DNN_TARGET_OPENCL: out << "OCL\\n"; colorId = 1; break;
 -        case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16\\n"; colorId = 2; break;
 -        case DNN_TARGET_MYRIAD: out << "MYRIAD\\n"; colorId = 3; break;
 -        case DNN_TARGET_FPGA: out << "FPGA\\n"; colorId = 4; break;
 -        case DNN_TARGET_CUDA: out << "CUDA\\n"; colorId = 5; break;
 -        case DNN_TARGET_CUDA_FP16: out << "CUDA_FP16\\n"; colorId = 6; break;
 -    }
 -    out << ((skipId.size() == 1)? "\" " : " }\" ");
 -    out << "fillcolor=\"" << colors[colorId] << "\" ";
 -    out << "style=filled ";
 -    out << "shape=" << ((skipId.size() == 1)? "box" : "record") << "]" << '\n';
 +                out << "\\l"; // align left
 +            } else if (lp.has("pad_l") && lp.has("pad_t") && lp.has("pad_r") && lp.has("pad_b")) {
 +                DictValue l = lp.get("pad_l");
 +                DictValue t = lp.get("pad_t");
 +                DictValue r = lp.get("pad_r");
 +                DictValue b = lp.get("pad_b");
 +                out << "pad (HxW): (" << t << ", " << b << ") x (" << l << ", " << r << ")";
 +                out << "\\l"; // align left
 +            }
 +            else if (lp.has("pooled_w") || lp.has("pooled_h")) {
 +                DictValue h = lp.get("pooled_h");
 +                DictValue w = lp.get("pooled_w");
 +                out << "pad pooled (HxW): " << h << " x " << w;
 +                out << "\\l"; // align left
 +            }
 +            if (lp.has("pool")) {
 +                out << "pool: " << lp.get("pool");
 +                out << "\\l"; // align left
 +            }
 +            if (lp.has("global_pooling")) {
 +                out << "global_pooling: " << lp.get("global_pooling");
 +                out << "\\l"; // align left
 +            }
 +            if (lp.has("group")) {
 +                out << "group: " << lp.get("group");
 +                out << "\\l"; // align left
 +            }
 +        }
 +    }
 +    if (!ld.outputBlobs.empty())
 +    {
 +        out << "output: " << ld.outputBlobs[0].size;
 +        out << "\\l"; // align left
 +    }
 +
 +    Ptr<BackendNode> layerBackend;
 +    std::map<int, Ptr<BackendNode> >::const_iterator ibn = ld.backendNodes.find(prefBackend);
 +    if (ibn != ld.backendNodes.end())
 +        layerBackend = ibn->second;
 +    out << (!layerBackend.empty() ? backend : "OCV/");
 +    int colorId = 0;
 +    const Target target = ld.layerInstance.empty()
 +                         ? DNN_TARGET_CPU
 +                         : (Target)(ld.layerInstance->preferableTarget);  // TODO fix preferableTarget type
 +    switch (target)
 +    {
 +        case DNN_TARGET_CPU: out << "CPU"; colorId = layerBackend.empty() ? 0 : 5; break;
 +        case DNN_TARGET_OPENCL: out << "OCL"; colorId = 1; break;
 +        case DNN_TARGET_OPENCL_FP16: out << "OCL_FP16"; colorId = 2; break;
 +        case DNN_TARGET_MYRIAD: out << "MYRIAD"; colorId = 3; break;
++        case DNN_TARGET_VULKAN: out << "VULKAN"; colorId = 7; break;
 +        case DNN_TARGET_FPGA: out << "FPGA"; colorId = 4; break;
++        case DNN_TARGET_CUDA: out << "CUDA"; colorId = 5; break;
++        case DNN_TARGET_CUDA_FP16: out << "CUDA_FP16"; colorId = 6; break;
 +        // don't use default:
 +    }
 +    out << "\\n"; // align center
 +    out << ((clusterIds.size() == 1)? "\" " : " }\" ");
 +    out << "fillcolor=\"" << colors[colorId] << "\" ";
 +    out << "style=filled ";
 +    out << "shape=" << ((clusterIds.size() == 1)? "box" : "record") << "]\n";
"box" : "record") << "]\n"; } out << '\n'; // Add edges diff --cc modules/dnn/src/layers/elementwise_layers.cpp index fa34993,42b2778..34b8e21 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@@ -413,18 -389,6 +426,14 @@@ struct ReLUFunctor : public BaseFuncto } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + std::shared_ptr op(new vkcom::OpReLU(slope)); + return op; + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 1; } }; @@@ -540,18 -495,6 +549,14 @@@ struct ReLU6Functor : public BaseFuncto } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 2; } }; @@@ -636,18 -570,6 +641,14 @@@ struct TanHFunctor : public BaseFuncto } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 1; } }; @@@ -732,23 -646,10 +733,18 @@@ struct SwishFunctor : public BaseFuncto } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 3; } - }; - struct MishFunctor + struct MishFunctor : public BaseFunctor { typedef MishLayer Layer; @@@ -834,23 -727,10 +830,18 @@@ } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 3; } - }; - struct SigmoidFunctor + struct SigmoidFunctor : public BaseFunctor { typedef SigmoidLayer Layer; @@@ -931,18 -802,6 +922,14 @@@ } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 3; } }; @@@ -950,14 -809,10 +937,12 @@@ struct ELUFunctor : public BaseFuncto { typedef ELULayer Layer; - explicit ELUFunctor() {} - bool supportBackend(int backendId, int) { - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || - backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_CUDA || + backendId == DNN_BACKEND_HALIDE || + backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; } void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const @@@ -1029,18 -877,6 +1014,14 @@@ } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool 
tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 2; } }; @@@ -1131,18 -958,6 +1112,14 @@@ struct AbsValFunctor : public BaseFunct } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 1; } }; @@@ -1228,18 -1034,6 +1205,14 @@@ struct BNLLFunctor : public BaseFuncto } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 5; } }; @@@ -1258,14 -1052,17 +1231,21 @@@ struct PowerFunctor : public BaseFuncto { if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5; + if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + return true; else - return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; + return backendId == DNN_BACKEND_OPENCV || + backendId == DNN_BACKEND_CUDA || + backendId == DNN_BACKEND_HALIDE; } + void finalize() + { + power = originPower; + scale = originScale; + shift = originShift; + } + void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const { float a = scale, b = shift, p = power; @@@ -1537,18 -1309,6 +1516,14 @@@ struct ChannelsPReLUFunctor : public Ba } #endif // HAVE_DNN_NGRAPH +#ifdef HAVE_VULKAN + std::shared_ptr initVkCom() + { + // TODO: add vkcom implementation + return std::shared_ptr(); + } +#endif // HAVE_VULKAN + - bool tryFuse(Ptr&) { return false; } - - void getScaleShift(Mat&, Mat&) const {} - int64 getFLOPSPerElement() const { return 1; } };
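
Note: the dnn.cpp hunks above extend the graphviz text produced by Net::Impl::dump() (backend/target labels, extra colors for Vulkan and CUDA targets) and introduce the OPENCV_DNN_NETWORK_DUMP configuration parameter. A minimal sketch of how that dump can be inspected through the public API follows; the model path is a placeholder, and the environment-variable behaviour is inferred from the added parameter rather than stated by this patch.

// Hedged sketch (not part of the patch). Assumes an OpenCV 4.x build with the
// dnn module; "model.onnx" is a placeholder model path.
#include <opencv2/dnn.hpp>
#include <fstream>

int main()
{
    cv::dnn::Net net = cv::dnn::readNet("model.onnx");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    // dump() returns the DOT graph assembled by Net::Impl::dump(); backend and
    // target labels are only meaningful once the network has been set up, so
    // provide an input (and optionally run forward()) first.
    cv::Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F, cv::Scalar(0));
    net.setInput(blob);
    net.forward();

    std::ofstream("net.dot") << net.dump();

    // Setting OPENCV_DNN_NETWORK_DUMP to a non-zero value appears to enable
    // automatic dumping via the DNN_NETWORK_DUMP parameter added above.
    return 0;
}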