"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "3: OpenCV implementation }"
+ "3: OpenCV implementation, "
+ "4: VKCOM, "
+ "5: CUDA }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
- "3: VPU }";
+ "3: VPU, "
+ "4: Vulkan, "
+ "6: CUDA, "
+ "7: CUDA fp16 (half-float preprocess) }";
using namespace cv;
using namespace dnn;
def get_args_parser(func_args):
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
- cv.dnn.DNN_BACKEND_OPENCV)
+ cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
- cv.dnn.DNN_TARGET_HDDL)
+ cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "%d: OpenCV implementation" % backends)
+ "%d: OpenCV implementation, "
+ "%d: VKCOM, "
+ "%d: CUDA" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
- '%d: HDDL VPU' % targets)
+ '%d: HDDL VPU, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA fp16 (half-float preprocess)' % targets)
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'classification')
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "3: OpenCV implementation }"
+ "3: OpenCV implementation, "
+ "4: VKCOM, "
+ "5: CUDA }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
- "3: VPU }"
+ "3: VPU, "
+ "4: Vulkan, "
+ "6: CUDA, "
+ "7: CUDA fp16 (half-float preprocess) }"
;
static
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "3: OpenCV implementation }"
+ "3: OpenCV implementation, "
+ "4: VKCOM, "
+ "5: CUDA }"
"{target t | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
- "3: VPU }"
+ "3: VPU, "
+ "4: Vulkan, "
+ "6: CUDA, "
+ "7: CUDA fp16 (half-float preprocess) }"
);
if (argc == 1 || parser.has("help"))
{
import cv2 as cv
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+ cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
+ cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
def preprocess(image):
help="Choose one of computation backends: "
"%d: automatically (by default), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "%d: OpenCV implementation" % backends)
+ "%d: OpenCV implementation, "
+ "%d: VKCOM, "
+ "%d: CUDA" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
- '%d: HDDL VPU' % targets)
+ '%d: HDDL VPU, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA fp16 (half-float preprocess)' % targets)
args, _ = parser.parse_known_args()
if not os.path.isfile(args.model):
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "3: OpenCV implementation }"
+ "3: OpenCV implementation, "
+ "4: VKCOM, "
+ "5: CUDA }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
- "3: VPU }"
+ "3: VPU, "
+ "4: Vulkan, "
+ "6: CUDA, "
+ "7: CUDA fp16 (half-float preprocess) }"
"{ async | 0 | Number of asynchronous forwards at the same time. "
"Choose 0 for synchronous mode }";
from tf_text_graph_ssd import createSSDGraph
from tf_text_graph_faster_rcnn import createFasterRCNNGraph
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+ cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL,
+ cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "%d: OpenCV implementation" % backends)
+ "%d: OpenCV implementation, "
+ "%d: VKCOM, "
+ "%d: CUDA" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
- '%d: HDDL VPU' % targets)
+ '%d: HDDL VPU, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA fp16 (half-float preprocess)' % targets)
parser.add_argument('--async', type=int, default=0,
dest='asyncN',
help='Number of asynchronous forwards at the same time. '
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-"3: OpenCV implementation ,"
+"3: OpenCV implementation, "
+"4: VKCOM, "
"5: CUDA }"
"{target t | 0 | choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
-"6: CUDA ,"
+"4: Vulkan, "
+"6: CUDA, "
"7: CUDA fp16 (half-float preprocess) }";
namespace cv{
backends = (cv.dnn.DNN_BACKEND_DEFAULT,
cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
cv.dnn.DNN_BACKEND_OPENCV,
+ cv.dnn.DNN_BACKEND_VKCOM,
cv.dnn.DNN_BACKEND_CUDA)
targets = (cv.dnn.DNN_TARGET_CPU,
cv.dnn.DNN_TARGET_OPENCL_FP16,
cv.dnn.DNN_TARGET_MYRIAD,
cv.dnn.DNN_TARGET_HDDL,
+ cv.dnn.DNN_TARGET_VULKAN,
cv.dnn.DNN_TARGET_CUDA,
cv.dnn.DNN_TARGET_CUDA_FP16)
help="Choose one of computation backends: "
"%d: automatically (by default), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "%d: OpenCV implementation"
+ "%d: OpenCV implementation, "
+ "%d: VKCOM, "
"%d: CUDA backend"% backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
- '%d: HDDL VPU'
- '%d: CUDA,'
- '%d: CUDA FP16,'
+ '%d: HDDL VPU, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA FP16'
% targets)
args, _ = parser.parse_known_args()
"0: automatically (by default), "
"1: Halide language (http://halide-lang.org/), "
"2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "3: OpenCV implementation }"
+ "3: OpenCV implementation, "
+ "4: VKCOM, "
+ "5: CUDA }"
"{ target | 0 | Choose one of target computation devices: "
"0: CPU target (by default), "
"1: OpenCL, "
"2: OpenCL fp16 (half-float precision), "
- "3: VPU }";
+ "3: VPU, "
+ "4: Vulkan, "
+ "6: CUDA, "
+ "7: CUDA fp16 (half-float preprocess) }";
using namespace cv;
using namespace dnn;
from common import *
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+ cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL,
+ cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "%d: OpenCV implementation" % backends)
+ "%d: OpenCV implementation, "
+ "%d: VKCOM, "
+ "%d: CUDA" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
- '%d: HDDL VPU' % targets)
+ '%d: HDDL VPU, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA fp16 (half-float preprocess)' % targets)
args, _ = parser.parse_known_args()
add_preproc_args(args.zoo, parser, 'segmentation')
parser = argparse.ArgumentParser(parents=[parser],
""" Sample SiamRPN Tracker
"""
# Computation backends supported by layers
- backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
+ backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+ cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
# Target Devices for computation
- targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
+ cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
parser = argparse.ArgumentParser(description='Use this script to run SiamRPN++ Visual Tracker',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--search_net', type=str, default='search_net.onnx', help='Path to part of SiamRPN++ ran on search frame.')
parser.add_argument('--rpn_head', type=str, default='rpn_head.onnx', help='Path to RPN Head ONNX model.')
parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
- help='Select a computation backend: '
- "%d: automatically (by default) "
- "%d: Halide"
- "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)"
- "%d: OpenCV Implementation" % backends)
+ help="Select a computation backend: "
+ "%d: automatically (by default), "
+ "%d: Halide, "
+ "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+ "%d: OpenCV Implementation, "
+ "%d: VKCOM, "
+ "%d: CUDA" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Select a target device: '
- "%d: CPU target (by default)"
- "%d: OpenCL"
- "%d: OpenCL FP16"
- "%d: Myriad" % targets)
+ '%d: CPU target (by default), '
+ '%d: OpenCL, '
+ '%d: OpenCL FP16, '
+ '%d: Myriad, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA fp16 (half-float preprocess)' % targets)
args, _ = parser.parse_known_args()
if args.input_video and not os.path.isfile(args.input_video):
from common import findFile
from human_parsing import parse_human
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+ cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL,
+ cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
parser = argparse.ArgumentParser(description='Use this script to run virtial try-on using CP-VTON',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
"%d: automatically (by default), "
"%d: Halide language (http://halide-lang.org/), "
"%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
- "%d: OpenCV implementation" % backends)
+ "%d: OpenCV implementation, "
+ "%d: VKCOM, "
+ "%d: CUDA" % backends)
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
help='Choose one of target computation devices: '
'%d: CPU target (by default), '
'%d: OpenCL, '
'%d: OpenCL fp16 (half-float precision), '
'%d: NCS2 VPU, '
- '%d: HDDL VPU' % targets)
+ '%d: HDDL VPU, '
+ '%d: Vulkan, '
+ '%d: CUDA, '
+ '%d: CUDA fp16 (half-float preprocess)' % targets)
args, _ = parser.parse_known_args()