Merge pull request #20175 from rogday:dnn_samples_cuda
author     rogday <s.e.a.98@yandex.ru>
Tue, 1 Jun 2021 14:00:51 +0000 (17:00 +0300)
committer  GitHub <noreply@github.com>
Tue, 1 Jun 2021 14:00:51 +0000 (14:00 +0000)
add cuda and vulkan backends to dnn samples

13 files changed:
samples/dnn/classification.cpp
samples/dnn/classification.py
samples/dnn/dasiamrpn_tracker.cpp
samples/dnn/human_parsing.cpp
samples/dnn/human_parsing.py
samples/dnn/object_detection.cpp
samples/dnn/object_detection.py
samples/dnn/person_reid.cpp
samples/dnn/person_reid.py
samples/dnn/segmentation.cpp
samples/dnn/segmentation.py
samples/dnn/siamrpnpp.py
samples/dnn/virtual_try_on.py

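Note on the numbering used in the help strings below: the IDs follow the cv::dnn enums. Backends are numbered consecutively (0 automatic, 1 Halide, 2 Inference Engine, 3 OpenCV, 4 VKCOM, 5 CUDA), while the target list jumps from 4 (Vulkan) to 6 (CUDA) because value 5 belongs to DNN_TARGET_FPGA, which these samples do not expose. A minimal sketch of how the parsed values are applied, assuming an OpenCV 4.x build (configure_net is a hypothetical helper for illustration, not part of this patch):

    import cv2 as cv

    # Enum values behind the --backend / --target IDs listed in the samples' help text.
    print("backends:", cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)      # expected: 4 5
    print("targets :", cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA,
          cv.dnn.DNN_TARGET_CUDA_FP16)                                         # expected: 4 6 7

    def configure_net(net, backend, target):
        # Mirrors what each sample in this patch does after argument parsing;
        # both calls are standard cv.dnn network methods, not patch-specific API.
        net.setPreferableBackend(backend)   # e.g. cv.dnn.DNN_BACKEND_CUDA
        net.setPreferableTarget(target)     # e.g. cv.dnn.DNN_TARGET_CUDA_FP16
        return net
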
samples/dnn/classification.cpp
index 8440371..769d687 100644 (file)
@@ -22,12 +22,17 @@ std::string keys =
                             "0: automatically (by default), "
                             "1: Halide language (http://halide-lang.org/), "
                             "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                            "3: OpenCV implementation }"
+                            "3: OpenCV implementation, "
+                            "4: VKCOM, "
+                            "5: CUDA },"
     "{ target           | 0 | Choose one of target computation devices: "
                             "0: CPU target (by default), "
                             "1: OpenCL, "
                             "2: OpenCL fp16 (half-float precision), "
-                            "3: VPU }";
+                            "3: VPU, "
+                            "4: Vulkan, "
+                            "6: CUDA, "
+                            "7: CUDA fp16 (half-float preprocess) }";
 
 using namespace cv;
 using namespace dnn;
samples/dnn/classification.py
index 558c8b0..be639e8 100644 (file)
@@ -7,9 +7,9 @@ from common import *
 
 def get_args_parser(func_args):
     backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
-                cv.dnn.DNN_BACKEND_OPENCV)
+                cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
     targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
-               cv.dnn.DNN_TARGET_HDDL)
+               cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
 
     parser = argparse.ArgumentParser(add_help=False)
     parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -32,14 +32,19 @@ def get_args_parser(func_args):
                              "%d: automatically (by default), "
                              "%d: Halide language (http://halide-lang.org/), "
                              "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                             "%d: OpenCV implementation" % backends)
+                             "%d: OpenCV implementation, "
+                             "%d: VKCOM, "
+                             "%d: CUDA" % backends)
     parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                         help='Choose one of target computation devices: '
                              '%d: CPU target (by default), '
                              '%d: OpenCL, '
                              '%d: OpenCL fp16 (half-float precision), '
                              '%d: NCS2 VPU, '
-                             '%d: HDDL VPU' % targets)
+                             '%d: HDDL VPU, '
+                             '%d: Vulkan, '
+                             '%d: CUDA, '
+                             '%d: CUDA fp16 (half-float preprocess)'% targets)
 
     args, _ = parser.parse_known_args()
     add_preproc_args(args.zoo, parser, 'classification')
samples/dnn/dasiamrpn_tracker.cpp
index e6c05ec..f6e307c 100644 (file)
@@ -27,12 +27,17 @@ const char *keys =
                             "0: automatically (by default), "
                             "1: Halide language (http://halide-lang.org/), "
                             "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                            "3: OpenCV implementation }"
+                            "3: OpenCV implementation, "
+                            "4: VKCOM, "
+                            "5: CUDA },"
         "{ target      | 0 | Choose one of target computation devices: "
                             "0: CPU target (by default), "
                             "1: OpenCL, "
                             "2: OpenCL fp16 (half-float precision), "
-                            "3: VPU }"
+                            "3: VPU, "
+                            "4: Vulkan, "
+                            "6: CUDA, "
+                            "7: CUDA fp16 (half-float preprocess) }"
 ;
 
 static
samples/dnn/human_parsing.cpp
index bf2cc29..0c00c02 100644 (file)
@@ -78,12 +78,17 @@ int main(int argc, char**argv)
                                          "0: automatically (by default), "
                                          "1: Halide language (http://halide-lang.org/), "
                                          "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                                         "3: OpenCV implementation }"
+                                         "3: OpenCV implementation, "
+                                         "4: VKCOM, "
+                                         "5: CUDA }"
         "{target  t | 0               | Choose one of target computation devices: "
                                          "0: CPU target (by default), "
                                          "1: OpenCL, "
                                          "2: OpenCL fp16 (half-float precision), "
-                                         "3: VPU }"
+                                         "3: VPU, "
+                                         "4: Vulkan, "
+                                         "6: CUDA, "
+                                         "7: CUDA fp16 (half-float preprocess) }"
     );
     if (argc == 1 || parser.has("help"))
     {
samples/dnn/human_parsing.py
index 09371fe..237f764 100644 (file)
@@ -45,8 +45,10 @@ import numpy as np
 import cv2 as cv
 
 
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+            cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
+           cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
 
 
 def preprocess(image):
@@ -162,14 +164,19 @@ if __name__ == '__main__':
                         help="Choose one of computation backends: "
                              "%d: automatically (by default), "
                              "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                             "%d: OpenCV implementation" % backends)
+                             "%d: OpenCV implementation, "
+                             "%d: VKCOM, "
+                             "%d: CUDA"% backends)
     parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                         help='Choose one of target computation devices: '
                              '%d: CPU target (by default), '
                              '%d: OpenCL, '
                              '%d: OpenCL fp16 (half-float precision), '
                              '%d: NCS2 VPU, '
-                             '%d: HDDL VPU' % targets)
+                             '%d: HDDL VPU, '
+                             '%d: Vulkan, '
+                             '%d: CUDA, '
+                             '%d: CUDA fp16 (half-float preprocess)' % targets)
     args, _ = parser.parse_known_args()
 
     if not os.path.isfile(args.model):
samples/dnn/object_detection.cpp
index 796e729..5ff112f 100644 (file)
@@ -27,12 +27,17 @@ std::string keys =
                          "0: automatically (by default), "
                          "1: Halide language (http://halide-lang.org/), "
                          "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                         "3: OpenCV implementation }"
+                         "3: OpenCV implementation, "
+                         "4: VKCOM, "
+                         "5: CUDA }"
     "{ target      | 0 | Choose one of target computation devices: "
                          "0: CPU target (by default), "
                          "1: OpenCL, "
                          "2: OpenCL fp16 (half-float precision), "
-                         "3: VPU }"
+                         "3: VPU, "
+                         "4: Vulkan, "
+                         "6: CUDA, "
+                         "7: CUDA fp16 (half-float preprocess) }"
     "{ async       | 0 | Number of asynchronous forwards at the same time. "
                         "Choose 0 for synchronous mode }";
 
samples/dnn/object_detection.py
index ec8bf82..0ca5586 100644 (file)
@@ -14,8 +14,10 @@ from tf_text_graph_common import readTextMessage
 from tf_text_graph_ssd import createSSDGraph
 from tf_text_graph_faster_rcnn import createFasterRCNNGraph
 
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+            cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL,
+           cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
 
 parser = argparse.ArgumentParser(add_help=False)
 parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -35,14 +37,19 @@ parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DE
                          "%d: automatically (by default), "
                          "%d: Halide language (http://halide-lang.org/), "
                          "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                         "%d: OpenCV implementation" % backends)
+                         "%d: OpenCV implementation, "
+                         "%d: VKCOM, "
+                         "%d: CUDA" % backends)
 parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                     help='Choose one of target computation devices: '
                          '%d: CPU target (by default), '
                          '%d: OpenCL, '
                          '%d: OpenCL fp16 (half-float precision), '
                          '%d: NCS2 VPU, '
-                         '%d: HDDL VPU' % targets)
+                         '%d: HDDL VPU, '
+                         '%d: Vulkan, '
+                         '%d: CUDA, '
+                         '%d: CUDA fp16 (half-float preprocess)' % targets)
 parser.add_argument('--async', type=int, default=0,
                     dest='asyncN',
                     help='Number of asynchronous forwards at the same time. '
samples/dnn/person_reid.cpp
index 23b7661..f0c22e9 100644 (file)
@@ -36,13 +36,15 @@ const char* keys =
 "0: automatically (by default), "
 "1: Halide language (http://halide-lang.org/), "
 "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-"3: OpenCV implementation ,"
+"3: OpenCV implementation, "
+"4: VKCOM, "
 "5: CUDA }"
 "{target  t  | 0                | choose one of target computation devices: "
 "0: CPU target (by default), "
 "1: OpenCL, "
 "2: OpenCL fp16 (half-float precision), "
-"6: CUDA ,"
+"4: Vulkan, "
+"6: CUDA, "
 "7: CUDA fp16 (half-float preprocess) }";
 
 namespace cv{
samples/dnn/person_reid.py
index 502f126..08f04fa 100644 (file)
@@ -21,6 +21,7 @@ import cv2 as cv
 backends = (cv.dnn.DNN_BACKEND_DEFAULT,
     cv.dnn.DNN_BACKEND_INFERENCE_ENGINE,
     cv.dnn.DNN_BACKEND_OPENCV,
+    cv.dnn.DNN_BACKEND_VKCOM,
     cv.dnn.DNN_BACKEND_CUDA)
 
 targets = (cv.dnn.DNN_TARGET_CPU,
@@ -28,6 +29,7 @@ targets = (cv.dnn.DNN_TARGET_CPU,
     cv.dnn.DNN_TARGET_OPENCL_FP16,
     cv.dnn.DNN_TARGET_MYRIAD,
     cv.dnn.DNN_TARGET_HDDL,
+    cv.dnn.DNN_TARGET_VULKAN,
     cv.dnn.DNN_TARGET_CUDA,
     cv.dnn.DNN_TARGET_CUDA_FP16)
 
@@ -212,7 +214,8 @@ if __name__ == '__main__':
                         help="Choose one of computation backends: "
                              "%d: automatically (by default), "
                              "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                             "%d: OpenCV implementation"
+                             "%d: OpenCV implementation, "
+                             "%d: VKCOM, "
                              "%d: CUDA backend"% backends)
     parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                         help='Choose one of target computation devices: '
@@ -220,9 +223,10 @@ if __name__ == '__main__':
                              '%d: OpenCL, '
                              '%d: OpenCL fp16 (half-float precision), '
                              '%d: NCS2 VPU, '
-                             '%d: HDDL VPU'
-                             '%d: CUDA,'
-                             '%d: CUDA FP16,'
+                             '%d: HDDL VPU, '
+                             '%d: Vulkan, '
+                             '%d: CUDA, '
+                             '%d: CUDA FP16'
                              % targets)
     args, _ = parser.parse_known_args()
 
samples/dnn/segmentation.cpp
index d9fbad8..777badf 100644 (file)
@@ -21,12 +21,17 @@ std::string keys =
                         "0: automatically (by default), "
                         "1: Halide language (http://halide-lang.org/), "
                         "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                        "3: OpenCV implementation }"
+                        "3: OpenCV implementation, "
+                        "4: VKCOM, "
+                        "5: CUDA }"
     "{ target      | 0 | Choose one of target computation devices: "
                         "0: CPU target (by default), "
                         "1: OpenCL, "
                         "2: OpenCL fp16 (half-float precision), "
-                        "3: VPU }";
+                        "3: VPU, "
+                        "4: Vulkan, "
+                        "6: CUDA, "
+                        "7: CUDA fp16 (half-float preprocess) }";
 
 using namespace cv;
 using namespace dnn;
samples/dnn/segmentation.py
index 8eeb59b..09f3f8d 100644 (file)
@@ -5,8 +5,10 @@ import sys
 
 from common import *
 
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+            cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL,
+           cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
 
 parser = argparse.ArgumentParser(add_help=False)
 parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'),
@@ -22,14 +24,19 @@ parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DE
                          "%d: automatically (by default), "
                          "%d: Halide language (http://halide-lang.org/), "
                          "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                         "%d: OpenCV implementation" % backends)
+                         "%d: OpenCV implementation, "
+                         "%d: VKCOM, "
+                         "%d: CUDA"% backends)
 parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                     help='Choose one of target computation devices: '
                          '%d: CPU target (by default), '
                          '%d: OpenCL, '
                          '%d: OpenCL fp16 (half-float precision), '
                          '%d: NCS2 VPU, '
-                         '%d: HDDL VPU' % targets)
+                         '%d: HDDL VPU, '
+                         '%d: Vulkan, '
+                         '%d: CUDA, '
+                         '%d: CUDA fp16 (half-float preprocess)'% targets)
 args, _ = parser.parse_known_args()
 add_preproc_args(args.zoo, parser, 'segmentation')
 parser = argparse.ArgumentParser(parents=[parser],
samples/dnn/siamrpnpp.py
index c7c49b1..2e15ec6 100644 (file)
@@ -327,9 +327,11 @@ def main():
     """ Sample SiamRPN Tracker
     """
     # Computation backends supported by layers
-    backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
+    backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+                cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
     # Target Devices for computation
-    targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD)
+    targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
+               cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
 
     parser = argparse.ArgumentParser(description='Use this script to run SiamRPN++ Visual Tracker',
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -338,17 +340,22 @@ def main():
     parser.add_argument('--search_net', type=str, default='search_net.onnx', help='Path to part of SiamRPN++ ran on search frame.')
     parser.add_argument('--rpn_head', type=str, default='rpn_head.onnx', help='Path to RPN Head ONNX model.')
     parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
-                        help='Select a computation backend: '
-                        "%d: automatically (by default) "
-                        "%d: Halide"
-                        "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)"
-                        "%d: OpenCV Implementation" % backends)
+                        help="Select a computation backend: "
+                        "%d: automatically (by default), "
+                        "%d: Halide, "
+                        "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
+                        "%d: OpenCV Implementation, "
+                        "%d: VKCOM, "
+                        "%d: CUDA" % backends)
     parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                         help='Select a target device: '
-                        "%d: CPU target (by default)"
-                        "%d: OpenCL"
-                        "%d: OpenCL FP16"
-                        "%d: Myriad" % targets)
+                        '%d: CPU target (by default), '
+                        '%d: OpenCL, '
+                        '%d: OpenCL FP16, '
+                        '%d: Myriad, '
+                        '%d: Vulkan, '
+                        '%d: CUDA, '
+                        '%d: CUDA fp16 (half-float preprocess)' % targets)
     args, _ = parser.parse_known_args()
 
     if args.input_video and not os.path.isfile(args.input_video):
samples/dnn/virtual_try_on.py
index 076cb21..e46f7ec 100644 (file)
@@ -16,8 +16,10 @@ from numpy import linalg
 from common import findFile
 from human_parsing import parse_human
 
-backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
-targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL)
+backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
+            cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
+targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL,
+           cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
 
 parser = argparse.ArgumentParser(description='Use this script to run virtial try-on using CP-VTON',
                                  formatter_class=argparse.ArgumentDefaultsHelpFormatter)
@@ -33,14 +35,19 @@ parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DE
                             "%d: automatically (by default), "
                             "%d: Halide language (http://halide-lang.org/), "
                             "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), "
-                            "%d: OpenCV implementation" % backends)
+                            "%d: OpenCV implementation, "
+                            "%d: VKCOM, "
+                            "%d: CUDA" % backends)
 parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int,
                     help='Choose one of target computation devices: '
                             '%d: CPU target (by default), '
                             '%d: OpenCL, '
                             '%d: OpenCL fp16 (half-float precision), '
                             '%d: NCS2 VPU, '
-                            '%d: HDDL VPU' % targets)
+                            '%d: HDDL VPU, '
+                            '%d: Vulkan, '
+                            '%d: CUDA, '
+                            '%d: CUDA fp16 (half-float preprocess)'% targets)
 args, _ = parser.parse_known_args()
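
A quick way to check whether the new CUDA IDs are actually usable on a given build, before passing --backend 5 / --target 6 to one of the samples above, is sketched below (a note of this write-up, not part of the patch; cv.cuda is present even in CPU-only builds, where the device count is simply 0):

    import cv2 as cv

    # Rough availability probe for the CUDA backend now exposed by the samples.
    try:
        has_cuda = cv.cuda.getCudaEnabledDeviceCount() > 0
    except cv.error:
        has_cuda = False
    print("CUDA backend usable:", has_cuda)

For example, assuming googlenet is a valid alias in models.yml, classification.py can then be run with the new IDs as: python classification.py googlenet --backend 5 --target 6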