From e8e2197032f36f30c83288b3c0f6ae39b16c5ff9 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sun, 11 Nov 2018 13:18:09 +0000 Subject: [PATCH] samples: use findFile() in dnn --- samples/dnn/colorization.cpp | 6 +++--- samples/dnn/common.py | 4 ++++ samples/dnn/edge_detection.py | 2 +- samples/dnn/fast_neural_style.py | 2 +- samples/dnn/mask_rcnn.py | 4 ++-- samples/dnn/mobilenet_ssd_accuracy.py | 4 ++-- samples/dnn/object_detection.py | 4 ++-- samples/dnn/openpose.cpp | 6 +++--- samples/dnn/openpose.py | 2 +- 9 files changed, 19 insertions(+), 15 deletions(-) diff --git a/samples/dnn/colorization.cpp b/samples/dnn/colorization.cpp index 3f1c661..b68e0ec 100644 --- a/samples/dnn/colorization.cpp +++ b/samples/dnn/colorization.cpp @@ -64,9 +64,9 @@ int main(int argc, char **argv) parser.printMessage(); return 0; } - string modelTxt = parser.get<string>("proto"); - string modelBin = parser.get<string>("model"); - string imageFile = parser.get<string>("image"); + string modelTxt = samples::findFile(parser.get<string>("proto")); + string modelBin = samples::findFile(parser.get<string>("model")); + string imageFile = samples::findFile(parser.get<string>("image")); bool useOpenCL = parser.has("opencl"); if (!parser.check()) { diff --git a/samples/dnn/common.py b/samples/dnn/common.py index feafdc9..db9283b 100644 --- a/samples/dnn/common.py +++ b/samples/dnn/common.py @@ -86,6 +86,10 @@ def findFile(filename): if os.path.exists(filename): return filename + fpath = cv.samples.findFile(filename, False) + if fpath: + return fpath + samplesDataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', diff --git a/samples/dnn/edge_detection.py b/samples/dnn/edge_detection.py index 2611978..f242aab 100644 --- a/samples/dnn/edge_detection.py +++ b/samples/dnn/edge_detection.py @@ -43,7 +43,7 @@ cv.dnn_registerLayer('Crop', CropLayer) #! [Register] # Load the model. 
-net = cv.dnn.readNet(args.prototxt, args.caffemodel) +net = cv.dnn.readNet(cv.samples.findFile(args.prototxt), cv.samples.findFile(args.caffemodel)) kWinName = 'Holistically-Nested Edge Detection' cv.namedWindow('Input', cv.WINDOW_NORMAL) diff --git a/samples/dnn/fast_neural_style.py b/samples/dnn/fast_neural_style.py index ab5d67f..6afd166 100644 --- a/samples/dnn/fast_neural_style.py +++ b/samples/dnn/fast_neural_style.py @@ -13,7 +13,7 @@ parser.add_argument('--height', default=-1, type=int, help='Resize input to spec parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of postprocessing blurring.') args = parser.parse_args() -net = cv.dnn.readNetFromTorch(args.model) +net = cv.dnn.readNetFromTorch(cv.samples.findFile(args.model)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV); if args.input: diff --git a/samples/dnn/mask_rcnn.py b/samples/dnn/mask_rcnn.py index cac8d6d..a67f195 100644 --- a/samples/dnn/mask_rcnn.py +++ b/samples/dnn/mask_rcnn.py @@ -68,13 +68,13 @@ def drawBox(frame, classId, conf, left, top, right, bottom): # Load a network -net = cv.dnn.readNet(args.model, args.config) +net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV) winName = 'Mask-RCNN in OpenCV' cv.namedWindow(winName, cv.WINDOW_NORMAL) -cap = cv.VideoCapture(args.input if args.input else 0) +cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0) legend = None while cv.waitKey(1) < 0: hasFrame, frame = cap.read() diff --git a/samples/dnn/mobilenet_ssd_accuracy.py b/samples/dnn/mobilenet_ssd_accuracy.py index c522c5a..58395ac 100644 --- a/samples/dnn/mobilenet_ssd_accuracy.py +++ b/samples/dnn/mobilenet_ssd_accuracy.py @@ -26,12 +26,12 @@ parser.add_argument('--annotations', help='Path to COCO annotations file.', requ args = parser.parse_args() ### Get OpenCV predictions ##################################################### -net = 
cv.dnn.readNetFromTensorflow(args.weights, args.prototxt) +net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt)) net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV); detections = [] for imgName in os.listdir(args.images): - inp = cv.imread(os.path.join(args.images, imgName)) + inp = cv.imread(cv.samples.findFile(os.path.join(args.images, imgName))) rows = inp.shape[0] cols = inp.shape[1] inp = cv.resize(inp, (300, 300)) diff --git a/samples/dnn/object_detection.py b/samples/dnn/object_detection.py index 2a89b8c..bf1c2e4 100644 --- a/samples/dnn/object_detection.py +++ b/samples/dnn/object_detection.py @@ -67,7 +67,7 @@ if args.classes: classes = f.read().rstrip('\n').split('\n') # Load a network -net = cv.dnn.readNet(args.model, args.config, args.framework) +net = cv.dnn.readNet(cv.samples.findFile(args.model), cv.samples.findFile(args.config), args.framework) net.setPreferableBackend(args.backend) net.setPreferableTarget(args.target) outNames = net.getUnconnectedOutLayersNames() @@ -182,7 +182,7 @@ def callback(pos): cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback) -cap = cv.VideoCapture(args.input if args.input else 0) +cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0) while cv.waitKey(1) < 0: hasFrame, frame = cap.read() if not hasFrame: diff --git a/samples/dnn/openpose.cpp b/samples/dnn/openpose.cpp index da93154..b4934d7 100644 --- a/samples/dnn/openpose.cpp +++ b/samples/dnn/openpose.cpp @@ -66,9 +66,9 @@ int main(int argc, char **argv) "{ t threshold | 0.1 | threshold or confidence value for the heatmap }" ); - String modelTxt = parser.get<String>("proto"); - String modelBin = parser.get<String>("model"); - String imageFile = parser.get<String>("image"); + String modelTxt = samples::findFile(parser.get<String>("proto")); + String modelBin = samples::findFile(parser.get<String>("model")); + String imageFile = samples::findFile(parser.get<String>("image")); int W_in = 
parser.get<int>("width"); int H_in = parser.get<int>("height"); float thresh = parser.get<float>("threshold"); diff --git a/samples/dnn/openpose.py b/samples/dnn/openpose.py index 4f367c1..9fcca13 100644 --- a/samples/dnn/openpose.py +++ b/samples/dnn/openpose.py @@ -45,7 +45,7 @@ else: inWidth = args.width inHeight = args.height -net = cv.dnn.readNetFromCaffe(args.proto, args.model) +net = cv.dnn.readNetFromCaffe(cv.samples.findFile(args.proto), cv.samples.findFile(args.model)) cap = cv.VideoCapture(args.input if args.input else 0) -- 2.7.4