fix 4.x links
authorAlexander Alekhin <alexander.a.alekhin@gmail.com>
Wed, 22 Dec 2021 13:01:26 +0000 (13:01 +0000)
committerAlexander Alekhin <alexander.a.alekhin@gmail.com>
Wed, 22 Dec 2021 13:24:30 +0000 (13:24 +0000)
134 files changed:
README.md
apps/opencv_stitching_tool/opencv_stitching/blender.py
apps/opencv_stitching_tool/opencv_stitching/camera_adjuster.py
apps/opencv_stitching_tool/opencv_stitching/camera_wave_corrector.py
apps/opencv_stitching_tool/opencv_stitching/exposure_error_compensator.py
apps/opencv_stitching_tool/opencv_stitching/feature_matcher.py
apps/opencv_stitching_tool/opencv_stitching/seam_finder.py
apps/opencv_stitching_tool/opencv_stitching/test/SAMPLE_IMAGES_TO_DOWNLOAD.txt
apps/opencv_stitching_tool/opencv_stitching/test/stitching_detailed.py
apps/opencv_stitching_tool/opencv_stitching/timelapser.py
apps/opencv_stitching_tool/opencv_stitching/warper.py
doc/js_tutorials/js_assets/js_image_classification.html
doc/js_tutorials/js_assets/js_image_classification_model_info.json
doc/js_tutorials/js_assets/js_image_classification_webnn_polyfill.html
doc/js_tutorials/js_assets/js_image_classification_with_camera.html
doc/js_tutorials/js_assets/js_object_detection.html
doc/js_tutorials/js_assets/js_object_detection_model_info.json
doc/js_tutorials/js_assets/js_object_detection_with_camera.html
doc/js_tutorials/js_assets/webnn-electron/js_image_classification_webnn_electron.html
doc/js_tutorials/js_setup/js_nodejs/js_nodejs.markdown
doc/js_tutorials/js_setup/js_usage/js_usage.markdown
doc/py_tutorials/py_ml/py_svm/py_svm_opencv/py_svm_opencv.markdown
doc/tutorials/app/intelperc.markdown
doc/tutorials/app/kinect_openni.markdown
doc/tutorials/app/orbbec_astra.markdown
doc/tutorials/app/trackbar.markdown
doc/tutorials/app/video_input_psnr_ssim.markdown
doc/tutorials/app/video_write.markdown
doc/tutorials/calib3d/camera_calibration/camera_calibration.markdown
doc/tutorials/calib3d/camera_calibration_pattern/camera_calibration_pattern.markdown
doc/tutorials/calib3d/interactive_calibration/interactive_calibration.markdown
doc/tutorials/core/adding_images/adding_images.markdown
doc/tutorials/core/basic_linear_transform/basic_linear_transform.markdown
doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.markdown
doc/tutorials/core/file_input_output_with_xml_yml/file_input_output_with_xml_yml.markdown
doc/tutorials/core/how_to_scan_images/how_to_scan_images.markdown
doc/tutorials/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.markdown
doc/tutorials/core/mat-mask-operations/mat_mask_operations.markdown
doc/tutorials/core/mat_the_basic_image_container/mat_the_basic_image_container.markdown
doc/tutorials/core/univ_intrin/univ_intrin.markdown
doc/tutorials/dnn/dnn_OCR/dnn_OCR.markdown
doc/tutorials/dnn/dnn_custom_layers/dnn_custom_layers.md
doc/tutorials/dnn/dnn_face/dnn_face.markdown
doc/tutorials/dnn/dnn_googlenet/dnn_googlenet.markdown
doc/tutorials/dnn/dnn_halide_scheduling/dnn_halide_scheduling.markdown
doc/tutorials/dnn/dnn_pytorch_tf_classification/pytorch_cls_model_conversion_c_tutorial.md
doc/tutorials/dnn/dnn_pytorch_tf_classification/pytorch_cls_model_conversion_tutorial.md
doc/tutorials/dnn/dnn_pytorch_tf_classification/tf_cls_model_conversion_tutorial.md
doc/tutorials/dnn/dnn_pytorch_tf_detection/tf_det_model_conversion_tutorial.md
doc/tutorials/dnn/dnn_pytorch_tf_segmentation/pytorch_sem_segm_model_conversion_tutorial.md
doc/tutorials/dnn/dnn_pytorch_tf_segmentation/tf_sem_segm_model_conversion_tutorial.md
doc/tutorials/dnn/dnn_text_spotting/dnn_text_spotting.markdown
doc/tutorials/dnn/dnn_yolo/dnn_yolo.markdown
doc/tutorials/features2d/akaze_matching/akaze_matching.markdown
doc/tutorials/features2d/feature_description/feature_description.markdown
doc/tutorials/features2d/feature_detection/feature_detection.markdown
doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown
doc/tutorials/features2d/feature_homography/feature_homography.markdown
doc/tutorials/features2d/homography/homography.markdown
doc/tutorials/features2d/trackingmotion/corner_subpixels/corner_subpixels.markdown
doc/tutorials/features2d/trackingmotion/generic_corner_detector/generic_corner_detector.markdown
doc/tutorials/features2d/trackingmotion/good_features_to_track/good_features_to_track.markdown
doc/tutorials/features2d/trackingmotion/harris_detector/harris_detector.markdown
doc/tutorials/gpu/gpu-basics-similarity/gpu_basics_similarity.markdown
doc/tutorials/imgproc/basic_geometric_drawing/basic_geometric_drawing.markdown
doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.markdown
doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.markdown
doc/tutorials/imgproc/histograms/back_projection/back_projection.markdown
doc/tutorials/imgproc/histograms/histogram_calculation/histogram_calculation.markdown
doc/tutorials/imgproc/histograms/histogram_comparison/histogram_comparison.markdown
doc/tutorials/imgproc/histograms/histogram_equalization/histogram_equalization.markdown
doc/tutorials/imgproc/histograms/template_matching/template_matching.markdown
doc/tutorials/imgproc/hitOrMiss/hitOrMiss.markdown
doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.markdown
doc/tutorials/imgproc/imgtrans/copyMakeBorder/copyMakeBorder.markdown
doc/tutorials/imgproc/imgtrans/distance_transformation/distance_transform.markdown
doc/tutorials/imgproc/imgtrans/filter_2d/filter_2d.markdown
doc/tutorials/imgproc/imgtrans/hough_circle/hough_circle.markdown
doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.markdown
doc/tutorials/imgproc/imgtrans/laplace_operator/laplace_operator.markdown
doc/tutorials/imgproc/imgtrans/remap/remap.markdown
doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.markdown
doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.markdown
doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md
doc/tutorials/imgproc/opening_closing_hats/opening_closing_hats.markdown
doc/tutorials/imgproc/pyramids/pyramids.markdown
doc/tutorials/imgproc/random_generator_and_text/random_generator_and_text.markdown
doc/tutorials/imgproc/shapedescriptors/bounding_rects_circles/bounding_rects_circles.markdown
doc/tutorials/imgproc/shapedescriptors/bounding_rotated_ellipses/bounding_rotated_ellipses.markdown
doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.markdown
doc/tutorials/imgproc/shapedescriptors/hull/hull.markdown
doc/tutorials/imgproc/shapedescriptors/moments/moments.markdown
doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.markdown
doc/tutorials/imgproc/threshold/threshold.markdown
doc/tutorials/imgproc/threshold_inRange/threshold_inRange.markdown
doc/tutorials/introduction/android_binary_package/O4A_SDK.markdown
doc/tutorials/introduction/android_binary_package/android_ocl_intro.markdown
doc/tutorials/introduction/clojure_dev_intro/clojure_dev_intro.markdown
doc/tutorials/introduction/config_reference/config_reference.markdown
doc/tutorials/introduction/display_image/display_image.markdown
doc/tutorials/introduction/windows_visual_studio_opencv/windows_visual_studio_opencv.markdown
doc/tutorials/others/background_subtraction.markdown
doc/tutorials/others/cascade_classifier.markdown
doc/tutorials/others/hdr_imaging.markdown
doc/tutorials/others/introduction_to_pca.markdown
doc/tutorials/others/introduction_to_svm.markdown
doc/tutorials/others/meanshift.markdown
doc/tutorials/others/non_linear_svms.markdown
doc/tutorials/others/optical_flow.markdown
doc/tutorials/others/stitcher.markdown
doc/tutorials/others/traincascade.markdown
modules/core/doc/cuda.markdown
modules/core/include/opencv2/core/simd_intrinsics.hpp
modules/dnn/misc/quantize_face_detector.py
modules/dnn/test/imagenet_cls_test_inception.py
modules/dnn/test/pascal_semsegm_test_fcn.py
modules/objdetect/include/opencv2/objdetect.hpp
modules/python/test/tests_common.py
platforms/readme.txt
samples/android/camera-calibration/src/org/opencv/samples/cameracalibration/CameraCalibrationActivity.java
samples/cpp/detect_blob.cpp
samples/cpp/facial_features.cpp
samples/cpp/mask_tmpl.cpp
samples/cpp/matchmethod_orb_akaze_brisk.cpp
samples/dnn/README.md
samples/dnn/js_face_recognition.html
samples/dnn/openpose.cpp
samples/install/linux_install_a.sh
samples/install/linux_install_b.sh
samples/install/linux_quick_install.sh
samples/install/linux_quick_install_contrib.sh
samples/python/stitching_detailed.py
samples/semihosting/histogram/histogram.cpp
samples/semihosting/norm/norm.cpp

index b989720..c626372 100644 (file)
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
 * Homepage: <https://opencv.org>
   * Courses: <https://opencv.org/courses>
-* Docs: <https://docs.opencv.org/master/>
+* Docs: <https://docs.opencv.org/4.x/>
 * Q&A forum: <https://forum.opencv.org>
   * previous forum (read only): <http://answers.opencv.org>
 * Issue tracking: <https://github.com/opencv/opencv/issues>
index 04e6efe..886aef6 100644 (file)
@@ -36,11 +36,11 @@ class Blender:
         self.blender.prepare(dst_sz)
 
     def feed(self, img, mask, corner):
-        """https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#a64837308bcf4e414a6219beff6cbe37a"""  # noqa
+        """https://docs.opencv.org/4.x/d6/d4a/classcv_1_1detail_1_1Blender.html#a64837308bcf4e414a6219beff6cbe37a"""  # noqa
         self.blender.feed(cv.UMat(img.astype(np.int16)), mask, corner)
 
     def blend(self):
-        """https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#aa0a91ce0d6046d3a63e0123cbb1b5c00"""  # noqa
+        """https://docs.opencv.org/4.x/d6/d4a/classcv_1_1detail_1_1Blender.html#aa0a91ce0d6046d3a63e0123cbb1b5c00"""  # noqa
         result = None
         result_mask = None
         result, result_mask = self.blender.blend(result, result_mask)
index 03aa834..684fd3d 100644 (file)
@@ -6,7 +6,7 @@ from .stitching_error import StitchingError
 
 
 class CameraAdjuster:
-    """https://docs.opencv.org/master/d5/d56/classcv_1_1detail_1_1BundleAdjusterBase.html"""  # noqa
+    """https://docs.opencv.org/4.x/d5/d56/classcv_1_1detail_1_1BundleAdjusterBase.html"""  # noqa
 
     CAMERA_ADJUSTER_CHOICES = OrderedDict()
     CAMERA_ADJUSTER_CHOICES['ray'] = cv.detail_BundleAdjusterRay
index 6a9142d..97b821b 100644 (file)
@@ -4,7 +4,7 @@ import numpy as np
 
 
 class WaveCorrector:
-    """https://docs.opencv.org/master/d7/d74/group__stitching__rotation.html#ga83b24d4c3e93584986a56d9e43b9cf7f"""  # noqa
+    """https://docs.opencv.org/4.x/d7/d74/group__stitching__rotation.html#ga83b24d4c3e93584986a56d9e43b9cf7f"""  # noqa
     WAVE_CORRECT_CHOICES = OrderedDict()
     WAVE_CORRECT_CHOICES['horiz'] = cv.detail.WAVE_CORRECT_HORIZ
     WAVE_CORRECT_CHOICES['vert'] = cv.detail.WAVE_CORRECT_VERT
index 36e0292..f28fd83 100644 (file)
@@ -32,9 +32,9 @@ class ExposureErrorCompensator:
                 )
 
     def feed(self, *args):
-        """https://docs.opencv.org/master/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#ae6b0cc69a7bc53818ddea53eddb6bdba"""  # noqa
+        """https://docs.opencv.org/4.x/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#ae6b0cc69a7bc53818ddea53eddb6bdba"""  # noqa
         self.compensator.feed(*args)
 
     def apply(self, *args):
-        """https://docs.opencv.org/master/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#a473eaf1e585804c08d77c91e004f93aa"""  # noqa
+        """https://docs.opencv.org/4.x/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#a473eaf1e585804c08d77c91e004f93aa"""  # noqa
         return self.compensator.apply(*args)
index 8c1d384..f2c7183 100644 (file)
@@ -15,13 +15,13 @@ class FeatureMatcher:
                  **kwargs):
 
         if matcher_type == "affine":
-            """https://docs.opencv.org/master/d3/dda/classcv_1_1detail_1_1AffineBestOf2NearestMatcher.html"""  # noqa
+            """https://docs.opencv.org/4.x/d3/dda/classcv_1_1detail_1_1AffineBestOf2NearestMatcher.html"""  # noqa
             self.matcher = cv.detail_AffineBestOf2NearestMatcher(**kwargs)
         elif range_width == -1:
-            """https://docs.opencv.org/master/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html"""  # noqa
+            """https://docs.opencv.org/4.x/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html"""  # noqa
             self.matcher = cv.detail.BestOf2NearestMatcher_create(**kwargs)
         else:
-            """https://docs.opencv.org/master/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html"""  # noqa
+            """https://docs.opencv.org/4.x/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html"""  # noqa
             self.matcher = cv.detail.BestOf2NearestRangeMatcher_create(
                 range_width, **kwargs
                 )
index 675f266..edf1ed6 100644 (file)
@@ -6,7 +6,7 @@ from .blender import Blender
 
 
 class SeamFinder:
-    """https://docs.opencv.org/master/d7/d09/classcv_1_1detail_1_1SeamFinder.html"""  # noqa
+    """https://docs.opencv.org/4.x/d7/d09/classcv_1_1detail_1_1SeamFinder.html"""  # noqa
     SEAM_FINDER_CHOICES = OrderedDict()
     SEAM_FINDER_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
     SEAM_FINDER_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')
@@ -19,7 +19,7 @@ class SeamFinder:
         self.finder = SeamFinder.SEAM_FINDER_CHOICES[finder]
 
     def find(self, imgs, corners, masks):
-        """https://docs.opencv.org/master/d0/dd5/classcv_1_1detail_1_1DpSeamFinder.html#a7914624907986f7a94dd424209a8a609"""  # noqa
+        """https://docs.opencv.org/4.x/d0/dd5/classcv_1_1detail_1_1DpSeamFinder.html#a7914624907986f7a94dd424209a8a609"""  # noqa
         imgs_float = [img.astype(np.float32) for img in imgs]
         return self.finder.find(imgs_float, corners, masks)
 
index cecf3b8..236d360 100644 (file)
@@ -1,4 +1,4 @@
-https://github.com/opencv/opencv_extra/tree/master/testdata/stitching
+https://github.com/opencv/opencv_extra/tree/4.x/testdata/stitching
 
 s1.jpg s2.jpg
 boat1.jpg boat2.jpg boat3.jpg boat4.jpg boat5.jpg boat6.jpg
index b12210d..ef9d78f 100644 (file)
@@ -317,7 +317,7 @@ def main():
     sizes = []
     blender = None
     timelapser = None
-    # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
+    # https://github.com/opencv/opencv/blob/4.x/samples/cpp/stitching_detailed.cpp#L725 ?
     for idx, name in enumerate(img_names):
         full_img = cv.imread(name)
         if not is_compose_scale_set:
index 4085f47..894294b 100644 (file)
@@ -26,7 +26,7 @@ class Timelapser:
                 )
 
     def initialize(self, *args):
-        """https://docs.opencv.org/master/dd/dac/classcv_1_1detail_1_1Timelapser.html#aaf0f7c4128009f02473332a0c41f6345"""  # noqa
+        """https://docs.opencv.org/4.x/dd/dac/classcv_1_1detail_1_1Timelapser.html#aaf0f7c4128009f02473332a0c41f6345"""  # noqa
         self.timelapser.initialize(*args)
 
     def process_and_save_frame(self, img_name, img, corner):
index c31a864..e21521d 100644 (file)
@@ -55,7 +55,7 @@ class Warper:
 
     def update_scale(self, scale):
         if scale is not None and scale != self.scale:
-            self.warper = cv.PyRotationWarper(self.warper_type, scale)  # setScale not working: https://docs.opencv.org/master/d5/d76/classcv_1_1PyRotationWarper.html#a90b000bb75f95294f9b0b6ec9859eb55
+            self.warper = cv.PyRotationWarper(self.warper_type, scale)  # setScale not working: https://docs.opencv.org/4.x/d5/d76/classcv_1_1PyRotationWarper.html#a90b000bb75f95294f9b0b6ec9859eb55
             self.scale = scale
 
     @staticmethod
index 656f272..c345a9e 100644 (file)
@@ -116,7 +116,7 @@ swapRB = false;
 needSoftmax = false;
 
 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>
 
 <script id="codeSnippet1" type="text/code-snippet">
index 67553ec..73c907e 100644 (file)
@@ -6,7 +6,7 @@
             "std": "1",
             "swapRB": "false",
             "needSoftmax": "false",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
             "modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel",
             "configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_alexnet/deploy.prototxt"
         },
@@ -16,7 +16,7 @@
             "std": "0.007843",
             "swapRB": "false",
             "needSoftmax": "true",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
             "modelUrl": "https://drive.google.com/open?id=0B7ubpZO7HnlCcHlfNmJkU2VPelE",
             "configUrl": "https://raw.githubusercontent.com/shicai/DenseNet-Caffe/master/DenseNet_121.prototxt"
         },
@@ -26,7 +26,7 @@
             "std": "1",
             "swapRB": "false",
             "needSoftmax": "false",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
             "modelUrl": "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel",
             "configUrl": "https://raw.githubusercontent.com/BVLC/caffe/master/models/bvlc_googlenet/deploy.prototxt"
         },
@@ -36,7 +36,7 @@
             "std": "1",
             "swapRB": "false",
             "needSoftmax": "false",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
             "modelUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/squeezenet_v1.0.caffemodel",
             "configUrl": "https://raw.githubusercontent.com/forresti/SqueezeNet/master/SqueezeNet_v1.0/deploy.prototxt"
         },
@@ -46,7 +46,7 @@
             "std": "1",
             "swapRB": "false",
             "needSoftmax": "false",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt",
             "modelUrl": "http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel",
             "configUrl": "https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/f02f8769e64494bcd3d7e97d5d747ac275825721/VGG_ILSVRC_19_layers_deploy.prototxt"
         }
index a8910c3..f476a54 100644 (file)
@@ -117,7 +117,7 @@ swapRB = false;
 needSoftmax = false;
 
 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>
 
 <script id="codeSnippet1" type="text/code-snippet">
index 9a2473c..ef80213 100644 (file)
@@ -116,7 +116,7 @@ swapRB = false;
 needSoftmax = false;
 
 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>
 
 <script id="codeSnippet1" type="text/code-snippet">
index 53f1e48..a83fafe 100644 (file)
@@ -94,7 +94,7 @@ nmsThreshold = 0.4;
 outType = "SSD";
 
 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt";
 </script>
 
 <script id="codeSnippet1" type="text/code-snippet">
index c0d14be..8224f8f 100644 (file)
@@ -7,7 +7,7 @@
             "std": "0.007843",
             "swapRB": "false",
             "outType": "SSD",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt",
             "modelUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/mobilenet_iter_73000.caffemodel",
             "configUrl": "https://raw.githubusercontent.com/chuanqi305/MobileNet-SSD/master/deploy.prototxt"
         },
@@ -18,7 +18,7 @@
             "std": "1",
             "swapRB": "false",
             "outType": "SSD",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt",
             "modelUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download",
             "configUrl": "https://drive.google.com/uc?id=0BzKzrI_SkD1_WVVTSmQxU0dVRzA&export=download"
         }
@@ -31,7 +31,7 @@
             "std": "0.00392",
             "swapRB": "false",
             "outType": "YOLO",
-            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_yolov3.txt",
+            "labelsUrl": "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_yolov3.txt",
             "modelUrl": "https://pjreddie.com/media/files/yolov2-tiny.weights",
             "configUrl": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov2-tiny.cfg"
         }
index 41bb609..6889721 100644 (file)
@@ -94,7 +94,7 @@ nmsThreshold = 0.4;
 outType = "SSD";
 
 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/object_detection_classes_pascal_voc.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/object_detection_classes_pascal_voc.txt";
 </script>
 
 <script id="codeSnippet1" type="text/code-snippet">
index 0da4433..b7d3b83 100644 (file)
@@ -116,7 +116,7 @@ swapRB = false;
 needSoftmax = false;
 
 // url for label file, can from local or Internet
-labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/master/samples/data/dnn/classification_classes_ILSVRC2012.txt";
+labelsUrl = "https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt";
 </script>
 
 <script id="codeSnippet1" type="text/code-snippet">
index 9ff4a05..3615435 100644 (file)
@@ -333,7 +333,7 @@ function installDOM(){
 ### Execute it ###
 
 -   Save the file as `exampleNodeCanvasData.js`.
--   Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml`\e are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/master/data/haarcascades).
+-   Make sure the files `aarcascade_frontalface_default.xml` and `haarcascade_eye.xml`\e are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/4.x/data/haarcascades).
 -   Make sure a sample image file `lena.jpg` exists in project's directory. It should display people's faces for this example to make sense. The following image is known to work:
 
 ![image](lena.jpg)
index 5a8c3b8..dbf4f1e 100644 (file)
@@ -4,7 +4,9 @@ Using OpenCV.js {#tutorial_js_usage}
 Steps
 -----
 
-In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page. You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentations at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (For example, [https://docs.opencv.org/3.4.0/opencv.js](https://docs.opencv.org/3.4.0/opencv.js). Use `master` if you want the latest build). You can also build your own copy by following the tutorial on Build Opencv.js.
+In this tutorial, you will learn how to include and start to use `opencv.js` inside a web page.
+You can get a copy of `opencv.js` from `opencv-{VERSION_NUMBER}-docs.zip` in each [release](https://github.com/opencv/opencv/releases), or simply download the prebuilt script from the online documentations at "https://docs.opencv.org/{VERSION_NUMBER}/opencv.js" (For example, [https://docs.opencv.org/4.5.0/opencv.js](https://docs.opencv.org/4.5.0/opencv.js). Use `4.x` if you want the latest build).
+You can also build your own copy by following the tutorial @ref tutorial_js_setup.
 
 ### Create a web page
 
index fb59e98..2d169a3 100644 (file)
@@ -36,7 +36,7 @@ gives us a feature vector containing 64 values. This is the feature vector we us
 
 Finally, as in the previous case, we start by splitting our big dataset into individual cells. For
 every digit, 250 cells are reserved for training data and remaining 250 data is reserved for
-testing. Full code is given below, you also can download it from [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
+testing. Full code is given below, you also can download it from [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py):
 
 @include samples/python/tutorial_code/ml/py_svm_opencv/hogsvm.py
 
index 51f44ec..132074f 100644 (file)
@@ -83,5 +83,5 @@ there are two flags that should be used to set/get property of the needed genera
     flag value is assumed by default if neither of the two possible values of the property is set.
 
 For more information please refer to the example of usage
-[videocapture_realsense.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/videocapture_realsense.cpp)
+[videocapture_realsense.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/videocapture_realsense.cpp)
 in opencv/samples/cpp folder.
index e235a97..737b935 100644 (file)
@@ -140,5 +140,5 @@ property. The following properties of cameras available through OpenNI interface
     -   CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION
 
 For more information please refer to the example of usage
-[videocapture_openni.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/videocapture_openni.cpp) in
+[videocapture_openni.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/videocapture_openni.cpp) in
 opencv/samples/cpp folder.
index 273c3c3..b95053b 100644 (file)
@@ -165,5 +165,5 @@ but the depth data makes it easy.
 ![Depth frame](images/astra_depth.png)
 
 The complete implementation can be found in
-[orbbec_astra.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp)
+[orbbec_astra.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/videoio/orbbec_astra/orbbec_astra.cpp)
 in `samples/cpp/tutorial_code/videoio` directory.
index 2b88114..0f886d1 100644 (file)
@@ -37,19 +37,19 @@ Let's modify the program made in the tutorial @ref tutorial_adding_images. We wi
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp)
 @include cpp/tutorial_code/HighGUI/AddingImagesTrackbar.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java)
 @include java/tutorial_code/highgui/trackbar/AddingImagesTrackbar.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py)
 @include python/tutorial_code/highgui/trackbar/AddingImagesTrackbar.py
 @end_toggle
 
index 1d53c70..0c068e3 100644 (file)
@@ -30,8 +30,8 @@ As a test case where to show off these using OpenCV I've created a small program
 video files and performs a similarity check between them. This is something you could use to check
 just how well a new video compressing algorithms works. Let there be a reference (original) video
 like [this small Megamind clip
-](https://github.com/opencv/opencv/tree/master/samples/data/Megamind.avi) and [a compressed
-version of it ](https://github.com/opencv/opencv/tree/master/samples/data/Megamind_bugy.avi).
+](https://github.com/opencv/opencv/tree/4.x/samples/data/Megamind.avi) and [a compressed
+version of it ](https://github.com/opencv/opencv/tree/4.x/samples/data/Megamind_bugy.avi).
 You may also find the source code and these video file in the
 `samples/data` folder of the OpenCV source library.
 
index d655e24..a9cbdba 100644 (file)
@@ -41,7 +41,7 @@ The source code
 
 You may also find the source code and these video file in the
 `samples/cpp/tutorial_code/videoio/video-write/` folder of the OpenCV source library or [download it
-from here ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/videoio/video-write/video-write.cpp).
+from here ](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/videoio/video-write/video-write.cpp).
 
 @include cpp/tutorial_code/videoio/video-write/video-write.cpp
 
index 00e1e96..82ad160 100644 (file)
@@ -88,13 +88,13 @@ Source code
 
 You may also find the source code in the `samples/cpp/tutorial_code/calib3d/camera_calibration/`
 folder of the OpenCV source library or [download it from here
-](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp). For the usage of the program, run it with `-h` argument. The program has an
+](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp). For the usage of the program, run it with `-h` argument. The program has an
 essential argument: the name of its configuration file. If none is given then it will try to open the
 one named "default.xml". [Here's a sample configuration file
-](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml) in XML format. In the
+](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/calib3d/camera_calibration/in_VID5.xml) in XML format. In the
 configuration file you may choose to use camera as an input, a video file or an image list. If you
 opt for the last one, you will need to create a configuration file where you enumerate the images to
-use. Here's [an example of this ](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml).
+use. Here's [an example of this ](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/calib3d/camera_calibration/VID5.xml).
 The important part to remember is that the images need to be specified using the absolute path or
 the relative one from your application's working directory. You may find all this in the samples
 directory mentioned above.
index b13f097..8f4c184 100644 (file)
@@ -13,14 +13,14 @@ Create calibration pattern {#tutorial_camera_calibration_pattern}
 
 The goal of this tutorial is to learn how to create calibration pattern.
 
-You can find a chessboard pattern in https://github.com/opencv/opencv/blob/master/doc/pattern.png
+You can find a chessboard pattern in https://github.com/opencv/opencv/blob/4.x/doc/pattern.png
 
-You can find a circleboard pattern in https://github.com/opencv/opencv/blob/master/doc/acircles_pattern.png
+You can find a circleboard pattern in https://github.com/opencv/opencv/blob/4.x/doc/acircles_pattern.png
 
 Create your own pattern
 ---------------
 
-Now, if you want to create your own pattern, you will need python to use https://github.com/opencv/opencv/blob/master/doc/pattern_tools/gen_pattern.py
+Now, if you want to create your own pattern, you will need python to use https://github.com/opencv/opencv/blob/4.x/doc/pattern_tools/gen_pattern.py
 
 Example
 
index 3c4f0b0..af19fb2 100644 (file)
@@ -102,7 +102,7 @@ QR faster than SVD, but potentially less precise
 -  *camera_resolution*: resolution of camera which is used for calibration
 
 **Note:** *charuco_dict*, *charuco_square_length* and *charuco_marker_size* are used for chAruco pattern generation
-(see Aruco module description for details: [Aruco tutorials](https://github.com/opencv/opencv_contrib/tree/master/modules/aruco/tutorials))
+(see Aruco module description for details: [Aruco tutorials](https://github.com/opencv/opencv_contrib/tree/4.x/modules/aruco/tutorials))
 
 Default chAruco pattern:
 
index 3cec9f1..7f3de47 100644 (file)
@@ -41,19 +41,19 @@ Source Code
 
 @add_toggle_cpp
 Download the source code from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/core/AddingImages/AddingImages.cpp).
 @include cpp/tutorial_code/core/AddingImages/AddingImages.cpp
 @end_toggle
 
 @add_toggle_java
 Download the source code from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/AddingImages/AddingImages.java).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/core/AddingImages/AddingImages.java).
 @include java/tutorial_code/core/AddingImages/AddingImages.java
 @end_toggle
 
 @add_toggle_python
 Download the source code from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/AddingImages/adding_images.py).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/core/AddingImages/adding_images.py).
 @include python/tutorial_code/core/AddingImages/adding_images.py
 @end_toggle
 
@@ -77,7 +77,7 @@ We need two source images (\f$f_{0}(x)\f$ and \f$f_{1}(x)\f$). So, we load them
 @snippet python/tutorial_code/core/AddingImages/adding_images.py load
 @end_toggle
 
-We used the following images: [LinuxLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/LinuxLogo.jpg) and [WindowsLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/WindowsLogo.jpg)
+We used the following images: [LinuxLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/LinuxLogo.jpg) and [WindowsLogo.jpg](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/WindowsLogo.jpg)
 
 @warning Since we are *adding* *src1* and *src2*, they both have to be of the same size
 (width and height) and type.
index 75bd655..0f569a4 100644 (file)
@@ -65,7 +65,7 @@ Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp)
 
 -   The following code performs the operation \f$g(i,j) = \alpha \cdot f(i,j) + \beta\f$ :
     @include samples/cpp/tutorial_code/ImgProc/BasicLinearTransforms.cpp
@@ -73,7 +73,7 @@ Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/BasicLinearTransformsDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/BasicLinearTransformsDemo.java)
 
 -   The following code performs the operation \f$g(i,j) = \alpha \cdot f(i,j) + \beta\f$ :
     @include samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/BasicLinearTransformsDemo.java
@@ -81,7 +81,7 @@ Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py)
 
 -   The following code performs the operation \f$g(i,j) = \alpha \cdot f(i,j) + \beta\f$ :
     @include samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py
@@ -291,15 +291,15 @@ and are not intended to be used as a replacement of a raster graphics editor!**
 ### Code
 
 @add_toggle_cpp
-Code for the tutorial is [here](https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/ImgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.cpp).
+Code for the tutorial is [here](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/ImgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.cpp).
 @end_toggle
 
 @add_toggle_java
-Code for the tutorial is [here](https://github.com/opencv/opencv/blob/master/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/ChangingContrastBrightnessImageDemo.java).
+Code for the tutorial is [here](https://github.com/opencv/opencv/blob/4.x/samples/java/tutorial_code/ImgProc/changing_contrast_brightness_image/ChangingContrastBrightnessImageDemo.java).
 @end_toggle
 
 @add_toggle_python
-Code for the tutorial is [here](https://github.com/opencv/opencv/blob/master/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py).
+Code for the tutorial is [here](https://github.com/opencv/opencv/blob/4.x/samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py).
 @end_toggle
 
 Code for the gamma correction:
index 1701bab..9c11ec3 100644 (file)
@@ -26,7 +26,7 @@ Source code
 
 @add_toggle_cpp
 You can [download this from here
-](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp) or
+](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp) or
 find it in the
 `samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp` of the
 OpenCV source code library.
@@ -34,7 +34,7 @@ OpenCV source code library.
 
 @add_toggle_java
 You can [download this from here
-](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java) or
+](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java) or
 find it in the
 `samples/java/tutorial_code/core/discrete_fourier_transform/DiscreteFourierTransform.java` of the
 OpenCV source code library.
@@ -42,7 +42,7 @@ OpenCV source code library.
 
 @add_toggle_python
 You can [download this from here
-](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py) or
+](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py) or
 find it in the
 `samples/python/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.py` of the
 OpenCV source code library.
@@ -229,7 +229,7 @@ An application idea would be to determine the geometrical orientation present in
 example, let us find out if a text is horizontal or not? Looking at some text you'll notice that the
 text lines sort of form also horizontal lines and the letters form sort of vertical lines. These two
 main components of a text snippet may be also seen in case of the Fourier transform. Let us use
-[this horizontal ](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/imageTextN.png) and [this rotated](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/imageTextR.png)
+[this horizontal](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/imageTextN.png) and [this rotated](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/imageTextR.png)
 image about a text.
 
 In case of the horizontal text:
index 46838dd..5aa2c6b 100644 (file)
@@ -26,7 +26,7 @@ Source code
 -----------
 @add_toggle_cpp
 You can [download this from here
-](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp) or find it in the
+](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp) or find it in the
 `samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp` of the OpenCV source code
 library.
 
@@ -37,7 +37,7 @@ Here's a sample code of how to achieve all the stuff enumerated at the goal list
 
 @add_toggle_python
 You can [download this from here
-](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/core/file_input_output/file_input_output.py) or find it in the
+](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/core/file_input_output/file_input_output.py) or find it in the
 `samples/python/tutorial_code/core/file_input_output/file_input_output.py` of the OpenCV source code
 library.
 
index d19936e..c1b886f 100644 (file)
@@ -61,7 +61,7 @@ three major ways of going through an image pixel by pixel. To make things a litt
 we'll make the scanning of the image using each of these methods, and print out how long it took.
 
 You can download the full source code [here
-](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp) or look it up in
+](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp) or look it up in
 the samples directory of OpenCV at the cpp tutorial code for the core section. Its basic usage is:
 @code{.bash}
 how_to_scan_images imageName.jpg intValueToReduce [G]
index 92f73b7..4c68efe 100644 (file)
@@ -15,7 +15,7 @@ Goal
 The goal of this tutorial is to show you how to use the OpenCV `parallel_for_` framework to easily
 parallelize your code. To illustrate the concept, we will write a program to draw a Mandelbrot set
 exploiting almost all the CPU load available.
-The full tutorial code is [here](https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
+The full tutorial code is [here](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
 If you want more information about multithreading, you will have to refer to a reference book or course as this tutorial is intended
 to remain simple.
 
@@ -183,7 +183,7 @@ C++ 11 standard allows to simplify the parallel implementation by get rid of the
 Results
 -----------
 
-You can find the full tutorial code [here](https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
+You can find the full tutorial code [here](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/core/how_to_use_OpenCV_parallel_for_/how_to_use_OpenCV_parallel_for_.cpp).
 The performance of the parallel implementation depends of the type of CPU you have. For instance, on 4 cores / 8 threads
 CPU, you can expect a speed-up of around 6.9X. There are many factors to explain why we do not achieve a speed-up of almost 8X.
 Main reasons should be mostly due to:
index 43c71d7..8958155 100644 (file)
@@ -40,7 +40,7 @@ Code
 
 @add_toggle_cpp
 You can download this source code from [here
-](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the
+](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp) or look in the
 OpenCV source code libraries sample directory at
 `samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp`.
 @include samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp
@@ -48,7 +48,7 @@ OpenCV source code libraries sample directory at
 
 @add_toggle_java
 You can download this source code from [here
-](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java) or look in the
+](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java) or look in the
 OpenCV source code libraries sample directory at
 `samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java`.
 @include samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java
@@ -56,7 +56,7 @@ OpenCV source code libraries sample directory at
 
 @add_toggle_python
 You can download this source code from [here
-](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py) or look in the
+](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py) or look in the
 OpenCV source code libraries sample directory at
 `samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py`.
 @include samples/python/tutorial_code/core/mat_mask_operations/mat_mask_operations.py
index c53296b..e972521 100644 (file)
@@ -269,7 +269,7 @@ OpenCV offers support for output of other common OpenCV data structures too via
     ![](images/MatBasicContainerOut15.png)
 
 Most of the samples here have been included in a small console application. You can download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp)
 or in the core section of the cpp samples.
 
 You can also find a quick video demonstration of this on
index 7e6a8a7..894a744 100644 (file)
@@ -245,7 +245,7 @@ In the following section, we will vectorize a simple convolution function for si
 
 You may learn more about convolution from the previous tutorial. We use the same naive implementation from the previous tutorial and compare it to the vectorized version.
 
-The full tutorial code is [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/univ_intrin/univ_intrin.cpp).
+The full tutorial code is [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/univ_intrin/univ_intrin.cpp).
 
 ### Vectorizing Convolution
 
index 48a5599..f7bdd95 100644 (file)
@@ -23,7 +23,7 @@ In this tutorial, we first introduce how to obtain the custom OCR model, then ho
 After completing the model training, please use [transform_to_onnx.py](https://github.com/zihaomu/deep-text-recognition-benchmark/blob/master/transform_to_onnx.py) to convert the model into onnx format.
 
 #### Execute in webcam
-The Python version example code can be found at [here](https://github.com/opencv/opencv/blob/master/samples/dnn/text_detection.py).
+The Python version example code can be found at [here](https://github.com/opencv/opencv/blob/4.x/samples/dnn/text_detection.py).
 
 Example:
 @code{.bash}
index 07c3fb4..0b129cf 100644 (file)
@@ -226,7 +226,7 @@ a centric one.
 @snippet dnn/edge_detection.py Register
 
 That's it! We've replaced an implemented OpenCV's layer to a custom one.
-You may find a full script in the [source code](https://github.com/opencv/opencv/tree/master/samples/dnn/edge_detection.py).
+You may find a full script in the [source code](https://github.com/opencv/opencv/tree/4.x/samples/dnn/edge_detection.py).
 
 <table border="0">
 <tr>
index f55cdb7..d85eb3c 100644 (file)
@@ -38,7 +38,7 @@ There are two models (ONNX format) pre-trained and required for this module:
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/dnn/face_detect.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/dnn/face_detect.cpp)
 
 -   **Code at glance:**
     @include samples/dnn/face_detect.cpp
@@ -46,7 +46,7 @@ There are two models (ONNX format) pre-trained and required for this module:
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/dnn/face_detect.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/dnn/face_detect.py)
 
 -   **Code at glance:**
     @include samples/dnn/face_detect.py
index a886e9e..972842b 100644 (file)
@@ -22,7 +22,7 @@ We will demonstrate results of this example on the following picture.
 Source Code
 -----------
 
-We will be using snippets from the example application, that can be downloaded [here](https://github.com/opencv/opencv/blob/master/samples/dnn/classification.cpp).
+We will be using snippets from the example application, that can be downloaded [here](https://github.com/opencv/opencv/blob/4.x/samples/dnn/classification.cpp).
 
 @include dnn/classification.cpp
 
@@ -30,11 +30,11 @@ Explanation
 -----------
 
 -# Firstly, download GoogLeNet model files:
-   [bvlc_googlenet.prototxt  ](https://github.com/opencv/opencv_extra/blob/master/testdata/dnn/bvlc_googlenet.prototxt) and
+   [bvlc_googlenet.prototxt  ](https://github.com/opencv/opencv_extra/blob/4.x/testdata/dnn/bvlc_googlenet.prototxt) and
    [bvlc_googlenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel)
 
    Also you need file with names of [ILSVRC2012](http://image-net.org/challenges/LSVRC/2012/browse-synsets) classes:
-   [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/blob/master/samples/data/dnn/classification_classes_ILSVRC2012.txt).
+   [classification_classes_ILSVRC2012.txt](https://github.com/opencv/opencv/blob/4.x/samples/data/dnn/classification_classes_ILSVRC2012.txt).
 
    Put these files into working dir of this program example.
 
index e4a6f1f..3832461 100644 (file)
@@ -45,7 +45,7 @@ correspondingly. In example, for variable `x` in range `[0, 10)` directive
 `split: { x: 2 }` gives new ones `xo` in range `[0, 5)` and `xi` in range `[0, 2)`.
 Variable name `x` is no longer available in the same scheduling node.
 
-You can find scheduling examples at [opencv_extra/testdata/dnn](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn)
+You can find scheduling examples at [opencv_extra/testdata/dnn](https://github.com/opencv/opencv_extra/tree/4.x/testdata/dnn)
 and use it for schedule your networks.
 
 ## Layers fusing
index 1807caf..b3b6c2f 100644 (file)
@@ -30,7 +30,7 @@ source <env_dir_path>/bin/activate
 
 For OpenCV-Python building from source, follow the corresponding instructions from the @ref tutorial_py_table_of_contents_setup.
 
-Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
+Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
 The below line initiates requirements installation into the previously activated virtual environment:
 
 ```console
@@ -140,7 +140,7 @@ class CommonConfig:
 Thus, the converted ResNet-50 will be saved in ``dnn_model_runner/dnn_conversion/models``.
 
 ### Inference Pipeline
-Now we can use ```models/resnet50.onnx``` for the inference pipeline using OpenCV C/C++ API. The implemented pipeline can be found in [samples/dnn/classification.cpp](https://github.com/opencv/opencv/blob/master/samples/dnn/classification.cpp).
+Now we can use ```models/resnet50.onnx``` for the inference pipeline using OpenCV C/C++ API. The implemented pipeline can be found in [samples/dnn/classification.cpp](https://github.com/opencv/opencv/blob/4.x/samples/dnn/classification.cpp).
 After the build of samples (``BUILD_EXAMPLES`` flag value should be ``ON``), the appropriate ``example_dnn_classification`` executable file will be provided.
 
 To provide model inference we will use the below [squirrel photo](https://www.pexels.com/photo/brown-squirrel-eating-1564292) (under [CC0](https://www.pexels.com/terms-of-service/) license) corresponding to ImageNet class ID 335:
index 409d2f5..34ae25b 100644 (file)
@@ -31,7 +31,7 @@ source <env_dir_path>/bin/activate
 
 For OpenCV-Python building from source, follow the corresponding instructions from the @ref tutorial_py_table_of_contents_setup.
 
-Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
+Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
 The below line initiates requirements installation into the previously activated virtual environment:
 
 ```console
@@ -270,7 +270,7 @@ python -m dnn_model_runner.dnn_conversion.pytorch.classification.py_to_py_cls --
 
 Chosen from the list classification model will be read into OpenCV cv.dnn.Net object. Evaluation results of PyTorch and OpenCV models (accuracy, inference time, L1) will be written into the log file. Inference time values will be also depicted in a chart to generalize the obtained model information.
 
-Necessary evaluation configurations are defined in the [test_config.py](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) and can be modified in accordance with actual paths of data location:
+Necessary evaluation configurations are defined in the [test_config.py](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) and can be modified in accordance with actual paths of data location:
 
 ```python
 @dataclass
@@ -308,7 +308,7 @@ python -m dnn_model_runner.dnn_conversion.pytorch.classification.py_to_py_cls --
 
 Here ``default_img_preprocess`` key defines whether you'd like to parametrize the model test process with some particular values or use the default values, for example, ``scale``, ``mean`` or ``std``.
 
-Test configuration is represented in [test_config.py](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestClsModuleConfig`` class:
+Test configuration is represented in [test_config.py](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestClsModuleConfig`` class:
 
 ```python
 @dataclass
@@ -331,7 +331,7 @@ class TestClsModuleConfig:
     classes: str = os.path.join(cls_test_data_dir, "dnn", "classification_classes_ILSVRC2012.txt")
 ```
 
-The default image preprocessing options are defined in [default_preprocess_config.py](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/default_preprocess_config.py). For instance:
+The default image preprocessing options are defined in [default_preprocess_config.py](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/default_preprocess_config.py). For instance:
 
 ```python
 BASE_IMG_SCALE_FACTOR = 1 / 255.0
@@ -349,7 +349,7 @@ pytorch_resize_input_blob = {
 }
 ```
 
-The basis of the model testing is represented in [samples/dnn/classification.py](https://github.com/opencv/opencv/blob/master/samples/dnn/classification.py).  ``classification.py`` can be executed autonomously with provided converted model in ``--input`` and populated parameters for cv.dnn.blobFromImage.
+The basis of the model testing is represented in [samples/dnn/classification.py](https://github.com/opencv/opencv/blob/4.x/samples/dnn/classification.py).  ``classification.py`` can be executed autonomously with provided converted model in ``--input`` and populated parameters for cv.dnn.blobFromImage.
 
 To reproduce from scratch the described in "Model Conversion Pipeline" OpenCV steps with ``dnn_model_runner`` execute the below line:
 
index c2da541..032a276 100644 (file)
@@ -28,7 +28,7 @@ source <env_dir_path>/bin/activate
 
 For OpenCV-Python building from source, follow the corresponding instructions from the @ref tutorial_py_table_of_contents_setup.
 
-Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
+Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
 The below line initiates requirements installation into the previously activated virtual environment:
 
 ```console
@@ -273,7 +273,7 @@ python -m dnn_model_runner.dnn_conversion.tf.classification.py_to_py_cls --model
 
 Chosen from the list classification model will be read into OpenCV ``cv.dnn_Net`` object. Evaluation results of TF and OpenCV models (accuracy, inference time, L1) will be written into the log file. Inference time values will be also depicted in a chart to generalize the obtained model information.
 
-Necessary evaluation configurations are defined in the [test_config.py](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) and can be modified in accordance with actual paths of data location::
+Necessary evaluation configurations are defined in the [test_config.py](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) and can be modified in accordance with actual paths of data location:
 
 ```python
 @dataclass
@@ -312,7 +312,7 @@ python -m dnn_model_runner.dnn_conversion.tf.classification.py_to_py_cls --model
 
 Here ``default_img_preprocess`` key defines whether you'd like to parametrize the model test process with some particular values or use the default values, for example, ``scale``, ``mean`` or ``std``.
 
-Test configuration is represented in [test_config.py](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestClsModuleConfig`` class:
+Test configuration is represented in [test_config.py](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestClsModuleConfig`` class:
 
 ```python
 @dataclass
@@ -347,7 +347,7 @@ tf_input_blob = {
 }
 ```
 
-The basis of the model testing is represented in [samples/dnn/classification.py](https://github.com/opencv/opencv/blob/master/samples/dnn/classification.py). ``classification.py`` can be executed autonomously with provided converted model in ``--input`` and populated parameters for cv.dnn.blobFromImage.
+The basis of the model testing is represented in [samples/dnn/classification.py](https://github.com/opencv/opencv/blob/4.x/samples/dnn/classification.py). ``classification.py`` can be executed autonomously with provided converted model in ``--input`` and populated parameters for cv.dnn.blobFromImage.
 
 To reproduce from scratch the described in "Model Conversion Pipeline" OpenCV steps with ``dnn_model_runner`` execute the below line:
 
index 04388cb..37c9768 100644 (file)
@@ -27,7 +27,7 @@ source <env_dir_path>/bin/activate
 
 For OpenCV-Python building from source, follow the corresponding instructions from the @ref tutorial_py_table_of_contents_setup.
 
-Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
+Before you start the installation of the libraries, you can customize the [requirements.txt](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/requirements.txt), excluding or including (for example, ``opencv-python``) some dependencies.
 The below line initiates requirements installation into the previously activated virtual environment:
 
 ```console
@@ -98,7 +98,7 @@ To provide model inference we will use the below [double-decker bus photo](https
 To initiate the test process we need to provide an appropriate model configuration. We will use [``ssd_mobilenet_v1_coco.config``](https://github.com/tensorflow/models/blob/master/research/object_detection/samples/configs/ssd_mobilenet_v1_coco.config) from [TensorFlow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection#tensorflow-object-detection-api).
 TensorFlow Object Detection API framework contains helpful mechanisms for object detection model manipulations.
 
-We will use this configuration to provide a text graph representation. To generate ``.pbtxt`` we will use the corresponding [``samples/dnn/tf_text_graph_ssd.py``](https://github.com/opencv/opencv/blob/master/samples/dnn/tf_text_graph_ssd.py) script:
+We will use this configuration to provide a text graph representation. To generate ``.pbtxt`` we will use the corresponding [``samples/dnn/tf_text_graph_ssd.py``](https://github.com/opencv/opencv/blob/4.x/samples/dnn/tf_text_graph_ssd.py) script:
 
 ```console
 python tf_text_graph_ssd.py --input ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb --config ssd_mobilenet_v1_coco_2017_11_17/ssd_mobilenet_v1_coco.config --output ssd_mobilenet_v1_coco_2017_11_17.pbtxt
@@ -106,7 +106,7 @@ python tf_text_graph_ssd.py --input ssd_mobilenet_v1_coco_2017_11_17/frozen_infe
 
 After successful execution ``ssd_mobilenet_v1_coco_2017_11_17.pbtxt`` will be created.
 
-Before we run ``object_detection.py``, let's have a look at the default values for the SSD MobileNetV1 test process configuration. They are located in [``models.yml``](https://github.com/opencv/opencv/blob/master/samples/dnn/models.yml):
+Before we run ``object_detection.py``, let's have a look at the default values for the SSD MobileNetV1 test process configuration. They are located in [``models.yml``](https://github.com/opencv/opencv/blob/4.x/samples/dnn/models.yml):
 
 ```yml
 ssd_tf:
index 368007e..e309cf6 100644 (file)
@@ -258,7 +258,7 @@ python -m dnn_model_runner.dnn_conversion.pytorch.segmentation.py_to_py_segm --m
 
 The segmentation model chosen from the list will be read into an OpenCV ``cv.dnn_Net`` object. Evaluation results of PyTorch and OpenCV models (pixel accuracy, mean IoU, inference time) will be written into the log file. Inference time values will also be depicted in a chart to generalize the obtained model information.
 
-Necessary evaluation configurations are defined in the [``test_config.py``](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py):
+Necessary evaluation configurations are defined in the [``test_config.py``](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py):
 
 ```python
 @dataclass
@@ -290,7 +290,7 @@ python -m dnn_model_runner.dnn_conversion.pytorch.segmentation.py_to_py_segm --m
 
 Here ``default_img_preprocess`` key defines whether you'd like to parametrize the model test process with some particular values or use the default values, for example, ``scale``, ``mean`` or ``std``.
 
-Test configuration is represented in [``test_config.py``](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestSegmModuleConfig`` class:
+Test configuration is represented in [``test_config.py``](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestSegmModuleConfig`` class:
 
 ```python
 @dataclass
index bcf9749..896854b 100644 (file)
@@ -338,7 +338,7 @@ python -m dnn_model_runner.dnn_conversion.tf.segmentation.py_to_py_segm
 
 The model will be read into an OpenCV ``cv.dnn_Net`` object. Evaluation results of TF and OpenCV models (pixel accuracy, mean IoU, inference time) will be written into the log file. Inference time values will also be depicted in a chart to generalize the obtained model information.
 
-Necessary evaluation configurations are defined in the [``test_config.py``](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py):
+Necessary evaluation configurations are defined in the [``test_config.py``](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py):
 
 ```python
 @dataclass
@@ -364,7 +364,7 @@ python -m dnn_model_runner.dnn_conversion.tf.segmentation.py_to_py_segm --test T
 
 Here ``default_img_preprocess`` key defines whether you'd like to parametrize the model test process with some particular values or use the default values, for example, ``scale``, ``mean`` or ``std``.
 
-Test configuration is represented in [``test_config.py``](https://github.com/opencv/opencv/tree/master/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestSegmModuleConfig`` class:
+Test configuration is represented in [``test_config.py``](https://github.com/opencv/opencv/tree/4.x/samples/dnn/dnn_model_runner/dnn_conversion/common/test/configs/test_config.py) ``TestSegmModuleConfig`` class:
 
 ```python
 @dataclass
index 0a22f27..c2b3ec8 100644 (file)
@@ -287,15 +287,15 @@ Output Examples:
 ![Picture example](detect_test2.jpg)
 
 ## Source Code
-The [source code](https://github.com/opencv/opencv/blob/master/modules/dnn/src/model.cpp)
+The [source code](https://github.com/opencv/opencv/blob/4.x/modules/dnn/src/model.cpp)
 of these APIs can be found in the DNN module.
 
 ## Detailed Sample
 For more information, please refer to:
-- [samples/dnn/scene_text_recognition.cpp](https://github.com/opencv/opencv/blob/master/samples/dnn/scene_text_recognition.cpp)
-- [samples/dnn/scene_text_detection.cpp](https://github.com/opencv/opencv/blob/master/samples/dnn/scene_text_detection.cpp)
-- [samples/dnn/text_detection.cpp](https://github.com/opencv/opencv/blob/master/samples/dnn/text_detection.cpp)
-- [samples/dnn/scene_text_spotting.cpp](https://github.com/opencv/opencv/blob/master/samples/dnn/scene_text_spotting.cpp)
+- [samples/dnn/scene_text_recognition.cpp](https://github.com/opencv/opencv/blob/4.x/samples/dnn/scene_text_recognition.cpp)
+- [samples/dnn/scene_text_detection.cpp](https://github.com/opencv/opencv/blob/4.x/samples/dnn/scene_text_detection.cpp)
+- [samples/dnn/text_detection.cpp](https://github.com/opencv/opencv/blob/4.x/samples/dnn/text_detection.cpp)
+- [samples/dnn/scene_text_spotting.cpp](https://github.com/opencv/opencv/blob/4.x/samples/dnn/scene_text_spotting.cpp)
 
 #### Test with an image
 Examples:
index 0973396..3640910 100644 (file)
@@ -29,8 +29,8 @@ Source Code
 -----------
 
 Use a universal sample for object detection models written
-[in C++](https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.cpp) and
-[in Python](https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.py) languages
+[in C++](https://github.com/opencv/opencv/blob/4.x/samples/dnn/object_detection.cpp) and
+[in Python](https://github.com/opencv/opencv/blob/4.x/samples/dnn/object_detection.py) languages
 
 Usage examples
 --------------
index 9cb920f..3461442 100644 (file)
@@ -42,7 +42,7 @@ You can find the images (*graf1.png*, *graf3.png*) and homography (*H1to3p.xml*)
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/features2D/AKAZE_match.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/features2D/AKAZE_match.cpp
@@ -50,7 +50,7 @@ You can find the images (*graf1.png*, *graf3.png*) and homography (*H1to3p.xml*)
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/features2D/akaze_matching/AKAZEMatchDemo.java)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/features2D/akaze_matching/AKAZEMatchDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/features2D/akaze_matching/AKAZEMatchDemo.java
@@ -58,7 +58,7 @@ You can find the images (*graf1.png*, *graf3.png*) and homography (*H1to3p.xml*)
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/features2D/akaze_matching/AKAZE_match.py
index 1b77b6a..e9026ad 100644 (file)
@@ -34,19 +34,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp)
 @include samples/cpp/tutorial_code/features2D/feature_description/SURF_matching_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java)
 @include samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py)
 @include samples/python/tutorial_code/features2D/feature_description/SURF_matching_Demo.py
 @end_toggle
 
index 0d52877..7dfdaac 100644 (file)
@@ -32,19 +32,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp)
 @include samples/cpp/tutorial_code/features2D/feature_detection/SURF_detection_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java)
 @include samples/java/tutorial_code/features2D/feature_detection/SURFDetectionDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py)
 @include samples/python/tutorial_code/features2D/feature_detection/SURF_detection_Demo.py
 @end_toggle
 
index 1416604..7430c8f 100644 (file)
@@ -55,19 +55,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp)
 @include samples/cpp/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java)
 @include samples/java/tutorial_code/features2D/feature_flann_matcher/SURFFLANNMatchingDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py)
 @include samples/python/tutorial_code/features2D/feature_flann_matcher/SURF_FLANN_matching_Demo.py
 @end_toggle
 
index 4b75c7f..856b9a8 100644 (file)
@@ -30,19 +30,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp)
 @include samples/cpp/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java)
 @include samples/java/tutorial_code/features2D/feature_homography/SURFFLANNMatchingHomographyDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py)
 @include samples/python/tutorial_code/features2D/feature_homography/SURF_FLANN_matching_homography_Demo.py
 @end_toggle
 
index 10690dd..7d905a2 100644 (file)
@@ -20,10 +20,10 @@ For detailed explanations about the theory, please refer to a computer vision co
 *   An Invitation to 3-D Vision: From Images to Geometric Models, @cite Ma:2003:IVI
 *   Computer Vision: Algorithms and Applications, @cite RS10
 
-The tutorial code can be found here [C++](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/features2D/Homography),
-[Python](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/features2D/Homography),
-[Java](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/features2D/Homography).
-The images used in this tutorial can be found [here](https://github.com/opencv/opencv/tree/master/samples/data) (`left*.jpg`).
+The tutorial code can be found here [C++](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/features2D/Homography),
+[Python](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/features2D/Homography),
+[Java](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/features2D/Homography).
+The images used in this tutorial can be found [here](https://github.com/opencv/opencv/tree/4.x/samples/data) (`left*.jpg`).
 
 Basic theory {#tutorial_homography_Basic_theory}
 ------------
index e43fc3b..bd70d50 100644 (file)
@@ -27,19 +27,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp)
 @include samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java)
 @include samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py)
 @include samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py
 @end_toggle
 
index def9571..c8c22a0 100644 (file)
@@ -31,21 +31,21 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp)
 
 @include samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java)
 
 @include samples/java/tutorial_code/TrackingMotion/generic_corner_detector/CornerDetectorDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py)
 
 @include samples/python/tutorial_code/TrackingMotion/generic_corner_detector/cornerDetector_Demo.py
 @end_toggle
index 19023c2..ccbace1 100644 (file)
@@ -26,19 +26,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp)
 @include samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java)
 @include samples/java/tutorial_code/TrackingMotion/good_features_to_track/GoodFeaturesToTrackDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py)
 @include samples/python/tutorial_code/TrackingMotion/good_features_to_track/goodFeaturesToTrack_Demo.py
 @end_toggle
 
index 1ed9cd5..6c811fe 100644 (file)
@@ -129,19 +129,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp)
 @include samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java)
 @include samples/java/tutorial_code/TrackingMotion/harris_detector/CornerHarrisDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code is shown in the lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py)
 @include samples/python/tutorial_code/TrackingMotion/harris_detector/cornerHarris_Demo.py
 @end_toggle
 
index 60e136f..ab5a336 100644 (file)
@@ -30,7 +30,7 @@ The source code
 
 You may also find the source code and the video file in the
 `samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity` directory of the OpenCV
-source library or download it from [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp).
+source library or download it from [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/gpu/gpu-basics-similarity/gpu-basics-similarity.cpp).
 The full source code is quite long (due to the controlling of the application via the command line
 arguments and performance measurement). Therefore, to avoid cluttering up these sections with those
 you'll find here only the functions itself.
index 289000c..aa5161c 100644 (file)
@@ -88,19 +88,19 @@ Code
 
 @add_toggle_cpp
 -   This code is in your OpenCV sample folder. Otherwise you can grab it from
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_1.cpp)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_1.cpp)
     @include samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_1.cpp
 @end_toggle
 
 @add_toggle_java
 -   This code is in your OpenCV sample folder. Otherwise you can grab it from
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/BasicGeometricDrawing/BasicGeometricDrawing.java)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgProc/BasicGeometricDrawing/BasicGeometricDrawing.java)
     @include samples/java/tutorial_code/ImgProc/BasicGeometricDrawing/BasicGeometricDrawing.java
 @end_toggle
 
 @add_toggle_python
 -   This code is in your OpenCV sample folder. Otherwise you can grab it from
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/BasicGeometricDrawing/basic_geometric_drawing.py)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/imgProc/BasicGeometricDrawing/basic_geometric_drawing.py)
     @include samples/python/tutorial_code/imgProc/BasicGeometricDrawing/basic_geometric_drawing.py
 @end_toggle
 
index 99179f7..086bceb 100644 (file)
@@ -72,19 +72,19 @@ Code
 
 @add_toggle_cpp
 This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp)
 @include samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java)
 @include samples/java/tutorial_code/ImgProc/erosion_dilatation/MorphologyDemo1.java
 @end_toggle
 
 @add_toggle_python
 This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py)
 @include samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py
 @end_toggle
 
@@ -280,7 +280,7 @@ Results
 
 Compile the code above and execute it (or run the script if using python) with an image as argument.
 If you do not provide an image as argument the default sample image
-([LinuxLogo.jpg](https://github.com/opencv/opencv/tree/master/samples/data/LinuxLogo.jpg)) will be used.
+([LinuxLogo.jpg](https://github.com/opencv/opencv/tree/4.x/samples/data/LinuxLogo.jpg)) will be used.
 
 For instance, using this image:
 
index beb0964..7cdbcaf 100644 (file)
@@ -105,7 +105,7 @@ Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp
@@ -113,7 +113,7 @@ Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/Smoothing/Smoothing.java)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgProc/Smoothing/Smoothing.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/ImgProc/Smoothing/Smoothing.java
@@ -121,7 +121,7 @@ Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/imgProc/Smoothing/smoothing.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/imgProc/Smoothing/smoothing.py
@@ -228,7 +228,7 @@ already known by now.
 Results
 -------
 
--   The code opens an image (in this case [lena.jpg](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/lena.jpg))
+-   The code opens an image (in this case [lena.jpg](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/lena.jpg))
     and display it under the effects of the 4 filters explained.
 -   Here is a snapshot of the image smoothed using *medianBlur*:
 
index 4bc5f35..690c7b9 100644 (file)
@@ -81,13 +81,13 @@ Code
 @add_toggle_cpp
 -   **Downloadable code**:
     -   Click
-        [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp)
+        [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp)
         for the basic version (explained in this tutorial).
     -   For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
         skin area) you can check the [improved
-        demo](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp)
+        demo](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp)
     -   ...or you can always check out the classical
-        [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
+        [camshiftdemo](https://github.com/opencv/opencv/tree/4.x/samples/cpp/camshiftdemo.cpp)
         in samples.
 
 -   **Code at glance:**
@@ -97,13 +97,13 @@ Code
 @add_toggle_java
 -   **Downloadable code**:
     -   Click
-        [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java)
+        [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java)
         for the basic version (explained in this tutorial).
     -   For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
         skin area) you can check the [improved
-        demo](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java)
+        demo](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo2.java)
     -   ...or you can always check out the classical
-        [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
+        [camshiftdemo](https://github.com/opencv/opencv/tree/4.x/samples/cpp/camshiftdemo.cpp)
         in samples.
 
 -   **Code at glance:**
@@ -113,13 +113,13 @@ Code
 @add_toggle_python
 -   **Downloadable code**:
     -   Click
-        [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py)
+        [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo1.py)
         for the basic version (explained in this tutorial).
     -   For stuff slightly fancier (using H-S histograms and floodFill to define a mask for the
         skin area) you can check the [improved
-        demo](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py)
+        demo](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/Histograms_Matching/back_projection/calcBackProject_Demo2.py)
     -   ...or you can always check out the classical
-        [camshiftdemo](https://github.com/opencv/opencv/tree/master/samples/cpp/camshiftdemo.cpp)
+        [camshiftdemo](https://github.com/opencv/opencv/tree/4.x/samples/cpp/camshiftdemo.cpp)
         in samples.
 
 -   **Code at glance:**
index 5f16931..37fe28a 100644 (file)
@@ -79,7 +79,7 @@ Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
@@ -87,7 +87,7 @@ Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/Histograms_Matching/histogram_calculation/CalcHistDemo.java
@@ -95,7 +95,7 @@ Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/Histograms_Matching/histogram_calculation/calcHist_Demo.py
index cc38cba..efcb428 100644 (file)
@@ -56,7 +56,7 @@ Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp
@@ -64,7 +64,7 @@ Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/Histograms_Matching/histogram_comparison/CompareHistDemo.java
@@ -72,7 +72,7 @@ Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/Histograms_Matching/histogram_comparison/compareHist_Demo.py
index e80032b..e594c9d 100644 (file)
@@ -74,7 +74,7 @@ Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp
@@ -82,7 +82,7 @@ Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHistDemo.java
@@ -90,7 +90,7 @@ Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/Histograms_Matching/histogram_equalization/EqualizeHist_Demo.py
index f03f761..56c25b5 100644 (file)
@@ -137,7 +137,7 @@ Code
 @add_toggle_cpp
 
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp)
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp
 
@@ -146,7 +146,7 @@ Code
 @add_toggle_java
 
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java)
 -   **Code at glance:**
     @include samples/java/tutorial_code/ImgProc/tutorial_template_matching/MatchTemplateDemo.java
 
@@ -155,7 +155,7 @@ Code
 @add_toggle_python
 
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/match_template/match_template.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/imgProc/match_template/match_template.py)
 -   **Code at glance:**
     @include samples/python/tutorial_code/imgProc/match_template/match_template.py
 
index 887b376..0c28894 100644 (file)
@@ -55,19 +55,19 @@ The code corresponding to the previous example is shown below.
 
 @add_toggle_cpp
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp)
 @include samples/cpp/tutorial_code/ImgProc/HitMiss/HitMiss.cpp
 @end_toggle
 
 @add_toggle_java
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/HitMiss/HitMiss.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgProc/HitMiss/HitMiss.java)
 @include samples/java/tutorial_code/ImgProc/HitMiss/HitMiss.java
 @end_toggle
 
 @add_toggle_python
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py)
 @include samples/python/tutorial_code/imgProc/HitMiss/hit_miss.py
 @end_toggle
 
index d585c77..23f3fac 100644 (file)
@@ -78,19 +78,19 @@ Code
 
 @add_toggle_cpp
 -   The tutorial code's is shown lines below. You can also download it from
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp)
     @include samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 -   The tutorial code's is shown lines below. You can also download it from
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgTrans/canny_detector/CannyDetectorDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgTrans/canny_detector/CannyDetectorDemo.java)
     @include samples/java/tutorial_code/ImgTrans/canny_detector/CannyDetectorDemo.java
 @end_toggle
 
 @add_toggle_python
 -   The tutorial code's is shown lines below. You can also download it from
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py)
     @include samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py
 @end_toggle
 
index 4acc060..7a1efcd 100644 (file)
@@ -59,19 +59,19 @@ The tutorial code's is shown lines below.
 
 @add_toggle_cpp
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp)
 @include samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp
 @end_toggle
 
 @add_toggle_java
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/MakeBorder/CopyMakeBorder.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/MakeBorder/CopyMakeBorder.java)
 @include samples/java/tutorial_code/ImgTrans/MakeBorder/CopyMakeBorder.java
 @end_toggle
 
 @add_toggle_python
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py)
 @include samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py
 @end_toggle
 
index 28b81f4..8a6f4bd 100644 (file)
@@ -28,19 +28,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp).
 @include samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgTrans/distance_transformation/ImageSegmentationDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgTrans/distance_transformation/ImageSegmentationDemo.java)
 @include samples/java/tutorial_code/ImgTrans/distance_transformation/ImageSegmentationDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py)
 @include samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py
 @end_toggle
 
index efe3fdf..e93a26d 100644 (file)
@@ -75,19 +75,19 @@ The tutorial code's is shown in the lines below.
 
 @add_toggle_cpp
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp)
 @include cpp/tutorial_code/ImgTrans/filter2D_demo.cpp
 @end_toggle
 
 @add_toggle_java
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/Filter2D/Filter2D_Demo.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/Filter2D/Filter2D_Demo.java)
 @include java/tutorial_code/ImgTrans/Filter2D/Filter2D_Demo.java
 @end_toggle
 
 @add_toggle_python
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/Filter2D/filter2D.py)
 @include python/tutorial_code/ImgTrans/Filter2D/filter2D.py
 @end_toggle
 
index 6b2f215..952b542 100644 (file)
@@ -51,28 +51,28 @@ Code
 
 @add_toggle_cpp
 The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp).
 A slightly fancier version (which shows trackbars for changing the threshold values) can be found
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp).
 @include samples/cpp/tutorial_code/ImgTrans/houghcircles.cpp
 @end_toggle
 
 @add_toggle_java
 The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java).
 @include samples/java/tutorial_code/ImgTrans/HoughCircle/HoughCircles.java
 @end_toggle
 
 @add_toggle_python
 The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py).
 @include samples/python/tutorial_code/ImgTrans/HoughCircle/hough_circle.py
 @end_toggle
 
 Explanation
 -----------
 
-The image we used can be found [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/smarties.png)
+The image we used can be found [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/smarties.png)
 
 ####  Load an image:
 
index 5edff16..22295c1 100644 (file)
@@ -107,22 +107,22 @@ Code
 
 @add_toggle_cpp
 The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/houghlines.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/houghlines.cpp).
 A slightly fancier version (which shows both Hough standard and probabilistic
 with trackbars for changing the threshold values) can be found
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp).
 @include samples/cpp/tutorial_code/ImgTrans/houghlines.cpp
 @end_toggle
 
 @add_toggle_java
 The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/HoughLine/HoughLines.java).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/HoughLine/HoughLines.java).
 @include samples/java/tutorial_code/ImgTrans/HoughLine/HoughLines.java
 @end_toggle
 
 @add_toggle_python
 The sample code that we will explain can be downloaded from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py).
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py).
 @include samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py
 @end_toggle
 
@@ -278,7 +278,7 @@ Result
     section. It still implements the same stuff as above, only adding the Trackbar for the
     Threshold.
 
-Using an input image such as a [sudoku image](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/sudoku.png).
+Using an input image such as a [sudoku image](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/sudoku.png).
 We get the following result by using the Standard Hough Line Transform:
 ![](images/hough_lines_result1.png)
 And by using the Probabilistic Hough Line Transform:
index 27b4aa9..272456c 100644 (file)
@@ -62,19 +62,19 @@ Code
 
 @add_toggle_cpp
 -#  The tutorial code's is shown lines below. You can also download it from
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp)
     @include samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 -#  The tutorial code's is shown lines below. You can also download it from
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/LaPlace/LaplaceDemo.java)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/LaPlace/LaplaceDemo.java)
     @include samples/java/tutorial_code/ImgTrans/LaPlace/LaplaceDemo.java
 @end_toggle
 
 @add_toggle_python
 -#  The tutorial code's is shown lines below. You can also download it from
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py)
     @include samples/python/tutorial_code/ImgTrans/LaPlace/laplace_demo.py
 @end_toggle
 
index 3c3d95c..415b5b9 100644 (file)
@@ -63,19 +63,19 @@ Code
 
 @add_toggle_cpp
 -   The tutorial code's is shown lines below. You can also download it from
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp)
     @include samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 -   The tutorial code's is shown lines below. You can also download it from
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgTrans/remap/RemapDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgTrans/remap/RemapDemo.java)
     @include samples/java/tutorial_code/ImgTrans/remap/RemapDemo.java
 @end_toggle
 
 @add_toggle_python
 -   The tutorial code's is shown lines below. You can also download it from
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py)
     @include samples/python/tutorial_code/ImgTrans/remap/Remap_Demo.py
 @end_toggle
 
index 4183476..1e5a123 100644 (file)
@@ -121,19 +121,19 @@ Code
 
 @add_toggle_cpp
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp)
 @include samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgTrans/SobelDemo/SobelDemo.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/SobelDemo/SobelDemo.java)
 @include samples/java/tutorial_code/ImgTrans/SobelDemo/SobelDemo.java
 @end_toggle
 
 @add_toggle_python
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py)
 @include samples/python/tutorial_code/ImgTrans/SobelDemo/sobel_demo.py
 @end_toggle
 
index 22d5298..df6df1a 100644 (file)
@@ -100,19 +100,19 @@ Code
 
 @add_toggle_cpp
 -   The tutorial's code is shown below. You can also download it
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp)
     @include samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp
 @end_toggle
 
 @add_toggle_java
 -   The tutorial's code is shown below. You can also download it
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Smoothing/Smoothing.cpp)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java)
     @include samples/java/tutorial_code/ImgTrans/warp_affine/GeometricTransformsDemo.java
 @end_toggle
 
 @add_toggle_python
 -   The tutorial's code is shown below. You can also download it
-    [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py)
+    [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py)
     @include samples/python/tutorial_code/ImgTrans/warp_affine/Geometric_Transforms_Demo.py
 @end_toggle
 
index 74b117f..ce5223c 100644 (file)
@@ -61,24 +61,24 @@ Code
 This tutorial code's is shown lines below.
 
 @add_toggle_cpp
-You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp).
+You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp).
 @include samples/cpp/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.cpp
 @end_toggle
 
 @add_toggle_java
-You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.java).
+You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.java).
 @include samples/java/tutorial_code/ImgProc/morph_lines_detection/Morphology_3.java
 @end_toggle
 
 @add_toggle_python
-You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py).
+You can also download it from [here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py).
 @include samples/python/tutorial_code/imgProc/morph_lines_detection/morph_lines_detection.py
 @end_toggle
 
 Explanation / Result
 --------------------
 
-Get image from [here](https://raw.githubusercontent.com/opencv/opencv/master/doc/tutorials/imgproc/morph_lines_detection/images/src.png) .
+Get image from [here](https://raw.githubusercontent.com/opencv/opencv/4.x/doc/tutorials/imgproc/morph_lines_detection/images/src.png) .
 
 #### Load Image
 
index 8f99117..d8d9ca1 100644 (file)
@@ -91,19 +91,19 @@ Code
 
 @add_toggle_cpp
 This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp)
 @include cpp/tutorial_code/ImgProc/Morphology_2.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java)
 @include java/tutorial_code/ImgProc/opening_closing_hats/MorphologyDemo2.java
 @end_toggle
 
 @add_toggle_python
 This tutorial's code is shown below. You can also download it
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py)
 @include python/tutorial_code/imgProc/opening_closing_hats/morphology_2.py
 @end_toggle
 
index cb66285..8a6b9ce 100644 (file)
@@ -79,19 +79,19 @@ This tutorial code's is shown lines below.
 
 @add_toggle_cpp
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp)
 @include samples/cpp/tutorial_code/ImgProc/Pyramids/Pyramids.cpp
 @end_toggle
 
 @add_toggle_java
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/java/tutorial_code/ImgProc/Pyramids/Pyramids.java)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/java/tutorial_code/ImgProc/Pyramids/Pyramids.java)
 @include samples/java/tutorial_code/ImgProc/Pyramids/Pyramids.java
 @end_toggle
 
 @add_toggle_python
 You can also download it from
-[here](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py)
+[here](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/tutorial_code/imgProc/Pyramids/pyramids.py)
 @include samples/python/tutorial_code/imgProc/Pyramids/pyramids.py
 @end_toggle
 
@@ -191,7 +191,7 @@ Otherwise, an error will be shown.
 Results
 -------
 
--   The program calls by default an image [chicky_512.png](https://raw.githubusercontent.com/opencv/opencv/master/samples/data/chicky_512.png)
+-   The program calls by default an image [chicky_512.png](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/data/chicky_512.png)
     that comes in the `samples/data` folder. Notice that this image is \f$512 \times 512\f$,
     hence a downsample won't generate any error (\f$512 = 2^{9}\f$). The original image is shown below:
 
index 4100d91..8268214 100644 (file)
@@ -30,7 +30,7 @@ Code
     to populate our image with a big number of geometric figures. Since we will be initializing them
     in a random fashion, this process will be automatic and made by using *loops* .
 -   This code is in your OpenCV sample folder. Otherwise you can grab it from
-    [here](http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp)
+    [here](https://github.com/opencv/opencv/blob/4.x/samples/cpp/tutorial_code/ImgProc/basic_drawing/Drawing_2.cpp)
 
 Explanation
 -----------
index 2c6d59b..520d876 100644 (file)
@@ -27,19 +27,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp)
 @include samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java)
 @include samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py)
 @include samples/python/tutorial_code/ShapeDescriptors/bounding_rects_circles/generalContours_demo1.py
 @end_toggle
 
index c15d73e..0999d52 100644 (file)
@@ -27,19 +27,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp)
 @include samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/GeneralContoursDemo2.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/GeneralContoursDemo2.java)
 @include samples/java/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/GeneralContoursDemo2.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py)
 @include samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py
 @end_toggle
 
index dc112b9..0e78102 100644 (file)
@@ -27,19 +27,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp)
 @include samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/find_contours/FindContoursDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ShapeDescriptors/find_contours/FindContoursDemo.java)
 @include samples/java/tutorial_code/ShapeDescriptors/find_contours/FindContoursDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py)
 @include samples/python/tutorial_code/ShapeDescriptors/find_contours/findContours_demo.py
 @end_toggle
 
index 36763fd..8a32cba 100644 (file)
@@ -26,19 +26,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp)
 @include samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/hull/HullDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ShapeDescriptors/hull/HullDemo.java)
 @include samples/java/tutorial_code/ShapeDescriptors/hull/HullDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py)
 @include samples/python/tutorial_code/ShapeDescriptors/hull/hull_demo.py
 @end_toggle
 
index 4e47242..dc69e7f 100644 (file)
@@ -28,19 +28,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp)
 @include samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java)
 @include samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py)
 @include samples/python/tutorial_code/ShapeDescriptors/moments/moments_demo.py
 @end_toggle
 
index 6f4c6d7..92f3245 100644 (file)
@@ -26,19 +26,19 @@ Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp)
 @include samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ShapeDescriptors/point_polygon_test/PointPolygonTestDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ShapeDescriptors/point_polygon_test/PointPolygonTestDemo.java)
 @include samples/java/tutorial_code/ShapeDescriptors/point_polygon_test/PointPolygonTestDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ShapeDescriptors/point_polygon_test/pointPolygonTest_demo.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ShapeDescriptors/point_polygon_test/pointPolygonTest_demo.py)
 @include samples/python/tutorial_code/ShapeDescriptors/point_polygon_test/pointPolygonTest_demo.py
 @end_toggle
 
index f7458d1..fa69251 100644 (file)
@@ -108,19 +108,19 @@ Code
 
 @add_toggle_cpp
 The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Threshold.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgProc/Threshold.cpp)
 @include samples/cpp/tutorial_code/ImgProc/Threshold.cpp
 @end_toggle
 
 @add_toggle_java
 The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/threshold/Threshold.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgProc/threshold/Threshold.java)
 @include samples/java/tutorial_code/ImgProc/threshold/Threshold.java
 @end_toggle
 
 @add_toggle_python
 The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/threshold/threshold.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/imgProc/threshold/threshold.py)
 @include samples/python/tutorial_code/imgProc/threshold/threshold.py
 @end_toggle
 
index f05ebe4..81fa494 100644 (file)
@@ -50,19 +50,19 @@ Code
 
 @add_toggle_cpp
 The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp)
 @include samples/cpp/tutorial_code/ImgProc/Threshold_inRange.cpp
 @end_toggle
 
 @add_toggle_java
 The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ImgProc/threshold_inRange/ThresholdInRange.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ImgProc/threshold_inRange/ThresholdInRange.java)
 @include samples/java/tutorial_code/ImgProc/threshold_inRange/ThresholdInRange.java
 @end_toggle
 
 @add_toggle_python
 The tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py)
 @include samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py
 @end_toggle
 
index 938dd61..092eacf 100644 (file)
@@ -82,7 +82,7 @@ The structure of package contents looks as follows:
 
 -   `doc` folder contains various OpenCV documentation in PDF format. It's also available online at
     <http://docs.opencv.org>.
-    @note The most recent docs (nightly build) are at <http://docs.opencv.org/master>. Generally, it's more
+    @note The most recent docs (nightly build) are at <http://docs.opencv.org/4.x>. Generally, it's more
     up-to-date, but can refer to not-yet-released functionality.
     @todo I'm not sure that this is the best place to talk about OpenCV Manager
 
index 1f2f0e5..b06914a 100644 (file)
@@ -28,7 +28,7 @@ If you need help with anything of the above, you may refer to our @ref tutorial_
 This tutorial also assumes you have an Android operated device with OpenCL enabled.
 
 The related source code is located within OpenCV samples at
-[opencv/samples/android/tutorial-4-opencl](https://github.com/opencv/opencv/tree/master/samples/android/tutorial-4-opencl/) directory.
+[opencv/samples/android/tutorial-4-opencl](https://github.com/opencv/opencv/tree/4.x/samples/android/tutorial-4-opencl/) directory.
 
 Preface
 -------
@@ -255,7 +255,7 @@ As you can see, inheritors for `Camera` and `Camera2` APIs should implement the
 @endcode
 
 Let's leave the details of their implementation beyond of this tutorial, please refer the
-[source code](https://github.com/opencv/opencv/tree/master/samples/android/tutorial-4-opencl/) to see them.
+[source code](https://github.com/opencv/opencv/tree/4.x/samples/android/tutorial-4-opencl/) to see them.
 
 Preview Frames modification
 ---------------------------
index a6ac5d9..98ba16b 100644 (file)
@@ -313,7 +313,7 @@ Then you can start interacting with OpenCV by just referencing the fully qualifi
 classes.
 
 @note
-[Here](https://docs.opencv.org/master/javadoc/index.html) you can find the full OpenCV Java API.
+[Here](https://docs.opencv.org/4.x/javadoc/index.html) you can find the full OpenCV Java API.
 
 @code{.clojure}
 user=> (org.opencv.core.Point. 0 0)
index 0ba5627..dda1410 100644 (file)
@@ -339,7 +339,7 @@ Integration with [FFmpeg](https://en.wikipedia.org/wiki/FFmpeg) library for deco
 - _swscale_
 - _avresample_ (optional)
 
-Exception is Windows platform where a prebuilt [plugin library containing FFmpeg](https://github.com/opencv/opencv_3rdparty/tree/ffmpeg/master) will be downloaded during a configuration stage and copied to the `bin` folder with all produced libraries.
+Exception is Windows platform where a prebuilt [plugin library containing FFmpeg](https://github.com/opencv/opencv_3rdparty/tree/ffmpeg/4.x) will be downloaded during a configuration stage and copied to the `bin` folder with all produced libraries.
 
 @note [Libav](https://en.wikipedia.org/wiki/Libav) library can be used instead of FFmpeg, but this combination is not actively supported.
 
index 6fe6ca6..96284bd 100644 (file)
@@ -26,7 +26,7 @@ Source Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/introduction/display_image/display_image.cpp
@@ -34,7 +34,7 @@ Source Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/introduction/display_image/display_image.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/introduction/display_image/display_image.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/introduction/display_image/display_image.py
index ecb188d..84cd5b0 100644 (file)
@@ -204,7 +204,7 @@ Test it!
 --------
 
 Now to try this out download our little test [source code
-](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/introduction/windows_visual_studio_opencv/introduction_windows_vs.cpp)
+](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/introduction/windows_visual_studio_opencv/introduction_windows_vs.cpp)
 or get it from the sample code folder of the OpenCV sources. Add this to your project and build it.
 Here's its content:
 
@@ -220,7 +220,7 @@ the *IDE* the console window will not close once finished. It will wait for a ke
 This is important to remember when you code inside the code open and save commands. Your resources
 will be saved ( and queried for at opening!!!) relatively to your working directory. This is unless
 you give a full, explicit path as a parameter for the I/O functions. In the code above we open [this
-OpenCV logo](https://github.com/opencv/opencv/tree/master/samples/data/opencv-logo.png). Before starting up the application,
+OpenCV logo](https://github.com/opencv/opencv/tree/4.x/samples/data/opencv-logo.png). Before starting up the application,
 make sure you place
 the image file in your current working directory. Modify the image file name inside the code to try
 it out on other images too. Run it and voilà:
index 5d07f1d..839f32c 100644 (file)
@@ -51,7 +51,7 @@ The results as well as the input data are shown on the screen.
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/video/bg_sub.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/video/bg_sub.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/video/bg_sub.cpp
@@ -59,7 +59,7 @@ The results as well as the input data are shown on the screen.
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/video/background_subtraction/BackgroundSubtractionDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/video/background_subtraction/BackgroundSubtractionDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/video/background_subtraction/BackgroundSubtractionDemo.java
@@ -67,7 +67,7 @@ The results as well as the input data are shown on the screen.
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/video/background_subtraction/bg_sub.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/video/background_subtraction/bg_sub.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/video/background_subtraction/bg_sub.py
index 149bac5..8f0e125 100644 (file)
@@ -96,7 +96,7 @@ more details or check out the references in the Additional Resources section.
 Haar-cascade Detection in OpenCV
 --------------------------------
 OpenCV provides a training method (see @ref tutorial_traincascade) or pretrained models, that can be read using the @ref cv::CascadeClassifier::load method.
-The pretrained models are located in the data folder in the OpenCV installation or can be found [here](https://github.com/opencv/opencv/tree/master/data).
+The pretrained models are located in the data folder in the OpenCV installation or can be found [here](https://github.com/opencv/opencv/tree/4.x/data).
 
 The following code example will use pretrained Haar cascade models to detect faces and eyes in an image.
 First, a @ref cv::CascadeClassifier is created and the necessary XML file is loaded using the @ref cv::CascadeClassifier::load method.
@@ -104,19 +104,19 @@ Afterwards, the detection is done using the @ref cv::CascadeClassifier::detectMu
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp)
 @include samples/cpp/tutorial_code/objectDetection/objectDetection.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/objectDetection/cascade_classifier/ObjectDetectionDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/objectDetection/cascade_classifier/ObjectDetectionDemo.java)
 @include samples/java/tutorial_code/objectDetection/cascade_classifier/ObjectDetectionDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py)
 @include samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py
 @end_toggle
 
index a9f1276..98d0f65 100644 (file)
@@ -42,19 +42,19 @@ Source Code
 
 @add_toggle_cpp
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp)
 @include samples/cpp/tutorial_code/photo/hdr_imaging/hdr_imaging.cpp
 @end_toggle
 
 @add_toggle_java
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/photo/hdr_imaging/HDRImagingDemo.java)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/photo/hdr_imaging/HDRImagingDemo.java)
 @include samples/java/tutorial_code/photo/hdr_imaging/HDRImagingDemo.java
 @end_toggle
 
 @add_toggle_python
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py)
+[here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py)
 @include samples/python/tutorial_code/photo/hdr_imaging/hdr_imaging.py
 @end_toggle
 
@@ -62,7 +62,7 @@ Sample images
 -------------
 
 Data directory that contains images, exposure times and `list.txt` file can be downloaded from
-[here](https://github.com/opencv/opencv_extra/tree/master/testdata/cv/hdr/exposures).
+[here](https://github.com/opencv/opencv_extra/tree/4.x/testdata/cv/hdr/exposures).
 
 Explanation
 -----------
index 490024a..c66ef7d 100644 (file)
@@ -102,7 +102,7 @@ Source Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/ml/introduction_to_pca/introduction_to_pca.cpp
@@ -110,7 +110,7 @@ Source Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ml/introduction_to_pca/IntroductionToPCADemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ml/introduction_to_pca/IntroductionToPCADemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/ml/introduction_to_pca/IntroductionToPCADemo.java
@@ -118,13 +118,13 @@ Source Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py
 @end_toggle
 
-@note Another example using PCA for dimensionality reduction while maintaining an amount of variance can be found at [opencv_source_code/samples/cpp/pca.cpp](https://github.com/opencv/opencv/tree/master/samples/cpp/pca.cpp)
+@note Another example using PCA for dimensionality reduction while maintaining an amount of variance can be found at [opencv_source_code/samples/cpp/pca.cpp](https://github.com/opencv/opencv/tree/4.x/samples/cpp/pca.cpp)
 
 Explanation
 -----------
index 557093c..b74d989 100644 (file)
@@ -106,7 +106,7 @@ Source Code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp
@@ -114,7 +114,7 @@ Source Code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/ml/introduction_to_svm/IntroductionToSVMDemo.java
@@ -122,7 +122,7 @@ Source Code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/ml/introduction_to_svm/introduction_to_svm.py
index 7ca9eca..de95935 100644 (file)
@@ -48,7 +48,7 @@ low light, low light values are discarded using **cv.inRange()** function.
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/video/meanshift/meanshift.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/video/meanshift/meanshift.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/video/meanshift/meanshift.cpp
@@ -56,7 +56,7 @@ low light, low light values are discarded using **cv.inRange()** function.
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/video/meanshift/meanshift.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/video/meanshift/meanshift.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/video/meanshift/meanshift.py
@@ -64,7 +64,7 @@ low light, low light values are discarded using **cv.inRange()** function.
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/video/meanshift/MeanshiftDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/video/meanshift/MeanshiftDemo.java)
 
 -   **Code at glance:**
     @include  samples/java/tutorial_code/video/meanshift/MeanshiftDemo.java
@@ -97,7 +97,7 @@ parameters (used to be passed as search window in next iteration). See the code
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/video/meanshift/camshift.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/video/meanshift/camshift.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/video/meanshift/camshift.cpp
@@ -105,7 +105,7 @@ parameters (used to be passed as search window in next iteration). See the code
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/video/meanshift/camshift.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/video/meanshift/camshift.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/video/meanshift/camshift.py
@@ -113,7 +113,7 @@ parameters (used to be passed as search window in next iteration). See the code
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/video/meanshift/CamshiftDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/video/meanshift/CamshiftDemo.java)
 
 -   **Code at glance:**
     @include  samples/java/tutorial_code/video/meanshift/CamshiftDemo.java
@@ -135,5 +135,5 @@ Additional Resources
 Exercises
 ---------
 
--#  OpenCV comes with a Python [sample](https://github.com/opencv/opencv/blob/master/samples/python/camshift.py) for an interactive demo of camshift. Use it, hack it, understand
+-#  OpenCV comes with a Python [sample](https://github.com/opencv/opencv/blob/4.x/samples/python/camshift.py) for an interactive demo of camshift. Use it, hack it, understand
     it.
index f70449f..8512d1b 100644 (file)
@@ -97,11 +97,11 @@ Source Code
 -----------
 
 You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
-[download it from here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
+[download it from here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
@@ -109,7 +109,7 @@ You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_sv
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/ml/non_linear_svms/NonLinearSVMsDemo.java
@@ -117,7 +117,7 @@ You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_sv
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/ml/non_linear_svms/non_linear_svms.py
index c8a2374..4fa5f1a 100644 (file)
@@ -100,7 +100,7 @@ below:
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/video/optical_flow/optical_flow.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/video/optical_flow/optical_flow.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/video/optical_flow/optical_flow.cpp
@@ -108,7 +108,7 @@ below:
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/video/optical_flow/optical_flow.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/video/optical_flow/optical_flow.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/video/optical_flow/optical_flow.py
@@ -117,7 +117,7 @@ below:
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/video/optical_flow/OpticalFlowDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/video/optical_flow/OpticalFlowDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/video/optical_flow/OpticalFlowDemo.java
@@ -150,7 +150,7 @@ corresponds to Value plane. See the code below:
 
 @add_toggle_cpp
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/cpp/tutorial_code/video/optical_flow/optical_flow_dense.cpp)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/tutorial_code/video/optical_flow/optical_flow_dense.cpp)
 
 -   **Code at glance:**
     @include samples/cpp/tutorial_code/video/optical_flow/optical_flow_dense.cpp
@@ -158,7 +158,7 @@ corresponds to Value plane. See the code below:
 
 @add_toggle_python
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py)
 
 -   **Code at glance:**
     @include samples/python/tutorial_code/video/optical_flow/optical_flow_dense.py
@@ -167,7 +167,7 @@ corresponds to Value plane. See the code below:
 
 @add_toggle_java
 -   **Downloadable code**: Click
-    [here](https://github.com/opencv/opencv/tree/master/samples/java/tutorial_code/video/optical_flow/OpticalFlowDenseDemo.java)
+    [here](https://github.com/opencv/opencv/tree/4.x/samples/java/tutorial_code/video/optical_flow/OpticalFlowDenseDemo.java)
 
 -   **Code at glance:**
     @include samples/java/tutorial_code/video/optical_flow/OpticalFlowDenseDemo.java
index e636d83..d1ea5af 100644 (file)
@@ -25,7 +25,7 @@ Code
 ----
 
 This tutorial code's is shown lines below. You can also download it from
-[here](https://github.com/opencv/opencv/tree/master/samples/cpp/stitching.cpp).
+[here](https://github.com/opencv/opencv/tree/4.x/samples/cpp/stitching.cpp).
 
 @include samples/cpp/stitching.cpp
 
@@ -114,11 +114,11 @@ configuration you can use stitching_detailed source code available in C++ or pyt
 
 <H4>stitching_detailed</H4>
 @add_toggle_cpp
-[stitching_detailed.cpp](https://raw.githubusercontent.com/opencv/opencv/master/samples/cpp/stitching_detailed.cpp)
+[stitching_detailed.cpp](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/cpp/stitching_detailed.cpp)
 @end_toggle
 
 @add_toggle_python
-[stitching_detailed.py](https://raw.githubusercontent.com/opencv/opencv/master/samples/python/stitching_detailed.py)
+[stitching_detailed.py](https://raw.githubusercontent.com/opencv/opencv/4.x/samples/python/stitching_detailed.py)
 @end_toggle
 
 The stitching_detailed program uses the command line to get stitching parameters. Many parameters exist. The above examples show some of the possible command line parameters:
@@ -165,4 +165,4 @@ newspaper1.jpg newspaper2.jpg --work_megapix 0.6 --features surf --matcher affin
 
 ![](images/affinepano.jpg)
 
-You can find  all images in https://github.com/opencv/opencv_extra/tree/master/testdata/stitching
+You can find all images in https://github.com/opencv/opencv_extra/tree/4.x/testdata/stitching
index fdb3851..e4f7525 100644 (file)
@@ -11,7 +11,7 @@ Introduction
 
 Working with a boosted cascade of weak classifiers includes two major stages: the training and the detection stage. The detection stage using either HAAR or LBP based models, is described in the @ref tutorial_cascade_classifier "object detection tutorial". This documentation gives an overview of the functionality needed to train your own boosted cascade of weak classifiers. The current guide will walk through all the different stages: collecting training data, preparation of the training data and executing the actual model training.
 
-To support this tutorial, several official OpenCV applications will be used: [opencv_createsamples](https://github.com/opencv/opencv/tree/master/apps/createsamples), [opencv_annotation](https://github.com/opencv/opencv/tree/master/apps/annotation), [opencv_traincascade](https://github.com/opencv/opencv/tree/master/apps/traincascade) and [opencv_visualisation](https://github.com/opencv/opencv/tree/master/apps/visualisation).
+To support this tutorial, several official OpenCV applications will be used: [opencv_createsamples](https://github.com/opencv/opencv/tree/4.x/apps/createsamples), [opencv_annotation](https://github.com/opencv/opencv/tree/4.x/apps/annotation), [opencv_traincascade](https://github.com/opencv/opencv/tree/4.x/apps/traincascade) and [opencv_visualisation](https://github.com/opencv/opencv/tree/4.x/apps/visualisation).
 
 @note Createsamples and traincascade are disabled since OpenCV 4.0. Consider using these apps for training from 3.4 branch for Cascade Classifier. Model format is the same between 3.4 and 4.x.
 
index ea85007..a5782f2 100644 (file)
@@ -82,4 +82,4 @@ Block Matching algorithm has been successfully parallelized using the following
 3.  Merge the results into a single disparity map.
 
 With this algorithm, a dual GPU gave a 180% performance increase comparing to the single Fermi GPU.
-For a source code example, see <https://github.com/opencv/opencv/tree/master/samples/gpu/>.
+For a source code example, see <https://github.com/opencv/opencv/tree/4.x/samples/gpu/>.
index 8fe7ee6..2658d92 100644 (file)
@@ -7,7 +7,7 @@
 
 /**
 Helper header to support SIMD intrinsics (universal intrinsics) in user code.
-Intrinsics documentation: https://docs.opencv.org/master/df/d91/group__core__hal__intrin.html
+Intrinsics documentation: https://docs.opencv.org/4.x/df/d91/group__core__hal__intrin.html
 
 
 Checks of target CPU instruction set based on compiler definitions don't work well enough.
index 92e307e..76c6d6f 100644 (file)
@@ -18,7 +18,7 @@ parser = argparse.ArgumentParser(description="Use this script to create TensorFl
                                              "with weights from OpenCV's face detection network. "
                                              "Only backbone part of SSD model is converted this way. "
                                              "Look for .pbtxt configuration file at "
-                                             "https://github.com/opencv/opencv_extra/tree/master/testdata/dnn/opencv_face_detector.pbtxt")
+                                             "https://github.com/opencv/opencv_extra/tree/4.x/testdata/dnn/opencv_face_detector.pbtxt")
 parser.add_argument('--model', help='Path to .caffemodel weights', required=True)
 parser.add_argument('--proto', help='Path to .prototxt Caffe model definition', required=True)
 parser.add_argument('--pb', help='Path to output .pb TensorFlow model', required=True)
index 7001719..d958e85 100644 (file)
@@ -58,7 +58,7 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--imgs_dir", help="path to ImageNet validation subset images dir, ILSVRC2012_img_val dir")
     parser.add_argument("--img_cls_file", help="path to file with classes ids for images, download it here:"
-                            "https://github.com/opencv/opencv_extra/tree/master/testdata/dnn/img_classes_inception.txt")
+                            "https://github.com/opencv/opencv_extra/tree/4.x/testdata/dnn/img_classes_inception.txt")
     parser.add_argument("--model", help="path to tensorflow model, download it here:"
                                         "https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip")
     parser.add_argument("--log", help="path to logging file")
index af4bc2a..1c8407a 100644 (file)
@@ -205,9 +205,9 @@ if __name__ == "__main__":
     parser.add_argument("--val_names", help="path to file with validation set image names, download it here: "
                         "https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/data/pascal/seg11valid.txt")
     parser.add_argument("--cls_file", help="path to file with colors for classes, download it here: "
-                        "https://github.com/opencv/opencv/blob/master/samples/data/dnn/pascal-classes.txt")
+                        "https://github.com/opencv/opencv/blob/4.x/samples/data/dnn/pascal-classes.txt")
     parser.add_argument("--prototxt", help="path to caffe prototxt, download it here: "
-                        "https://github.com/opencv/opencv/blob/master/samples/data/dnn/fcn8s-heavy-pascal.prototxt")
+                        "https://github.com/opencv/opencv/blob/4.x/samples/data/dnn/fcn8s-heavy-pascal.prototxt")
     parser.add_argument("--caffemodel", help="path to caffemodel file, download it here: "
                                              "http://dl.caffe.berkeleyvision.org/fcn8s-heavy-pascal.caffemodel")
     parser.add_argument("--log", help="path to logging file")
index 38d2628..3592255 100644 (file)
@@ -91,7 +91,7 @@ compensate for the differences in the size of areas. The sums of pixel values ov
 regions are calculated rapidly using integral images (see below and the integral description).
 
 To see the object detector at work, have a look at the facedetect demo:
-<https://github.com/opencv/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>
+<https://github.com/opencv/opencv/tree/4.x/samples/cpp/dbt_face_detection.cpp>
 
 The following reference is for the detection part only. There is a separate application called
 opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
index 2245722..ec49f46 100644 (file)
@@ -25,7 +25,7 @@ class NewOpenCVTests(unittest.TestCase):
     repoPath = None
     extraTestDataPath = None
     # github repository url
-    repoUrl = 'https://raw.github.com/opencv/opencv/master'
+    repoUrl = 'https://raw.github.com/opencv/opencv/4.x'
 
     def find_file(self, filename, searchPaths=[], required=True):
         searchPaths = searchPaths if searchPaths else [self.repoPath, self.extraTestDataPath]
index 7f23e56..310148a 100644 (file)
@@ -1,3 +1,3 @@
 This folder contains toolchains and additional files that are needed for cross compilation.
 For more information see introduction tutorials for target platform in documentation:
-https://docs.opencv.org/master/df/d65/tutorial_table_of_content_introduction.html
+https://docs.opencv.org/4.x/df/d65/tutorial_table_of_content_introduction.html
index 53cf266..30a6ad9 100644 (file)
@@ -1,8 +1,8 @@
 // This sample is based on "Camera calibration With OpenCV" tutorial:
-// https://docs.opencv.org/3.4/d4/d94/tutorial_camera_calibration.html
+// https://docs.opencv.org/4.x/d4/d94/tutorial_camera_calibration.html
 //
 // It uses standard OpenCV asymmetric circles grid pattern 11x4:
-// https://github.com/opencv/opencv/blob/3.4/doc/acircles_pattern.png
+// https://github.com/opencv/opencv/blob/4.x/doc/acircles_pattern.png
 // The results are the camera matrix and 5 distortion coefficients.
 //
 // Tap on highlighted pattern to capture pattern corners for calibration.
index 97170a9..2c871db 100644 (file)
@@ -126,7 +126,7 @@ int main(int argc, char *argv[])
 
     // These descriptors are going to be detecting and computing BLOBS with 6 different params
     // Param for first BLOB detector we want all
-    typeDesc.push_back("BLOB");    // see http://docs.opencv.org/master/d0/d7a/classcv_1_1SimpleBlobDetector.html
+    typeDesc.push_back("BLOB");    // see http://docs.opencv.org/4.x/d0/d7a/classcv_1_1SimpleBlobDetector.html
     pBLOB.push_back(pDefaultBLOB);
     pBLOB.back().filterByArea = true;
     pBLOB.back().minArea = 1;
index ebe756c..1bb236c 100644 (file)
@@ -89,10 +89,10 @@ static void help(char** argv)
         "\tThis will detect only the face in image.jpg.\n";
 
     cout << " \n\nThe classifiers for face and eyes can be downloaded from : "
-        " \nhttps://github.com/opencv/opencv/tree/master/data/haarcascades";
+        " \nhttps://github.com/opencv/opencv/tree/4.x/data/haarcascades";
 
     cout << "\n\nThe classifiers for nose and mouth can be downloaded from : "
-        " \nhttps://github.com/opencv/opencv_contrib/tree/master/modules/face/data/cascades\n";
+        " \nhttps://github.com/opencv/opencv_contrib/tree/4.x/modules/face/data/cascades\n";
 }
 
 static void detectFaces(Mat& img, vector<Rect_<int> >& faces, string cascade_path)
index dbc9f44..5e37418 100644 (file)
@@ -15,7 +15,7 @@ int main( int argc, const char** argv )
 
     cout << "This program demonstrates the use of template matching with mask." << endl
          << endl
-         << "Available methods: https://docs.opencv.org/master/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d" << endl
+         << "Available methods: https://docs.opencv.org/4.x/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d" << endl
          << "    TM_SQDIFF = " << (int)TM_SQDIFF << endl
          << "    TM_SQDIFF_NORMED = " << (int)TM_SQDIFF_NORMED << endl
          << "    TM_CCORR = " << (int)TM_CCORR << endl
index 6e0680a..62f56b8 100644 (file)
@@ -24,11 +24,11 @@ int main(int argc, char *argv[])
     vector<String> typeAlgoMatch;
     vector<String> fileName;
     // This descriptor are going to be detect and compute
-    typeDesc.push_back("AKAZE-DESCRIPTOR_KAZE_UPRIGHT");    // see https://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html
-    typeDesc.push_back("AKAZE");    // see http://docs.opencv.org/master/d8/d30/classcv_1_1AKAZE.html
-    typeDesc.push_back("ORB");      // see http://docs.opencv.org/master/de/dbf/classcv_1_1BRISK.html
-    typeDesc.push_back("BRISK");    // see http://docs.opencv.org/master/db/d95/classcv_1_1ORB.html
-    // This algorithm would be used to match descriptors see http://docs.opencv.org/master/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
+    typeDesc.push_back("AKAZE-DESCRIPTOR_KAZE_UPRIGHT");    // see https://docs.opencv.org/4.x/d8/d30/classcv_1_1AKAZE.html
+    typeDesc.push_back("AKAZE");    // see http://docs.opencv.org/4.x/d8/d30/classcv_1_1AKAZE.html
+    typeDesc.push_back("ORB");      // see http://docs.opencv.org/4.x/db/d95/classcv_1_1ORB.html
+    typeDesc.push_back("BRISK");    // see http://docs.opencv.org/4.x/de/dbf/classcv_1_1BRISK.html
+    // This algorithm would be used to match descriptors see http://docs.opencv.org/4.x/db/d39/classcv_1_1DescriptorMatcher.html#ab5dc5036569ecc8d47565007fa518257
     typeAlgoMatch.push_back("BruteForce");
     typeAlgoMatch.push_back("BruteForce-L1");
     typeAlgoMatch.push_back("BruteForce-Hamming");
index 6005fbf..b31da10 100644 (file)
@@ -7,7 +7,7 @@ Check [a wiki](https://github.com/opencv/opencv/wiki/Deep-Learning-in-OpenCV) fo
 If OpenCV is built with [Intel's Inference Engine support](https://github.com/opencv/opencv/wiki/Intel%27s-Deep-Learning-Inference-Engine-backend) you can use [Intel's pre-trained](https://github.com/opencv/open_model_zoo) models.
 
 There are different preprocessing parameters such mean subtraction or scale factors for different models.
-You may check the most popular models and their parameters at [models.yml](https://github.com/opencv/opencv/blob/master/samples/dnn/models.yml) configuration file. It might be also used for aliasing samples parameters. In example,
+You may check the most popular models and their parameters at [models.yml](https://github.com/opencv/opencv/blob/4.x/samples/dnn/models.yml) configuration file. It might be also used for aliasing samples parameters. In example,
 
 ```bash
 python object_detection.py opencv_fd --model /path/to/caffemodel --config /path/to/prototxt
@@ -27,7 +27,7 @@ You can download sample models using ```download_models.py```. For example, the
 python download_models.py --save_dir FaceDetector opencv_fd
 ```
 
-You can use default configuration files adopted for OpenCV from [here](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn).
+You can use default configuration files adopted for OpenCV from [here](https://github.com/opencv/opencv_extra/tree/4.x/testdata/dnn).
 
 You also can use the script to download necessary files from your code. Assume you have the following code inside ```your_script.py```:
 
@@ -50,14 +50,14 @@ python your_script.py
 **Note** that you can provide a directory using **save_dir** parameter or via **OPENCV_SAVE_DIR** environment variable.
 
 #### Face detection
-[An origin model](https://github.com/opencv/opencv/tree/master/samples/dnn/face_detector)
+[An origin model](https://github.com/opencv/opencv/tree/4.x/samples/dnn/face_detector)
 with single precision floating point weights has been quantized using [TensorFlow framework](https://www.tensorflow.org/).
 To achieve the best accuracy run the model on BGR images resized to `300x300` applying mean subtraction
 of values `(104, 177, 123)` for each blue, green and red channels correspondingly.
 
 The following are accuracy metrics obtained using [COCO object detection evaluation
 tool](http://cocodataset.org/#detections-eval) on [FDDB dataset](http://vis-www.cs.umass.edu/fddb/)
-(see [script](https://github.com/opencv/opencv/blob/master/modules/dnn/misc/face_detector_accuracy.py))
+(see [script](https://github.com/opencv/opencv/blob/4.x/modules/dnn/misc/face_detector_accuracy.py))
 applying resize to `300x300` and keeping an origin images' sizes.
 ```
 AP - Average Precision                            | FP32/FP16 | UINT8          | FP32/FP16 | UINT8          |
@@ -79,6 +79,6 @@ AR @[ IoU=0.50:0.95 | area= large | maxDets=100 ] | 0.528     | 0.528          |
 
 ## References
 * [Models downloading script](https://github.com/opencv/opencv/samples/dnn/download_models.py)
-* [Configuration files adopted for OpenCV](https://github.com/opencv/opencv_extra/tree/master/testdata/dnn)
+* [Configuration files adopted for OpenCV](https://github.com/opencv/opencv_extra/tree/4.x/testdata/dnn)
 * [How to import models from TensorFlow Object Detection API](https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API)
-* [Names of classes from different datasets](https://github.com/opencv/opencv/tree/master/samples/data/dnn)
+* [Names of classes from different datasets](https://github.com/opencv/opencv/tree/4.x/samples/data/dnn)
index 48e6731..d94ead1 100644 (file)
@@ -69,7 +69,7 @@ function recognize(face) {
 
 function loadModels(callback) {
   var utils = new Utils('');
-  var proto = 'https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy_lowres.prototxt';
+  var proto = 'https://raw.githubusercontent.com/opencv/opencv/4.x/samples/dnn/face_detector/deploy_lowres.prototxt';
   var weights = 'https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel';
   var recognModel = 'https://raw.githubusercontent.com/pyannote/pyannote-data/master/openface.nn4.small2.v1.t7';
   utils.createFileFromUrl('face_detector.prototxt', proto, () => {
index 48e2dc0..ced597a 100644 (file)
@@ -3,11 +3,11 @@
 //
 //  it can be used for body pose detection, using either the COCO model(18 parts):
 //  http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/coco/pose_iter_440000.caffemodel
-//  https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_coco.prototxt
+//  https://raw.githubusercontent.com/opencv/opencv_extra/4.x/testdata/dnn/openpose_pose_coco.prototxt
 //
 //  or the MPI model(16 parts):
 //  http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/mpi/pose_iter_160000.caffemodel
-//  https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt
+//  https://raw.githubusercontent.com/opencv/opencv_extra/4.x/testdata/dnn/openpose_pose_mpi_faster_4_stages.prototxt
 //
 //  (to simplify this sample, the body models are restricted to a single person.)
 //
index bf8d31e..7588a50 100644 (file)
@@ -26,9 +26,9 @@ sudo apt install -y wget unzip
 # [wget]
 
 # [download]
-wget -O opencv.zip https://github.com/opencv/opencv/archive/master.zip
+wget -O opencv.zip https://github.com/opencv/opencv/archive/4.x.zip
 unzip opencv.zip
-mv opencv-master opencv
+mv opencv-4.x opencv
 # [download]
 
 # [prepare]
index aa88e03..d85b940 100644 (file)
@@ -27,7 +27,7 @@ sudo apt install -y git
 
 # [download]
 git clone https://github.com/opencv/opencv.git
-git -C opencv checkout master
+git -C opencv checkout 4.x
 # [download]
 
 # [prepare]
index 8c88bcf..c48161c 100644 (file)
@@ -12,14 +12,14 @@ fi
 sudo apt update && sudo apt install -y cmake g++ wget unzip
 
 # Download and unpack sources
-wget -O opencv.zip https://github.com/opencv/opencv/archive/master.zip
+wget -O opencv.zip https://github.com/opencv/opencv/archive/4.x.zip
 unzip opencv.zip
 
 # Create build directory
 mkdir -p build && cd build
 
 # Configure
-cmake  ../opencv-master
+cmake  ../opencv-4.x
 
 # Build
 cmake --build .
index 18f89a0..e497c08 100644 (file)
@@ -12,8 +12,8 @@ fi
 sudo apt update && sudo apt install -y cmake g++ wget unzip
 
 # Download and unpack sources
-wget -O opencv.zip https://github.com/opencv/opencv/archive/master.zip
-wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/master.zip
+wget -O opencv.zip https://github.com/opencv/opencv/archive/4.x.zip
+wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/4.x.zip
 unzip opencv.zip
 unzip opencv_contrib.zip
 
@@ -21,7 +21,7 @@ unzip opencv_contrib.zip
 mkdir -p build && cd build
 
 # Configure
-cmake -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib-master/modules ../opencv-master
+cmake -DOPENCV_EXTRA_MODULES_PATH=../opencv_contrib-4.x/modules ../opencv-4.x
 
 # Build
 cmake --build .
index dfa88be..6dcfe14 100644 (file)
@@ -436,7 +436,7 @@ def main():
     sizes = []
     blender = None
     timelapser = None
-    # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
+    # https://github.com/opencv/opencv/blob/4.x/samples/cpp/stitching_detailed.cpp#L725 ?
     for idx, name in enumerate(img_names):
         full_img = cv.imread(name)
         if not is_compose_scale_set:
index daa568d..49b9902 100644 (file)
@@ -20,7 +20,7 @@ int main(void)
     // Number of experiment runs
     int no_runs = 2;
 
-    // https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html
+    // https://docs.opencv.org/4.x/d3/d63/classcv_1_1Mat.html
     cv::Mat src_new(IMG_ROWS, IMG_COLS, CV_8UC1, (void *)raw_pixels);
 
     // Set parameters
index f911754..849c9cd 100644 (file)
@@ -20,7 +20,7 @@ int main(void)
     // Number of experiment runs
     int no_runs = 2;
 
-    // https://docs.opencv.org/master/d3/d63/classcv_1_1Mat.html
+    // https://docs.opencv.org/4.x/d3/d63/classcv_1_1Mat.html
     cv::Mat src(IMG_ROWS, IMG_COLS, CV_8UC1, (void *)raw_pixels);
 
     // Run calc Hist