Merge pull request #21020 from lukasalexanderweber:squash
author Lukas-Alexander Weber <32765578+lukasalexanderweber@users.noreply.github.com>
Mon, 8 Nov 2021 11:54:06 +0000 (12:54 +0100)
committer GitHub <noreply@github.com>
Mon, 8 Nov 2021 11:54:06 +0000 (11:54 +0000)
Created Stitching Tool based on stitching_detailed.py

30 files changed:
apps/opencv_stitching_tool/README.md [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/.gitignore [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/__init__.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/blender.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/camera_adjuster.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/camera_estimator.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/camera_wave_corrector.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/exposure_error_compensator.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/feature_detector.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/feature_matcher.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/image_handler.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/megapix_downscaler.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/megapix_scaler.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/panorama_estimation.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/seam_finder.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/stitcher.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/stitching_error.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/subsetter.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/.gitignore [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/SAMPLE_IMAGES_TO_DOWNLOAD.txt [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/stitching_detailed.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/test_composition.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/test_matcher.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/test_megapix_scaler.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/test_performance.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/test_registration.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/test/test_stitcher.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/timelapser.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching/warper.py [new file with mode: 0644]
apps/opencv_stitching_tool/opencv_stitching_tool.py [new file with mode: 0644]

diff --git a/apps/opencv_stitching_tool/README.md b/apps/opencv_stitching_tool/README.md
new file mode 100644 (file)
index 0000000..1cf3f01
--- /dev/null
@@ -0,0 +1,3 @@
+## In-Depth Stitching Tool for experiments and research
+
+Visit [opencv_stitching_tutorial](https://github.com/lukasalexanderweber/opencv_stitching_tutorial) for a detailed tutorial
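For orientation, a minimal usage sketch of the package this PR adds (image file names are hypothetical):

    import cv2 as cv
    from opencv_stitching.stitcher import Stitcher

    stitcher = Stitcher()  # all defaults, see stitcher.py below
    panorama = stitcher.stitch(["img1.jpg", "img2.jpg"])
    cv.imwrite("panorama.jpg", panorama)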
diff --git a/apps/opencv_stitching_tool/opencv_stitching/.gitignore b/apps/opencv_stitching_tool/opencv_stitching/.gitignore
new file mode 100644 (file)
index 0000000..1f4d07f
--- /dev/null
@@ -0,0 +1,4 @@
+# python binary files
+*.pyc
+__pycache__
+.pylint*
diff --git a/apps/opencv_stitching_tool/opencv_stitching/__init__.py b/apps/opencv_stitching_tool/opencv_stitching/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/apps/opencv_stitching_tool/opencv_stitching/blender.py b/apps/opencv_stitching_tool/opencv_stitching/blender.py
new file mode 100644 (file)
index 0000000..04e6efe
--- /dev/null
@@ -0,0 +1,48 @@
+import cv2 as cv
+import numpy as np
+
+
+class Blender:
+
+    BLENDER_CHOICES = ('multiband', 'feather', 'no',)
+    DEFAULT_BLENDER = 'multiband'
+    DEFAULT_BLEND_STRENGTH = 5
+
+    def __init__(self, blender_type=DEFAULT_BLENDER,
+                 blend_strength=DEFAULT_BLEND_STRENGTH):
+        self.blender_type = blender_type
+        self.blend_strength = blend_strength
+        self.blender = None
+
+    def prepare(self, corners, sizes):
+        dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
+        blend_width = (np.sqrt(dst_sz[2] * dst_sz[3]) *
+                       self.blend_strength / 100)
+
+        if self.blender_type == 'no' or blend_width < 1:
+            self.blender = cv.detail.Blender_createDefault(
+                cv.detail.Blender_NO
+                )
+
+        elif self.blender_type == "multiband":
+            self.blender = cv.detail_MultiBandBlender()
+            self.blender.setNumBands(int(np.log(blend_width) /
+                                         np.log(2.) - 1.))
+
+        elif self.blender_type == "feather":
+            self.blender = cv.detail_FeatherBlender()
+            self.blender.setSharpness(1. / blend_width)
+
+        self.blender.prepare(dst_sz)
+
+    def feed(self, img, mask, corner):
+        """https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#a64837308bcf4e414a6219beff6cbe37a"""  # noqa
+        self.blender.feed(cv.UMat(img.astype(np.int16)), mask, corner)
+
+    def blend(self):
+        """https://docs.opencv.org/master/d6/d4a/classcv_1_1detail_1_1Blender.html#aa0a91ce0d6046d3a63e0123cbb1b5c00"""  # noqa
+        result = None
+        result_mask = None
+        result, result_mask = self.blender.blend(result, result_mask)
+        result = cv.convertScaleAbs(result)
+        return result
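A sketch of the intended call order, assuming corners, sizes, imgs and masks come from the warping step (see warper.py):

    blender = Blender('multiband', blend_strength=5)
    blender.prepare(corners, sizes)              # ROI of the final panorama
    for img, mask, corner in zip(imgs, masks, corners):
        blender.feed(img, mask, corner)
    panorama = blender.blend()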
diff --git a/apps/opencv_stitching_tool/opencv_stitching/camera_adjuster.py b/apps/opencv_stitching_tool/opencv_stitching/camera_adjuster.py
new file mode 100644 (file)
index 0000000..03aa834
--- /dev/null
@@ -0,0 +1,49 @@
+from collections import OrderedDict
+import cv2 as cv
+import numpy as np
+
+from .stitching_error import StitchingError
+
+
+class CameraAdjuster:
+    """https://docs.opencv.org/master/d5/d56/classcv_1_1detail_1_1BundleAdjusterBase.html"""  # noqa
+
+    CAMERA_ADJUSTER_CHOICES = OrderedDict()
+    CAMERA_ADJUSTER_CHOICES['ray'] = cv.detail_BundleAdjusterRay
+    CAMERA_ADJUSTER_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj
+    CAMERA_ADJUSTER_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial
+    CAMERA_ADJUSTER_CHOICES['no'] = cv.detail_NoBundleAdjuster
+
+    DEFAULT_CAMERA_ADJUSTER = list(CAMERA_ADJUSTER_CHOICES.keys())[0]
+    DEFAULT_REFINEMENT_MASK = "xxxxx"
+
+    def __init__(self,
+                 adjuster=DEFAULT_CAMERA_ADJUSTER,
+                 refinement_mask=DEFAULT_REFINEMENT_MASK):
+
+        self.adjuster = CameraAdjuster.CAMERA_ADJUSTER_CHOICES[adjuster]()
+        self.set_refinement_mask(refinement_mask)
+        self.adjuster.setConfThresh(1)
+
+    def set_refinement_mask(self, refinement_mask):
+        """5 char string, one char per parameter in the order
+        <fx><skew><ppx><aspect><ppy>; 'x' means refine, else skip"""
+        mask_matrix = np.zeros((3, 3), np.uint8)
+        if refinement_mask[0] == 'x':
+            mask_matrix[0, 0] = 1
+        if refinement_mask[1] == 'x':
+            mask_matrix[0, 1] = 1
+        if refinement_mask[2] == 'x':
+            mask_matrix[0, 2] = 1
+        if refinement_mask[3] == 'x':
+            mask_matrix[1, 1] = 1
+        if refinement_mask[4] == 'x':
+            mask_matrix[1, 2] = 1
+        self.adjuster.setRefinementMask(mask_matrix)
+
+    def adjust(self, features, pairwise_matches, estimated_cameras):
+        b, cameras = self.adjuster.apply(features,
+                                         pairwise_matches,
+                                         estimated_cameras)
+        if not b:
+            raise StitchingError("Camera parameters adjusting failed.")
+
+        return cameras
diff --git a/apps/opencv_stitching_tool/opencv_stitching/camera_estimator.py b/apps/opencv_stitching_tool/opencv_stitching/camera_estimator.py
new file mode 100644 (file)
index 0000000..8520eb0
--- /dev/null
@@ -0,0 +1,27 @@
+from collections import OrderedDict
+import cv2 as cv
+import numpy as np
+
+from .stitching_error import StitchingError
+
+
+class CameraEstimator:
+
+    CAMERA_ESTIMATOR_CHOICES = OrderedDict()
+    CAMERA_ESTIMATOR_CHOICES['homography'] = cv.detail_HomographyBasedEstimator
+    CAMERA_ESTIMATOR_CHOICES['affine'] = cv.detail_AffineBasedEstimator
+
+    DEFAULT_CAMERA_ESTIMATOR = list(CAMERA_ESTIMATOR_CHOICES.keys())[0]
+
+    def __init__(self, estimator=DEFAULT_CAMERA_ESTIMATOR, **kwargs):
+        self.estimator = CameraEstimator.CAMERA_ESTIMATOR_CHOICES[estimator](
+            **kwargs
+            )
+
+    def estimate(self, features, pairwise_matches):
+        b, cameras = self.estimator.apply(features, pairwise_matches, None)
+        if not b:
+            raise StitchingError("Homography estimation failed.")
+        for cam in cameras:
+            cam.R = cam.R.astype(np.float32)
+        return cameras
diff --git a/apps/opencv_stitching_tool/opencv_stitching/camera_wave_corrector.py b/apps/opencv_stitching_tool/opencv_stitching/camera_wave_corrector.py
new file mode 100644 (file)
index 0000000..6a9142d
--- /dev/null
@@ -0,0 +1,28 @@
+from collections import OrderedDict
+import cv2 as cv
+import numpy as np
+
+
+class WaveCorrector:
+    """https://docs.opencv.org/master/d7/d74/group__stitching__rotation.html#ga83b24d4c3e93584986a56d9e43b9cf7f"""  # noqa
+    WAVE_CORRECT_CHOICES = OrderedDict()
+    WAVE_CORRECT_CHOICES['horiz'] = cv.detail.WAVE_CORRECT_HORIZ
+    WAVE_CORRECT_CHOICES['vert'] = cv.detail.WAVE_CORRECT_VERT
+    WAVE_CORRECT_CHOICES['auto'] = cv.detail.WAVE_CORRECT_AUTO
+    WAVE_CORRECT_CHOICES['no'] = None
+
+    DEFAULT_WAVE_CORRECTION = list(WAVE_CORRECT_CHOICES.keys())[0]
+
+    def __init__(self, wave_correct_kind=DEFAULT_WAVE_CORRECTION):
+        self.wave_correct_kind = WaveCorrector.WAVE_CORRECT_CHOICES[
+            wave_correct_kind
+            ]
+
+    def correct(self, cameras):
+        if self.wave_correct_kind is not None:
+            rmats = [np.copy(cam.R) for cam in cameras]
+            rmats = cv.detail.waveCorrect(rmats, self.wave_correct_kind)
+            for idx, cam in enumerate(cameras):
+                cam.R = rmats[idx]
+            return cameras
+        return cameras
diff --git a/apps/opencv_stitching_tool/opencv_stitching/exposure_error_compensator.py b/apps/opencv_stitching_tool/opencv_stitching/exposure_error_compensator.py
new file mode 100644 (file)
index 0000000..36e0292
--- /dev/null
@@ -0,0 +1,40 @@
+from collections import OrderedDict
+import cv2 as cv
+
+
+class ExposureErrorCompensator:
+
+    COMPENSATOR_CHOICES = OrderedDict()
+    COMPENSATOR_CHOICES['gain_blocks'] = cv.detail.ExposureCompensator_GAIN_BLOCKS  # noqa
+    COMPENSATOR_CHOICES['gain'] = cv.detail.ExposureCompensator_GAIN
+    COMPENSATOR_CHOICES['channel'] = cv.detail.ExposureCompensator_CHANNELS
+    COMPENSATOR_CHOICES['channel_blocks'] = cv.detail.ExposureCompensator_CHANNELS_BLOCKS  # noqa
+    COMPENSATOR_CHOICES['no'] = cv.detail.ExposureCompensator_NO
+
+    DEFAULT_COMPENSATOR = list(COMPENSATOR_CHOICES.keys())[0]
+    DEFAULT_NR_FEEDS = 1
+    DEFAULT_BLOCK_SIZE = 32
+
+    def __init__(self,
+                 compensator=DEFAULT_COMPENSATOR,
+                 nr_feeds=DEFAULT_NR_FEEDS,
+                 block_size=DEFAULT_BLOCK_SIZE):
+
+        if compensator == 'channel':
+            self.compensator = cv.detail_ChannelsCompensator(nr_feeds)
+        elif compensator == 'channel_blocks':
+            self.compensator = cv.detail_BlocksChannelsCompensator(
+                block_size, block_size, nr_feeds
+                )
+        else:
+            self.compensator = cv.detail.ExposureCompensator_createDefault(
+                ExposureErrorCompensator.COMPENSATOR_CHOICES[compensator]
+                )
+
+    def feed(self, *args):
+        """https://docs.opencv.org/master/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#ae6b0cc69a7bc53818ddea53eddb6bdba"""  # noqa
+        self.compensator.feed(*args)
+
+    def apply(self, *args):
+        """https://docs.opencv.org/master/d2/d37/classcv_1_1detail_1_1ExposureCompensator.html#a473eaf1e585804c08d77c91e004f93aa"""  # noqa
+        return self.compensator.apply(*args)
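Usage follows the two-phase OpenCV API: feed all warped images once, then apply per image (corners, warped_imgs and warped_masks assumed from the warping step):

    compensator = ExposureErrorCompensator()     # default: 'gain_blocks'
    compensator.feed(corners, warped_imgs, warped_masks)
    compensated = [compensator.apply(idx, corners[idx], img, warped_masks[idx])
                   for idx, img in enumerate(warped_imgs)]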
diff --git a/apps/opencv_stitching_tool/opencv_stitching/feature_detector.py b/apps/opencv_stitching_tool/opencv_stitching/feature_detector.py
new file mode 100644 (file)
index 0000000..995517b
--- /dev/null
@@ -0,0 +1,44 @@
+from collections import OrderedDict
+import cv2 as cv
+
+
+class FeatureDetector:
+    DETECTOR_CHOICES = OrderedDict()
+    try:
+        cv.xfeatures2d_SURF.create()  # check if the function can be called
+        DETECTOR_CHOICES['surf'] = cv.xfeatures2d_SURF.create
+    except (AttributeError, cv.error):
+        print("SURF not available")
+
+    # if SURF not available, ORB is default
+    DETECTOR_CHOICES['orb'] = cv.ORB.create
+
+    try:
+        DETECTOR_CHOICES['sift'] = cv.SIFT_create
+    except AttributeError:
+        print("SIFT not available")
+
+    try:
+        DETECTOR_CHOICES['brisk'] = cv.BRISK_create
+    except AttributeError:
+        print("BRISK not available")
+
+    try:
+        DETECTOR_CHOICES['akaze'] = cv.AKAZE_create
+    except AttributeError:
+        print("AKAZE not available")
+
+    DEFAULT_DETECTOR = list(DETECTOR_CHOICES.keys())[0]
+
+    def __init__(self, detector=DEFAULT_DETECTOR, **kwargs):
+        self.detector = FeatureDetector.DETECTOR_CHOICES[detector](**kwargs)
+
+    def detect_features(self, img, *args, **kwargs):
+        return cv.detail.computeImageFeatures2(self.detector, img,
+                                               *args, **kwargs)
+
+    @staticmethod
+    def draw_keypoints(img, features, **kwargs):
+        kwargs.setdefault('color', (0, 255, 0))
+        keypoints = features.getKeypoints()
+        return cv.drawKeypoints(img, keypoints, None, **kwargs)
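A minimal sketch, assuming img is a BGR image loaded with cv.imread:

    detector = FeatureDetector('orb', nfeatures=500)
    features = detector.detect_features(img)
    keypoint_img = FeatureDetector.draw_keypoints(img, features)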
diff --git a/apps/opencv_stitching_tool/opencv_stitching/feature_matcher.py b/apps/opencv_stitching_tool/opencv_stitching/feature_matcher.py
new file mode 100644 (file)
index 0000000..8c1d384
--- /dev/null
@@ -0,0 +1,98 @@
+import math
+import cv2 as cv
+import numpy as np
+
+
+class FeatureMatcher:
+
+    MATCHER_CHOICES = ('homography', 'affine')
+    DEFAULT_MATCHER = 'homography'
+    DEFAULT_RANGE_WIDTH = -1
+
+    def __init__(self,
+                 matcher_type=DEFAULT_MATCHER,
+                 range_width=DEFAULT_RANGE_WIDTH,
+                 **kwargs):
+
+        if matcher_type == "affine":
+            """https://docs.opencv.org/master/d3/dda/classcv_1_1detail_1_1AffineBestOf2NearestMatcher.html"""  # noqa
+            self.matcher = cv.detail_AffineBestOf2NearestMatcher(**kwargs)
+        elif range_width == -1:
+            """https://docs.opencv.org/master/d4/d26/classcv_1_1detail_1_1BestOf2NearestMatcher.html"""  # noqa
+            self.matcher = cv.detail.BestOf2NearestMatcher_create(**kwargs)
+        else:
+            """https://docs.opencv.org/master/d8/d72/classcv_1_1detail_1_1BestOf2NearestRangeMatcher.html"""  # noqa
+            self.matcher = cv.detail.BestOf2NearestRangeMatcher_create(
+                range_width, **kwargs
+                )
+
+    def match_features(self, features, *args, **kwargs):
+        pairwise_matches = self.matcher.apply2(features, *args, **kwargs)
+        self.matcher.collectGarbage()
+        return pairwise_matches
+
+    @staticmethod
+    def draw_matches_matrix(imgs, features, matches, conf_thresh=1,
+                            inliers=False, **kwargs):
+        matches_matrix = FeatureMatcher.get_matches_matrix(matches)
+        for idx1, idx2 in FeatureMatcher.get_all_img_combinations(len(imgs)):
+            match = matches_matrix[idx1, idx2]
+            if match.confidence < conf_thresh:
+                continue
+            if inliers:
+                kwargs['matchesMask'] = match.getInliers()
+            yield idx1, idx2, FeatureMatcher.draw_matches(
+                imgs[idx1], features[idx1],
+                imgs[idx2], features[idx2],
+                match,
+                **kwargs
+                )
+
+    @staticmethod
+    def draw_matches(img1, features1, img2, features2, match1to2, **kwargs):
+        kwargs.setdefault('flags', cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
+
+        keypoints1 = features1.getKeypoints()
+        keypoints2 = features2.getKeypoints()
+        matches = match1to2.getMatches()
+
+        return cv.drawMatches(
+            img1, keypoints1, img2, keypoints2, matches, None, **kwargs
+            )
+
+    @staticmethod
+    def get_matches_matrix(pairwise_matches):
+        return FeatureMatcher.array_in_square_matrix(pairwise_matches)
+
+    @staticmethod
+    def get_confidence_matrix(pairwise_matches):
+        matches_matrix = FeatureMatcher.get_matches_matrix(pairwise_matches)
+        match_confs = [[m.confidence for m in row] for row in matches_matrix]
+        match_conf_matrix = np.array(match_confs)
+        return match_conf_matrix
+
+    @staticmethod
+    def array_in_square_matrix(array):
+        matrix_dimension = int(math.sqrt(len(array)))
+        rows = []
+        for i in range(0, len(array), matrix_dimension):
+            rows.append(array[i:i+matrix_dimension])
+        return np.array(rows)
+
+    @staticmethod
+    def get_all_img_combinations(number_imgs):
+        ii, jj = np.triu_indices(number_imgs, k=1)
+        for i, j in zip(ii, jj):
+            yield i, j
+
+    @staticmethod
+    def get_match_conf(match_conf, feature_detector_type):
+        if match_conf is None:
+            match_conf = \
+                FeatureMatcher.get_default_match_conf(feature_detector_type)
+        return match_conf
+
+    @staticmethod
+    def get_default_match_conf(feature_detector_type):
+        if feature_detector_type == 'orb':
+            return 0.3
+        return 0.65
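A sketch of matching and inspecting pairwise confidences (features is a list as produced by FeatureDetector.detect_features, one entry per image):

    matcher = FeatureMatcher()                     # default: 'homography'
    matches = matcher.match_features(features)
    FeatureMatcher.get_confidence_matrix(matches)  # n_imgs x n_imgs array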
diff --git a/apps/opencv_stitching_tool/opencv_stitching/image_handler.py b/apps/opencv_stitching_tool/opencv_stitching/image_handler.py
new file mode 100644 (file)
index 0000000..a3b76b2
--- /dev/null
@@ -0,0 +1,94 @@
+import cv2 as cv
+
+from .megapix_downscaler import MegapixDownscaler
+from .stitching_error import StitchingError
+
+
+class ImageHandler:
+
+    DEFAULT_MEDIUM_MEGAPIX = 0.6
+    DEFAULT_LOW_MEGAPIX = 0.1
+    DEFAULT_FINAL_MEGAPIX = -1
+
+    def __init__(self,
+                 medium_megapix=DEFAULT_MEDIUM_MEGAPIX,
+                 low_megapix=DEFAULT_LOW_MEGAPIX,
+                 final_megapix=DEFAULT_FINAL_MEGAPIX):
+
+        if medium_megapix < low_megapix:
+            raise StitchingError("Medium resolution megapix need to be "
+                                 "greater or equal than low resolution "
+                                 "megapix")
+
+        self.medium_scaler = MegapixDownscaler(medium_megapix)
+        self.low_scaler = MegapixDownscaler(low_megapix)
+        self.final_scaler = MegapixDownscaler(final_megapix)
+
+        self.scales_set = False
+        self.img_names = []
+        self.img_sizes = []
+
+    def set_img_names(self, img_names):
+        self.img_names = img_names
+
+    def resize_to_medium_resolution(self):
+        return self.read_and_resize_imgs(self.medium_scaler)
+
+    def resize_to_low_resolution(self, medium_imgs=None):
+        if medium_imgs and self.scales_set:
+            return self.resize_medium_to_low(medium_imgs)
+        return self.read_and_resize_imgs(self.low_scaler)
+
+    def resize_to_final_resolution(self):
+        return self.read_and_resize_imgs(self.final_scaler)
+
+    def read_and_resize_imgs(self, scaler):
+        for img, size in self.input_images():
+            yield self.resize_img_by_scaler(scaler, size, img)
+
+    def resize_medium_to_low(self, medium_imgs):
+        for img, size in zip(medium_imgs, self.img_sizes):
+            yield self.resize_img_by_scaler(self.low_scaler, size, img)
+
+    @staticmethod
+    def resize_img_by_scaler(scaler, size, img):
+        desired_size = scaler.get_scaled_img_size(size)
+        return cv.resize(img, desired_size,
+                         interpolation=cv.INTER_LINEAR_EXACT)
+
+    def input_images(self):
+        self.img_sizes = []
+        for name in self.img_names:
+            img = self.read_image(name)
+            size = self.get_image_size(img)
+            self.img_sizes.append(size)
+            self.set_scaler_scales()
+            yield img, size
+
+    @staticmethod
+    def get_image_size(img):
+        """(width, height)"""
+        return (img.shape[1], img.shape[0])
+
+    @staticmethod
+    def read_image(img_name):
+        img = cv.imread(img_name)
+        if img is None:
+            raise StitchingError("Cannot read image " + img_name)
+        return img
+
+    def set_scaler_scales(self):
+        if not self.scales_set:
+            first_img_size = self.img_sizes[0]
+            self.medium_scaler.set_scale_by_img_size(first_img_size)
+            self.low_scaler.set_scale_by_img_size(first_img_size)
+            self.final_scaler.set_scale_by_img_size(first_img_size)
+        self.scales_set = True
+
+    def get_medium_to_final_ratio(self):
+        return self.final_scaler.scale / self.medium_scaler.scale
+
+    def get_medium_to_low_ratio(self):
+        return self.low_scaler.scale / self.medium_scaler.scale
+
+    def get_final_to_low_ratio(self):
+        return self.low_scaler.scale / self.final_scaler.scale
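A sketch of the three-resolution workflow (file names hypothetical):

    handler = ImageHandler()        # 0.6 / 0.1 / -1 (= full size) megapix
    handler.set_img_names(["img1.jpg", "img2.jpg"])
    medium_imgs = list(handler.resize_to_medium_resolution())
    low_imgs = list(handler.resize_to_low_resolution(medium_imgs))
    final_imgs = list(handler.resize_to_final_resolution())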
diff --git a/apps/opencv_stitching_tool/opencv_stitching/megapix_downscaler.py b/apps/opencv_stitching_tool/opencv_stitching/megapix_downscaler.py
new file mode 100644 (file)
index 0000000..f7553ac
--- /dev/null
@@ -0,0 +1,12 @@
+from .megapix_scaler import MegapixScaler
+
+
+class MegapixDownscaler(MegapixScaler):
+
+    @staticmethod
+    def force_downscale(scale):
+        return min(1.0, scale)
+
+    def set_scale(self, scale):
+        scale = self.force_downscale(scale)
+        super().set_scale(scale)
diff --git a/apps/opencv_stitching_tool/opencv_stitching/megapix_scaler.py b/apps/opencv_stitching_tool/opencv_stitching/megapix_scaler.py
new file mode 100644 (file)
index 0000000..96d4753
--- /dev/null
@@ -0,0 +1,27 @@
+import numpy as np
+
+
+class MegapixScaler:
+    def __init__(self, megapix):
+        self.megapix = megapix
+        self.is_scale_set = False
+        self.scale = None
+
+    def set_scale_by_img_size(self, img_size):
+        self.set_scale(
+            self.get_scale_by_resolution(img_size[0] * img_size[1])
+            )
+
+    def set_scale(self, scale):
+        self.scale = scale
+        self.is_scale_set = True
+
+    def get_scale_by_resolution(self, resolution):
+        if self.megapix > 0:
+            return np.sqrt(self.megapix * 1e6 / resolution)
+        return 1.0
+
+    def get_scaled_img_size(self, img_size):
+        width = int(round(img_size[0] * self.scale))
+        height = int(round(img_size[1] * self.scale))
+        return (width, height)
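A worked example of the scale computation, assuming a hypothetical 12 megapixel input:

    scaler = MegapixScaler(0.6)
    scaler.set_scale_by_img_size((4000, 3000))  # sqrt(0.6e6 / 12e6) ~= 0.224
    scaler.get_scaled_img_size((4000, 3000))    # -> (894, 671)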
diff --git a/apps/opencv_stitching_tool/opencv_stitching/panorama_estimation.py b/apps/opencv_stitching_tool/opencv_stitching/panorama_estimation.py
new file mode 100644 (file)
index 0000000..e3a4577
--- /dev/null
@@ -0,0 +1,27 @@
+import statistics
+
+
+def estimate_final_panorama_dimensions(cameras, warper, img_handler):
+    medium_to_final_ratio = img_handler.get_medium_to_final_ratio()
+
+    panorama_scale_determined_on_medium_img = \
+        estimate_panorama_scale(cameras)
+
+    panorama_scale = (panorama_scale_determined_on_medium_img *
+                      medium_to_final_ratio)
+    panorama_corners = []
+    panorama_sizes = []
+
+    for size, camera in zip(img_handler.img_sizes, cameras):
+        width, height = img_handler.final_scaler.get_scaled_img_size(size)
+        roi = warper.warp_roi(width, height, camera, panorama_scale,
+                              medium_to_final_ratio)
+        panorama_corners.append(roi[0:2])
+        panorama_sizes.append(roi[2:4])
+
+    return panorama_scale, panorama_corners, panorama_sizes
+
+
+def estimate_panorama_scale(cameras):
+    focals = [cam.focal for cam in cameras]
+    panorama_scale = statistics.median(focals)
+    return panorama_scale
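The scale is the median focal length of the estimated cameras; a stand-in example (SimpleNamespace replaces OpenCV's CameraParams objects):

    from types import SimpleNamespace
    cams = [SimpleNamespace(focal=f) for f in (800.0, 900.0, 1000.0)]
    estimate_panorama_scale(cams)  # -> 900.0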
diff --git a/apps/opencv_stitching_tool/opencv_stitching/seam_finder.py b/apps/opencv_stitching_tool/opencv_stitching/seam_finder.py
new file mode 100644 (file)
index 0000000..675f266
--- /dev/null
@@ -0,0 +1,127 @@
+from collections import OrderedDict
+import cv2 as cv
+import numpy as np
+
+from .blender import Blender
+
+
+class SeamFinder:
+    """https://docs.opencv.org/master/d7/d09/classcv_1_1detail_1_1SeamFinder.html"""  # noqa
+    SEAM_FINDER_CHOICES = OrderedDict()
+    SEAM_FINDER_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
+    SEAM_FINDER_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')
+    SEAM_FINDER_CHOICES['voronoi'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)  # noqa
+    SEAM_FINDER_CHOICES['no'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)  # noqa
+
+    DEFAULT_SEAM_FINDER = list(SEAM_FINDER_CHOICES.keys())[0]
+
+    def __init__(self, finder=DEFAULT_SEAM_FINDER):
+        self.finder = SeamFinder.SEAM_FINDER_CHOICES[finder]
+
+    def find(self, imgs, corners, masks):
+        """https://docs.opencv.org/master/d0/dd5/classcv_1_1detail_1_1DpSeamFinder.html#a7914624907986f7a94dd424209a8a609"""  # noqa
+        imgs_float = [img.astype(np.float32) for img in imgs]
+        return self.finder.find(imgs_float, corners, masks)
+
+    @staticmethod
+    def resize(seam_mask, mask):
+        dilated_mask = cv.dilate(seam_mask, None)
+        resized_seam_mask = cv.resize(dilated_mask, (mask.shape[1],
+                                                     mask.shape[0]),
+                                      interpolation=cv.INTER_LINEAR_EXACT)
+        return cv.bitwise_and(resized_seam_mask, mask)
+
+    @staticmethod
+    def draw_seam_mask(img, seam_mask, color=(0, 0, 0)):
+        seam_mask = cv.UMat.get(seam_mask)
+        overlayed_img = np.copy(img)
+        overlayed_img[seam_mask == 0] = color
+        return overlayed_img
+
+    @staticmethod
+    def draw_seam_polygons(panorama, blended_seam_masks, alpha=0.5):
+        return add_weighted_image(panorama, blended_seam_masks, alpha)
+
+    @staticmethod
+    def draw_seam_lines(panorama, blended_seam_masks,
+                        linesize=1, color=(0, 0, 255)):
+        seam_lines = \
+            SeamFinder.extract_seam_lines(blended_seam_masks, linesize)
+        panorama_with_seam_lines = panorama.copy()
+        panorama_with_seam_lines[seam_lines == 255] = color
+        return panorama_with_seam_lines
+
+    @staticmethod
+    def extract_seam_lines(blended_seam_masks, linesize=1):
+        seam_lines = cv.Canny(np.uint8(blended_seam_masks), 100, 200)
+        seam_indices = (seam_lines == 255).nonzero()
+        seam_lines = remove_invalid_line_pixels(
+            seam_indices, seam_lines, blended_seam_masks
+            )
+        kernelsize = linesize + linesize - 1
+        kernel = np.ones((kernelsize, kernelsize), np.uint8)
+        return cv.dilate(seam_lines, kernel)
+
+    @staticmethod
+    def blend_seam_masks(seam_masks, corners, sizes, colors=[
+            (255, 000, 000),      # Blue
+            (000, 000, 255),      # Red
+            (000, 255, 000),      # Green
+            (000, 255, 255),      # Yellow
+            (255, 000, 255),      # Magenta
+            (128, 128, 255),      # Pink
+            (128, 128, 128),      # Gray
+            (000, 000, 128),      # Brown
+            (000, 128, 255)]      # Orange
+            ):
+
+        blender = Blender("no")
+        blender.prepare(corners, sizes)
+
+        for idx, (seam_mask, size, corner) in enumerate(
+                zip(seam_masks, sizes, corners)):
+            if idx+1 > len(colors):
+                raise ValueError("Not enough default colors! Pass additional "
+                                 "colors to \"colors\" parameter")
+            one_color_img = create_img_by_size(size, colors[idx])
+            blender.feed(one_color_img, seam_mask, corner)
+
+        return blender.blend()
+
+
+def create_img_by_size(size, color=(0, 0, 0)):
+    width, height = size
+    img = np.zeros((height, width, 3), np.uint8)
+    img[:] = color
+    return img
+
+
+def add_weighted_image(img1, img2, alpha):
+    return cv.addWeighted(
+        img1, alpha, img2, (1.0 - alpha), 0.0
+        )
+
+
+def remove_invalid_line_pixels(indices, lines, mask):
+    for x, y in zip(*indices):
+        if check_if_pixel_or_neighbor_is_black(mask, x, y):
+            lines[x, y] = 0
+    return lines
+
+
+def check_if_pixel_or_neighbor_is_black(img, x, y):
+    check = [is_pixel_black(img, x, y),
+             is_pixel_black(img, x+1, y), is_pixel_black(img, x-1, y),
+             is_pixel_black(img, x, y+1), is_pixel_black(img, x, y-1)]
+    return any(check)
+
+
+def is_pixel_black(img, x, y):
+    return np.all(get_pixel_value(img, x, y) == 0)
+
+
+def get_pixel_value(img, x, y):
+    try:
+        return img[x, y]
+    except IndexError:
+        pass  # off-image neighbor: None is treated as not black
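A sketch of how stitcher.py drives this class: seams are found on the warped low-resolution images, then resized to the final masks:

    seam_finder = SeamFinder()                   # default: 'dp_color'
    seam_masks = seam_finder.find(low_imgs, corners, low_masks)
    resized = [SeamFinder.resize(seam_mask, final_mask)
               for seam_mask, final_mask in zip(seam_masks, final_masks)]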
diff --git a/apps/opencv_stitching_tool/opencv_stitching/stitcher.py b/apps/opencv_stitching_tool/opencv_stitching/stitcher.py
new file mode 100644 (file)
index 0000000..c081126
--- /dev/null
@@ -0,0 +1,207 @@
+from types import SimpleNamespace
+
+from .image_handler import ImageHandler
+from .feature_detector import FeatureDetector
+from .feature_matcher import FeatureMatcher
+from .subsetter import Subsetter
+from .camera_estimator import CameraEstimator
+from .camera_adjuster import CameraAdjuster
+from .camera_wave_corrector import WaveCorrector
+from .warper import Warper
+from .panorama_estimation import estimate_final_panorama_dimensions
+from .exposure_error_compensator import ExposureErrorCompensator
+from .seam_finder import SeamFinder
+from .blender import Blender
+from .timelapser import Timelapser
+from .stitching_error import StitchingError
+
+
+class Stitcher:
+    DEFAULT_SETTINGS = {
+         "medium_megapix": ImageHandler.DEFAULT_MEDIUM_MEGAPIX,
+         "detector": FeatureDetector.DEFAULT_DETECTOR,
+         "nfeatures": 500,
+         "matcher_type": FeatureMatcher.DEFAULT_MATCHER,
+         "range_width": FeatureMatcher.DEFAULT_RANGE_WIDTH,
+         "try_use_gpu": False,
+         "match_conf": None,
+         "confidence_threshold": Subsetter.DEFAULT_CONFIDENCE_THRESHOLD,
+         "matches_graph_dot_file": Subsetter.DEFAULT_MATCHES_GRAPH_DOT_FILE,
+         "estimator": CameraEstimator.DEFAULT_CAMERA_ESTIMATOR,
+         "adjuster": CameraAdjuster.DEFAULT_CAMERA_ADJUSTER,
+         "refinement_mask": CameraAdjuster.DEFAULT_REFINEMENT_MASK,
+         "wave_correct_kind": WaveCorrector.DEFAULT_WAVE_CORRECTION,
+         "warper_type": Warper.DEFAULT_WARP_TYPE,
+         "low_megapix": ImageHandler.DEFAULT_LOW_MEGAPIX,
+         "compensator": ExposureErrorCompensator.DEFAULT_COMPENSATOR,
+         "nr_feeds": ExposureErrorCompensator.DEFAULT_NR_FEEDS,
+         "block_size": ExposureErrorCompensator.DEFAULT_BLOCK_SIZE,
+         "finder": SeamFinder.DEFAULT_SEAM_FINDER,
+         "final_megapix": ImageHandler.DEFAULT_FINAL_MEGAPIX,
+         "blender_type": Blender.DEFAULT_BLENDER,
+         "blend_strength": Blender.DEFAULT_BLEND_STRENGTH,
+         "timelapse": Timelapser.DEFAULT_TIMELAPSE}
+
+    def __init__(self, **kwargs):
+        self.initialize_stitcher(**kwargs)
+
+    def initialize_stitcher(self, **kwargs):
+        self.settings = Stitcher.DEFAULT_SETTINGS.copy()
+        self.validate_kwargs(kwargs)
+        self.settings.update(kwargs)
+
+        args = SimpleNamespace(**self.settings)
+        self.img_handler = ImageHandler(args.medium_megapix,
+                                        args.low_megapix,
+                                        args.final_megapix)
+        self.detector = \
+            FeatureDetector(args.detector, nfeatures=args.nfeatures)
+        match_conf = \
+            FeatureMatcher.get_match_conf(args.match_conf, args.detector)
+        self.matcher = FeatureMatcher(args.matcher_type, args.range_width,
+                                      try_use_gpu=args.try_use_gpu,
+                                      match_conf=match_conf)
+        self.subsetter = \
+            Subsetter(args.confidence_threshold, args.matches_graph_dot_file)
+        self.camera_estimator = CameraEstimator(args.estimator)
+        self.camera_adjuster = \
+            CameraAdjuster(args.adjuster, args.refinement_mask)
+        self.wave_corrector = WaveCorrector(args.wave_correct_kind)
+        self.warper = Warper(args.warper_type)
+        self.compensator = \
+            ExposureErrorCompensator(args.compensator, args.nr_feeds,
+                                     args.block_size)
+        self.seam_finder = SeamFinder(args.finder)
+        self.blender = Blender(args.blender_type, args.blend_strength)
+        self.timelapser = Timelapser(args.timelapse)
+
+    def stitch(self, img_names):
+        self.initialize_registration(img_names)
+
+        imgs = self.resize_medium_resolution()
+        features = self.find_features(imgs)
+        matches = self.match_features(features)
+        imgs, features, matches = self.subset(imgs, features, matches)
+        cameras = self.estimate_camera_parameters(features, matches)
+        cameras = self.refine_camera_parameters(features, matches, cameras)
+        cameras = self.perform_wave_correction(cameras)
+        panorama_scale, panorama_corners, panorama_sizes = \
+            self.estimate_final_panorama_dimensions(cameras)
+
+        self.initialize_composition(panorama_corners, panorama_sizes)
+
+        imgs = self.resize_low_resolution(imgs)
+        imgs = self.warp_low_resolution_images(imgs, cameras, panorama_scale)
+        self.estimate_exposure_errors(imgs)
+        seam_masks = self.find_seam_masks(imgs)
+
+        imgs = self.resize_final_resolution()
+        imgs = self.warp_final_resolution_images(imgs, cameras, panorama_scale)
+        imgs = self.compensate_exposure_errors(imgs)
+        seam_masks = self.resize_seam_masks(seam_masks)
+        self.blend_images(imgs, seam_masks)
+
+        return self.create_final_panorama()
+
+    def initialize_registration(self, img_names):
+        self.img_handler.set_img_names(img_names)
+
+    def resize_medium_resolution(self):
+        return list(self.img_handler.resize_to_medium_resolution())
+
+    def find_features(self, imgs):
+        return [self.detector.detect_features(img) for img in imgs]
+
+    def match_features(self, features):
+        return self.matcher.match_features(features)
+
+    def subset(self, imgs, features, matches):
+        names, sizes, imgs, features, matches = \
+            self.subsetter.subset(self.img_handler.img_names,
+                                  self.img_handler.img_sizes,
+                                  imgs, features, matches)
+        self.img_handler.img_names, self.img_handler.img_sizes = names, sizes
+        return imgs, features, matches
+
+    def estimate_camera_parameters(self, features, matches):
+        return self.camera_estimator.estimate(features, matches)
+
+    def refine_camera_parameters(self, features, matches, cameras):
+        return self.camera_adjuster.adjust(features, matches, cameras)
+
+    def perform_wave_correction(self, cameras):
+        return self.wave_corrector.correct(cameras)
+
+    def estimate_final_panorama_dimensions(self, cameras):
+        return estimate_final_panorama_dimensions(cameras, self.warper,
+                                                  self.img_handler)
+
+    def initialize_composition(self, corners, sizes):
+        if self.timelapser.do_timelapse:
+            self.timelapser.initialize(corners, sizes)
+        else:
+            self.blender.prepare(corners, sizes)
+
+    def resize_low_resolution(self, imgs=None):
+        return list(self.img_handler.resize_to_low_resolution(imgs))
+
+    def warp_low_resolution_images(self, imgs, cameras, final_scale):
+        camera_aspect = self.img_handler.get_medium_to_low_ratio()
+        scale = final_scale * self.img_handler.get_final_to_low_ratio()
+        return list(self.warp_images(imgs, cameras, scale, camera_aspect))
+
+    def warp_final_resolution_images(self, imgs, cameras, scale):
+        camera_aspect = self.img_handler.get_medium_to_final_ratio()
+        return self.warp_images(imgs, cameras, scale, camera_aspect)
+
+    def warp_images(self, imgs, cameras, scale, aspect=1):
+        self._masks = []
+        self._corners = []
+        for img_warped, mask_warped, corner in \
+            self.warper.warp_images_and_image_masks(
+                imgs, cameras, scale, aspect
+                ):
+            self._masks.append(mask_warped)
+            self._corners.append(corner)
+            yield img_warped
+
+    def estimate_exposure_errors(self, imgs):
+        self.compensator.feed(self._corners, imgs, self._masks)
+
+    def find_seam_masks(self, imgs):
+        return self.seam_finder.find(imgs, self._corners, self._masks)
+
+    def resize_final_resolution(self):
+        return self.img_handler.resize_to_final_resolution()
+
+    def compensate_exposure_errors(self, imgs):
+        for idx, img in enumerate(imgs):
+            yield self.compensator.apply(idx, self._corners[idx],
+                                         img, self._masks[idx])
+
+    def resize_seam_masks(self, seam_masks):
+        for idx, seam_mask in enumerate(seam_masks):
+            yield SeamFinder.resize(seam_mask, self._masks[idx])
+
+    def blend_images(self, imgs, masks):
+        for idx, (img, mask) in enumerate(zip(imgs, masks)):
+            if self.timelapser.do_timelapse:
+                self.timelapser.process_and_save_frame(
+                    self.img_handler.img_names[idx], img, self._corners[idx]
+                    )
+            else:
+                self.blender.feed(img, mask, self._corners[idx])
+
+    def create_final_panorama(self):
+        if not self.timelapser.do_timelapse:
+            return self.blender.blend()
+
+    @staticmethod
+    def validate_kwargs(kwargs):
+        for arg in kwargs:
+            if arg not in Stitcher.DEFAULT_SETTINGS:
+                raise StitchingError("Invalid Argument: " + arg)
+
+    def collect_garbage(self):
+        del self.img_handler.img_names, self.img_handler.img_sizes
+        del self._corners, self._masks
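Any key of DEFAULT_SETTINGS can be overridden per keyword, and unknown keywords fail fast (the 'sift' choice assumes a SIFT-enabled build):

    stitcher = Stitcher(detector='sift', blender_type='feather')
    Stitcher(foo='bar')  # raises StitchingError: Invalid Argument: foo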
diff --git a/apps/opencv_stitching_tool/opencv_stitching/stitching_error.py b/apps/opencv_stitching_tool/opencv_stitching/stitching_error.py
new file mode 100644 (file)
index 0000000..6d42e95
--- /dev/null
@@ -0,0 +1,2 @@
+class StitchingError(Exception):
+    pass
diff --git a/apps/opencv_stitching_tool/opencv_stitching/subsetter.py b/apps/opencv_stitching_tool/opencv_stitching/subsetter.py
new file mode 100644 (file)
index 0000000..4ea6acc
--- /dev/null
@@ -0,0 +1,95 @@
+from itertools import chain
+import math
+import cv2 as cv
+import numpy as np
+
+from .feature_matcher import FeatureMatcher
+from .stitching_error import StitchingError
+
+
+class Subsetter:
+
+    DEFAULT_CONFIDENCE_THRESHOLD = 1
+    DEFAULT_MATCHES_GRAPH_DOT_FILE = None
+
+    def __init__(self,
+                 confidence_threshold=DEFAULT_CONFIDENCE_THRESHOLD,
+                 matches_graph_dot_file=DEFAULT_MATCHES_GRAPH_DOT_FILE):
+        self.confidence_threshold = confidence_threshold
+        self.save_file = matches_graph_dot_file
+
+    def subset(self, img_names, img_sizes, imgs, features, matches):
+        self.save_matches_graph_dot_file(img_names, matches)
+        indices = self.get_indices_to_keep(features, matches)
+
+        img_names = Subsetter.subset_list(img_names, indices)
+        img_sizes = Subsetter.subset_list(img_sizes, indices)
+        imgs = Subsetter.subset_list(imgs, indices)
+        features = Subsetter.subset_list(features, indices)
+        matches = Subsetter.subset_matches(matches, indices)
+        return img_names, img_sizes, imgs, features, matches
+
+    def save_matches_graph_dot_file(self, img_names, pairwise_matches):
+        if self.save_file:
+            with open(self.save_file, 'w') as filehandler:
+                filehandler.write(self.get_matches_graph(img_names,
+                                                         pairwise_matches)
+                                  )
+
+    def get_matches_graph(self, img_names, pairwise_matches):
+        return cv.detail.matchesGraphAsString(img_names, pairwise_matches,
+                                              self.confidence_threshold)
+
+    def get_indices_to_keep(self, features, pairwise_matches):
+        indices = cv.detail.leaveBiggestComponent(features,
+                                                  pairwise_matches,
+                                                  self.confidence_threshold)
+        indices_as_list = [int(idx) for idx in list(indices[:, 0])]
+
+        if len(indices_as_list) < 2:
+            raise StitchingError("No match exceeds the "
+                                 "given confidence theshold.")
+
+        return indices_as_list
+
+    @staticmethod
+    def subset_list(list_to_subset, indices):
+        return [list_to_subset[i] for i in indices]
+
+    @staticmethod
+    def subset_matches(pairwise_matches, indices):
+        indices_to_delete = Subsetter.get_indices_to_delete(
+            math.sqrt(len(pairwise_matches)),
+            indices
+            )
+
+        matches_matrix = FeatureMatcher.get_matches_matrix(pairwise_matches)
+        matches_matrix_subset = Subsetter.subset_matrix(matches_matrix,
+                                                        indices_to_delete)
+        matches_subset = Subsetter.matrix_rows_to_list(matches_matrix_subset)
+
+        return matches_subset
+
+    @staticmethod
+    def get_indices_to_delete(nr_elements, indices_to_keep):
+        return list(set(range(int(nr_elements))) - set(indices_to_keep))
+
+    @staticmethod
+    def subset_matrix(matrix_to_subset, indices_to_delete):
+        for idx, idx_to_delete in enumerate(indices_to_delete):
+            matrix_to_subset = Subsetter.delete_index_from_matrix(
+                matrix_to_subset,
+                idx_to_delete-idx  # matrix shape reduced by one at each step
+                )
+
+        return matrix_to_subset
+
+    @staticmethod
+    def delete_index_from_matrix(matrix, idx):
+        mask = np.ones(matrix.shape[0], bool)
+        mask[idx] = 0
+        return matrix[mask, :][:, mask]
+
+    @staticmethod
+    def matrix_rows_to_list(matrix):
+        return list(chain.from_iterable(matrix.tolist()))
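A worked example of the index bookkeeping, with 4 images of which 0 and 2 survive the confidence check:

    Subsetter.get_indices_to_delete(4, [0, 2])           # -> [1, 3]
    Subsetter.subset_list(['a', 'b', 'c', 'd'], [0, 2])  # -> ['a', 'c']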
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/.gitignore b/apps/opencv_stitching_tool/opencv_stitching/test/.gitignore
new file mode 100644 (file)
index 0000000..93426ff
--- /dev/null
@@ -0,0 +1,13 @@
+# Ignore everything
+*
+
+# But not these files...
+!.gitignore
+!test_matcher.py
+!test_stitcher.py
+!test_megapix_scaler.py
+!test_registration.py
+!test_composition.py
+!test_performance.py
+!stitching_detailed.py
+!SAMPLE_IMAGES_TO_DOWNLOAD.txt
\ No newline at end of file
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/SAMPLE_IMAGES_TO_DOWNLOAD.txt b/apps/opencv_stitching_tool/opencv_stitching/test/SAMPLE_IMAGES_TO_DOWNLOAD.txt
new file mode 100644 (file)
index 0000000..cecf3b8
--- /dev/null
@@ -0,0 +1,5 @@
+https://github.com/opencv/opencv_extra/tree/master/testdata/stitching
+
+s1.jpg s2.jpg
+boat1.jpg boat2.jpg boat3.jpg boat4.jpg boat5.jpg boat6.jpg
+budapest1.jpg budapest2.jpg budapest3.jpg budapest4.jpg budapest5.jpg budapest6.jpg
\ No newline at end of file
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/stitching_detailed.py b/apps/opencv_stitching_tool/opencv_stitching/test/stitching_detailed.py
new file mode 100644 (file)
index 0000000..b12210d
--- /dev/null
@@ -0,0 +1,406 @@
+"""
+Stitching sample (advanced)
+===========================
+Show how to use Stitcher API from python.
+"""
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+from types import SimpleNamespace
+from collections import OrderedDict
+
+import cv2 as cv
+import numpy as np
+
+EXPOS_COMP_CHOICES = OrderedDict()
+EXPOS_COMP_CHOICES['gain_blocks'] = cv.detail.ExposureCompensator_GAIN_BLOCKS
+EXPOS_COMP_CHOICES['gain'] = cv.detail.ExposureCompensator_GAIN
+EXPOS_COMP_CHOICES['channel'] = cv.detail.ExposureCompensator_CHANNELS
+EXPOS_COMP_CHOICES['channel_blocks'] = cv.detail.ExposureCompensator_CHANNELS_BLOCKS
+EXPOS_COMP_CHOICES['no'] = cv.detail.ExposureCompensator_NO
+
+BA_COST_CHOICES = OrderedDict()
+BA_COST_CHOICES['ray'] = cv.detail_BundleAdjusterRay
+BA_COST_CHOICES['reproj'] = cv.detail_BundleAdjusterReproj
+BA_COST_CHOICES['affine'] = cv.detail_BundleAdjusterAffinePartial
+BA_COST_CHOICES['no'] = cv.detail_NoBundleAdjuster
+
+FEATURES_FIND_CHOICES = OrderedDict()
+try:
+    cv.xfeatures2d_SURF.create() # check if the function can be called
+    FEATURES_FIND_CHOICES['surf'] = cv.xfeatures2d_SURF.create
+except (AttributeError, cv.error) as e:
+    print("SURF not available")
+# if SURF not available, ORB is default
+FEATURES_FIND_CHOICES['orb'] = cv.ORB.create
+try:
+    FEATURES_FIND_CHOICES['sift'] = cv.xfeatures2d_SIFT.create
+except AttributeError:
+    print("SIFT not available")
+try:
+    FEATURES_FIND_CHOICES['brisk'] = cv.BRISK_create
+except AttributeError:
+    print("BRISK not available")
+try:
+    FEATURES_FIND_CHOICES['akaze'] = cv.AKAZE_create
+except AttributeError:
+    print("AKAZE not available")
+
+SEAM_FIND_CHOICES = OrderedDict()
+SEAM_FIND_CHOICES['dp_color'] = cv.detail_DpSeamFinder('COLOR')
+SEAM_FIND_CHOICES['dp_colorgrad'] = cv.detail_DpSeamFinder('COLOR_GRAD')
+SEAM_FIND_CHOICES['voronoi'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_VORONOI_SEAM)
+SEAM_FIND_CHOICES['no'] = cv.detail.SeamFinder_createDefault(cv.detail.SeamFinder_NO)
+
+ESTIMATOR_CHOICES = OrderedDict()
+ESTIMATOR_CHOICES['homography'] = cv.detail_HomographyBasedEstimator
+ESTIMATOR_CHOICES['affine'] = cv.detail_AffineBasedEstimator
+
+WARP_CHOICES = (
+    'spherical',
+    'plane',
+    'affine',
+    'cylindrical',
+    'fisheye',
+    'stereographic',
+    'compressedPlaneA2B1',
+    'compressedPlaneA1.5B1',
+    'compressedPlanePortraitA2B1',
+    'compressedPlanePortraitA1.5B1',
+    'paniniA2B1',
+    'paniniA1.5B1',
+    'paniniPortraitA2B1',
+    'paniniPortraitA1.5B1',
+    'mercator',
+    'transverseMercator',
+)
+
+WAVE_CORRECT_CHOICES = OrderedDict()
+WAVE_CORRECT_CHOICES['horiz'] = cv.detail.WAVE_CORRECT_HORIZ
+WAVE_CORRECT_CHOICES['no'] = None
+WAVE_CORRECT_CHOICES['vert'] = cv.detail.WAVE_CORRECT_VERT
+
+BLEND_CHOICES = ('multiband', 'feather', 'no',)
+
+def get_matcher(args):
+    try_cuda = args.try_cuda
+    matcher_type = args.matcher
+    if args.match_conf is None:
+        if args.features == 'orb':
+            match_conf = 0.3
+        else:
+            match_conf = 0.65
+    else:
+        match_conf = args.match_conf
+    range_width = args.rangewidth
+    if matcher_type == "affine":
+        matcher = cv.detail_AffineBestOf2NearestMatcher(False, try_cuda, match_conf)
+    elif range_width == -1:
+        matcher = cv.detail.BestOf2NearestMatcher_create(try_cuda, match_conf)
+    else:
+        matcher = cv.detail.BestOf2NearestRangeMatcher_create(range_width, try_cuda, match_conf)
+    return matcher
+
+
+def get_compensator(args):
+    expos_comp_type = EXPOS_COMP_CHOICES[args.expos_comp]
+    expos_comp_nr_feeds = args.expos_comp_nr_feeds
+    expos_comp_block_size = args.expos_comp_block_size
+    # expos_comp_nr_filtering = args.expos_comp_nr_filtering
+    if expos_comp_type == cv.detail.ExposureCompensator_CHANNELS:
+        compensator = cv.detail_ChannelsCompensator(expos_comp_nr_feeds)
+        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
+    elif expos_comp_type == cv.detail.ExposureCompensator_CHANNELS_BLOCKS:
+        compensator = cv.detail_BlocksChannelsCompensator(
+            expos_comp_block_size, expos_comp_block_size,
+            expos_comp_nr_feeds
+        )
+        # compensator.setNrGainsFilteringIterations(expos_comp_nr_filtering)
+    else:
+        compensator = cv.detail.ExposureCompensator_createDefault(expos_comp_type)
+    return compensator
+
+
+def main():
+
+    args = {
+        "img_names":["boat5.jpg", "boat2.jpg",
+                     "boat3.jpg", "boat4.jpg",
+                     "boat1.jpg", "boat6.jpg"],
+        "try_cuda": False,
+        "work_megapix": 0.6,
+        "features": "orb",
+        "matcher": "homography",
+        "estimator": "homography",
+        "match_conf": None,
+        "conf_thresh": 1.0,
+        "ba": "ray",
+        "ba_refine_mask": "xxxxx",
+        "wave_correct": "horiz",
+        "save_graph": None,
+        "warp": "spherical",
+        "seam_megapix": 0.1,
+        "seam": "dp_color",
+        "compose_megapix": 3,
+        "expos_comp": "gain_blocks",
+        "expos_comp_nr_feeds": 1,
+        "expos_comp_nr_filtering": 2,
+        "expos_comp_block_size": 32,
+        "blend": "multiband",
+        "blend_strength": 5,
+        "output": "time_test.jpg",
+        "timelapse": None,
+        "rangewidth": -1
+    }
+
+    args = SimpleNamespace(**args)
+    img_names = args.img_names
+    work_megapix = args.work_megapix
+    seam_megapix = args.seam_megapix
+    compose_megapix = args.compose_megapix
+    conf_thresh = args.conf_thresh
+    ba_refine_mask = args.ba_refine_mask
+    wave_correct = WAVE_CORRECT_CHOICES[args.wave_correct]
+    if args.save_graph is None:
+        save_graph = False
+    else:
+        save_graph = True
+    warp_type = args.warp
+    blend_type = args.blend
+    blend_strength = args.blend_strength
+    result_name = args.output
+    if args.timelapse is not None:
+        timelapse = True
+        if args.timelapse == "as_is":
+            timelapse_type = cv.detail.Timelapser_AS_IS
+        elif args.timelapse == "crop":
+            timelapse_type = cv.detail.Timelapser_CROP
+        else:
+            print("Bad timelapse method")
+            exit()
+    else:
+        timelapse = False
+    finder = FEATURES_FIND_CHOICES[args.features]()
+    seam_work_aspect = 1
+    full_img_sizes = []
+    features = []
+    images = []
+    is_work_scale_set = False
+    is_seam_scale_set = False
+    is_compose_scale_set = False
+    for name in img_names:
+        full_img = cv.imread(cv.samples.findFile(name))
+        if full_img is None:
+            print("Cannot read image ", name)
+            exit()
+        full_img_sizes.append((full_img.shape[1], full_img.shape[0]))
+        if work_megapix < 0:
+            img = full_img
+            work_scale = 1
+            is_work_scale_set = True
+        else:
+            if is_work_scale_set is False:
+                work_scale = min(1.0, np.sqrt(work_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
+                is_work_scale_set = True
+            img = cv.resize(src=full_img, dsize=None, fx=work_scale, fy=work_scale, interpolation=cv.INTER_LINEAR_EXACT)
+        if is_seam_scale_set is False:
+            seam_scale = min(1.0, np.sqrt(seam_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
+            seam_work_aspect = seam_scale / work_scale
+            is_seam_scale_set = True
+        img_feat = cv.detail.computeImageFeatures2(finder, img)
+        features.append(img_feat)
+        img = cv.resize(src=full_img, dsize=None, fx=seam_scale, fy=seam_scale, interpolation=cv.INTER_LINEAR_EXACT)
+        images.append(img)
+
+    matcher = get_matcher(args)
+    p = matcher.apply2(features)
+    matcher.collectGarbage()
+
+    if save_graph:
+        with open(args.save_graph, 'w') as fh:
+            fh.write(cv.detail.matchesGraphAsString(img_names, p, conf_thresh))
+
+    indices = cv.detail.leaveBiggestComponent(features, p, conf_thresh)
+    img_subset = []
+    img_names_subset = []
+    full_img_sizes_subset = []
+    for i in range(len(indices)):
+        img_names_subset.append(img_names[indices[i, 0]])
+        img_subset.append(images[indices[i, 0]])
+        full_img_sizes_subset.append(full_img_sizes[indices[i, 0]])
+    images = img_subset
+    img_names = img_names_subset
+    full_img_sizes = full_img_sizes_subset
+    num_images = len(img_names)
+    if num_images < 2:
+        print("Need more images")
+        exit()
+
+    estimator = ESTIMATOR_CHOICES[args.estimator]()
+    b, cameras = estimator.apply(features, p, None)
+    if not b:
+        print("Homography estimation failed.")
+        exit()
+    for cam in cameras:
+        cam.R = cam.R.astype(np.float32)
+
+    adjuster = BA_COST_CHOICES[args.ba]()
+    adjuster.setConfThresh(1)
+    refine_mask = np.zeros((3, 3), np.uint8)
+    if ba_refine_mask[0] == 'x':
+        refine_mask[0, 0] = 1
+    if ba_refine_mask[1] == 'x':
+        refine_mask[0, 1] = 1
+    if ba_refine_mask[2] == 'x':
+        refine_mask[0, 2] = 1
+    if ba_refine_mask[3] == 'x':
+        refine_mask[1, 1] = 1
+    if ba_refine_mask[4] == 'x':
+        refine_mask[1, 2] = 1
+    adjuster.setRefinementMask(refine_mask)
+    b, cameras = adjuster.apply(features, p, cameras)
+    if not b:
+        print("Camera parameters adjusting failed.")
+        exit()
+    focals = []
+    for cam in cameras:
+        focals.append(cam.focal)
+    focals.sort()
+    if len(focals) % 2 == 1:
+        warped_image_scale = focals[len(focals) // 2]
+    else:
+        warped_image_scale = (focals[len(focals) // 2] + focals[len(focals) // 2 - 1]) / 2
+    if wave_correct is not None:
+        rmats = []
+        for cam in cameras:
+            rmats.append(np.copy(cam.R))
+        rmats = cv.detail.waveCorrect(rmats, wave_correct)
+        for idx, cam in enumerate(cameras):
+            cam.R = rmats[idx]
+    corners = []
+    masks_warped = []
+    images_warped = []
+    sizes = []
+    masks = []
+    for i in range(0, num_images):
+        um = cv.UMat(255 * np.ones((images[i].shape[0], images[i].shape[1]), np.uint8))
+        masks.append(um)
+
+    warper = cv.PyRotationWarper(warp_type, warped_image_scale * seam_work_aspect)  # warper could be nullptr?
+    for idx in range(0, num_images):
+        K = cameras[idx].K().astype(np.float32)
+        swa = seam_work_aspect
+        K[0, 0] *= swa
+        K[0, 2] *= swa
+        K[1, 1] *= swa
+        K[1, 2] *= swa
+        corner, image_wp = warper.warp(images[idx], K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
+        corners.append(corner)
+        sizes.append((image_wp.shape[1], image_wp.shape[0]))
+        images_warped.append(image_wp)
+        p, mask_wp = warper.warp(masks[idx], K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
+        masks_warped.append(mask_wp.get())
+
+    images_warped_f = []
+    for img in images_warped:
+        imgf = img.astype(np.float32)
+        images_warped_f.append(imgf)
+
+    compensator = get_compensator(args)
+    compensator.feed(corners=corners, images=images_warped, masks=masks_warped)
+
+    seam_finder = SEAM_FIND_CHOICES[args.seam]
+    masks_warped = seam_finder.find(images_warped_f, corners, masks_warped)
+    compose_scale = 1
+    corners = []
+    sizes = []
+    blender = None
+    timelapser = None
+    # https://github.com/opencv/opencv/blob/master/samples/cpp/stitching_detailed.cpp#L725 ?
+    for idx, name in enumerate(img_names):
+        full_img = cv.imread(name)
+        if not is_compose_scale_set:
+            if compose_megapix > 0:
+                compose_scale = min(1.0, np.sqrt(compose_megapix * 1e6 / (full_img.shape[0] * full_img.shape[1])))
+            is_compose_scale_set = True
+            compose_work_aspect = compose_scale / work_scale
+            warped_image_scale *= compose_work_aspect
+            warper = cv.PyRotationWarper(warp_type, warped_image_scale)
+            for i in range(0, len(img_names)):
+                cameras[i].focal *= compose_work_aspect
+                cameras[i].ppx *= compose_work_aspect
+                cameras[i].ppy *= compose_work_aspect
+                sz = (int(round(full_img_sizes[i][0] * compose_scale)),
+                      int(round(full_img_sizes[i][1] * compose_scale)))
+                K = cameras[i].K().astype(np.float32)
+                roi = warper.warpRoi(sz, K, cameras[i].R)
+                corners.append(roi[0:2])
+                sizes.append(roi[2:4])
+        if abs(compose_scale - 1) > 1e-1:
+            img = cv.resize(src=full_img, dsize=None, fx=compose_scale, fy=compose_scale,
+                            interpolation=cv.INTER_LINEAR_EXACT)
+        else:
+            img = full_img
+        _img_size = (img.shape[1], img.shape[0])
+        K = cameras[idx].K().astype(np.float32)
+        corner, image_warped = warper.warp(img, K, cameras[idx].R, cv.INTER_LINEAR, cv.BORDER_REFLECT)
+        mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
+        p, mask_warped = warper.warp(mask, K, cameras[idx].R, cv.INTER_NEAREST, cv.BORDER_CONSTANT)
+        compensator.apply(idx, corners[idx], image_warped, mask_warped)
+        image_warped_s = image_warped.astype(np.int16)
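+        # Combine the dilated, resized seam mask with the warped mask so each
+        # image contributes only its seam region to the blend.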
+        dilated_mask = cv.dilate(masks_warped[idx], None)
+        seam_mask = cv.resize(dilated_mask, (mask_warped.shape[1], mask_warped.shape[0]), 0, 0, cv.INTER_LINEAR_EXACT)
+        mask_warped = cv.bitwise_and(seam_mask, mask_warped)
+        if blender is None and not timelapse:
+            blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
+            dst_sz = cv.detail.resultRoi(corners=corners, sizes=sizes)
+            blend_width = np.sqrt(dst_sz[2] * dst_sz[3]) * blend_strength / 100
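+            # blend_width scales with the panorama size; it sets the number of
+            # pyramid bands (multiband) or the sharpness (feather) below.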
+            if blend_width < 1:
+                blender = cv.detail.Blender_createDefault(cv.detail.Blender_NO)
+            elif blend_type == "multiband":
+                blender = cv.detail_MultiBandBlender()
+                blender.setNumBands((np.log(blend_width) / np.log(2.) - 1.).astype(np.int64))
+            elif blend_type == "feather":
+                blender = cv.detail_FeatherBlender()
+                blender.setSharpness(1. / blend_width)
+            blender.prepare(dst_sz)
+        elif timelapser is None and timelapse:
+            timelapser = cv.detail.Timelapser_createDefault(timelapse_type)
+            timelapser.initialize(corners, sizes)
+        if timelapse:
+            ma_tones = np.ones((image_warped_s.shape[0], image_warped_s.shape[1]), np.uint8)
+            timelapser.process(image_warped_s, ma_tones, corners[idx])
+            pos_s = img_names[idx].rfind("/")
+            if pos_s == -1:
+                fixed_file_name = "fixed_" + img_names[idx]
+            else:
+                fixed_file_name = img_names[idx][:pos_s + 1] + "fixed_" + img_names[idx][pos_s + 1:]
+            cv.imwrite(fixed_file_name, timelapser.getDst())
+        else:
+            blender.feed(cv.UMat(image_warped_s), mask_warped, corners[idx])
+    if not timelapse:
+        result = None
+        result_mask = None
+        result, result_mask = blender.blend(result, result_mask)
+        return result
+
+
+if __name__ == '__main__':
+    import tracemalloc
+    import time
+    tracemalloc.start()
+    start = time.time()
+    result = main()
+    current, peak = tracemalloc.get_traced_memory()
+    print(f"Current memory usage is {current / 10**6} MB; Peak was {peak / 10**6} MB")
+    tracemalloc.stop()
+    end = time.time()
+    print(f"Time was {end - start} s")
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/test_composition.py b/apps/opencv_stitching_tool/opencv_stitching/test/test_composition.py
new file mode 100644 (file)
index 0000000..b0b4d76
--- /dev/null
@@ -0,0 +1,67 @@
+import unittest
+import os
+import sys
+
+import numpy as np
+import cv2 as cv
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..', '..')))
+
+from opencv_stitching.stitcher import Stitcher
+
+
+class TestImageComposition(unittest.TestCase):
+
+    # visual test: look especially in the sky
+    def test_exposure_compensation(self):
+        img = cv.imread("s1.jpg")
+        img = increase_brightness(img, value=25)
+        cv.imwrite("s1_bright.jpg", img)
+
+        stitcher = Stitcher(compensator="no", blender_type="no")
+        result = stitcher.stitch(["s1_bright.jpg", "s2.jpg"])
+
+        cv.imwrite("without_exposure_comp.jpg", result)
+
+        stitcher = Stitcher(blender_type="no")
+        result = stitcher.stitch(["s1_bright.jpg", "s2.jpg"])
+
+        cv.imwrite("with_exposure_comp.jpg", result)
+
+    def test_timelapse(self):
+        stitcher = Stitcher(timelapse='as_is')
+        _ = stitcher.stitch(["s1.jpg", "s2.jpg"])
+        frame1 = cv.imread("fixed_s1.jpg")
+
+        max_image_shape_deviation = 3
+        np.testing.assert_allclose(frame1.shape[:2],
+                                   (700, 1811),
+                                   atol=max_image_shape_deviation)
+
+        left = cv.cvtColor(frame1[:, :1300, ], cv.COLOR_BGR2GRAY)
+        right = cv.cvtColor(frame1[:, 1300:, ], cv.COLOR_BGR2GRAY)
+
+        self.assertGreater(cv.countNonZero(left), 800000)
+        self.assertEqual(cv.countNonZero(right), 0)
+
+
+def increase_brightness(img, value=30):
+    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
+    h, s, v = cv.split(hsv)
+
+    lim = 255 - value
+    v[v > lim] = 255
+    v[v <= lim] += value
+
+    final_hsv = cv.merge((h, s, v))
+    img = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)
+    return img
+
+
+def starttest():
+    unittest.main()
+
+
+if __name__ == "__main__":
+    starttest()
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/test_matcher.py b/apps/opencv_stitching_tool/opencv_stitching/test/test_matcher.py
new file mode 100644 (file)
index 0000000..a2424ec
--- /dev/null
@@ -0,0 +1,47 @@
+import unittest
+import os
+import sys
+
+import numpy as np
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..', '..')))
+
+from opencv_stitching.feature_matcher import FeatureMatcher
+# %%
+
+
+class TestMatcher(unittest.TestCase):
+
+    def test_array_in_sqare_matrix(self):
+        array = np.zeros(9)
+
+        matrix = FeatureMatcher.array_in_sqare_matrix(array)
+
+        np.testing.assert_array_equal(matrix, np.array([[0., 0., 0.],
+                                                        [0., 0., 0.],
+                                                        [0., 0., 0.]]))
+
+    def test_get_all_img_combinations(self):
+        nimgs = 3
+
+        combinations = list(FeatureMatcher.get_all_img_combinations(nimgs))
+
+        self.assertEqual(combinations, [(0, 1), (0, 2), (1, 2)])
+
+    def test_get_match_conf(self):
+        explicit_match_conf = FeatureMatcher.get_match_conf(1, 'orb')
+        implicit_match_conf_orb = FeatureMatcher.get_match_conf(None, 'orb')
+        implicit_match_conf_other = FeatureMatcher.get_match_conf(None, 'surf')
+
+        self.assertEqual(explicit_match_conf, 1)
+        self.assertEqual(implicit_match_conf_orb, 0.3)
+        self.assertEqual(implicit_match_conf_other, 0.65)
+
+
+def starttest():
+    unittest.main()
+
+
+if __name__ == "__main__":
+    starttest()
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/test_megapix_scaler.py b/apps/opencv_stitching_tool/opencv_stitching/test/test_megapix_scaler.py
new file mode 100644 (file)
index 0000000..0afdad2
--- /dev/null
@@ -0,0 +1,60 @@
+import unittest
+import os
+import sys
+
+import cv2 as cv
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..', '..')))
+
+from opencv_stitching.megapix_scaler import MegapixScaler
+from opencv_stitching.megapix_downscaler import MegapixDownscaler
+#%%
+
+
+class TestScaler(unittest.TestCase):
+
+    def setUp(self):
+        self.img = cv.imread("s1.jpg")
+        self.size = (self.img.shape[1], self.img.shape[0])
+
+    def test_get_scale_by_resolution(self):
+        scaler = MegapixScaler(0.6)
+
+        scale = scaler.get_scale_by_resolution(1_200_000)
+
+        self.assertEqual(scale, 0.7071067811865476)
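+        # scale = sqrt(0.6 MP / 1.2 MP) = sqrt(0.5)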
+
+    def test_get_scale_by_image(self):
+        scaler = MegapixScaler(0.6)
+
+        scaler.set_scale_by_img_size(self.size)
+
+        self.assertEqual(scaler.scale, 0.8294067854101966)
+
+    def test_get_scaled_img_size(self):
+        scaler = MegapixScaler(0.6)
+        scaler.set_scale_by_img_size(self.size)
+
+        size = scaler.get_scaled_img_size(self.size)
+        self.assertEqual(size, (1033, 581))
+        # 581*1033 = 600173 px = ~0.6 MP
+
+    def test_force_of_downscaling(self):
+        normal_scaler = MegapixScaler(2)
+        downscaler = MegapixDownscaler(2)
+
+        normal_scaler.set_scale_by_img_size(self.size)
+        downscaler.set_scale_by_img_size(self.size)
+
+        self.assertEqual(normal_scaler.scale, 1.5142826857233715)
+        self.assertEqual(downscaler.scale, 1.0)
+
+
+def starttest():
+    unittest.main()
+
+
+if __name__ == "__main__":
+    starttest()
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/test_performance.py b/apps/opencv_stitching_tool/opencv_stitching/test/test_performance.py
new file mode 100644 (file)
index 0000000..60b03a8
--- /dev/null
@@ -0,0 +1,65 @@
+import unittest
+import os
+import sys
+import time
+import tracemalloc
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..', '..')))
+
+from opencv_stitching.stitcher import Stitcher
+from stitching_detailed import main
+# %%
+
+
+class TestStitcher(unittest.TestCase):
+
+    def test_performance(self):
+
+        print("Run new Stitcher class:")
+
+        start = time.time()
+        tracemalloc.start()
+
+        stitcher = Stitcher(final_megapix=3)
+        stitcher.stitch(["boat5.jpg", "boat2.jpg",
+                         "boat3.jpg", "boat4.jpg",
+                         "boat1.jpg", "boat6.jpg"])
+        stitcher.collect_garbage()
+
+        _, peak_memory = tracemalloc.get_traced_memory()
+        tracemalloc.stop()
+        end = time.time()
+        time_needed = end - start
+
+        print(f"Peak was {peak_memory / 10**6} MB")
+        print(f"Time was {time_needed} s")
+
+        print("Run original stitching_detailed.py:")
+
+        start = time.time()
+        tracemalloc.start()
+
+        main()
+
+        _, peak_memory_detailed = tracemalloc.get_traced_memory()
+        tracemalloc.stop()
+        end = time.time()
+        time_needed_detailed = end - start
+
+        print(f"Peak was {peak_memory_detailed / 10**6} MB")
+        print(f"Time was {time_needed_detailed} s")
+
+        self.assertLessEqual(peak_memory / 10**6,
+                             peak_memory_detailed / 10**6)
+        uncertainty_based_on_run = 0.25
+        self.assertLessEqual(time_needed - uncertainty_based_on_run,
+                             time_needed_detailed)
+
+
+def starttest():
+    unittest.main()
+
+
+if __name__ == "__main__":
+    starttest()
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/test_registration.py b/apps/opencv_stitching_tool/opencv_stitching/test/test_registration.py
new file mode 100644 (file)
index 0000000..98e792f
--- /dev/null
@@ -0,0 +1,101 @@
+import unittest
+import os
+import sys
+
+import numpy as np
+import cv2 as cv
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..', '..')))
+
+from opencv_stitching.feature_detector import FeatureDetector
+from opencv_stitching.feature_matcher import FeatureMatcher
+from opencv_stitching.subsetter import Subsetter
+
+
+class TestImageRegistration(unittest.TestCase):
+
+    def test_feature_detector(self):
+        img1 = cv.imread("s1.jpg")
+
+        default_number_of_keypoints = 500
+        detector = FeatureDetector("orb")
+        features = detector.detect_features(img1)
+        self.assertEqual(len(features.getKeypoints()),
+                         default_number_of_keypoints)
+
+        other_keypoints = 1000
+        detector = FeatureDetector("orb", nfeatures=other_keypoints)
+        features = detector.detect_features(img1)
+        self.assertEqual(len(features.getKeypoints()), other_keypoints)
+
+    def test_feature_matcher(self):
+        img1, img2 = cv.imread("s1.jpg"), cv.imread("s2.jpg")
+
+        detector = FeatureDetector("orb")
+        features = [detector.detect_features(img1),
+                    detector.detect_features(img2)]
+
+        matcher = FeatureMatcher()
+        pairwise_matches = matcher.match_features(features)
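+        # the matcher returns one MatchesInfo per ordered image pair (n*n)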
+        self.assertEqual(len(pairwise_matches), len(features)**2)
+        self.assertGreater(pairwise_matches[1].confidence, 2)
+
+        matches_matrix = FeatureMatcher.get_matches_matrix(pairwise_matches)
+        self.assertEqual(matches_matrix.shape, (2, 2))
+        conf_matrix = FeatureMatcher.get_confidence_matrix(pairwise_matches)
+        self.assertTrue(np.array_equal(
+            conf_matrix > 2,
+            np.array([[False,  True], [True, False]])
+            ))
+
+    def test_subsetting(self):
+        img1, img2 = cv.imread("s1.jpg"), cv.imread("s2.jpg")
+        img3, img4 = cv.imread("boat1.jpg"), cv.imread("boat2.jpg")
+        img5 = cv.imread("boat3.jpg")
+        img_names = ["s1.jpg", "s2.jpg", "boat1.jpg", "boat2.jpg", "boat3.jpg"]
+
+        detector = FeatureDetector("orb")
+        features = [detector.detect_features(img1),
+                    detector.detect_features(img2),
+                    detector.detect_features(img3),
+                    detector.detect_features(img4),
+                    detector.detect_features(img5)]
+        matcher = FeatureMatcher()
+        pairwise_matches = matcher.match_features(features)
+        subsetter = Subsetter(confidence_threshold=1,
+                              matches_graph_dot_file="dot_graph.txt")  # view in https://dreampuf.github.io  # noqa
+
+        indices = subsetter.get_indices_to_keep(features, pairwise_matches)
+        indices_to_delete = subsetter.get_indices_to_delete(len(img_names),
+                                                            indices)
+
+        self.assertEqual(indices, [2, 3, 4])
+        self.assertEqual(indices_to_delete, [0, 1])
+
+        subsetted_image_names = subsetter.subset_list(img_names, indices)
+        self.assertEqual(subsetted_image_names,
+                         ['boat1.jpg', 'boat2.jpg', 'boat3.jpg'])
+
+        matches_subset = subsetter.subset_matches(pairwise_matches, indices)
+        # FeatureMatcher.get_confidence_matrix(pairwise_matches)
+        # FeatureMatcher.get_confidence_matrix(subsetted_matches)
+        self.assertEqual(pairwise_matches[13].confidence,
+                         matches_subset[1].confidence)
+
+        graph = subsetter.get_matches_graph(img_names, pairwise_matches)
+        self.assertTrue(graph.startswith("graph matches_graph{"))
+
+        subsetter.save_matches_graph_dot_file(img_names, pairwise_matches)
+        with open('dot_graph.txt', 'r') as file:
+            graph = file.read()
+            self.assertTrue(graph.startswith("graph matches_graph{"))
+
+
+def starttest():
+    unittest.main()
+
+
+if __name__ == "__main__":
+    starttest()
diff --git a/apps/opencv_stitching_tool/opencv_stitching/test/test_stitcher.py b/apps/opencv_stitching_tool/opencv_stitching/test/test_stitcher.py
new file mode 100644 (file)
index 0000000..5a24f75
--- /dev/null
@@ -0,0 +1,108 @@
+import unittest
+import os
+import sys
+
+import numpy as np
+import cv2 as cv
+
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                '..', '..')))
+
+from opencv_stitching.stitcher import Stitcher
+# %%
+
+
+class TestStitcher(unittest.TestCase):
+
+    def test_stitcher_aquaduct(self):
+        stitcher = Stitcher(n_features=250)
+        result = stitcher.stitch(["s1.jpg", "s2.jpg"])
+        cv.imwrite("result.jpg", result)
+
+        max_image_shape_deviation = 3
+        np.testing.assert_allclose(result.shape[:2],
+                                   (700, 1811),
+                                   atol=max_image_shape_deviation)
+
+    @unittest.skip("skip boat test (high resuolution ran >30s)")
+    def test_stitcher_boat1(self):
+        settings = {"warper_type": "fisheye",
+                    "wave_correct_kind": "no",
+                    "finder": "dp_colorgrad",
+                    "compensator": "no",
+                    "conf_thresh": 0.3}
+
+        stitcher = Stitcher(**settings)
+        result = stitcher.stitch(["boat5.jpg", "boat2.jpg",
+                                  "boat3.jpg", "boat4.jpg",
+                                  "boat1.jpg", "boat6.jpg"])
+
+        cv.imwrite("boat_fisheye.jpg", result)
+
+        max_image_shape_deviation = 600
+        np.testing.assert_allclose(result.shape[:2],
+                                   (14488, 7556),
+                                   atol=max_image_shape_deviation)
+
+    @unittest.skip("skip boat test (high resuolution ran >30s)")
+    def test_stitcher_boat2(self):
+        settings = {"warper_type": "compressedPlaneA2B1",
+                    "finder": "dp_colorgrad",
+                    "compensator": "channel_blocks",
+                    "conf_thresh": 0.3}
+
+        stitcher = Stitcher(**settings)
+        result = stitcher.stitch(["boat5.jpg", "boat2.jpg",
+                                  "boat3.jpg", "boat4.jpg",
+                                  "boat1.jpg", "boat6.jpg"])
+
+        cv.imwrite("boat_plane.jpg", result)
+
+        max_image_shape_deviation = 600
+        np.testing.assert_allclose(result.shape[:2],
+                                   (7400, 12340),
+                                   atol=max_image_shape_deviation)
+
+    def test_stitcher_boat_aquaduct_subset(self):
+        settings = {"final_megapix": 1}
+
+        stitcher = Stitcher(**settings)
+        result = stitcher.stitch(["boat5.jpg",
+                                  "s1.jpg", "s2.jpg",
+                                  "boat2.jpg",
+                                  "boat3.jpg", "boat4.jpg",
+                                  "boat1.jpg", "boat6.jpg"])
+        cv.imwrite("subset_low_res.jpg", result)
+
+        max_image_shape_deviation = 100
+        np.testing.assert_allclose(result.shape[:2],
+                                   (839, 3384),
+                                   atol=max_image_shape_deviation)
+
+    def test_stitcher_budapest(self):
+        settings = {"matcher_type": "affine",
+                    "estimator": "affine",
+                    "adjuster": "affine",
+                    "warper_type": "affine",
+                    "wave_correct_kind": "no",
+                    "confidence_threshold": 0.3}
+
+        stitcher = Stitcher(**settings)
+        result = stitcher.stitch(["budapest1.jpg", "budapest2.jpg",
+                                  "budapest3.jpg", "budapest4.jpg",
+                                  "budapest5.jpg", "budapest6.jpg"])
+
+        cv.imwrite("budapest.jpg", result)
+
+        max_image_shape_deviation = 50
+        np.testing.assert_allclose(result.shape[:2],
+                                   (1155, 2310),
+                                   atol=max_image_shape_deviation)
+
+
+def starttest():
+    unittest.main()
+
+
+if __name__ == "__main__":
+    starttest()
diff --git a/apps/opencv_stitching_tool/opencv_stitching/timelapser.py b/apps/opencv_stitching_tool/opencv_stitching/timelapser.py
new file mode 100644 (file)
index 0000000..4085f47
--- /dev/null
@@ -0,0 +1,58 @@
+import os
+import cv2 as cv
+import numpy as np
+
+
+class Timelapser:
+
+    TIMELAPSE_CHOICES = ('no', 'as_is', 'crop',)
+    DEFAULT_TIMELAPSE = 'no'
+
+    def __init__(self, timelapse=DEFAULT_TIMELAPSE):
+        self.do_timelapse = True
+        self.timelapse_type = None
+        self.timelapser = None
+
+        if timelapse == "as_is":
+            self.timelapse_type = cv.detail.Timelapser_AS_IS
+        elif timelapse == "crop":
+            self.timelapse_type = cv.detail.Timelapser_CROP
+        else:
+            self.do_timelapse = False
+
+        if self.do_timelapse:
+            self.timelapser = cv.detail.Timelapser_createDefault(
+                self.timelapse_type
+                )
+
+    def initialize(self, *args):
+        """https://docs.opencv.org/master/dd/dac/classcv_1_1detail_1_1Timelapser.html#aaf0f7c4128009f02473332a0c41f6345"""  # noqa
+        self.timelapser.initialize(*args)
+
+    def process_and_save_frame(self, img_name, img, corner):
+        self.process_frame(img, corner)
+        cv.imwrite(self.get_fixed_filename(img_name), self.get_frame())
+
+    def process_frame(self, img, corner):
+        mask = np.ones((img.shape[0], img.shape[1]), np.uint8)
+        img = img.astype(np.int16)
+        self.timelapser.process(img, mask, corner)
+
+    def get_frame(self):
+        frame = self.timelapser.getDst()
+        frame = np.float32(cv.UMat.get(frame))
+        frame = cv.convertScaleAbs(frame)
+        return frame
+
+    @staticmethod
+    def get_fixed_filename(img_name):
+        dirname, filename = os.path.split(img_name)
+        return os.path.join(dirname, "fixed_" + filename)
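+
+
+# Usage sketch (hypothetical inputs; 'corners' and 'sizes' come from the
+# warping step):
+#     timelapser = Timelapser('as_is')
+#     timelapser.initialize(corners, sizes)
+#     for name, img, corner in zip(img_names, warped_imgs, corners):
+#         timelapser.process_and_save_frame(name, img, corner)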
diff --git a/apps/opencv_stitching_tool/opencv_stitching/warper.py b/apps/opencv_stitching_tool/opencv_stitching/warper.py
new file mode 100644 (file)
index 0000000..c31a864
--- /dev/null
@@ -0,0 +1,77 @@
+import cv2 as cv
+import numpy as np
+
+
+class Warper:
+
+    WARP_TYPE_CHOICES = ('spherical', 'plane', 'affine', 'cylindrical',
+                         'fisheye', 'stereographic', 'compressedPlaneA2B1',
+                         'compressedPlaneA1.5B1',
+                         'compressedPlanePortraitA2B1',
+                         'compressedPlanePortraitA1.5B1',
+                         'paniniA2B1', 'paniniA1.5B1', 'paniniPortraitA2B1',
+                         'paniniPortraitA1.5B1', 'mercator',
+                         'transverseMercator')
+
+    DEFAULT_WARP_TYPE = 'spherical'
+
+    def __init__(self, warper_type=DEFAULT_WARP_TYPE, scale=1):
+        self.warper_type = warper_type
+        self.warper = cv.PyRotationWarper(warper_type, scale)
+        self.scale = scale
+
+    def warp_images_and_image_masks(self, imgs, cameras, scale=None, aspect=1):
+        self.update_scale(scale)
+        for img, camera in zip(imgs, cameras):
+            yield self.warp_image_and_image_mask(img, camera, scale, aspect)
+
+    def warp_image_and_image_mask(self, img, camera, scale=None, aspect=1):
+        self.update_scale(scale)
+        corner, img_warped = self.warp_image(img, camera, aspect)
+        mask = 255 * np.ones((img.shape[0], img.shape[1]), np.uint8)
+        _, mask_warped = self.warp_image(mask, camera, aspect, mask=True)
+        return img_warped, mask_warped, corner
+
+    def warp_image(self, image, camera, aspect=1, mask=False):
+        if mask:
+            interp_mode = cv.INTER_NEAREST
+            border_mode = cv.BORDER_CONSTANT
+        else:
+            interp_mode = cv.INTER_LINEAR
+            border_mode = cv.BORDER_REFLECT
+
+        corner, warped_image = self.warper.warp(image,
+                                                Warper.get_K(camera, aspect),
+                                                camera.R,
+                                                interp_mode,
+                                                border_mode)
+        return corner, warped_image
+
+    def warp_roi(self, width, height, camera, scale=None, aspect=1):
+        self.update_scale(scale)
+        roi = (width, height)
+        K = Warper.get_K(camera, aspect)
+        return self.warper.warpRoi(roi, K, camera.R)
+
+    def update_scale(self, scale):
+        if scale is not None and scale != self.scale:
+            self.warper = cv.PyRotationWarper(self.warper_type, scale)  # setScale not working: https://docs.opencv.org/master/d5/d76/classcv_1_1PyRotationWarper.html#a90b000bb75f95294f9b0b6ec9859eb55
+            self.scale = scale
+
+    @staticmethod
+    def get_K(camera, aspect=1):
+        """Return the camera intrinsic matrix K, rescaled by 'aspect' if the
+        cameras were estimated on a different scale than the images to be
+        warped."""
+        K = camera.K().astype(np.float32)
+        K[0, 0] *= aspect
+        K[0, 2] *= aspect
+        K[1, 1] *= aspect
+        K[1, 2] *= aspect
+        return K
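+
+
+# Usage sketch (hypothetical inputs; 'imgs' and 'cameras' come from the
+# registration and camera estimation steps):
+#     warper = Warper(warper_type='spherical')
+#     warped = list(warper.warp_images_and_image_masks(imgs, cameras))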
diff --git a/apps/opencv_stitching_tool/opencv_stitching_tool.py b/apps/opencv_stitching_tool/opencv_stitching_tool.py
new file mode 100644 (file)
index 0000000..1ee96aa
--- /dev/null
@@ -0,0 +1,235 @@
+"""
+Stitching sample (advanced)
+===========================
+
+Shows how to use the Stitcher API from Python.
+"""
+
+# Python 2/3 compatibility
+from __future__ import print_function
+
+import argparse
+
+import cv2 as cv
+import numpy as np
+
+from opencv_stitching.stitcher import Stitcher
+
+from opencv_stitching.image_handler import ImageHandler
+from opencv_stitching.feature_detector import FeatureDetector
+from opencv_stitching.feature_matcher import FeatureMatcher
+from opencv_stitching.subsetter import Subsetter
+from opencv_stitching.camera_estimator import CameraEstimator
+from opencv_stitching.camera_adjuster import CameraAdjuster
+from opencv_stitching.camera_wave_corrector import WaveCorrector
+from opencv_stitching.warper import Warper
+from opencv_stitching.exposure_error_compensator import ExposureErrorCompensator  # noqa
+from opencv_stitching.seam_finder import SeamFinder
+from opencv_stitching.blender import Blender
+from opencv_stitching.timelapser import Timelapser
+
+parser = argparse.ArgumentParser(
+    prog="opencv_stitching_tool.py",
+    description="Rotation model images stitcher"
+)
+parser.add_argument(
+    'img_names', nargs='+',
+    help="Files to stitch", type=str
+)
+parser.add_argument(
+    '--medium_megapix', action='store',
+    default=ImageHandler.DEFAULT_MEDIUM_MEGAPIX,
+    help="Resolution for image registration step. "
+    "The default is %s Mpx" % ImageHandler.DEFAULT_MEDIUM_MEGAPIX,
+    type=float, dest='medium_megapix'
+)
+parser.add_argument(
+    '--detector', action='store',
+    default=FeatureDetector.DEFAULT_DETECTOR,
+    help="Type of features used for images matching. "
+         "The default is '%s'." % FeatureDetector.DEFAULT_DETECTOR,
+    choices=FeatureDetector.DETECTOR_CHOICES.keys(),
+    type=str, dest='detector'
+)
+parser.add_argument(
+    '--nfeatures', action='store',
+    default=500,
+    help="Type of features used for images matching. "
+         "The default is 500.",
+    type=int, dest='nfeatures'
+)
+parser.add_argument(
+    '--matcher_type', action='store', default=FeatureMatcher.DEFAULT_MATCHER,
+    help="Matcher used for pairwise image matching. "
+         "The default is '%s'." % FeatureMatcher.DEFAULT_MATCHER,
+    choices=FeatureMatcher.MATCHER_CHOICES,
+    type=str, dest='matcher_type'
+)
+parser.add_argument(
+    '--range_width', action='store',
+    default=FeatureMatcher.DEFAULT_RANGE_WIDTH,
+    help="uses range_width to limit number of images to match with.",
+    type=int, dest='range_width'
+)
+parser.add_argument(
+    '--try_use_gpu',
+    action='store_true',
+    default=False,
+    help="Try to use CUDA. The default value is no. "
+         "All default values are for CPU mode.",
+    dest='try_use_gpu'
+)
+parser.add_argument(
+    '--match_conf', action='store',
+    help="Confidence for feature matching step. "
+         "The default is 0.3 for ORB and 0.65 for other feature types.",
+    type=float, dest='match_conf'
+)
+parser.add_argument(
+    '--confidence_threshold', action='store',
+    default=Subsetter.DEFAULT_CONFIDENCE_THRESHOLD,
+    help="Threshold for two images are from the same panorama confidence. "
+         "The default is '%s'." % Subsetter.DEFAULT_CONFIDENCE_THRESHOLD,
+    type=float, dest='confidence_threshold'
+)
+parser.add_argument(
+    '--matches_graph_dot_file', action='store',
+    default=Subsetter.DEFAULT_MATCHES_GRAPH_DOT_FILE,
+    help="Save matches graph represented in DOT language to <file_name> file.",
+    type=str, dest='matches_graph_dot_file'
+)
+parser.add_argument(
+    '--estimator', action='store',
+    default=CameraEstimator.DEFAULT_CAMERA_ESTIMATOR,
+    help="Type of estimator used for transformation estimation. "
+         "The default is '%s'." % CameraEstimator.DEFAULT_CAMERA_ESTIMATOR,
+    choices=CameraEstimator.CAMERA_ESTIMATOR_CHOICES.keys(),
+    type=str, dest='estimator'
+)
+parser.add_argument(
+    '--adjuster', action='store',
+    default=CameraAdjuster.DEFAULT_CAMERA_ADJUSTER,
+    help="Bundle adjustment cost function. "
+         "The default is '%s'." % CameraAdjuster.DEFAULT_CAMERA_ADJUSTER,
+    choices=CameraAdjuster.CAMERA_ADJUSTER_CHOICES.keys(),
+    type=str, dest='adjuster'
+)
+parser.add_argument(
+    '--refinement_mask', action='store',
+    default=CameraAdjuster.DEFAULT_REFINEMENT_MASK,
+    help="Set refinement mask for bundle adjustment. It looks like 'x_xxx', "
+         "where 'x' means refine respective parameter and '_' means don't "
+         "refine, and has the following format:<fx><skew><ppx><aspect><ppy>. "
+         "The default mask is '%s'. "
+         "If bundle adjustment doesn't support estimation of selected "
+         "parameter then the respective flag is ignored."
+         "" % CameraAdjuster.DEFAULT_REFINEMENT_MASK,
+    type=str, dest='refinement_mask'
+)
+parser.add_argument(
+    '--wave_correct_kind', action='store',
+    default=WaveCorrector.DEFAULT_WAVE_CORRECTION,
+    help="Perform wave effect correction. "
+         "The default is '%s'" % WaveCorrector.DEFAULT_WAVE_CORRECTION,
+    choices=WaveCorrector.WAVE_CORRECT_CHOICES.keys(),
+    type=str, dest='wave_correct_kind'
+)
+parser.add_argument(
+    '--warper_type', action='store', default=Warper.DEFAULT_WARP_TYPE,
+    help="Warp surface type. The default is '%s'." % Warper.DEFAULT_WARP_TYPE,
+    choices=Warper.WARP_TYPE_CHOICES,
+    type=str, dest='warper_type'
+)
+parser.add_argument(
+    '--low_megapix', action='store', default=ImageHandler.DEFAULT_LOW_MEGAPIX,
+    help="Resolution for seam estimation and exposure estimation step. "
+    "The default is %s Mpx." % ImageHandler.DEFAULT_LOW_MEGAPIX,
+    type=float, dest='low_megapix'
+)
+parser.add_argument(
+    '--compensator', action='store',
+    default=ExposureErrorCompensator.DEFAULT_COMPENSATOR,
+    help="Exposure compensation method. "
+         "The default is '%s'." % ExposureErrorCompensator.DEFAULT_COMPENSATOR,
+    choices=ExposureErrorCompensator.COMPENSATOR_CHOICES.keys(),
+    type=str, dest='compensator'
+)
+parser.add_argument(
+    '--nr_feeds', action='store',
+    default=ExposureErrorCompensator.DEFAULT_NR_FEEDS,
+    help="Number of exposure compensation feed.",
+    type=np.int32, dest='nr_feeds'
+)
+parser.add_argument(
+    '--block_size', action='store',
+    default=ExposureErrorCompensator.DEFAULT_BLOCK_SIZE,
+    help="BLock size in pixels used by the exposure compensator. "
+         "The default is '%s'." % ExposureErrorCompensator.DEFAULT_BLOCK_SIZE,
+    type=np.int32, dest='block_size'
+)
+parser.add_argument(
+    '--finder', action='store', default=SeamFinder.DEFAULT_SEAM_FINDER,
+    help="Seam estimation method. "
+         "The default is '%s'." % SeamFinder.DEFAULT_SEAM_FINDER,
+    choices=SeamFinder.SEAM_FINDER_CHOICES.keys(),
+    type=str, dest='finder'
+)
+parser.add_argument(
+    '--final_megapix', action='store',
+    default=ImageHandler.DEFAULT_FINAL_MEGAPIX,
+    help="Resolution for compositing step. Use -1 for original resolution. "
+         "The default is %s" % ImageHandler.DEFAULT_FINAL_MEGAPIX,
+    type=float, dest='final_megapix'
+)
+parser.add_argument(
+    '--blender_type', action='store', default=Blender.DEFAULT_BLENDER,
+    help="Blending method. The default is '%s'." % Blender.DEFAULT_BLENDER,
+    choices=Blender.BLENDER_CHOICES,
+    type=str, dest='blender_type'
+)
+parser.add_argument(
+    '--blend_strength', action='store', default=Blender.DEFAULT_BLEND_STRENGTH,
+    help="Blending strength from [0,100] range. "
+         "The default is '%s'." % Blender.DEFAULT_BLEND_STRENGTH,
+    type=np.int32, dest='blend_strength'
+)
+parser.add_argument(
+    '--timelapse', action='store', default=Timelapser.DEFAULT_TIMELAPSE,
+    help="Output warped images separately as frames of a time lapse movie, "
+         "with 'fixed_' prepended to input file names. "
+         "The default is '%s'." % Timelapser.DEFAULT_TIMELAPSE,
+    choices=Timelapser.TIMELAPSE_CHOICES,
+    type=str, dest='timelapse'
+)
+parser.add_argument(
+    '--output', action='store', default='result.jpg',
+    help="The default is 'result.jpg'",
+    type=str, dest='output'
+)
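+
+# Example invocation (hypothetical file names):
+#     python opencv_stitching_tool.py img1.jpg img2.jpg --final_megapix 3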
+
+__doc__ += '\n' + parser.format_help()
+
+if __name__ == '__main__':
+    print(__doc__)
+    args = parser.parse_args()
+    args_dict = vars(args)
+
+    # Extract input and output file names
+    img_names = args_dict.pop("img_names")
+    img_names = [cv.samples.findFile(img_name) for img_name in img_names]
+    output = args_dict.pop("output")
+
+    stitcher = Stitcher(**args_dict)
+    panorama = stitcher.stitch(img_names)
+
+    cv.imwrite(output, panorama)
+
+    zoom_x = 600.0 / panorama.shape[1]
+    preview = cv.resize(panorama, dsize=None, fx=zoom_x, fy=zoom_x)
+
+    cv.imshow(output, preview)
+    cv.waitKey()
+    cv.destroyAllWindows()