From: Alexander Mordvintsev Date: Wed, 15 Jun 2011 13:58:40 +0000 (+0000) Subject: use cv2 function X-Git-Tag: accepted/tizen/6.0/unified/20201030.111113~6978 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1a208fe13272a1744c26b0864b4e892ba6ef6f13;p=platform%2Fupstream%2Fopencv.git use cv2 function added color_histogram.py sample work on VideoSynth (chessboard) --- diff --git a/samples/python2/browse.py b/samples/python2/browse.py index 579b0e8..d75b85b 100644 --- a/samples/python2/browse.py +++ b/samples/python2/browse.py @@ -3,7 +3,7 @@ browse.py shows how to implement a simple hi resolution image navigation ''' import numpy as np -import cv2, cv +import cv2 import sys print 'This sample shows how to implement a simple hi resolution image navigation.' @@ -20,7 +20,7 @@ else: img = np.zeros((sz, sz), np.uint8) track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0) track = np.int32(track*10 + (sz/2, sz/2)) - cv2.polylines(img, [track], 0, 255, 1, cv.CV_AA) + cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA) small = img for i in xrange(3): @@ -34,5 +34,5 @@ def onmouse(event, x, y, flags, param): cv2.imshow('zoom', zoom) cv2.imshow('preview', small) -cv.SetMouseCallback('preview', onmouse, None) +cv2.setMouseCallback('preview', onmouse, None) cv2.waitKey() diff --git a/samples/python2/calibrate.py b/samples/python2/calibrate.py index cc35535..485ec3a 100644 --- a/samples/python2/calibrate.py +++ b/samples/python2/calibrate.py @@ -1,20 +1,12 @@ import numpy as np import cv2, cv import os +from common import splitfn USAGE = ''' USAGE: calib.py [--save ] [--debug ] [] ''' -class Bunch: - def __init__(self, **kwds): - self.__dict__.update(kwds) - - -def splitfn(fn): - path, fn = os.path.split(fn) - name, ext = os.path.splitext(fn) - return path, name, ext if __name__ == '__main__': @@ -42,7 +34,7 @@ if __name__ == '__main__': found, corners = cv2.findChessboardCorners(img, pattern_size) if found: term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 ) - cv2.cornerSubPix(img, corners, (11, 11), (-1, -1), term) + cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term) if debug_dir: vis = cv2.cvtColor(img, cv.CV_GRAY2BGR) cv2.drawChessboardCorners(vis, pattern_size, corners, found) diff --git a/samples/python2/coherence.py b/samples/python2/coherence.py index 345444d..66fc704 100644 --- a/samples/python2/coherence.py +++ b/samples/python2/coherence.py @@ -7,7 +7,7 @@ ''' import numpy as np -import cv, cv2 +import cv2, cv def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4): @@ -55,9 +55,9 @@ if __name__ == '__main__': cv2.imshow('dst', dst) cv2.namedWindow('control', 0) - cv.CreateTrackbar('sigma', 'control', 9, 15, nothing) - cv.CreateTrackbar('blend', 'control', 7, 10, nothing) - cv.CreateTrackbar('str_sigma', 'control', 9, 15, nothing) + cv2.createTrackbar('sigma', 'control', 9, 15, nothing) + cv2.createTrackbar('blend', 'control', 7, 10, nothing) + cv2.createTrackbar('str_sigma', 'control', 9, 15, nothing) print 'Press SPACE to update the image\n' diff --git a/samples/python2/color_histogram.py b/samples/python2/color_histogram.py new file mode 100644 index 0000000..5e0b0ee --- /dev/null +++ b/samples/python2/color_histogram.py @@ -0,0 +1,51 @@ +import numpy as np +import cv2, cv +from time import clock +import sys + +import video + +hsv_map = np.zeros((180, 256, 3), np.uint8) +h, s = np.indices(hsv_map.shape[:2]) +hsv_map[:,:,0] = h +hsv_map[:,:,1] = s +hsv_map[:,:,2] = 255 +hsv_map = cv2.cvtColor(hsv_map, cv.CV_HSV2BGR) 
+cv2.imshow('hsv_map', hsv_map) + +cv2.namedWindow('hist', 0) +hist_scale = 10 +def set_scale(val): + global hist_scale + hist_scale = val +cv.CreateTrackbar('scale', 'hist', hist_scale, 32, set_scale) + +try: fn = sys.argv[1] +except: fn = 'synth:bg=../cpp/baboon.jpg:class=chess:noise=0.05' +cam = video.create_capture(fn) + +t = clock() +while True: + flag, frame = cam.read() + cv2.imshow('camera', frame) + + small = cv2.pyrDown(frame) + + hsv = cv2.cvtColor(small, cv.CV_BGR2HSV) + dark = hsv[...,2] < 32 + hsv[dark] = 0 + h = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] ) + + + h = np.clip(h*0.005*hist_scale, 0, 1) + vis = hsv_map*h[:,:,np.newaxis] / 255.0 + cv2.imshow('hist', vis) + + + t1 = clock() + #print (t1-t)*1000 + t = t1 + + ch = cv2.waitKey(1) + if ch == 27: + break diff --git a/samples/python2/common.py b/samples/python2/common.py index 27e1b34..9c52288 100644 --- a/samples/python2/common.py +++ b/samples/python2/common.py @@ -1,8 +1,11 @@ import numpy as np import cv2 +import os -def to_list(a): - return [tuple(p) for p in a] +def splitfn(fn): + path, fn = os.path.split(fn) + name, ext = os.path.splitext(fn) + return path, name, ext def anorm2(a): return (a*a).sum(-1) @@ -37,36 +40,15 @@ def lookat(eye, target, up = (0, 0, 1)): right = np.cross(fwd, up) right /= anorm(right) down = np.cross(fwd, right) - Rt = np.zeros((3, 4)) - Rt[:,:3] = [right, down, fwd] - Rt[:,3] = -np.dot(Rt[:,:3], eye) - return Rt + R = np.float64([right, down, fwd]) + tvec = -np.dot(R, eye) + return R, tvec def mtx2rvec(R): - pass - - -if __name__ == '__main__': - import cv2 - from time import clock - - ''' - w, h = 640, 480 - while True: - img = np.zeros((h, w, 3), np.uint8) - t = clock() - eye = [5*cos(t), 5*sin(t), 3] - Rt = lookat(eye, [0, 0, 0]) - ''' - - - - eye = [1, -4, 3] - target = [0, 0, 0] - Rt = lookat(eye, [0, 0, 0]) - print Rt - p = [0, 0, 0] - print cv2.transform(np.float64([[p]]), Rt) - - print cv2.SVDecomp(Rt[:,:3]) + w, u, vt = cv2.SVDecomp(R - np.eye(3)) + p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0]) + c = np.dot(vt[0], p) + s = np.dot(vt[1], p) + axis = np.cross(vt[0], vt[1]) + return axis * np.arctan2(s, c) diff --git a/samples/python2/edge.py b/samples/python2/edge.py index a55c51d..c2482ae 100644 --- a/samples/python2/edge.py +++ b/samples/python2/edge.py @@ -3,14 +3,14 @@ import video import sys try: fn = sys.argv[1] -except: fn = video.presets['lena'] +except: fn = video.presets['chess'] def nothing(*arg): pass cv2.namedWindow('edge') -cv.CreateTrackbar('thrs1', 'edge', 2000, 5000, nothing) -cv.CreateTrackbar('thrs2', 'edge', 4000, 5000, nothing) +cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing) +cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing) cap = video.create_capture(fn) while True: diff --git a/samples/python2/video.py b/samples/python2/video.py index a87d977..12bc931 100644 --- a/samples/python2/video.py +++ b/samples/python2/video.py @@ -1,11 +1,10 @@ import numpy as np import cv2 from time import clock +from numpy import pi, sin, cos +import common -def lookat(cam_pos, target_pos, up = ()): - dp = cam_pos - target_pos - -class VideoSynth(object): +class VideoSynthBase(object): def __init__(self, size=None, noise=0.0, bg = None, **params): self.bg = None self.frame_size = (640, 480) @@ -17,19 +16,13 @@ class VideoSynth(object): if size is not None: w, h = map(int, size.split('x')) self.frame_size = (w, h) - self.bg = cv2.resize(bg, self.frame_size) + self.bg = cv2.resize(self.bg, self.frame_size) self.noise = float(noise) - w, h = 
self.frame_size - self.K = np.float64([[1.0/w, 0.0, 0.5*(w-1)], - [ 0.0, 1.0/w, 0.5*(h-1)], - [ 0.0, 0.0, 1.0]]) - - def draw_layers(self, dst): + def render(self, dst): pass - def read(self, dst=None): w, h = self.frame_size @@ -38,7 +31,7 @@ class VideoSynth(object): else: buf = self.bg.copy() - self.draw_layers(buf) + self.render(buf) if self.noise > 0.0: noise = np.zeros((h, w, 3), np.int8) @@ -46,6 +39,54 @@ class VideoSynth(object): buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3) return True, buf +class Chess(VideoSynthBase): + def __init__(self, **kw): + super(Chess, self).__init__(**kw) + + w, h = self.frame_size + + self.grid_size = sx, sy = 10, 7 + white_quads = [] + black_quads = [] + for i, j in np.ndindex(sy, sx): + q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]] + [white_quads, black_quads][(i + j) % 2].append(q) + self.white_quads = np.float32(white_quads) + self.black_quads = np.float32(black_quads) + + fx = 0.9 + self.K = np.float64([[fx*w, 0, 0.5*(w-1)], + [0, fx*w, 0.5*(h-1)], + [0.0,0.0, 1.0]]) + + self.dist_coef = np.float64([-0.2, 0.1, 0, 0]) + + def draw_quads(self, img, quads, color = (0, 255, 0)): + img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0] + img_quads.shape = quads.shape[:2] + (2,) + for q in img_quads: + cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2) + + def render(self, dst): + t = clock() + + sx, sy = self.grid_size + center = np.array([0.5*sx, 0.5*sy, 0.0]) + phi = pi/3 + sin(t*3)*pi/8 + c, s = cos(phi), sin(phi) + ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2 + eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs + target_pos = center + ofs + + R, self.tvec = common.lookat(eye_pos, target_pos) + self.rvec = common.mtx2rvec(R) + + self.draw_quads(dst, self.white_quads, (245, 245, 245)) + self.draw_quads(dst, self.black_quads, (10, 10, 10)) + + + +classes = dict(chess=Chess) def create_capture(source): ''' @@ -59,13 +100,17 @@ def create_capture(source): if source.startswith('synth'): ss = filter(None, source.split(':')) params = dict( s.split('=') for s in ss[1:] ) - return VideoSynth(**params) + try: Class = classes[params['class']] + except: Class = VideoSynthBase + + return Class(**params) return cv2.VideoCapture(source) presets = dict( empty = 'synth:', - lena = 'synth:bg=../cpp/lena.jpg:noise=0.1' + lena = 'synth:bg=../cpp/lena.jpg:noise=0.1', + chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480' ) if __name__ == '__main__': @@ -80,7 +125,7 @@ if __name__ == '__main__': args = dict(args) shotdir = args.get('--shotdir', '.') if len(sources) == 0: - sources = [ presets['lena'] ] + sources = [ presets['chess'] ] print 'Press SPACE to save current frame'
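
Note: the browse.py, coherence.py and edge.py hunks above all make the same mechanical substitution, replacing old-style cv.* calls with their cv2 counterparts. A minimal sketch of the converted calls side by side (the 'demo' window, trackbar name and dummy callback are placeholders; cv2.CV_AA is the antialiasing flag exposed by the cv2 module of this OpenCV generation):

import numpy as np
import cv2

def nothing(*arg):
    # dummy callback, same pattern the samples use
    pass

img = np.zeros((256, 256), np.uint8)
track = np.int32([[10, 10], [200, 60], [60, 200]])

# cv.CV_AA            -> cv2.CV_AA
cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA)

cv2.namedWindow('demo')
# cv.CreateTrackbar   -> cv2.createTrackbar
cv2.createTrackbar('thrs', 'demo', 128, 255, nothing)
# cv.SetMouseCallback -> cv2.setMouseCallback
cv2.setMouseCallback('demo', nothing, None)

cv2.imshow('demo', img)
cv2.waitKey()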
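
Note: the new color_histogram.py builds a hue/saturation legend image (hsv_map), accumulates a 2D hue-saturation histogram per frame with cv2.calcHist, and shades the legend by it. The same idea on a single still image, which may be easier to experiment with (the input file name is a placeholder; any BGR image works):

import numpy as np
import cv2, cv

# hue/saturation legend: every (h, s) cell rendered at full value
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:,:,0] = h
hsv_map[:,:,1] = s
hsv_map[:,:,2] = 255
hsv_map = cv2.cvtColor(hsv_map, cv.CV_HSV2BGR)

img = cv2.imread('../cpp/baboon.jpg')       # placeholder input image
hsv = cv2.cvtColor(img, cv.CV_BGR2HSV)
hsv[hsv[...,2] < 32] = 0                    # suppress nearly-black pixels

# 2D histogram over hue (0..180) and saturation (0..256)
hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
hist = np.clip(hist*0.05, 0, 1)
vis = hsv_map*hist[:,:,np.newaxis] / 255.0  # shade the legend by the histogram

cv2.imshow('hist', vis)
cv2.waitKey()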
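
Note: the Chess synth drives cv2.projectPoints with the pose returned by the reworked common.lookat and common.mtx2rvec. A small sketch that exercises that path outside the video loop and cross-checks mtx2rvec against cv2.Rodrigues (run it from samples/python2 so common.py is importable; the eye position, image size and focal factor mirror the values used above):

import numpy as np
import cv2
import common

# camera placed at an arbitrary eye point, looking at the origin
R, tvec = common.lookat([1, -4, 3], [0, 0, 0])
rvec = common.mtx2rvec(R)

# cv2.Rodrigues should recover (approximately) the same rotation matrix
R_back = cv2.Rodrigues(rvec)[0]
print 'max rotation error:', np.abs(R_back - R).max()

# project the corners of a unit cube with the recovered pose
w, h = 640, 480
fx = 0.9
K = np.float64([[fx*w, 0, 0.5*(w-1)],
                [0, fx*w, 0.5*(h-1)],
                [0, 0, 1]])
dist_coef = np.zeros(4)
cube = np.float32([(x, y, z) for x in (0, 1) for y in (0, 1) for z in (0, 1)])
img_pts = cv2.projectPoints(cube, rvec, tvec, K, dist_coef)[0]
print img_pts.reshape(-1, 2)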