'''\r
\r
import numpy as np\r
-import cv2, cv\r
+import cv2\r
import sys\r
\r
print 'This sample shows how to implement a simple hi resolution image navigation.'\r
img = np.zeros((sz, sz), np.uint8)\r
track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)\r
track = np.int32(track*10 + (sz/2, sz/2))\r
- cv2.polylines(img, [track], 0, 255, 1, cv.CV_AA)\r
+ cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA)\r
\r
small = img\r
for i in xrange(3):\r
cv2.imshow('zoom', zoom)\r
\r
cv2.imshow('preview', small)\r
-cv.SetMouseCallback('preview', onmouse, None)\r
+cv2.setMouseCallback('preview', onmouse, None)\r
cv2.waitKey()\r
import numpy as np\r
import cv2, cv\r
import os\r
+from common import splitfn\r
\r
USAGE = '''\r
USAGE: calib.py [--save <filename>] [--debug <output path>] [<image mask>] \r
'''\r
\r
-class Bunch:\r
- def __init__(self, **kwds):\r
- self.__dict__.update(kwds)\r
-\r
-\r
-def splitfn(fn):\r
- path, fn = os.path.split(fn)\r
- name, ext = os.path.splitext(fn)\r
- return path, name, ext\r
\r
\r
if __name__ == '__main__':\r
found, corners = cv2.findChessboardCorners(img, pattern_size)\r
if found:\r
term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )\r
- cv2.cornerSubPix(img, corners, (11, 11), (-1, -1), term)\r
+ cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)\r
if debug_dir:\r
vis = cv2.cvtColor(img, cv.CV_GRAY2BGR)\r
cv2.drawChessboardCorners(vis, pattern_size, corners, found)\r
'''\r
\r
import numpy as np\r
-import cv, cv2\r
+import cv2, cv\r
\r
\r
def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):\r
cv2.imshow('dst', dst)\r
\r
cv2.namedWindow('control', 0)\r
- cv.CreateTrackbar('sigma', 'control', 9, 15, nothing)\r
- cv.CreateTrackbar('blend', 'control', 7, 10, nothing)\r
- cv.CreateTrackbar('str_sigma', 'control', 9, 15, nothing)\r
+ cv2.createTrackbar('sigma', 'control', 9, 15, nothing)\r
+ cv2.createTrackbar('blend', 'control', 7, 10, nothing)\r
+ cv2.createTrackbar('str_sigma', 'control', 9, 15, nothing)\r
\r
\r
print 'Press SPACE to update the image\n'\r
--- /dev/null
+import numpy as np
+import cv2, cv
+from time import clock
+import sys
+
+import video
+
+# Static 180x256 lookup image mapping every (hue, saturation) pair to its
+# BGR colour at full value -- used below to colourize the 2D histogram.
+hsv_map = np.zeros((180, 256, 3), np.uint8)
+h, s = np.indices(hsv_map.shape[:2])
+hsv_map[:,:,0] = h
+hsv_map[:,:,1] = s
+hsv_map[:,:,2] = 255
+hsv_map = cv2.cvtColor(hsv_map, cv.CV_HSV2BGR)
+cv2.imshow('hsv_map', hsv_map)
+
+cv2.namedWindow('hist', 0)
+hist_scale = 10
+def set_scale(val):
+    # Trackbar callback: remember the brightness scale for the histogram view.
+    global hist_scale
+    hist_scale = val
+# Use the cv2-style trackbar API for consistency with the rest of this patch
+# (every other hunk replaces the legacy cv.CreateTrackbar with cv2.createTrackbar).
+cv2.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
+
+try: fn = sys.argv[1]
+except: fn = 'synth:bg=../cpp/baboon.jpg:class=chess:noise=0.05'
+cam = video.create_capture(fn)
+
+t = clock()
+while True:
+    flag, frame = cam.read()
+    cv2.imshow('camera', frame)
+    
+    # Work on a half-size image to keep the per-frame histogram update cheap.
+    small = cv2.pyrDown(frame)
+
+    hsv = cv2.cvtColor(small, cv.CV_BGR2HSV)
+    # Zero out nearly-black pixels (value < 32): their hue is noise.
+    dark = hsv[...,2] < 32
+    hsv[dark] = 0
+    # 2D hue/saturation histogram: 180 hue bins x 256 saturation bins.
+    h = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
+
+
+    # Clip scaled counts into [0, 1] and tint each (h, s) bin with its colour.
+    h = np.clip(h*0.005*hist_scale, 0, 1)
+    vis = hsv_map*h[:,:,np.newaxis] / 255.0
+    cv2.imshow('hist', vis)
+    
+
+    t1 = clock()
+    #print (t1-t)*1000
+    t = t1
+
+    ch = cv2.waitKey(1)
+    if ch == 27:
+        break
import numpy as np\r
import cv2\r
+import os\r
\r
-def to_list(a):\r
- return [tuple(p) for p in a]\r
+def splitfn(fn):
+    '''Split a file name into (directory path, base name, extension).'''
+    path, fn = os.path.split(fn)
+    name, ext = os.path.splitext(fn)
+    return path, name, ext
\r
def anorm2(a):\r
return (a*a).sum(-1)\r
right = np.cross(fwd, up)\r
right /= anorm(right)\r
down = np.cross(fwd, right)\r
- Rt = np.zeros((3, 4))\r
- Rt[:,:3] = [right, down, fwd]\r
- Rt[:,3] = -np.dot(Rt[:,:3], eye)\r
- return Rt\r
+ R = np.float64([right, down, fwd])\r
+ tvec = -np.dot(R, eye)\r
+ return R, tvec\r
\r
def mtx2rvec(R):\r
- pass\r
- \r
-\r
-if __name__ == '__main__':\r
- import cv2\r
- from time import clock\r
-\r
- '''\r
- w, h = 640, 480\r
- while True:\r
- img = np.zeros((h, w, 3), np.uint8)\r
- t = clock()\r
- eye = [5*cos(t), 5*sin(t), 3]\r
- Rt = lookat(eye, [0, 0, 0])\r
- '''\r
-\r
-\r
-\r
- eye = [1, -4, 3]\r
- target = [0, 0, 0]\r
- Rt = lookat(eye, [0, 0, 0])\r
- print Rt\r
- p = [0, 0, 0]\r
- print cv2.transform(np.float64([[p]]), Rt)\r
-\r
- print cv2.SVDecomp(Rt[:,:3])\r
+ w, u, vt = cv2.SVDecomp(R - np.eye(3))\r
+ p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])\r
+ c = np.dot(vt[0], p)\r
+ s = np.dot(vt[1], p)\r
+ axis = np.cross(vt[0], vt[1])\r
+ return axis * np.arctan2(s, c)\r
\r
import sys\r
\r
try: fn = sys.argv[1]\r
-except: fn = video.presets['lena']\r
+except: fn = video.presets['chess']\r
\r
def nothing(*arg):\r
pass\r
\r
cv2.namedWindow('edge')\r
-cv.CreateTrackbar('thrs1', 'edge', 2000, 5000, nothing)\r
-cv.CreateTrackbar('thrs2', 'edge', 4000, 5000, nothing)\r
+cv2.createTrackbar('thrs1', 'edge', 2000, 5000, nothing)\r
+cv2.createTrackbar('thrs2', 'edge', 4000, 5000, nothing)\r
\r
cap = video.create_capture(fn)\r
while True:\r
import numpy as np\r
import cv2\r
from time import clock\r
+from numpy import pi, sin, cos\r
+import common\r
\r
-def lookat(cam_pos, target_pos, up = ()):\r
- dp = cam_pos - target_pos\r
-\r
-class VideoSynth(object):\r
+class VideoSynthBase(object):\r
def __init__(self, size=None, noise=0.0, bg = None, **params):\r
self.bg = None\r
self.frame_size = (640, 480)\r
if size is not None:\r
w, h = map(int, size.split('x'))\r
self.frame_size = (w, h)\r
- self.bg = cv2.resize(bg, self.frame_size)\r
+ self.bg = cv2.resize(self.bg, self.frame_size)\r
\r
self.noise = float(noise)\r
\r
- w, h = self.frame_size\r
- self.K = np.float64([[1.0/w, 0.0, 0.5*(w-1)], \r
- [ 0.0, 1.0/w, 0.5*(h-1)],\r
- [ 0.0, 0.0, 1.0]])\r
-\r
- def draw_layers(self, dst):\r
+ def render(self, dst):\r
pass\r
\r
-\r
def read(self, dst=None):\r
w, h = self.frame_size\r
\r
else:\r
buf = self.bg.copy()\r
\r
- self.draw_layers(buf)\r
+ self.render(buf)\r
\r
if self.noise > 0.0:\r
noise = np.zeros((h, w, 3), np.int8)\r
buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)\r
return True, buf\r
\r
+class Chess(VideoSynthBase):
+    '''Synthetic video source rendering an animated chessboard seen by a
+    virtual camera that orbits the board (frame size, background and noise
+    come from VideoSynthBase).'''
+    def __init__(self, **kw):
+        super(Chess, self).__init__(**kw)
+
+        w, h = self.frame_size
+
+        # Build the board in object space: one unit square per cell,
+        # split checkerboard-style into white and black quad lists.
+        self.grid_size = sx, sy = 10, 7
+        white_quads = []
+        black_quads = []
+        for i, j in np.ndindex(sy, sx):
+            q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
+            [white_quads, black_quads][(i + j) % 2].append(q)
+        self.white_quads = np.float32(white_quads)
+        self.black_quads = np.float32(black_quads)
+
+        # Pinhole camera matrix; focal length is a fraction of image width.
+        fx = 0.9
+        self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
+                             [0, fx*w, 0.5*(h-1)],
+                             [0.0,0.0, 1.0]])
+
+        # Lens distortion coefficients, presumably (k1, k2, p1, p2) as
+        # consumed by cv2.projectPoints -- TODO confirm ordering.
+        self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
+
+    def draw_quads(self, img, quads, color = (0, 255, 0)):
+        # Project the 3D quads through the current pose (self.rvec/self.tvec,
+        # set by render) and fill them. Points are scaled by 4 and drawn with
+        # shift=2 (2 fractional bits) for sub-pixel accurate, anti-aliased edges.
+        img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
+        img_quads.shape = quads.shape[:2] + (2,) 
+        for q in img_quads:
+            cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2)
+
+    def render(self, dst):
+        # Wall-clock time drives the animation, so playback speed is real-time.
+        t = clock()
+        
+        sx, sy = self.grid_size
+        center = np.array([0.5*sx, 0.5*sy, 0.0])
+        # Camera elevation oscillates around 60 degrees; the eye circles the
+        # board centre at radius 15 with a small wandering offset.
+        phi = pi/3 + sin(t*3)*pi/8
+        c, s = cos(phi), sin(phi)
+        ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
+        eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
+        target_pos = center + ofs
+
+        # Convert the look-at pose into the rvec/tvec form cv2.projectPoints needs.
+        R, self.tvec = common.lookat(eye_pos, target_pos)
+        self.rvec = common.mtx2rvec(R)
+
+        self.draw_quads(dst, self.white_quads, (245, 245, 245))
+        self.draw_quads(dst, self.black_quads, (10, 10, 10))
+\r
+\r
+\r
+classes = dict(chess=Chess)\r
\r
def create_capture(source):\r
'''\r
if source.startswith('synth'):\r
ss = filter(None, source.split(':'))\r
params = dict( s.split('=') for s in ss[1:] )\r
- return VideoSynth(**params)\r
+ try: Class = classes[params['class']]\r
+ except: Class = VideoSynthBase\r
+\r
+ return Class(**params)\r
return cv2.VideoCapture(source)\r
\r
\r
presets = dict(\r
empty = 'synth:',\r
- lena = 'synth:bg=../cpp/lena.jpg:noise=0.1'\r
+ lena = 'synth:bg=../cpp/lena.jpg:noise=0.1',\r
+ chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480'\r
)\r
\r
if __name__ == '__main__':\r
args = dict(args)\r
shotdir = args.get('--shotdir', '.')\r
if len(sources) == 0:\r
- sources = [ presets['lena'] ]\r
+ sources = [ presets['chess'] ]\r
\r
print 'Press SPACE to save current frame'\r
\r