2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
13 #include "vpx_mem/vpx_mem.h"
14 #include "vpx_ports/config.h"
18 #include "vp8/common/findnearmv.h"
// File-scope counters for motion-vector reference / mode selection
// statistics. NOTE(review): mutable file-scope state - presumably written
// from a single encoding thread only; confirm before threading this code.
21 static int mv_ref_ct [31] [4] [2];
22 static int mv_mode_cts [4] [2];
25 int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
27 // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
28 // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
29 // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
30 // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
31 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
34 static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
36 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
37 mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
38 * error_per_bit + 128) >> 8;
41 static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
43 /* Calculate sad error cost on full pixel basis. */
44 return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
45 mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
46 * error_per_bit + 128) >> 8;
49 void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
52 int search_site_count = 0;
55 // Generate offsets for 4 search sites per step.
57 x->ss[search_site_count].mv.col = 0;
58 x->ss[search_site_count].mv.row = 0;
59 x->ss[search_site_count].offset = 0;
65 // Compute offsets for search sites.
66 x->ss[search_site_count].mv.col = 0;
67 x->ss[search_site_count].mv.row = -Len;
68 x->ss[search_site_count].offset = -Len * stride;
71 // Compute offsets for search sites.
72 x->ss[search_site_count].mv.col = 0;
73 x->ss[search_site_count].mv.row = Len;
74 x->ss[search_site_count].offset = Len * stride;
77 // Compute offsets for search sites.
78 x->ss[search_site_count].mv.col = -Len;
79 x->ss[search_site_count].mv.row = 0;
80 x->ss[search_site_count].offset = -Len;
83 // Compute offsets for search sites.
84 x->ss[search_site_count].mv.col = Len;
85 x->ss[search_site_count].mv.row = 0;
86 x->ss[search_site_count].offset = Len;
93 x->ss_count = search_site_count;
94 x->searches_per_step = 4;
97 void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
100 int search_site_count = 0;
102 // Generate offsets for 8 search sites per step.
103 Len = MAX_FIRST_STEP;
104 x->ss[search_site_count].mv.col = 0;
105 x->ss[search_site_count].mv.row = 0;
106 x->ss[search_site_count].offset = 0;
112 // Compute offsets for search sites.
113 x->ss[search_site_count].mv.col = 0;
114 x->ss[search_site_count].mv.row = -Len;
115 x->ss[search_site_count].offset = -Len * stride;
118 // Compute offsets for search sites.
119 x->ss[search_site_count].mv.col = 0;
120 x->ss[search_site_count].mv.row = Len;
121 x->ss[search_site_count].offset = Len * stride;
124 // Compute offsets for search sites.
125 x->ss[search_site_count].mv.col = -Len;
126 x->ss[search_site_count].mv.row = 0;
127 x->ss[search_site_count].offset = -Len;
130 // Compute offsets for search sites.
131 x->ss[search_site_count].mv.col = Len;
132 x->ss[search_site_count].mv.row = 0;
133 x->ss[search_site_count].offset = Len;
136 // Compute offsets for search sites.
137 x->ss[search_site_count].mv.col = -Len;
138 x->ss[search_site_count].mv.row = -Len;
139 x->ss[search_site_count].offset = -Len * stride - Len;
142 // Compute offsets for search sites.
143 x->ss[search_site_count].mv.col = Len;
144 x->ss[search_site_count].mv.row = -Len;
145 x->ss[search_site_count].offset = -Len * stride + Len;
148 // Compute offsets for search sites.
149 x->ss[search_site_count].mv.col = -Len;
150 x->ss[search_site_count].mv.row = Len;
151 x->ss[search_site_count].offset = Len * stride - Len;
154 // Compute offsets for search sites.
155 x->ss[search_site_count].mv.col = Len;
156 x->ss[search_site_count].mv.row = Len;
157 x->ss[search_site_count].offset = Len * stride + Len;
165 x->ss_count = search_site_count;
166 x->searches_per_step = 8;
/*
 * To avoid the penalty for crossing cache-line read, preload the reference
 * area in a small buffer, which is aligned to make sure there won't be crossing
 * cache-line read while reading from this buffer. This reduced the cpu
 * cycles spent on reading ref data in sub-pixel filter functions.
 * TODO: Currently, since sub-pixel search range here is -3 ~ 3, copy 22 rows x
 * 32 cols area that is enough for 16x16 macroblock. Later, for SPLITMV, we
 * could reduce the area.
 */
/* Helper macros for the iterative sub-pixel search below. They expand
 * against locals of the enclosing function (mvcost, rr, rc, error_per_bit,
 * y, y_stride, offset, z, b, vfp, sse, thismse, besterr, br, bc and the
 * minc/maxc/minr/maxr clamp bounds). NOTE: MVC/DIST/MIN/MAX evaluate their
 * arguments more than once, so call sites must pass side-effect-free
 * expressions. */
178 #define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
179 #define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
180 #define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
181 #define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
182 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
183 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
184 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
185 #define MIN(x,y) (((x)<(y))?(x):(y))
186 #define MAX(x,y) (((x)>(y))?(x):(y))
// Iterative sub-pixel refinement: starting from the full-pel best MV,
// probes the four compass points (and one diagonal, chosen from which of
// left/right and up/down scored lower) via CHECK_BETTER, first at half-pel
// then at quarter-pel granularity. Updates *bestmv, *distortion, *sse1 and
// returns the best rate+distortion score (through CHECK_BETTER's besterr).
188 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
189 int_mv *bestmv, int_mv *ref_mv,
191 const vp8_variance_fn_ptr_t *vfp,
192 int *mvcost[2], int *distortion,
195 unsigned char *z = (*(b->base_src) + b->src);
// rr/rc: reference MV in 1/4-pel units; br/bc: running best in 1/4-pel units.
197 int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
198 int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
199 int tr = br, tc = bc;
200 unsigned int besterr = INT_MAX;
201 unsigned int left, right, up, down, diag;
203 unsigned int whichdir;
204 unsigned int halfiters = 4;
205 unsigned int quarteriters = 4;
// Clamp the search window to both the picture MV limits and the largest
// representable coded MV magnitude ((1 << mvlong_width) - 1).
208 int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
209 int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
210 int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
211 int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
// On x86, pre-copy the reference neighbourhood into an aligned scratch
// buffer so the sub-pixel filters avoid cache-line-crossing reads.
216 #if ARCH_X86 || ARCH_X86_64
217 MACROBLOCKD *xd = &x->e_mbd;
218 unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
220 int buf_r1, buf_r2, buf_c1, buf_c2;
222 // Clamping to avoid out-of-range data access
223 buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
224 buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
225 buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
226 buf_c2 = ((bestmv->as_mv.col + 3) > x->mv_col_max)?(x->mv_col_max - bestmv->as_mv.col):3;
229 /* Copy to intermediate buffer before searching. */
230 vfp->copymem(y0 - buf_c1 - d->pre_stride*buf_r1, d->pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
231 y = xd->y_buf + y_stride*buf_r1 +buf_c1;
233 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
234 y_stride = d->pre_stride;
237 offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
// Convert bestmv to the 1/8th-pel units expected by mv_err_cost().
240 bestmv->as_mv.row <<= 3;
241 bestmv->as_mv.col <<= 3;
243 // calculate central point error
244 besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
245 *distortion = besterr;
246 besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// Half-pel pass: step of 2 in 1/4-pel coordinates.
248 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
252 CHECK_BETTER(left, tr, tc - 2);
253 CHECK_BETTER(right, tr, tc + 2);
254 CHECK_BETTER(up, tr - 2, tc);
255 CHECK_BETTER(down, tr + 2, tc);
// Pick the diagonal in the quadrant of the two cheaper directions.
257 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
262 CHECK_BETTER(diag, tr - 2, tc - 2);
265 CHECK_BETTER(diag, tr - 2, tc + 2);
268 CHECK_BETTER(diag, tr + 2, tc - 2);
271 CHECK_BETTER(diag, tr + 2, tc + 2);
275 // no reason to check the same one again.
276 if (tr == br && tc == bc)
// Quarter-pel pass: same pattern with a step of 1.
283 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
285 while (--quarteriters)
287 CHECK_BETTER(left, tr, tc - 1);
288 CHECK_BETTER(right, tr, tc + 1);
289 CHECK_BETTER(up, tr - 1, tc);
290 CHECK_BETTER(down, tr + 1, tc);
292 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
297 CHECK_BETTER(diag, tr - 1, tc - 1);
300 CHECK_BETTER(diag, tr - 1, tc + 1);
303 CHECK_BETTER(diag, tr + 1, tc - 1);
306 CHECK_BETTER(diag, tr + 1, tc + 1);
310 // no reason to check the same one again.
311 if (tr == br && tc == bc)
// br/bc are in 1/4-pel units; << 1 converts back to 1/8th-pel for storage.
318 bestmv->as_mv.row = br << 1;
319 bestmv->as_mv.col = bc << 1;
// Reject results that moved further than MAX_FULL_PEL_VAL from the reference.
321 if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
322 (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
// Exhaustive sub-pixel step: evaluates left/right/up/down and one diagonal
// at half-pel, then repeats the pattern at quarter-pel around the half-pel
// winner, using the svf/svf_halfpix_* variance functions. Updates *bestmv
// and *distortion; returns the best mse+rate score (bestmse).
336 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
337 int_mv *bestmv, int_mv *ref_mv,
339 const vp8_variance_fn_ptr_t *vfp,
340 int *mvcost[2], int *distortion,
343 int bestmse = INT_MAX;
346 unsigned char *z = (*(b->base_src) + b->src);
347 int left, right, up, down, diag;
// x86: copy the reference neighbourhood to an aligned buffer first.
353 #if ARCH_X86 || ARCH_X86_64
354 MACROBLOCKD *xd = &x->e_mbd;
355 unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
359 /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
360 vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
361 y = xd->y_buf + y_stride + 1;
363 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
364 y_stride = d->pre_stride;
// Convert bestmv to 1/8th-pel units for mv_err_cost().
368 bestmv->as_mv.row <<= 3;
369 bestmv->as_mv.col <<= 3;
372 // calculate central point error
373 bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
374 *distortion = bestmse;
375 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// --- Half-pel pass ---
377 // go left then right and check error
378 this_mv.as_mv.row = startmv.as_mv.row;
379 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
380 thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
381 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
387 *distortion = thismse;
391 this_mv.as_mv.col += 8;
392 thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
393 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
399 *distortion = thismse;
403 // go up then down and check error
404 this_mv.as_mv.col = startmv.as_mv.col;
405 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
406 thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
407 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
413 *distortion = thismse;
417 this_mv.as_mv.row += 8;
418 thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
419 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
425 *distortion = thismse;
// Pick the diagonal in the quadrant of the two cheaper directions.
430 // now check 1 more diagonal
431 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
432 //for(whichdir =0;whichdir<4;whichdir++)
439 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
440 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
441 thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
444 this_mv.as_mv.col += 4;
445 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
446 thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
449 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
450 this_mv.as_mv.row += 4;
451 thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
455 this_mv.as_mv.col += 4;
456 this_mv.as_mv.row += 4;
457 thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
461 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
467 *distortion = thismse;
// --- Quarter-pel pass around the half-pel winner ---
474 // time to check quarter pels.
475 if (bestmv->as_mv.row < startmv.as_mv.row)
478 if (bestmv->as_mv.col < startmv.as_mv.col)
485 // go left then right and check error
486 this_mv.as_mv.row = startmv.as_mv.row;
// The (col & 7) test distinguishes whether the half-pel winner sits on a
// sub-pel position; when it does not, stepping left wraps to the previous
// full pel (the "- 8 | 6" form) and the predictor pointer shifts by one.
488 if (startmv.as_mv.col & 7)
490 this_mv.as_mv.col = startmv.as_mv.col - 2;
491 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
495 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
496 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
499 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
505 *distortion = thismse;
509 this_mv.as_mv.col += 4;
510 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
511 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
517 *distortion = thismse;
521 // go up then down and check error
522 this_mv.as_mv.col = startmv.as_mv.col;
524 if (startmv.as_mv.row & 7)
526 this_mv.as_mv.row = startmv.as_mv.row - 2;
527 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
531 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
532 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
535 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
541 *distortion = thismse;
545 this_mv.as_mv.row += 4;
546 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
547 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
553 *distortion = thismse;
558 // now check 1 more diagonal
559 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
561 // for(whichdir=0;whichdir<4;whichdir++)
// Quarter-pel diagonal: each quadrant again splits on whether the start
// position is on a sub-pel boundary in row/col.
569 if (startmv.as_mv.row & 7)
571 this_mv.as_mv.row -= 2;
573 if (startmv.as_mv.col & 7)
575 this_mv.as_mv.col -= 2;
576 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
580 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
581 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
586 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
588 if (startmv.as_mv.col & 7)
590 this_mv.as_mv.col -= 2;
591 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
595 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
596 thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, &sse);
602 this_mv.as_mv.col += 2;
604 if (startmv.as_mv.row & 7)
606 this_mv.as_mv.row -= 2;
607 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
611 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
612 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
617 this_mv.as_mv.row += 2;
619 if (startmv.as_mv.col & 7)
621 this_mv.as_mv.col -= 2;
622 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
626 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
627 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
632 this_mv.as_mv.col += 2;
633 this_mv.as_mv.row += 2;
634 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
638 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
644 *distortion = thismse;
// Half-pel-only refinement: evaluates left/right/up/down and the diagonals
// around the full-pel best using the svf_halfpix_* variance functions.
// Updates *bestmv and *distortion; returns the best mse+rate (bestmse).
651 int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
652 int_mv *bestmv, int_mv *ref_mv,
654 const vp8_variance_fn_ptr_t *vfp,
655 int *mvcost[2], int *distortion,
658 int bestmse = INT_MAX;
661 unsigned char *z = (*(b->base_src) + b->src);
662 int left, right, up, down, diag;
// x86: copy the reference neighbourhood to an aligned buffer first.
667 #if ARCH_X86 || ARCH_X86_64
668 MACROBLOCKD *xd = &x->e_mbd;
669 unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
673 /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
674 vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
675 y = xd->y_buf + y_stride + 1;
677 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
678 y_stride = d->pre_stride;
// Convert bestmv to 1/8th-pel units for mv_err_cost().
682 bestmv->as_mv.row <<= 3;
683 bestmv->as_mv.col <<= 3;
686 // calculate central point error
687 bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
688 *distortion = bestmse;
689 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
691 // go left then right and check error
692 this_mv.as_mv.row = startmv.as_mv.row;
693 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
694 thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
695 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
701 *distortion = thismse;
705 this_mv.as_mv.col += 8;
706 thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
707 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
713 *distortion = thismse;
717 // go up then down and check error
718 this_mv.as_mv.col = startmv.as_mv.col;
719 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
720 thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
721 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
727 *distortion = thismse;
731 this_mv.as_mv.row += 8;
732 thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
733 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
739 *distortion = thismse;
743 // somewhat strangely not doing all the diagonals for half pel is slower than doing them.
745 // now check 1 more diagonal -
746 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
// NOTE(review): the following this_mv.col/.row accesses (no .as_mv) differ
// from the rest of the function - presumably part of an alternative
// (disabled) code path in the original file; confirm against upstream.
752 this_mv.col = (this_mv.col - 8) | 4;
753 this_mv.row = (this_mv.row - 8) | 4;
754 diag = vfp->svf(y - 1 - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
758 this_mv.row = (this_mv.row - 8) | 4;
759 diag = vfp->svf(y - y_stride, y_stride, 4, 4, z, b->src_stride, &sse);
762 this_mv.col = (this_mv.col - 8) | 4;
764 diag = vfp->svf(y - 1, y_stride, 4, 4, z, b->src_stride, &sse);
769 diag = vfp->svf(y, y_stride, 4, 4, z, b->src_stride, &sse);
773 diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// All four half-pel diagonals, each scored with mv_err_cost.
782 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
783 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
784 thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
785 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
791 *distortion = thismse;
795 this_mv.as_mv.col += 8;
796 thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
797 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
803 *distortion = thismse;
807 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
808 this_mv.as_mv.row = startmv.as_mv.row + 4;
809 thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
810 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
816 *distortion = thismse;
820 this_mv.as_mv.col += 8;
821 thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
822 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
828 *distortion = thismse;
/* Macros shared by the SAD-based searches below. CHECK_BOUNDS sets all_in
 * when every point within +/-range of (br,bc) lies inside the MV limits,
 * letting the caller skip per-point clamping. CHECK_POINT rejects a single
 * out-of-range candidate. CHECK_BETTER folds the MV rate (mvsad_err_cost)
 * into thissad only when the raw SAD already beats bestsad, then re-tests.
 * All of them expand against locals of the enclosing search function
 * (x, br, bc, this_mv, fcenter_mv, mvsadcost, sad_per_bit, thissad,
 * bestsad). Comments are kept outside the definitions because these lines
 * are backslash continuations. */
836 #define CHECK_BOUNDS(range) \
839 all_in &= ((br-range) >= x->mv_row_min);\
840 all_in &= ((br+range) <= x->mv_row_max);\
841 all_in &= ((bc-range) >= x->mv_col_min);\
842 all_in &= ((bc+range) <= x->mv_col_max);\
845 #define CHECK_POINT \
847 if (this_mv.as_mv.col < x->mv_col_min) continue;\
848 if (this_mv.as_mv.col > x->mv_col_max) continue;\
849 if (this_mv.as_mv.row < x->mv_row_min) continue;\
850 if (this_mv.as_mv.row > x->mv_row_max) continue;\
853 #define CHECK_BETTER \
855 if (thissad < bestsad)\
857 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
858 if (thissad < bestsad)\
// For the hex search: given the direction k of the previous best move,
// next_chkpts[k] lists the 3 new hexagon vertices that were not already
// evaluated in the previous iteration (the other 3 overlap the old hexagon).
866 static const MV next_chkpts[6][3] =
868 {{ -2, 0}, { -1, -2}, {1, -2}},
869 {{ -1, -2}, {1, -2}, {2, 0}},
870 {{1, -2}, {2, 0}, {1, 2}},
871 {{2, 0}, {1, 2}, { -1, 2}},
872 {{1, 2}, { -1, 2}, { -2, 0}},
873 {{ -1, 2}, { -2, 0}, { -1, -2}}
/* Hexagon-based full-pel SAD search (vp8_hex_search; the signature line is
 * not visible in this chunk). Phase 1 probes the 6 hexagon vertices around
 * the clamped starting MV; each subsequent iteration re-centres on the best
 * vertex and, via next_chkpts, probes only the 3 genuinely new vertices.
 * Once the hexagon stops moving, phase 2 polishes with the 4 one-away
 * neighbors. The winner is written to best_mv in full-pel units. */
885 const vp8_variance_fn_ptr_t *vfp,
891 MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
892 MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ;
895 unsigned char *what = (*(b->base_src) + b->src);
896 int what_stride = b->src_stride;
897 int in_what_stride = d->pre_stride;
900 unsigned int bestsad = 0x7fffffff;
901 unsigned int thissad;
902 unsigned char *base_offset;
903 unsigned char *this_offset;
// Full-pel centre used for MV rate costing (center_mv is in 1/8th-pel).
909 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
910 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
912 // adjust ref_mv to make sure it is within MV range
913 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
914 br = ref_mv->as_mv.row;
915 bc = ref_mv->as_mv.col;
917 // Work out the start point for the search
918 base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
919 this_offset = base_offset + (br * (d->pre_stride)) + bc;
920 this_mv.as_mv.row = br;
921 this_mv.as_mv.col = bc;
922 bestsad = vfp->sdf( what, what_stride, this_offset,
923 in_what_stride, 0x7fffffff)
924 + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// First iteration: evaluate all 6 hexagon vertices (two code paths,
// presumably the bounds-safe and per-point-checked variants).
932 for (i = 0; i < 6; i++)
934 this_mv.as_mv.row = br + hex[i].row;
935 this_mv.as_mv.col = bc + hex[i].col;
936 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
937 thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
942 for (i = 0; i < 6; i++)
944 this_mv.as_mv.row = br + hex[i].row;
945 this_mv.as_mv.col = bc + hex[i].col;
947 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
948 thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
957 br += hex[best_site].row;
958 bc += hex[best_site].col;
// Subsequent iterations: only the 3 new points, up to 126 more moves.
962 for (j = 1; j < 127; j++)
969 for (i = 0; i < 3; i++)
971 this_mv.as_mv.row = br + next_chkpts[k][i].row;
972 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
973 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
974 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
979 for (i = 0; i < 3; i++)
981 this_mv.as_mv.row = br + next_chkpts[k][i].row;
982 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
984 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
985 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
994 br += next_chkpts[k][best_site].row;
995 bc += next_chkpts[k][best_site].col;
// Keep the direction index k within [0, 6).
997 if (k >= 12) k -= 12;
998 else if (k >= 6) k -= 6;
1002 // check 4 1-away neighbors
1004 for (j = 0; j < 32; j++)
1011 for (i = 0; i < 4; i++)
1013 this_mv.as_mv.row = br + neighbors[i].row;
1014 this_mv.as_mv.col = bc + neighbors[i].col;
1015 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
1016 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
1021 for (i = 0; i < 4; i++)
1023 this_mv.as_mv.row = br + neighbors[i].row;
1024 this_mv.as_mv.col = bc + neighbors[i].col;
1026 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
1027 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
// No neighbor improved: search has converged.
1032 if (best_site == -1)
1036 br += neighbors[best_site].row;
1037 bc += neighbors[best_site].col;
1041 best_mv->as_mv.row = br;
1042 best_mv->as_mv.col = bc;
// Diamond (step) full-pel SAD search driven by the precomputed search-site
// table x->ss (see vp8_init_dsmotion_compensation / init3smotion above).
// search_param skips the first steps of the pattern; each step tries
// searches_per_step sites around the current best, re-centres on the
// winner, and falls through to the next (smaller) step. Returns the
// variance+MV-rate score of the final position.
1050 int vp8_diamond_search_sad
1060 vp8_variance_fn_ptr_t *fn_ptr,
1067 unsigned char *what = (*(b->base_src) + b->src);
1068 int what_stride = b->src_stride;
1069 unsigned char *in_what;
1070 int in_what_stride = d->pre_stride;
1071 unsigned char *best_address;
1076 int bestsad = INT_MAX;
1082 int this_row_offset;
1083 int this_col_offset;
1086 unsigned char *check_here;
1089 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// Full-pel centre for MV rate costing (center_mv is in 1/8th-pel).
1091 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1092 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1094 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
1095 ref_row = ref_mv->as_mv.row;
1096 ref_col = ref_mv->as_mv.col;
1098 best_mv->as_mv.row = ref_row;
1099 best_mv->as_mv.col = ref_col;
1101 // Work out the start point for the search
1102 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1103 best_address = in_what;
1105 // Check the starting position
1106 bestsad = fn_ptr->sdf(what, what_stride, in_what,
1107 in_what_stride, 0x7fffffff)
1108 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1110 // search_param determines the length of the initial step and hence the number of iterations
1111 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1112 ss = &x->ss[search_param * x->searches_per_step];
1113 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1117 for (step = 0; step < tot_steps ; step++)
1119 for (j = 0 ; j < x->searches_per_step ; j++)
1121 // Trap illegal vectors
1122 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
1123 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
1125 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1126 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1129 check_here = ss[i].offset + best_address;
1130 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
// Add the MV rate only when the raw SAD already wins, then re-test.
1132 if (thissad < bestsad)
1134 this_mv.as_mv.row = this_row_offset;
1135 this_mv.as_mv.col = this_col_offset;
1136 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1137 mvsadcost, sad_per_bit);
1139 if (thissad < bestsad)
// Re-centre on this step's winner; pointer and MV move together.
1150 if (best_site != last_site)
1152 best_mv->as_mv.row += ss[best_site].mv.row;
1153 best_mv->as_mv.col += ss[best_site].mv.col;
1154 best_address += ss[best_site].offset;
1155 last_site = best_site;
1157 else if (best_address == in_what)
// Result reported in 1/8th-pel units; final score is variance + MV rate.
1161 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1162 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1164 if (bestsad == INT_MAX)
1167 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1168 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// SIMD-friendly variant of vp8_diamond_search_sad: when the whole step's
// neighborhood is inside the MV limits (the four all_in checks), sites are
// evaluated four at a time through fn_ptr->sdx4df; otherwise it falls back
// to the scalar per-site loop with individual bounds checks.
1171 int vp8_diamond_search_sadx4
1181 vp8_variance_fn_ptr_t *fn_ptr,
1188 unsigned char *what = (*(b->base_src) + b->src);
1189 int what_stride = b->src_stride;
1190 unsigned char *in_what;
1191 int in_what_stride = d->pre_stride;
1192 unsigned char *best_address;
1197 int bestsad = INT_MAX;
1203 int this_row_offset;
1204 int this_col_offset;
1207 unsigned char *check_here;
1208 unsigned int thissad;
1210 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// Full-pel centre for MV rate costing (center_mv is in 1/8th-pel).
1212 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1213 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1215 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
1216 ref_row = ref_mv->as_mv.row;
1217 ref_col = ref_mv->as_mv.col;
1219 best_mv->as_mv.row = ref_row;
1220 best_mv->as_mv.col = ref_col;
1222 // Work out the start point for the search
1223 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1224 best_address = in_what;
1226 // Check the starting position
1227 bestsad = fn_ptr->sdf(what, what_stride,
1228 in_what, in_what_stride, 0x7fffffff)
1229 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1231 // search_param determines the length of the initial step and hence the number of iterations
1232 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1233 ss = &x->ss[search_param * x->searches_per_step];
1234 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1238 for (step = 0; step < tot_steps ; step++)
1242 // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
1243 // checking 4 bounds for each points.
1245 all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
1245 all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
1246 all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
1247 all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);
// Fast path: 4 SADs at once via sdx4df.
1251 unsigned int sad_array[4];
1253 for (j = 0 ; j < x->searches_per_step ; j += 4)
1255 unsigned char *block_offset[4];
1257 for (t = 0; t < 4; t++)
1258 block_offset[t] = ss[i+t].offset + best_address;
1260 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1262 for (t = 0; t < 4; t++, i++)
1264 if (sad_array[t] < bestsad)
1266 this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
1267 this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
1268 sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
1269 mvsadcost, sad_per_bit);
1271 if (sad_array[t] < bestsad)
1273 bestsad = sad_array[t];
// Slow path: scalar per-site evaluation with bounds checks.
1282 for (j = 0 ; j < x->searches_per_step ; j++)
1284 // Trap illegal vectors
1285 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
1286 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
1288 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1289 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1291 check_here = ss[i].offset + best_address;
1292 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1294 if (thissad < bestsad)
1296 this_mv.as_mv.row = this_row_offset;
1297 this_mv.as_mv.col = this_col_offset;
1298 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1299 mvsadcost, sad_per_bit);
1301 if (thissad < bestsad)
// Re-centre on this step's winner; pointer and MV move together.
1312 if (best_site != last_site)
1314 best_mv->as_mv.row += ss[best_site].mv.row;
1315 best_mv->as_mv.col += ss[best_site].mv.col;
1316 best_address += ss[best_site].offset;
1317 last_site = best_site;
1319 else if (best_address == in_what)
// Result reported in 1/8th-pel units; final score is variance + MV rate.
1323 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1324 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1326 if (bestsad == INT_MAX)
1329 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1330 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Exhaustive full-pel SAD search over a (2*distance)^2 window around
// ref_mv, clamped to the MV limits. Writes the winner into d->bmi.mv and
// returns the variance+MV-rate score of the best position.
1333 int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1334 int sad_per_bit, int distance,
1335 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1338 unsigned char *what = (*(b->base_src) + b->src);
1339 int what_stride = b->src_stride;
1340 unsigned char *in_what;
1341 int in_what_stride = d->pre_stride;
1342 int mv_stride = d->pre_stride;
1343 unsigned char *bestaddress;
1344 int_mv *best_mv = &d->bmi.mv;
1346 int bestsad = INT_MAX;
1349 unsigned char *check_here;
1352 int ref_row = ref_mv->as_mv.row;
1353 int ref_col = ref_mv->as_mv.col;
// Raw window bounds before clamping to the picture's MV limits below.
1355 int row_min = ref_row - distance;
1356 int row_max = ref_row + distance;
1357 int col_min = ref_col - distance;
1358 int col_max = ref_col + distance;
1360 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// Full-pel centre for MV rate costing (center_mv is in 1/8th-pel).
1362 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1363 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1365 // Work out the mid point for the search
1366 in_what = *(d->base_pre) + d->pre;
1367 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1369 best_mv->as_mv.row = ref_row;
1370 best_mv->as_mv.col = ref_col;
1372 // Baseline value at the centre
1373 bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
1374 in_what_stride, 0x7fffffff)
1375 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1377 // Apply further limits to prevent us looking using vectors that stretch beyond the UMV border
1378 if (col_min < x->mv_col_min)
1379 col_min = x->mv_col_min;
1381 if (col_max > x->mv_col_max)
1382 col_max = x->mv_col_max;
1384 if (row_min < x->mv_row_min)
1385 row_min = x->mv_row_min;
1387 if (row_max > x->mv_row_max)
1388 row_max = x->mv_row_max;
// Raster scan of the clamped window; check_here walks the reference row.
1390 for (r = row_min; r < row_max ; r++)
1392 this_mv.as_mv.row = r;
1393 check_here = r * mv_stride + in_what + col_min;
1395 for (c = col_min; c < col_max; c++)
1397 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1399 this_mv.as_mv.col = c;
1400 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1401 mvsadcost, sad_per_bit);
1403 if (thissad < bestsad)
1406 best_mv->as_mv.row = r;
1407 best_mv->as_mv.col = c;
1408 bestaddress = check_here;
// Result reported in 1/8th-pel units; final score is variance + MV rate.
1415 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1416 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1418 if (bestsad < INT_MAX)
1419 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1420 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Exhaustive full-pel motion search, SIMD-assisted variant: identical
// search to vp8_full_search_sad, but each row is processed three
// candidates at a time through fn_ptr->sdx3f, with a scalar sdf loop for
// the leftover columns. Candidates are first screened on raw SAD and only
// then charged the mv cost (two-stage 'thissad < bestsad' test).
// NOTE(review): embedded line numbers jump (e.g. 1511 -> 1522), so some
// statements/braces (loop-control updates, 'bestsad = thissad;', the 'c'
// initialisation) are missing from this copy.
1425 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1426 int sad_per_bit, int distance,
1427 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1430 unsigned char *what = (*(b->base_src) + b->src);
1431 int what_stride = b->src_stride;
1432 unsigned char *in_what;
1433 int in_what_stride = d->pre_stride;
1434 int mv_stride = d->pre_stride;
1435 unsigned char *bestaddress;
1436 int_mv *best_mv = &d->bmi.mv;
1438 int bestsad = INT_MAX;
1441 unsigned char *check_here;
1442 unsigned int thissad;
1444 int ref_row = ref_mv->as_mv.row;
1445 int ref_col = ref_mv->as_mv.col;
1447 int row_min = ref_row - distance;
1448 int row_max = ref_row + distance;
1449 int col_min = ref_col - distance;
1450 int col_max = ref_col + distance;
// Receives 3 SADs per sdx3f call.
1452 unsigned int sad_array[3];
1454 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is 1/8-pel; reduce to full-pel for SAD costing.
1456 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1457 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1459 // Work out the mid point for the search
1460 in_what = *(d->base_pre) + d->pre;
1461 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1463 best_mv->as_mv.row = ref_row;
1464 best_mv->as_mv.col = ref_col;
1466 // Baseline value at the centre
1467 bestsad = fn_ptr->sdf(what, what_stride,
1468 bestaddress, in_what_stride, 0x7fffffff)
1469 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1471 // Clamp the search window so no candidate vector stretches beyond the UMV border
1472 if (col_min < x->mv_col_min)
1473 col_min = x->mv_col_min;
1475 if (col_max > x->mv_col_max)
1476 col_max = x->mv_col_max;
1478 if (row_min < x->mv_row_min)
1479 row_min = x->mv_row_min;
1481 if (row_max > x->mv_row_max)
1482 row_max = x->mv_row_max;
1484 for (r = row_min; r < row_max ; r++)
1486 this_mv.as_mv.row = r;
1487 check_here = r * mv_stride + in_what + col_min;
// 3-wide main loop ('c' initialisation is among the missing lines).
1490 while ((c + 2) < col_max)
1494 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1496 for (i = 0; i < 3; i++)
1498 thissad = sad_array[i];
// Cheap pre-check on raw SAD before paying for the mv cost lookup.
1500 if (thissad < bestsad)
1502 this_mv.as_mv.col = c;
1503 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1504 mvsadcost, sad_per_bit);
1506 if (thissad < bestsad)
1509 best_mv->as_mv.row = r;
1510 best_mv->as_mv.col = c;
1511 bestaddress = check_here;
// Scalar fall-back for the remaining (col_max - c) columns.
1522 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1524 if (thissad < bestsad)
1526 this_mv.as_mv.col = c;
1527 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1528 mvsadcost, sad_per_bit);
1530 if (thissad < bestsad)
1533 best_mv->as_mv.row = r;
1534 best_mv->as_mv.col = c;
1535 bestaddress = check_here;
// Winning full-pel vector back to 1/8-pel units.
1545 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1546 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1548 if (bestsad < INT_MAX)
1549 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1550 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Exhaustive full-pel motion search, widest SIMD variant: per row it
// consumes candidates 8 at a time (fn_ptr->sdx8f, 16-bit results in an
// aligned array), then 3 at a time (sdx3f), then one at a time (sdf).
// Scoring and the two-stage threshold test match vp8_full_search_sadx3.
// NOTE(review): embedded line numbers jump (e.g. 1642 -> 1651), so loop
// control updates, 'bestsad = thissad;' and closing braces are missing
// from this copy; comments describe only what is visible.
1555 int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1556 int sad_per_bit, int distance,
1557 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1560 unsigned char *what = (*(b->base_src) + b->src);
1561 int what_stride = b->src_stride;
1562 unsigned char *in_what;
1563 int in_what_stride = d->pre_stride;
1564 int mv_stride = d->pre_stride;
1565 unsigned char *bestaddress;
1566 int_mv *best_mv = &d->bmi.mv;
1568 int bestsad = INT_MAX;
1571 unsigned char *check_here;
1572 unsigned int thissad;
1574 int ref_row = ref_mv->as_mv.row;
1575 int ref_col = ref_mv->as_mv.col;
1577 int row_min = ref_row - distance;
1578 int row_max = ref_row + distance;
1579 int col_min = ref_col - distance;
1580 int col_max = ref_col + distance;
// 16-byte aligned 16-bit results for the 8-wide SAD kernel; separate
// 32-bit array for the 3-wide kernel.
1582 DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
1583 unsigned int sad_array[3];
1585 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is 1/8-pel; reduce to full-pel for SAD costing.
1587 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1588 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1590 // Work out the mid point for the search
1591 in_what = *(d->base_pre) + d->pre;
1592 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1594 best_mv->as_mv.row = ref_row;
1595 best_mv->as_mv.col = ref_col;
1597 // Baseline value at the centre
1598 bestsad = fn_ptr->sdf(what, what_stride,
1599 bestaddress, in_what_stride, 0x7fffffff)
1600 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1602 // Clamp the search window so no candidate vector stretches beyond the UMV border
1603 if (col_min < x->mv_col_min)
1604 col_min = x->mv_col_min;
1606 if (col_max > x->mv_col_max)
1607 col_max = x->mv_col_max;
1609 if (row_min < x->mv_row_min)
1610 row_min = x->mv_row_min;
1612 if (row_max > x->mv_row_max)
1613 row_max = x->mv_row_max;
1615 for (r = row_min; r < row_max ; r++)
1617 this_mv.as_mv.row = r;
1618 check_here = r * mv_stride + in_what + col_min;
// 8-wide main loop ('c' initialisation is among the missing lines).
1621 while ((c + 7) < col_max)
1625 fn_ptr->sdx8f(what, what_stride, check_here , in_what_stride, sad_array8);
1627 for (i = 0; i < 8; i++)
// Widen the 16-bit SAD before comparing/costing.
1629 thissad = (unsigned int)sad_array8[i];
1631 if (thissad < bestsad)
1633 this_mv.as_mv.col = c;
1634 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1635 mvsadcost, sad_per_bit);
1637 if (thissad < bestsad)
1640 best_mv->as_mv.row = r;
1641 best_mv->as_mv.col = c;
1642 bestaddress = check_here;
// 3-wide middle loop for leftovers of the 8-wide pass.
1651 while ((c + 2) < col_max)
1655 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1657 for (i = 0; i < 3; i++)
1659 thissad = sad_array[i];
1661 if (thissad < bestsad)
1663 this_mv.as_mv.col = c;
1664 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1665 mvsadcost, sad_per_bit);
1667 if (thissad < bestsad)
1670 best_mv->as_mv.row = r;
1671 best_mv->as_mv.col = c;
1672 bestaddress = check_here;
// Scalar tail for the final few columns.
1683 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1685 if (thissad < bestsad)
1687 this_mv.as_mv.col = c;
1688 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1689 mvsadcost, sad_per_bit);
1691 if (thissad < bestsad)
1694 best_mv->as_mv.row = r;
1695 best_mv->as_mv.col = c;
1696 bestaddress = check_here;
// Winning full-pel vector back to 1/8-pel units.
1705 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1706 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1708 if (bestsad < INT_MAX)
1709 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1710 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Iterative local refinement of a full-pel vector: for up to
// 'search_range' iterations, tests the 4 cross neighbours (up, left,
// right, down) of the current best vector, moves to the cheapest
// improving neighbour, and stops when none improves. ref_mv is updated
// in place; returns variance + mv rate cost of the refined vector.
// NOTE(review): embedded line numbers jump (e.g. 1763 -> 1772), so the
// 'bestsad = thissad; best_site = j;' updates, the loop-exit 'break' and
// several braces are missing from this copy.
1715 int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1716 int error_per_bit, int search_range,
1717 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
// Cross pattern: up, left, right, down.
1720 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1722 short this_row_offset, this_col_offset;
1724 int what_stride = b->src_stride;
1725 int in_what_stride = d->pre_stride;
1726 unsigned char *what = (*(b->base_src) + b->src);
// Pixel address of the current best (starting) vector.
1727 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1728 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1729 unsigned char *check_here;
1730 unsigned int thissad;
1732 unsigned int bestsad = INT_MAX;
1734 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is 1/8-pel; reduce to full-pel for SAD costing.
1737 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1738 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Baseline score at the starting vector (no SAD early-out).
1740 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1742 for (i=0; i<search_range; i++)
1746 for (j = 0 ; j < 4 ; j++)
1748 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1749 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
// Only test neighbours strictly inside the allowed mv range.
1751 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1752 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1754 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
1755 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
// Two-stage test: raw SAD first, then SAD + mv cost.
1757 if (thissad < bestsad)
1759 this_mv.as_mv.row = this_row_offset;
1760 this_mv.as_mv.col = this_col_offset;
1761 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1763 if (thissad < bestsad)
// No neighbour improved -> refinement has converged (exit path is among
// the missing lines); otherwise step to the winning neighbour.
1772 if (best_site == -1)
1776 ref_mv->as_mv.row += neighbors[best_site].row;
1777 ref_mv->as_mv.col += neighbors[best_site].col;
1778 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
// Refined full-pel vector back to 1/8-pel units for rate costing.
1782 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1783 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
1785 if (bestsad < INT_MAX)
1786 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1787 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// SIMD-assisted variant of vp8_refining_search_sad: when the whole
// 4-neighbour cross lies strictly inside the mv bounds ('all_in'), all
// four neighbour SADs are computed in one fn_ptr->sdx4df call; otherwise
// it falls back to the per-neighbour scalar path with bounds checks.
// ref_mv is refined in place; returns variance + mv rate cost.
// NOTE(review): embedded line numbers jump (e.g. 1850 -> 1858), so the
// 'best_site = j;' bookkeeping, the all_in initialisation, the if/else
// joining the two paths, and the loop-exit 'break' are missing from this
// copy.
1792 int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
1793 int_mv *ref_mv, int error_per_bit,
1794 int search_range, vp8_variance_fn_ptr_t *fn_ptr,
1795 int *mvcost[2], int_mv *center_mv)
// Cross pattern: up, left, right, down.
1797 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1799 short this_row_offset, this_col_offset;
1801 int what_stride = b->src_stride;
1802 int in_what_stride = d->pre_stride;
1803 unsigned char *what = (*(b->base_src) + b->src);
1804 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1805 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1806 unsigned char *check_here;
1807 unsigned int thissad;
1809 unsigned int bestsad = INT_MAX;
1811 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is 1/8-pel; reduce to full-pel for SAD costing.
1814 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1815 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Baseline score at the starting vector.
1817 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1819 for (i=0; i<search_range; i++)
// all_in: true iff every cross neighbour is strictly inside the bounds,
// so the SIMD path may skip per-neighbour range checks.
1824 all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
1825 all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
1826 all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
1827 all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
// Fast path: one 4-way SAD call for all cross neighbours.
1831 unsigned int sad_array[4];
1832 unsigned char *block_offset[4];
1833 block_offset[0] = best_address - in_what_stride;
1834 block_offset[1] = best_address - 1;
1835 block_offset[2] = best_address + 1;
1836 block_offset[3] = best_address + in_what_stride;
1838 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1840 for (j = 0; j < 4; j++)
// Two-stage test: raw SAD first, then SAD + mv cost.
1842 if (sad_array[j] < bestsad)
1844 this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
1845 this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
1846 sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1848 if (sad_array[j] < bestsad)
1850 bestsad = sad_array[j];
// Slow path: per-neighbour SAD with explicit bounds checks.
1858 for (j = 0 ; j < 4 ; j++)
1860 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1861 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
1863 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1864 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1866 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
1867 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1869 if (thissad < bestsad)
1871 this_mv.as_mv.row = this_row_offset;
1872 this_mv.as_mv.col = this_col_offset;
1873 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1875 if (thissad < bestsad)
// Converged when no neighbour improved; otherwise step to the winner.
1885 if (best_site == -1)
1889 ref_mv->as_mv.row += neighbors[best_site].row;
1890 ref_mv->as_mv.col += neighbors[best_site].col;
1891 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
// Refined full-pel vector back to 1/8-pel units for rate costing.
1895 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1896 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
1898 if (bestsad < INT_MAX)
1899 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1900 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1905 #ifdef ENTROPY_STATS
// ENTROPY_STATS helper: dumps the accumulated mv_ref_ct / mv_mode_cts
// counters as a C source file ("modecont.c") defining
// vp8_mode_contexts[6][4], i.e. per-context probabilities (out of 256)
// derived from the collected counts.
// NOTE(review): embedded line numbers jump (e.g. 1930 -> 1934), so parts
// of the body (brace lines, the else branches, fclose) are missing from
// this copy. Visible issues to confirm against the full file: the
// fopen() result is used unchecked, and both divisions by 'count' need a
// zero guard — the guards may live in the missing lines.
1906 void print_mode_context(void)
1908 FILE *f = fopen("modecont.c", "w");
1911 fprintf(f, "#include \"entropy.h\"\n");
1912 fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
// One row per mv-ref context (6 contexts).
1915 for (j = 0; j < 6; j++)
1917 fprintf(f, " { // %d \n", j);
// One probability per prediction-mode slot (4 slots).
1920 for (i = 0; i < 4; i++)
1924 int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
// Overall (context-independent) probability for this mode slot.
1927 count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
1930 overal_prob = 256 * mv_mode_cts[i][0] / count;
1934 if (overal_prob == 0)
// Context-specific probability from the per-context counters.
1938 count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
1941 this_prob = 256 * mv_ref_ct[j][i][0] / count;
1948 fprintf(f, "%5d, ", this_prob);
1949 //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
1950 //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
1953 fprintf(f, " },\n");
1960 /* MV ref count ENTROPY_STATS stats code */
1961 #ifdef ENTROPY_STATS
1962 void init_mv_ref_counts()
1964 vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
1965 vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
1968 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
1972 ++mv_ref_ct [ct[0]] [0] [0];
1973 ++mv_mode_cts[0][0];
1977 ++mv_ref_ct [ct[0]] [0] [1];
1978 ++mv_mode_cts[0][1];
1982 ++mv_ref_ct [ct[1]] [1] [0];
1983 ++mv_mode_cts[1][0];
1987 ++mv_ref_ct [ct[1]] [1] [1];
1988 ++mv_mode_cts[1][1];
1992 ++mv_ref_ct [ct[2]] [2] [0];
1993 ++mv_mode_cts[2][0];
1997 ++mv_ref_ct [ct[2]] [2] [1];
1998 ++mv_mode_cts[2][1];
2002 ++mv_ref_ct [ct[3]] [3] [0];
2003 ++mv_mode_cts[3][0];
2007 ++mv_ref_ct [ct[3]] [3] [1];
2008 ++mv_mode_cts[3][1];
2015 #endif/* END MV ref count ENTROPY_STATS stats code */