2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
14 #include "vpx_mem/vpx_mem.h"
15 #include "vpx_config.h"
19 #include "vp8/common/findnearmv.h"
// NOTE(review): presumably entropy-statistics counters (MV reference context
// counts and MV mode counts) accumulated during encoding — their readers are
// not visible in this chunk; confirm against the full file before relying on this.
22 static int mv_ref_ct [31] [4] [2];
23 static int mv_mode_cts [4] [2];
26 int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
28 // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
29 // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
30 // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
31 // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
32 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
35 static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
37 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
38 mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
39 * error_per_bit + 128) >> 8;
42 static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
44 /* Calculate sad error cost on full pixel basis. */
45 return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
46 mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
47 * error_per_bit + 128) >> 8;
50 void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
53 int search_site_count = 0;
56 // Generate offsets for 4 search sites per step.
58 x->ss[search_site_count].mv.col = 0;
59 x->ss[search_site_count].mv.row = 0;
60 x->ss[search_site_count].offset = 0;
66 // Compute offsets for search sites.
67 x->ss[search_site_count].mv.col = 0;
68 x->ss[search_site_count].mv.row = -Len;
69 x->ss[search_site_count].offset = -Len * stride;
72 // Compute offsets for search sites.
73 x->ss[search_site_count].mv.col = 0;
74 x->ss[search_site_count].mv.row = Len;
75 x->ss[search_site_count].offset = Len * stride;
78 // Compute offsets for search sites.
79 x->ss[search_site_count].mv.col = -Len;
80 x->ss[search_site_count].mv.row = 0;
81 x->ss[search_site_count].offset = -Len;
84 // Compute offsets for search sites.
85 x->ss[search_site_count].mv.col = Len;
86 x->ss[search_site_count].mv.row = 0;
87 x->ss[search_site_count].offset = Len;
94 x->ss_count = search_site_count;
95 x->searches_per_step = 4;
98 void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
101 int search_site_count = 0;
103 // Generate offsets for 8 search sites per step.
104 Len = MAX_FIRST_STEP;
105 x->ss[search_site_count].mv.col = 0;
106 x->ss[search_site_count].mv.row = 0;
107 x->ss[search_site_count].offset = 0;
113 // Compute offsets for search sites.
114 x->ss[search_site_count].mv.col = 0;
115 x->ss[search_site_count].mv.row = -Len;
116 x->ss[search_site_count].offset = -Len * stride;
119 // Compute offsets for search sites.
120 x->ss[search_site_count].mv.col = 0;
121 x->ss[search_site_count].mv.row = Len;
122 x->ss[search_site_count].offset = Len * stride;
125 // Compute offsets for search sites.
126 x->ss[search_site_count].mv.col = -Len;
127 x->ss[search_site_count].mv.row = 0;
128 x->ss[search_site_count].offset = -Len;
131 // Compute offsets for search sites.
132 x->ss[search_site_count].mv.col = Len;
133 x->ss[search_site_count].mv.row = 0;
134 x->ss[search_site_count].offset = Len;
137 // Compute offsets for search sites.
138 x->ss[search_site_count].mv.col = -Len;
139 x->ss[search_site_count].mv.row = -Len;
140 x->ss[search_site_count].offset = -Len * stride - Len;
143 // Compute offsets for search sites.
144 x->ss[search_site_count].mv.col = Len;
145 x->ss[search_site_count].mv.row = -Len;
146 x->ss[search_site_count].offset = -Len * stride + Len;
149 // Compute offsets for search sites.
150 x->ss[search_site_count].mv.col = -Len;
151 x->ss[search_site_count].mv.row = Len;
152 x->ss[search_site_count].offset = Len * stride - Len;
155 // Compute offsets for search sites.
156 x->ss[search_site_count].mv.col = Len;
157 x->ss[search_site_count].mv.row = Len;
158 x->ss[search_site_count].offset = Len * stride + Len;
166 x->ss_count = search_site_count;
167 x->searches_per_step = 8;
171 * To avoid the penalty for crossing cache-line read, preload the reference
172 * area in a small buffer, which is aligned to make sure there won't be crossing
173 * cache-line read while reading from this buffer. This reduced the cpu
174 * cycles spent on reading ref data in sub-pixel filter functions.
175 * TODO: Currently, since sub-pixel search range here is -3 ~ 3, copy 22 rows x
176 * 32 cols area that is enough for 16x16 macroblock. Later, for SPLITMV, we
177 * could reduce the area.
/* Helper macros for vp8_find_best_sub_pixel_step_iteratively() below.  They
 * expand against locals of the enclosing function: mvcost, rr/rc (reference
 * MV components, pre-halved), error_per_bit, y, y_stride, offset, z, b, vfp,
 * sse, minc/maxc/minr/maxr, thismse, besterr, br/bc, distortion and sse1.
 * (Fix: stripped line-number residue that made these #define lines invalid.) */
#define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (y + (((r)>>2) * y_stride + ((c)>>2) -(offset))) // pointer to predictor base of a motionvector
#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
#define DIST(r,c) vfp->svf( PRE(r,c), y_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
// -----------------------------------------------------------------------------
// Iterative sub-pixel motion refinement: half-pel then quarter-pel passes
// around *bestmv, using the CHECK_BETTER/DIST macros above.  Returns the best
// error; best MV is written back to *bestmv, distortion/SSE to the out params.
// NOTE(review): this block is a partial extraction — braces, some loop headers
// and declarations are missing; surviving lines are kept byte-identical.
// -----------------------------------------------------------------------------
187 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
188 int_mv *bestmv, int_mv *ref_mv,
190 const vp8_variance_fn_ptr_t *vfp,
191 int *mvcost[2], int *distortion,
// z: source block; rr/rc: reference MV halved; br/bc: best position in 1/4 pel.
194 unsigned char *z = (*(b->base_src) + b->src);
196 int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
197 int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
198 int tr = br, tc = bc;
199 unsigned int besterr = INT_MAX;
200 unsigned int left, right, up, down, diag;
202 unsigned int whichdir;
203 unsigned int halfiters = 4;
204 unsigned int quarteriters = 4;
// Search window clamped to both the MV coding range and the MB's legal range.
207 int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
208 int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
209 int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
210 int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
// On x86, pre-copy the reference area into an aligned buffer to avoid
// cache-line-crossing reads in the sub-pixel filters (see comment above).
215 #if ARCH_X86 || ARCH_X86_64
216 MACROBLOCKD *xd = &x->e_mbd;
217 unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
219 int buf_r1, buf_r2, buf_c1, buf_c2;
221 // Clamping to avoid out-of-range data access
222 buf_r1 = ((bestmv->as_mv.row - 3) < x->mv_row_min)?(bestmv->as_mv.row - x->mv_row_min):3;
223 buf_r2 = ((bestmv->as_mv.row + 3) > x->mv_row_max)?(x->mv_row_max - bestmv->as_mv.row):3;
224 buf_c1 = ((bestmv->as_mv.col - 3) < x->mv_col_min)?(bestmv->as_mv.col - x->mv_col_min):3;
225 buf_c2 = ((bestmv->as_mv.col + 3) > x->mv_col_max)?(x->mv_col_max - bestmv->as_mv.col):3;
228 /* Copy to intermediate buffer before searching. */
229 vfp->copymem(y0 - buf_c1 - d->pre_stride*buf_r1, d->pre_stride, xd->y_buf, y_stride, 16+buf_r1+buf_r2);
230 y = xd->y_buf + y_stride*buf_r1 +buf_c1;
232 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
233 y_stride = d->pre_stride;
236 offset = (bestmv->as_mv.row) * y_stride + bestmv->as_mv.col;
// Switch *bestmv into 1/8-pel units for the cost/return convention.
239 bestmv->as_mv.row <<= 3;
240 bestmv->as_mv.col <<= 3;
242 // calculate central point error
243 besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);
244 *distortion = besterr;
245 besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// Half-pel pass: probe the 4 axis neighbours at +/-2 (1/4-pel units), then
// the one diagonal implied by the best axis pair.
247 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
251 CHECK_BETTER(left, tr, tc - 2);
252 CHECK_BETTER(right, tr, tc + 2);
253 CHECK_BETTER(up, tr - 2, tc);
254 CHECK_BETTER(down, tr + 2, tc);
256 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
261 CHECK_BETTER(diag, tr - 2, tc - 2);
264 CHECK_BETTER(diag, tr - 2, tc + 2);
267 CHECK_BETTER(diag, tr + 2, tc - 2);
270 CHECK_BETTER(diag, tr + 2, tc + 2);
274 // no reason to check the same one again.
275 if (tr == br && tc == bc)
// Quarter-pel pass: same pattern at +/-1.
282 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
284 while (--quarteriters)
286 CHECK_BETTER(left, tr, tc - 1);
287 CHECK_BETTER(right, tr, tc + 1);
288 CHECK_BETTER(up, tr - 1, tc);
289 CHECK_BETTER(down, tr + 1, tc);
291 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
296 CHECK_BETTER(diag, tr - 1, tc - 1);
299 CHECK_BETTER(diag, tr - 1, tc + 1);
302 CHECK_BETTER(diag, tr + 1, tc - 1);
305 CHECK_BETTER(diag, tr + 1, tc + 1);
309 // no reason to check the same one again.
310 if (tr == br && tc == bc)
// Convert best position (1/4 pel) back to 1/8-pel MV units.
317 bestmv->as_mv.row = br << 1;
318 bestmv->as_mv.col = bc << 1;
// Reject results that stray beyond the maximum full-pel MV range.
320 if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
321 (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
// -----------------------------------------------------------------------------
// One-shot sub-pixel refinement: evaluates the half-pel neighbours of *bestmv
// (left/right/up/down + one diagonal), then repeats the pattern at quarter-pel
// resolution.  Returns the best MSE-plus-rate score; best MV, distortion and
// SSE are written to the out parameters.
// NOTE(review): partial extraction — braces, best-score updates and some
// declarations are missing; surviving lines are kept byte-identical.
// -----------------------------------------------------------------------------
334 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
335 int_mv *bestmv, int_mv *ref_mv,
337 const vp8_variance_fn_ptr_t *vfp,
338 int *mvcost[2], int *distortion,
341 int bestmse = INT_MAX;
344 unsigned char *z = (*(b->base_src) + b->src);
345 int left, right, up, down, diag;
// x86: copy the reference window into an aligned scratch buffer first.
351 #if ARCH_X86 || ARCH_X86_64
352 MACROBLOCKD *xd = &x->e_mbd;
353 unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
357 /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
358 vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
359 y = xd->y_buf + y_stride + 1;
361 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
362 y_stride = d->pre_stride;
// Switch *bestmv into 1/8-pel units.
366 bestmv->as_mv.row <<= 3;
367 bestmv->as_mv.col <<= 3;
370 // calculate central point error
371 bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
372 *distortion = bestmse;
373 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// ---- half-pel pass ----
375 // go left then right and check error
376 this_mv.as_mv.row = startmv.as_mv.row;
377 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
378 thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
379 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
385 *distortion = thismse;
389 this_mv.as_mv.col += 8;
390 thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
391 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
397 *distortion = thismse;
401 // go up then down and check error
402 this_mv.as_mv.col = startmv.as_mv.col;
403 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
404 thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
405 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
411 *distortion = thismse;
415 this_mv.as_mv.row += 8;
416 thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
417 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
423 *distortion = thismse;
// Pick the diagonal implied by the winning horizontal/vertical directions.
428 // now check 1 more diagonal
429 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
430 //for(whichdir =0;whichdir<4;whichdir++)
437 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
438 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
439 thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
442 this_mv.as_mv.col += 4;
443 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
444 thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
447 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
448 this_mv.as_mv.row += 4;
449 thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
453 this_mv.as_mv.col += 4;
454 this_mv.as_mv.row += 4;
455 thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
459 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
465 *distortion = thismse;
// ---- quarter-pel pass around the half-pel winner ----
472 // time to check quarter pels.
473 if (bestmv->as_mv.row < startmv.as_mv.row)
476 if (bestmv->as_mv.col < startmv.as_mv.col)
483 // go left then right and check error
484 this_mv.as_mv.row = startmv.as_mv.row;
486 if (startmv.as_mv.col & 7)
488 this_mv.as_mv.col = startmv.as_mv.col - 2;
489 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
493 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
494 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
497 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
503 *distortion = thismse;
507 this_mv.as_mv.col += 4;
508 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
509 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
515 *distortion = thismse;
519 // go up then down and check error
520 this_mv.as_mv.col = startmv.as_mv.col;
522 if (startmv.as_mv.row & 7)
524 this_mv.as_mv.row = startmv.as_mv.row - 2;
525 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
529 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
530 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
533 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
539 *distortion = thismse;
543 this_mv.as_mv.row += 4;
544 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
545 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
551 *distortion = thismse;
// Quarter-pel diagonal; the branches below handle the pointer adjustment
// needed when a component crosses a full/half-pel boundary.
556 // now check 1 more diagonal
557 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
559 // for(whichdir=0;whichdir<4;whichdir++)
567 if (startmv.as_mv.row & 7)
569 this_mv.as_mv.row -= 2;
571 if (startmv.as_mv.col & 7)
573 this_mv.as_mv.col -= 2;
574 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
578 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
579 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
584 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
586 if (startmv.as_mv.col & 7)
588 this_mv.as_mv.col -= 2;
589 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
593 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
594 thismse = vfp->svf(y - y_stride - 1, y_stride, 6, 6, z, b->src_stride, &sse);
600 this_mv.as_mv.col += 2;
602 if (startmv.as_mv.row & 7)
604 this_mv.as_mv.row -= 2;
605 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
609 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
610 thismse = vfp->svf(y - y_stride, y_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
615 this_mv.as_mv.row += 2;
617 if (startmv.as_mv.col & 7)
619 this_mv.as_mv.col -= 2;
620 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
624 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
625 thismse = vfp->svf(y - 1, y_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
630 this_mv.as_mv.col += 2;
631 this_mv.as_mv.row += 2;
632 thismse = vfp->svf(y, y_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
636 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
642 *distortion = thismse;
// -----------------------------------------------------------------------------
// Half-pel-only refinement: identical to the half-pel pass of
// vp8_find_best_sub_pixel_step() (axis neighbours plus one diagonal) but
// without the quarter-pel follow-up.
// NOTE(review): partial extraction — braces, best-score updates and some
// declarations are missing; surviving lines are kept byte-identical.
// -----------------------------------------------------------------------------
649 int vp8_find_best_half_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
650 int_mv *bestmv, int_mv *ref_mv,
652 const vp8_variance_fn_ptr_t *vfp,
653 int *mvcost[2], int *distortion,
656 int bestmse = INT_MAX;
659 unsigned char *z = (*(b->base_src) + b->src);
660 int left, right, up, down, diag;
// x86: copy the reference window into an aligned scratch buffer first.
666 #if ARCH_X86 || ARCH_X86_64
667 MACROBLOCKD *xd = &x->e_mbd;
668 unsigned char *y0 = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
672 /* Copy 18 rows x 32 cols area to intermediate buffer before searching. */
673 vfp->copymem(y0 - 1 - d->pre_stride, d->pre_stride, xd->y_buf, y_stride, 18);
674 y = xd->y_buf + y_stride + 1;
676 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
677 y_stride = d->pre_stride;
// Switch *bestmv into 1/8-pel units.
681 bestmv->as_mv.row <<= 3;
682 bestmv->as_mv.col <<= 3;
685 // calculate central point error
686 bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);
687 *distortion = bestmse;
688 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
690 // go left then right and check error
691 this_mv.as_mv.row = startmv.as_mv.row;
692 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
693 thismse = vfp->svf_halfpix_h(y - 1, y_stride, z, b->src_stride, &sse);
694 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
700 *distortion = thismse;
704 this_mv.as_mv.col += 8;
705 thismse = vfp->svf_halfpix_h(y, y_stride, z, b->src_stride, &sse);
706 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
712 *distortion = thismse;
716 // go up then down and check error
717 this_mv.as_mv.col = startmv.as_mv.col;
718 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
719 thismse = vfp->svf_halfpix_v(y - y_stride, y_stride, z, b->src_stride, &sse);
720 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
726 *distortion = thismse;
730 this_mv.as_mv.row += 8;
731 thismse = vfp->svf_halfpix_v(y, y_stride, z, b->src_stride, &sse);
732 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
738 *distortion = thismse;
// Diagonal chosen from the winning horizontal/vertical directions.
742 // now check 1 more diagonal -
743 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
749 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
750 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
751 thismse = vfp->svf_halfpix_hv(y - 1 - y_stride, y_stride, z, b->src_stride, &sse);
754 this_mv.as_mv.col += 4;
755 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
756 thismse = vfp->svf_halfpix_hv(y - y_stride, y_stride, z, b->src_stride, &sse);
759 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
760 this_mv.as_mv.row += 4;
761 thismse = vfp->svf_halfpix_hv(y - 1, y_stride, z, b->src_stride, &sse);
765 this_mv.as_mv.col += 4;
766 this_mv.as_mv.row += 4;
767 thismse = vfp->svf_halfpix_hv(y, y_stride, z, b->src_stride, &sse);
771 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
777 *distortion = thismse;
/* Helper macros for the integer-pel searches below.  CHECK_BOUNDS sets
 * 'all_in' if every point within +/-range of (br,bc) is legal (4 comparisons
 * instead of 4 per candidate); CHECK_POINT skips an out-of-range candidate;
 * CHECK_BETTER adds the MV rate cost and keeps the best SAD/site.
 * NOTE(review): partial extraction — the brace/continuation lines of these
 * macros are missing; surviving lines are kept byte-identical. */
784 #define CHECK_BOUNDS(range) \
787 all_in &= ((br-range) >= x->mv_row_min);\
788 all_in &= ((br+range) <= x->mv_row_max);\
789 all_in &= ((bc-range) >= x->mv_col_min);\
790 all_in &= ((bc+range) <= x->mv_col_max);\
793 #define CHECK_POINT \
795 if (this_mv.as_mv.col < x->mv_col_min) continue;\
796 if (this_mv.as_mv.col > x->mv_col_max) continue;\
797 if (this_mv.as_mv.row < x->mv_row_min) continue;\
798 if (this_mv.as_mv.row > x->mv_row_max) continue;\
801 #define CHECK_BETTER \
803 if (thissad < bestsad)\
805 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
806 if (thissad < bestsad)\
/* For each of the 6 hexagon directions, the 3 new points to probe after the
 * hexagon centre moves in that direction (the other 3 were already checked).
 * NOTE(review): partial extraction — the opening/closing brace lines of the
 * initializer are missing; surviving lines are kept byte-identical. */
814 static const MV next_chkpts[6][3] =
816 {{ -2, 0}, { -1, -2}, {1, -2}},
817 {{ -1, -2}, {1, -2}, {2, 0}},
818 {{1, -2}, {2, 0}, {1, 2}},
819 {{2, 0}, {1, 2}, { -1, 2}},
820 {{1, 2}, { -1, 2}, { -2, 0}},
821 {{ -1, 2}, { -2, 0}, { -1, -2}}
// -----------------------------------------------------------------------------
// Hexagon-pattern integer-pel search: probe the 6 hexagon points around the
// clamped starting MV, walk the hexagon via next_chkpts[] until no move
// improves, then refine with the 4 one-away neighbours.
// NOTE(review): partial extraction — the function header's first lines, braces
// and several loop bodies are missing; surviving lines are kept byte-identical.
// -----------------------------------------------------------------------------
833 const vp8_variance_fn_ptr_t *vfp,
839 MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
840 MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ;
843 unsigned char *what = (*(b->base_src) + b->src);
844 int what_stride = b->src_stride;
845 int in_what_stride = d->pre_stride;
848 unsigned int bestsad = 0x7fffffff;
849 unsigned int thissad;
850 unsigned char *base_offset;
851 unsigned char *this_offset;
// Centre MV converted from 1/8-pel to full-pel for SAD costing.
859 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
860 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
862 // adjust ref_mv to make sure it is within MV range
863 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
864 br = ref_mv->as_mv.row;
865 bc = ref_mv->as_mv.col;
867 // Work out the start point for the search
868 base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
869 this_offset = base_offset + (br * (d->pre_stride)) + bc;
870 this_mv.as_mv.row = br;
871 this_mv.as_mv.col = bc;
872 bestsad = vfp->sdf( what, what_stride, this_offset,
873 in_what_stride, 0x7fffffff)
874 + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
876 #if CONFIG_MULTI_RES_ENCODING
877 /* Lower search range based on prediction info */
878 if (search_param >= 6) goto cal_neighbors;
879 else if (search_param >= 5) hex_range = 4;
880 else if (search_param >= 4) hex_range = 6;
881 else if (search_param >= 3) hex_range = 15;
882 else if (search_param >= 2) hex_range = 31;
883 else if (search_param >= 1) hex_range = 63;
// Initial probe of all 6 hexagon points (one loop variant per all_in case).
894 for (i = 0; i < 6; i++)
896 this_mv.as_mv.row = br + hex[i].row;
897 this_mv.as_mv.col = bc + hex[i].col;
898 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
899 thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
904 for (i = 0; i < 6; i++)
906 this_mv.as_mv.row = br + hex[i].row;
907 this_mv.as_mv.col = bc + hex[i].col;
909 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
910 thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
919 br += hex[best_site].row;
920 bc += hex[best_site].col;
// Walk the hexagon: after each move only 3 new points need checking.
924 for (j = 1; j < hex_range; j++)
931 for (i = 0; i < 3; i++)
933 this_mv.as_mv.row = br + next_chkpts[k][i].row;
934 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
935 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
936 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
941 for (i = 0; i < 3; i++)
943 this_mv.as_mv.row = br + next_chkpts[k][i].row;
944 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
946 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
947 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
956 br += next_chkpts[k][best_site].row;
957 bc += next_chkpts[k][best_site].col;
// Keep the direction index k within [0,6).
959 if (k >= 12) k -= 12;
960 else if (k >= 6) k -= 6;
964 // check 4 1-away neighbors
966 for (j = 0; j < dia_range; j++)
973 for (i = 0; i < 4; i++)
975 this_mv.as_mv.row = br + neighbors[i].row;
976 this_mv.as_mv.col = bc + neighbors[i].col;
977 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
978 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
983 for (i = 0; i < 4; i++)
985 this_mv.as_mv.row = br + neighbors[i].row;
986 this_mv.as_mv.col = bc + neighbors[i].col;
988 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
989 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
998 br += neighbors[best_site].row;
999 bc += neighbors[best_site].col;
1003 best_mv->as_mv.row = br;
1004 best_mv->as_mv.col = bc;
// -----------------------------------------------------------------------------
// Scalar diamond search over the precomputed site table x->ss[]:
// search_param selects the starting radius, each step probes
// x->searches_per_step sites and recentres on the best one.  Returns the
// variance-plus-rate score of the final best MV.
// NOTE(review): partial extraction — braces, several declarations and the
// best-site bookkeeping lines are missing; surviving lines kept byte-identical.
// -----------------------------------------------------------------------------
1012 int vp8_diamond_search_sad
1022 vp8_variance_fn_ptr_t *fn_ptr,
1029 unsigned char *what = (*(b->base_src) + b->src);
1030 int what_stride = b->src_stride;
1031 unsigned char *in_what;
1032 int in_what_stride = d->pre_stride;
1033 unsigned char *best_address;
1038 int bestsad = INT_MAX;
1044 int this_row_offset;
1045 int this_col_offset;
1048 unsigned char *check_here;
1051 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// Centre MV converted from 1/8-pel to full-pel for SAD costing.
1053 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1054 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1056 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
1057 ref_row = ref_mv->as_mv.row;
1058 ref_col = ref_mv->as_mv.col;
1060 best_mv->as_mv.row = ref_row;
1061 best_mv->as_mv.col = ref_col;
1063 // Work out the start point for the search
1064 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1065 best_address = in_what;
1067 // Check the starting position
1068 bestsad = fn_ptr->sdf(what, what_stride, in_what,
1069 in_what_stride, 0x7fffffff)
1070 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1072 // search_param determines the length of the initial step and hence the number of iterations
1073 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1074 ss = &x->ss[search_param * x->searches_per_step];
1075 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1079 for (step = 0; step < tot_steps ; step++)
1081 for (j = 0 ; j < x->searches_per_step ; j++)
1083 // Trap illegal vectors
1084 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
1085 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
1087 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1088 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1091 check_here = ss[i].offset + best_address;
1092 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
// Only add the MV rate cost when the raw SAD already beats the best.
1094 if (thissad < bestsad)
1096 this_mv.as_mv.row = this_row_offset;
1097 this_mv.as_mv.col = this_col_offset;
1098 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1099 mvsadcost, sad_per_bit);
1101 if (thissad < bestsad)
// Recentre on the winning site (or stop when no site improved).
1112 if (best_site != last_site)
1114 best_mv->as_mv.row += ss[best_site].mv.row;
1115 best_mv->as_mv.col += ss[best_site].mv.col;
1116 best_address += ss[best_site].offset;
1117 last_site = best_site;
1119 else if (best_address == in_what)
// Final score: variance of the winner plus its MV rate cost (1/8-pel units).
1123 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1124 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1126 if (bestsad == INT_MAX)
1129 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1130 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// -----------------------------------------------------------------------------
// SIMD-assisted diamond search: identical strategy to vp8_diamond_search_sad
// but, when all candidate sites are in bounds, evaluates them 4 at a time via
// fn_ptr->sdx4df; otherwise falls back to the scalar per-site path.
// NOTE(review): partial extraction — braces, declarations and some
// bookkeeping lines are missing; surviving lines kept byte-identical.
// -----------------------------------------------------------------------------
1133 int vp8_diamond_search_sadx4
1143 vp8_variance_fn_ptr_t *fn_ptr,
1150 unsigned char *what = (*(b->base_src) + b->src);
1151 int what_stride = b->src_stride;
1152 unsigned char *in_what;
1153 int in_what_stride = d->pre_stride;
1154 unsigned char *best_address;
1159 unsigned int bestsad = UINT_MAX;
1165 int this_row_offset;
1166 int this_col_offset;
1169 unsigned char *check_here;
1170 unsigned int thissad;
1172 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// Centre MV converted from 1/8-pel to full-pel for SAD costing.
1174 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1175 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1177 vp8_clamp_mv(ref_mv, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
1178 ref_row = ref_mv->as_mv.row;
1179 ref_col = ref_mv->as_mv.col;
1181 best_mv->as_mv.row = ref_row;
1182 best_mv->as_mv.col = ref_col;
1184 // Work out the start point for the search
1185 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1186 best_address = in_what;
1188 // Check the starting position
1189 bestsad = fn_ptr->sdf(what, what_stride,
1190 in_what, in_what_stride, 0x7fffffff)
1191 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1193 // search_param determines the length of the initial step and hence the number of iterations
1194 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1195 ss = &x->ss[search_param * x->searches_per_step];
1196 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1200 for (step = 0; step < tot_steps ; step++)
1204 // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
1205 // checking 4 bounds for each points.
1206 all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
1207 all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
1208 all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
1209 all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);
// Fast path: batch 4 SADs per sdx4df call.
1213 unsigned int sad_array[4];
1215 for (j = 0 ; j < x->searches_per_step ; j += 4)
1217 unsigned char *block_offset[4];
1219 for (t = 0; t < 4; t++)
1220 block_offset[t] = ss[i+t].offset + best_address;
1222 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1224 for (t = 0; t < 4; t++, i++)
1226 if (sad_array[t] < bestsad)
1228 this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
1229 this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
1230 sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
1231 mvsadcost, sad_per_bit);
1233 if (sad_array[t] < bestsad)
1235 bestsad = sad_array[t];
// Scalar fallback path with per-candidate bounds checks.
1244 for (j = 0 ; j < x->searches_per_step ; j++)
1246 // Trap illegal vectors
1247 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
1248 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
1250 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1251 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1253 check_here = ss[i].offset + best_address;
1254 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1256 if (thissad < bestsad)
1258 this_mv.as_mv.row = this_row_offset;
1259 this_mv.as_mv.col = this_col_offset;
1260 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1261 mvsadcost, sad_per_bit);
1263 if (thissad < bestsad)
// Recentre on the winning site (or stop when no site improved).
1274 if (best_site != last_site)
1276 best_mv->as_mv.row += ss[best_site].mv.row;
1277 best_mv->as_mv.col += ss[best_site].mv.col;
1278 best_address += ss[best_site].offset;
1279 last_site = best_site;
1281 else if (best_address == in_what)
// Final score: variance of the winner plus its MV rate cost (1/8-pel units).
1285 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1286 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1288 if (bestsad == INT_MAX)
1291 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1292 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// -----------------------------------------------------------------------------
// Exhaustive full-pel search: scans every position within +/-distance of
// ref_mv (clamped to the legal MV window), keeping the lowest SAD-plus-rate
// candidate; returns its variance-plus-rate score.  Best MV lands in
// d->bmi.mv.
// NOTE(review): partial extraction — braces and some declarations are
// missing; surviving lines kept byte-identical.
// -----------------------------------------------------------------------------
1295 int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1296 int sad_per_bit, int distance,
1297 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1300 unsigned char *what = (*(b->base_src) + b->src);
1301 int what_stride = b->src_stride;
1302 unsigned char *in_what;
1303 int in_what_stride = d->pre_stride;
1304 int mv_stride = d->pre_stride;
1305 unsigned char *bestaddress;
1306 int_mv *best_mv = &d->bmi.mv;
1308 int bestsad = INT_MAX;
1311 unsigned char *check_here;
1314 int ref_row = ref_mv->as_mv.row;
1315 int ref_col = ref_mv->as_mv.col;
// Raw search rectangle before clamping to the UMV border below.
1317 int row_min = ref_row - distance;
1318 int row_max = ref_row + distance;
1319 int col_min = ref_col - distance;
1320 int col_max = ref_col + distance;
1322 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// Centre MV converted from 1/8-pel to full-pel for SAD costing.
1324 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1325 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1327 // Work out the mid point for the search
1328 in_what = *(d->base_pre) + d->pre;
1329 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1331 best_mv->as_mv.row = ref_row;
1332 best_mv->as_mv.col = ref_col;
1334 // Baseline value at the centre
1335 bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
1336 in_what_stride, 0x7fffffff)
1337 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1339 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1340 if (col_min < x->mv_col_min)
1341 col_min = x->mv_col_min;
1343 if (col_max > x->mv_col_max)
1344 col_max = x->mv_col_max;
1346 if (row_min < x->mv_row_min)
1347 row_min = x->mv_row_min;
1349 if (row_max > x->mv_row_max)
1350 row_max = x->mv_row_max;
// Raster scan of the clamped rectangle.
1352 for (r = row_min; r < row_max ; r++)
1354 this_mv.as_mv.row = r;
1355 check_here = r * mv_stride + in_what + col_min;
1357 for (c = col_min; c < col_max; c++)
1359 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1361 this_mv.as_mv.col = c;
1362 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1363 mvsadcost, sad_per_bit);
1365 if (thissad < bestsad)
1368 best_mv->as_mv.row = r;
1369 best_mv->as_mv.col = c;
1370 bestaddress = check_here;
// Final score: variance of the winner plus its MV rate cost (1/8-pel units).
1377 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1378 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1380 if (bestsad < INT_MAX)
1381 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1382 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1387 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1388 int sad_per_bit, int distance,
1389 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1392 unsigned char *what = (*(b->base_src) + b->src);
1393 int what_stride = b->src_stride;
1394 unsigned char *in_what;
1395 int in_what_stride = d->pre_stride;
1396 int mv_stride = d->pre_stride;
1397 unsigned char *bestaddress;
1398 int_mv *best_mv = &d->bmi.mv;
1400 unsigned int bestsad = UINT_MAX;
1403 unsigned char *check_here;
1404 unsigned int thissad;
1406 int ref_row = ref_mv->as_mv.row;
1407 int ref_col = ref_mv->as_mv.col;
1409 int row_min = ref_row - distance;
1410 int row_max = ref_row + distance;
1411 int col_min = ref_col - distance;
1412 int col_max = ref_col + distance;
1414 unsigned int sad_array[3];
1416 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
1418 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1419 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1421 // Work out the mid point for the search
1422 in_what = *(d->base_pre) + d->pre;
1423 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1425 best_mv->as_mv.row = ref_row;
1426 best_mv->as_mv.col = ref_col;
1428 // Baseline value at the centre
1429 bestsad = fn_ptr->sdf(what, what_stride,
1430 bestaddress, in_what_stride, 0x7fffffff)
1431 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1433 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1434 if (col_min < x->mv_col_min)
1435 col_min = x->mv_col_min;
1437 if (col_max > x->mv_col_max)
1438 col_max = x->mv_col_max;
1440 if (row_min < x->mv_row_min)
1441 row_min = x->mv_row_min;
1443 if (row_max > x->mv_row_max)
1444 row_max = x->mv_row_max;
1446 for (r = row_min; r < row_max ; r++)
1448 this_mv.as_mv.row = r;
1449 check_here = r * mv_stride + in_what + col_min;
1452 while ((c + 2) < col_max)
1456 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1458 for (i = 0; i < 3; i++)
1460 thissad = sad_array[i];
1462 if (thissad < bestsad)
1464 this_mv.as_mv.col = c;
1465 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1466 mvsadcost, sad_per_bit);
1468 if (thissad < bestsad)
1471 best_mv->as_mv.row = r;
1472 best_mv->as_mv.col = c;
1473 bestaddress = check_here;
1484 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1486 if (thissad < bestsad)
1488 this_mv.as_mv.col = c;
1489 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1490 mvsadcost, sad_per_bit);
1492 if (thissad < bestsad)
1495 best_mv->as_mv.row = r;
1496 best_mv->as_mv.col = c;
1497 bestaddress = check_here;
1507 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1508 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1510 if (bestsad < INT_MAX)
1511 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1512 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1517 int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1518 int sad_per_bit, int distance,
1519 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1522 unsigned char *what = (*(b->base_src) + b->src);
1523 int what_stride = b->src_stride;
1524 unsigned char *in_what;
1525 int in_what_stride = d->pre_stride;
1526 int mv_stride = d->pre_stride;
1527 unsigned char *bestaddress;
1528 int_mv *best_mv = &d->bmi.mv;
1530 unsigned int bestsad = UINT_MAX;
1533 unsigned char *check_here;
1534 unsigned int thissad;
1536 int ref_row = ref_mv->as_mv.row;
1537 int ref_col = ref_mv->as_mv.col;
1539 int row_min = ref_row - distance;
1540 int row_max = ref_row + distance;
1541 int col_min = ref_col - distance;
1542 int col_max = ref_col + distance;
1544 DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
1545 unsigned int sad_array[3];
1547 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
1549 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1550 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1552 // Work out the mid point for the search
1553 in_what = *(d->base_pre) + d->pre;
1554 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1556 best_mv->as_mv.row = ref_row;
1557 best_mv->as_mv.col = ref_col;
1559 // Baseline value at the centre
1560 bestsad = fn_ptr->sdf(what, what_stride,
1561 bestaddress, in_what_stride, 0x7fffffff)
1562 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1564 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1565 if (col_min < x->mv_col_min)
1566 col_min = x->mv_col_min;
1568 if (col_max > x->mv_col_max)
1569 col_max = x->mv_col_max;
1571 if (row_min < x->mv_row_min)
1572 row_min = x->mv_row_min;
1574 if (row_max > x->mv_row_max)
1575 row_max = x->mv_row_max;
1577 for (r = row_min; r < row_max ; r++)
1579 this_mv.as_mv.row = r;
1580 check_here = r * mv_stride + in_what + col_min;
1583 while ((c + 7) < col_max)
1587 fn_ptr->sdx8f(what, what_stride, check_here , in_what_stride, sad_array8);
1589 for (i = 0; i < 8; i++)
1591 thissad = (unsigned int)sad_array8[i];
1593 if (thissad < bestsad)
1595 this_mv.as_mv.col = c;
1596 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1597 mvsadcost, sad_per_bit);
1599 if (thissad < bestsad)
1602 best_mv->as_mv.row = r;
1603 best_mv->as_mv.col = c;
1604 bestaddress = check_here;
1613 while ((c + 2) < col_max)
1617 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1619 for (i = 0; i < 3; i++)
1621 thissad = sad_array[i];
1623 if (thissad < bestsad)
1625 this_mv.as_mv.col = c;
1626 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1627 mvsadcost, sad_per_bit);
1629 if (thissad < bestsad)
1632 best_mv->as_mv.row = r;
1633 best_mv->as_mv.col = c;
1634 bestaddress = check_here;
1645 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1647 if (thissad < bestsad)
1649 this_mv.as_mv.col = c;
1650 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1651 mvsadcost, sad_per_bit);
1653 if (thissad < bestsad)
1656 best_mv->as_mv.row = r;
1657 best_mv->as_mv.col = c;
1658 bestaddress = check_here;
1667 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1668 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1670 if (bestsad < INT_MAX)
1671 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1672 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1677 int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1678 int error_per_bit, int search_range,
1679 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1682 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1684 short this_row_offset, this_col_offset;
1686 int what_stride = b->src_stride;
1687 int in_what_stride = d->pre_stride;
1688 unsigned char *what = (*(b->base_src) + b->src);
1689 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1690 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1691 unsigned char *check_here;
1692 unsigned int thissad;
1694 unsigned int bestsad = INT_MAX;
1696 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
1699 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1700 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1702 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1704 for (i=0; i<search_range; i++)
1708 for (j = 0 ; j < 4 ; j++)
1710 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1711 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
1713 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1714 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1716 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
1717 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1719 if (thissad < bestsad)
1721 this_mv.as_mv.row = this_row_offset;
1722 this_mv.as_mv.col = this_col_offset;
1723 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1725 if (thissad < bestsad)
1734 if (best_site == -1)
1738 ref_mv->as_mv.row += neighbors[best_site].row;
1739 ref_mv->as_mv.col += neighbors[best_site].col;
1740 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
1744 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1745 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
1747 if (bestsad < INT_MAX)
1748 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1749 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1754 int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
1755 int_mv *ref_mv, int error_per_bit,
1756 int search_range, vp8_variance_fn_ptr_t *fn_ptr,
1757 int *mvcost[2], int_mv *center_mv)
1759 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1761 short this_row_offset, this_col_offset;
1763 int what_stride = b->src_stride;
1764 int in_what_stride = d->pre_stride;
1765 unsigned char *what = (*(b->base_src) + b->src);
1766 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1767 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1768 unsigned char *check_here;
1769 unsigned int thissad;
1771 unsigned int bestsad = INT_MAX;
1773 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
1776 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1777 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1779 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1781 for (i=0; i<search_range; i++)
1786 all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
1787 all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
1788 all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
1789 all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
1793 unsigned int sad_array[4];
1794 unsigned char *block_offset[4];
1795 block_offset[0] = best_address - in_what_stride;
1796 block_offset[1] = best_address - 1;
1797 block_offset[2] = best_address + 1;
1798 block_offset[3] = best_address + in_what_stride;
1800 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1802 for (j = 0; j < 4; j++)
1804 if (sad_array[j] < bestsad)
1806 this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
1807 this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
1808 sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1810 if (sad_array[j] < bestsad)
1812 bestsad = sad_array[j];
1820 for (j = 0 ; j < 4 ; j++)
1822 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1823 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
1825 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1826 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1828 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
1829 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1831 if (thissad < bestsad)
1833 this_mv.as_mv.row = this_row_offset;
1834 this_mv.as_mv.col = this_col_offset;
1835 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1837 if (thissad < bestsad)
1847 if (best_site == -1)
1851 ref_mv->as_mv.row += neighbors[best_site].row;
1852 ref_mv->as_mv.col += neighbors[best_site].col;
1853 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
1857 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1858 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
1860 if (bestsad < INT_MAX)
1861 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1862 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1867 #ifdef ENTROPY_STATS
1868 void print_mode_context(void)
1870 FILE *f = fopen("modecont.c", "w");
1873 fprintf(f, "#include \"entropy.h\"\n");
1874 fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
1877 for (j = 0; j < 6; j++)
1879 fprintf(f, " { // %d \n", j);
1882 for (i = 0; i < 4; i++)
1886 int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
1889 count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
1892 overal_prob = 256 * mv_mode_cts[i][0] / count;
1896 if (overal_prob == 0)
1900 count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
1903 this_prob = 256 * mv_ref_ct[j][i][0] / count;
1910 fprintf(f, "%5d, ", this_prob);
1911 //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
1912 //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
1915 fprintf(f, " },\n");
1922 /* MV ref count ENTROPY_STATS stats code */
1923 #ifdef ENTROPY_STATS
1924 void init_mv_ref_counts()
1926 vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
1927 vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
1930 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
1934 ++mv_ref_ct [ct[0]] [0] [0];
1935 ++mv_mode_cts[0][0];
1939 ++mv_ref_ct [ct[0]] [0] [1];
1940 ++mv_mode_cts[0][1];
1944 ++mv_ref_ct [ct[1]] [1] [0];
1945 ++mv_mode_cts[1][0];
1949 ++mv_ref_ct [ct[1]] [1] [1];
1950 ++mv_mode_cts[1][1];
1954 ++mv_ref_ct [ct[2]] [2] [0];
1955 ++mv_mode_cts[2][0];
1959 ++mv_ref_ct [ct[2]] [2] [1];
1960 ++mv_mode_cts[2][1];
1964 ++mv_ref_ct [ct[3]] [3] [0];
1965 ++mv_mode_cts[3][0];
1969 ++mv_ref_ct [ct[3]] [3] [1];
1970 ++mv_mode_cts[3][1];
1977 #endif/* END MV ref count ENTROPY_STATS stats code */