/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_mem/vpx_mem.h"

// Motion-vector statistics accumulators (31 buckets x 4 x 2, and 4 modes x 2).
// NOTE(review): no writer is visible in this excerpt -- presumably updated by
// stat-gathering code elsewhere in the file (e.g. under an ENTROPY_STATS-style
// build flag); confirm against the full source.
static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts  [4] [2];
24 int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
26 // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
27 // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
28 // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
29 // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
30 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
33 static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
35 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
36 mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
37 * error_per_bit + 128) >> 8;
40 static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
42 /* Calculate sad error cost on full pixel basis. */
43 return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
44 mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
45 * error_per_bit + 128) >> 8;
// Builds x->ss[], the search-site table for the 4-point diamond search:
// a centre entry followed by up/down/left/right candidates.  `offset` caches
// the buffer displacement (row * stride + col) so the search can address each
// candidate without re-multiplying.
// NOTE(review): the radius (`Len`) declaration/halving loop and the
// search_site_count increments are elided in this excerpt.
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
    int search_site_count = 0;

    // Generate offsets for 4 search sites per step.
    // Centre site: zero motion vector, zero buffer offset.
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = 0;

    // Compute offsets for search sites: above the centre.
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride;

    // Compute offsets for search sites: below the centre.
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride;

    // Compute offsets for search sites: left of the centre.
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = -Len;

    // Compute offsets for search sites: right of the centre.
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = Len;

    // Record the table size and the number of candidates probed per step.
    x->ss_count = search_site_count;
    x->searches_per_step = 4;
// Builds x->ss[] for the 8-point (square) search: centre plus the four axial
// and four diagonal neighbours at each step radius, starting from
// MAX_FIRST_STEP.
// NOTE(review): the radius-halving loop and the search_site_count increments
// are elided in this excerpt.
void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
    int search_site_count = 0;

    // Generate offsets for 8 search sites per step.
    Len = MAX_FIRST_STEP;
    // Centre site: zero motion vector, zero buffer offset.
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = 0;

    // Compute offsets for search sites: above.
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride;

    // Compute offsets for search sites: below.
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride;

    // Compute offsets for search sites: left.
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = -Len;

    // Compute offsets for search sites: right.
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = Len;

    // Compute offsets for search sites: up-left diagonal.
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride - Len;

    // Compute offsets for search sites: up-right diagonal.
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = -Len;
    x->ss[search_site_count].offset = -Len * stride + Len;

    // Compute offsets for search sites: down-left diagonal.
    x->ss[search_site_count].mv.col = -Len;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride - Len;

    // Compute offsets for search sites: down-right diagonal.
    x->ss[search_site_count].mv.col = Len;
    x->ss[search_site_count].mv.row = Len;
    x->ss[search_site_count].offset = Len * stride + Len;

    // Record the table size and the number of candidates probed per step.
    x->ss_count = search_site_count;
    x->searches_per_step = 8;
// Helper macros for the iterative sub-pixel search below.  They rely on
// locals of the enclosing function (mvcost, rr, rc, error_per_bit, d, b, z,
// vfp, sse, minc/maxc/minr/maxr, besterr, br, bc, thismse, distortion, sse1).
#define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to predictor base of a motionvector
#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
#define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
// Classic multiple-evaluation macros: arguments must be side-effect free.
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
//#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
// Iterative half-pel then quarter-pel refinement around the full-pel best
// vector, driven by the CHECK_BETTER macro family above.  Works internally
// in quarter-pel units (br,bc); returns the result in 1/8-pel units via
// bestmv.
// NOTE(review): excerpt -- iteration loop bodies and several braces are
// elided here; comments annotate only the visible statements.
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                             int_mv *bestmv, int_mv *ref_mv,
                                             const vp8_variance_fn_ptr_t *vfp,
                                             int *mvcost[2], int *distortion,
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
    unsigned char *z = (*(b->base_src) + b->src);

    // Reference vector components, pre-shifted for the MVC cost macro.
    int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
    // Current best position in quarter-pel units.
    int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
    int tr = br, tc = bc;
    unsigned int besterr = INT_MAX;
    unsigned int left, right, up, down, diag;
    unsigned int whichdir;
    unsigned int halfiters = 4;
    unsigned int quarteriters = 4;

    // Search window: frame limits (<< 2 into quarter-pel units) further
    // clamped around the reference vector -- NOTE(review): mvlong_width
    // presumably bounds the codable mv magnitude; confirm in full source.
    int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
    int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
    int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
    int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));

    // Promote the full-pel starting vector to 1/8-pel units for costing.
    bestmv->as_mv.row <<= 3;
    bestmv->as_mv.col <<= 3;

    // calculate central point error
    besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
    *distortion = besterr;
    besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
    // Half-pel pass: step of 2 quarter-pel units in each direction.
    CHECK_BETTER(left, tr, tc - 2);
    CHECK_BETTER(right, tr, tc + 2);
    CHECK_BETTER(up, tr - 2, tc);
    CHECK_BETTER(down, tr + 2, tc);

    // Pick the diagonal toward the better horizontal and vertical moves.
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

    CHECK_BETTER(diag, tr - 2, tc - 2);

    CHECK_BETTER(diag, tr - 2, tc + 2);

    CHECK_BETTER(diag, tr + 2, tc - 2);

    CHECK_BETTER(diag, tr + 2, tc + 2);

    // no reason to check the same one again.
    if (tr == br && tc == bc)

    // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
    // Quarter-pel pass: step of 1 quarter-pel unit.
    while (--quarteriters)
        CHECK_BETTER(left, tr, tc - 1);
        CHECK_BETTER(right, tr, tc + 1);
        CHECK_BETTER(up, tr - 1, tc);
        CHECK_BETTER(down, tr + 1, tc);

        whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

        CHECK_BETTER(diag, tr - 1, tc - 1);

        CHECK_BETTER(diag, tr - 1, tc + 1);

        CHECK_BETTER(diag, tr + 1, tc - 1);

        CHECK_BETTER(diag, tr + 1, tc + 1);

        // no reason to check the same one again.
        if (tr == br && tc == bc)

    // Convert the quarter-pel result back to 1/8-pel units.
    bestmv->as_mv.row = br << 1;
    bestmv->as_mv.col = bc << 1;

    // Reject vectors that stray too far from the reference.
    if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > MAX_FULL_PEL_VAL) ||
        (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > MAX_FULL_PEL_VAL))
// Exhaustive half-pel then quarter-pel refinement: probes left/right,
// up/down, then one diagonal at each precision, keeping the lowest
// variance + mv-rate cost.
// NOTE(review): excerpt -- braces, the best-candidate update branches
// (compare against bestmse, update bestmv), switch/case labels for the
// diagonal selection, and some quarter-pel cases are elided; comments
// annotate only the visible statements.
int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
                                 int_mv *bestmv, int_mv *ref_mv,
                                 const vp8_variance_fn_ptr_t *vfp,
                                 int *mvcost[2], int *distortion,
    int bestmse = INT_MAX;
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
    unsigned char *z = (*(b->base_src) + b->src);
    int left, right, up, down, diag;

    // Trap uncodable vectors
    if ((abs((bestmv->as_mv.col << 3) - ref_mv->as_mv.col) > MAX_FULL_PEL_VAL)
        || (abs((bestmv->as_mv.row << 3) - ref_mv->as_mv.row) > MAX_FULL_PEL_VAL))
        // Bail out: report the vector in 1/8-pel units with max distortion.
        bestmv->as_mv.row <<= 3;
        bestmv->as_mv.col <<= 3;
        *distortion = INT_MAX;

    // Promote the full-pel starting vector to 1/8-pel units.
    bestmv->as_mv.row <<= 3;
    bestmv->as_mv.col <<= 3;

    // calculate central point error
    bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
    *distortion = bestmse;
    bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // go left then right and check error
    this_mv.as_mv.row = startmv.as_mv.row;
    this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);  // half-pel to the left
    thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
    left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    this_mv.as_mv.col += 8;                             // half-pel to the right
    thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
    right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // go up then down and check error
    this_mv.as_mv.col = startmv.as_mv.col;
    this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);  // half-pel above
    thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    this_mv.as_mv.row += 8;                             // half-pel below
    thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
    down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // now check 1 more diagonal
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    //for(whichdir =0;whichdir<4;whichdir++)
        // whichdir == 0: up-left diagonal
        this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
        this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
        thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        // whichdir == 1: up-right diagonal
        this_mv.as_mv.col += 4;
        this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
        thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        // whichdir == 2: down-left diagonal
        this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
        this_mv.as_mv.row += 4;
        thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
        // whichdir == 3: down-right diagonal
        this_mv.as_mv.col += 4;
        this_mv.as_mv.row += 4;
        thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);

    diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // time to check quarter pels.
    // (The elided lines adjust y/startmv toward the winning half-pel point.)
    if (bestmv->as_mv.row < startmv.as_mv.row)

    if (bestmv->as_mv.col < startmv.as_mv.col)

    // go left then right and check error
    this_mv.as_mv.row = startmv.as_mv.row;

    if (startmv.as_mv.col & 7)
        // Not at an integer column: simple 2/8-pel step left.
        this_mv.as_mv.col = startmv.as_mv.col - 2;
        thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
        // At an integer column: step to the 6/8 position of the previous pel.
        this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
        thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);

    left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    this_mv.as_mv.col += 4;
    thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
    right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // go up then down and check error
    this_mv.as_mv.col = startmv.as_mv.col;

    if (startmv.as_mv.row & 7)
        this_mv.as_mv.row = startmv.as_mv.row - 2;
        thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
        // Integer row: step to the 6/8 position of the previous row.
        this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
        thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);

    up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    this_mv.as_mv.row += 4;
    thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
    down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // now check 1 more diagonal
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

    // for(whichdir=0;whichdir<4;whichdir++)
        // Up-left quarter-pel diagonal; each branch distinguishes whether
        // start row/col sit on an integer pel (need y pointer adjustment).
        if (startmv.as_mv.row & 7)
            this_mv.as_mv.row -= 2;

            if (startmv.as_mv.col & 7)
                this_mv.as_mv.col -= 2;
                thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
                this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
                thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;

            this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;

            if (startmv.as_mv.col & 7)
                this_mv.as_mv.col -= 2;
                thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
                this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
                thismse = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);

        // Up-right quarter-pel diagonal.
        this_mv.as_mv.col += 2;

        if (startmv.as_mv.row & 7)
            this_mv.as_mv.row -= 2;
            thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
            this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
            thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);

        // Down-left quarter-pel diagonal.
        this_mv.as_mv.row += 2;

        if (startmv.as_mv.col & 7)
            this_mv.as_mv.col -= 2;
            thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
            this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
            thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);

        // Down-right quarter-pel diagonal.
        this_mv.as_mv.col += 2;
        this_mv.as_mv.row += 2;
        thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);

    diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;
// Half-pel-only refinement: probes left/right, up/down, then one diagonal,
// keeping the lowest variance + mv-rate cost.  Same structure as
// vp8_find_best_sub_pixel_step without the quarter-pel stage.
// NOTE(review): excerpt -- braces, best-candidate update branches, and
// switch/case labels for the diagonal selection are elided.
int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
                                  int_mv *bestmv, int_mv *ref_mv,
                                  const vp8_variance_fn_ptr_t *vfp,
                                  int *mvcost[2], int *distortion,
    int bestmse = INT_MAX;
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
    unsigned char *z = (*(b->base_src) + b->src);
    int left, right, up, down, diag;

    // Trap uncodable vectors
    if ((abs((bestmv->as_mv.col << 3) - ref_mv->as_mv.col) > MAX_FULL_PEL_VAL)
        || (abs((bestmv->as_mv.row << 3) - ref_mv->as_mv.row) > MAX_FULL_PEL_VAL))
        // Bail out: report the vector in 1/8-pel units with max distortion.
        bestmv->as_mv.row <<= 3;
        bestmv->as_mv.col <<= 3;
        *distortion = INT_MAX;

    // Promote the full-pel starting vector to 1/8-pel units.
    bestmv->as_mv.row <<= 3;
    bestmv->as_mv.col <<= 3;

    // calculate central point error
    bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
    *distortion = bestmse;
    bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // go left then right and check error
    this_mv.as_mv.row = startmv.as_mv.row;
    this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
    thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
    left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    this_mv.as_mv.col += 8;
    thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
    right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // go up then down and check error
    this_mv.as_mv.col = startmv.as_mv.col;
    this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
    thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    this_mv.as_mv.row += 8;
    thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
    down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

    // somewhat strangely not doing all the diagonals for half pel is slower than doing them.
    // now check 1 more diagonal -
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
        // NOTE(review): the following `this_mv.col` / `this_mv.row` accesses
        // (no .as_mv) look like a remnant of a disabled (#if 0) variant in
        // the elided lines -- confirm against the full file.
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        this_mv.col = (this_mv.col - 8) | 4;
        diag = vfp->svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        diag = vfp->svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);

    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

        // Active diagonal cases (whichdir 0..3): up-left, up-right,
        // down-left, down-right half-pel points.
        this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
        this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
        thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

        this_mv.as_mv.col += 8;
        thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

        this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
        this_mv.as_mv.row = startmv.as_mv.row + 4;
        thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
        diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;

        this_mv.as_mv.col += 8;
        thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
        diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
        *distortion = thismse;
// Fast bounds test: folds into `all_in` whether every point within `range`
// of the current centre (br,bc) lies inside the motion search limits, so the
// per-point clamp can be skipped.
#define CHECK_BOUNDS(range) \
    all_in &= ((br-range) >= x->mv_row_min);\
    all_in &= ((br+range) <= x->mv_row_max);\
    all_in &= ((bc-range) >= x->mv_col_min);\
    all_in &= ((bc+range) <= x->mv_col_max);\
/* Per-point clamp used when CHECK_BOUNDS failed: skip candidates outside
 * the search limits. */
#define CHECK_POINT \
    if (this_mv.as_mv.col < x->mv_col_min) continue;\
    if (this_mv.as_mv.col > x->mv_col_max) continue;\
    if (this_mv.as_mv.row < x->mv_row_min) continue;\
    if (this_mv.as_mv.row > x->mv_row_max) continue;\
/* SAD-domain best-candidate update (redefines the sub-pixel CHECK_BETTER
 * above; an #undef presumably sits in the elided lines -- confirm).  The mv
 * rate cost is only added when the raw SAD is already competitive. */
#define CHECK_BETTER \
    if (thissad < bestsad)\
        thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
        if (thissad < bestsad)\
// For each of the six hexagon directions (row index = direction of the
// previous move), the three neighbouring points that have not already been
// evaluated and so still need checking.
static const MV next_chkpts[6][3] =
    {{ -2, 0}, { -1, -2}, {1, -2}},
    {{ -1, -2}, {1, -2}, {2, 0}},
    {{1, -2}, {2, 0}, {1, 2}},
    {{2, 0}, {1, 2}, { -1, 2}},
    {{1, 2}, { -1, 2}, { -2, 0}},
    {{ -1, 2}, { -2, 0}, { -1, -2}}
    // Body of vp8_hex_search (signature and several control lines elided in
    // this excerpt): hexagon-pattern integer-pel search, then a 4-neighbour
    // refinement, returning variance + mv rate cost of the winner.
    const vp8_variance_fn_ptr_t *vfp,
    // Hexagon offsets around the current centre, and the 4-connected
    // neighbourhood used for the final refinement pass.
    MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
    MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ;
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    int in_what_stride = d->pre_stride;
    // Search centre in full-pel units (ref_mv is 1/8 pel).
    int br = ref_mv->as_mv.row >> 3, bc = ref_mv->as_mv.col >> 3;
    unsigned int bestsad = 0x7fffffff;
    unsigned int thissad;
    unsigned char *base_offset;
    unsigned char *this_offset;

    // Full-pel version of the prediction centre, used for SAD mv costing.
    fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
    fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

    // Work out the start point for the search
    base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
    this_offset = base_offset + (br * (d->pre_stride)) + bc;
    this_mv.as_mv.row = br;
    this_mv.as_mv.col = bc;
    bestsad = vfp->sdf( what, what_stride, this_offset,
                        in_what_stride, 0x7fffffff)
              + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);

    // First pass: probe all six hexagon points.  Two variants -- the first
    // assumes all points are in bounds (all_in), the second clamps per point.
    for (i = 0; i < 6; i++)
        this_mv.as_mv.row = br + hex[i].row;
        this_mv.as_mv.col = bc + hex[i].col;
        this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
        thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);

    for (i = 0; i < 6; i++)
        this_mv.as_mv.row = br + hex[i].row;
        this_mv.as_mv.col = bc + hex[i].col;

        this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
        thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);

    // Move the centre onto the winning hexagon point.
    br += hex[best_site].row;
    bc += hex[best_site].col;

    // Subsequent steps only need the 3 points not already evaluated
    // (next_chkpts, indexed by the direction k of the previous move),
    // for at most 127 iterations.
    for (j = 1; j < 127; j++)
        for (i = 0; i < 3; i++)
            this_mv.as_mv.row = br + next_chkpts[k][i].row;
            this_mv.as_mv.col = bc + next_chkpts[k][i].col;
            this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
            thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);

        for (i = 0; i < 3; i++)
            this_mv.as_mv.row = br + next_chkpts[k][i].row;
            this_mv.as_mv.col = bc + next_chkpts[k][i].col;

            this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
            thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);

        br += next_chkpts[k][best_site].row;
        bc += next_chkpts[k][best_site].col;

        // Wrap the direction index back into [0, 6).
        if (k >= 12) k -= 12;
        else if (k >= 6) k -= 6;

    // check 4 1-away neighbors
    for (j = 0; j < 32; j++)
        for (i = 0; i < 4; i++)
            this_mv.as_mv.row = br + neighbors[i].row;
            this_mv.as_mv.col = bc + neighbors[i].col;
            this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
            thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);

        for (i = 0; i < 4; i++)
            this_mv.as_mv.row = br + neighbors[i].row;
            this_mv.as_mv.col = bc + neighbors[i].col;

            this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
            thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);

        br += neighbors[best_site].row;
        bc += neighbors[best_site].col;

    // Return the winner: full-pel vector in best_mv, and the variance plus
    // mv rate cost measured at 1/8-pel precision.
    best_mv->as_mv.row = br;
    best_mv->as_mv.col = bc;
    this_mv.as_mv.row = br<<3;
    this_mv.as_mv.col = bc<<3;

    this_offset = (unsigned char *)(*(d->base_pre) + d->pre + (br * (in_what_stride)) + bc);
    return vfp->vf(what, what_stride, this_offset, in_what_stride, &bestsad)
           + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit) ;
// Diamond pattern search over the precomputed site table x->ss[], one
// candidate at a time.  search_param selects the starting step radius;
// the loop walks the table until the centre stops moving.
// NOTE(review): excerpt -- parameter list, braces, loop-variable updates
// and the best-site bookkeeping are partly elided.
int vp8_diamond_search_sad
    vp8_variance_fn_ptr_t *fn_ptr,
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    unsigned char *best_address;
    int bestsad = INT_MAX;
    // Search centre in full-pel units (ref_mv is 1/8 pel).
    int ref_row = ref_mv->as_mv.row >> 3;
    int ref_col = ref_mv->as_mv.col >> 3;
    int this_row_offset;
    int this_col_offset;
    unsigned char *check_here;
    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    // Full-pel prediction centre for SAD mv costing.
    fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
    fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

    best_mv->as_mv.row = ref_row;
    best_mv->as_mv.col = ref_col;

    // Work out the start point for the search
    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    best_address = in_what;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
        // Check the starting position
        bestsad = fn_ptr->sdf(what, what_stride, in_what,
                              in_what_stride, 0x7fffffff)
                  + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);

    // search_param determines the length of the initial step and hence the number of iterations
    // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
    ss = &x->ss[search_param * x->searches_per_step];
    tot_steps = (x->ss_count / x->searches_per_step) - search_param;

    for (step = 0; step < tot_steps ; step++)
        for (j = 0 ; j < x->searches_per_step ; j++)
            // Trap illegal vectors
            this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
            this_col_offset = best_mv->as_mv.col + ss[i].mv.col;

            if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
                (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
                // Cached buffer offset avoids recomputing row*stride+col.
                check_here = ss[i].offset + best_address;
                thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);

                // Add the mv rate cost only if the raw SAD is competitive.
                if (thissad < bestsad)
                    this_mv.as_mv.row = this_row_offset;
                    this_mv.as_mv.col = this_col_offset;
                    thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                              mvsadcost, sad_per_bit);

                    if (thissad < bestsad)

        // Re-centre on the winning site; stop when the centre is stable.
        if (best_site != last_site)
            best_mv->as_mv.row += ss[best_site].mv.row;
            best_mv->as_mv.col += ss[best_site].mv.col;
            best_address += ss[best_site].offset;
            last_site = best_site;
        else if (best_address == in_what)

    // Final score: variance + mv rate at 1/8-pel precision.
    this_mv.as_mv.row = best_mv->as_mv.row << 3;
    this_mv.as_mv.col = best_mv->as_mv.col << 3;

    if (bestsad == INT_MAX)

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
           + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// SIMD-friendly variant of vp8_diamond_search_sad: when all candidates of a
// step are inside the frame limits (all_in), SADs are computed four at a
// time via fn_ptr->sdx4df; otherwise it falls back to the scalar per-point
// path.
// NOTE(review): excerpt -- parameter list, braces, all_in initialisation
// and the best-site bookkeeping are partly elided.
int vp8_diamond_search_sadx4
    vp8_variance_fn_ptr_t *fn_ptr,
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    unsigned char *best_address;
    int bestsad = INT_MAX;
    // Search centre in full-pel units (ref_mv is 1/8 pel).
    int ref_row = ref_mv->as_mv.row >> 3;
    int ref_col = ref_mv->as_mv.col >> 3;
    int this_row_offset;
    int this_col_offset;
    unsigned char *check_here;
    unsigned int thissad;
    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    // Full-pel prediction centre for SAD mv costing.
    fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
    fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

    best_mv->as_mv.row = ref_row;
    best_mv->as_mv.col = ref_col;

    // Work out the start point for the search
    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    best_address = in_what;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
        // Check the starting position
        bestsad = fn_ptr->sdf(what, what_stride,
                              in_what, in_what_stride, 0x7fffffff)
                  + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);

    // search_param determines the length of the initial step and hence the number of iterations
    // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
    ss = &x->ss[search_param * x->searches_per_step];
    tot_steps = (x->ss_count / x->searches_per_step) - search_param;

    for (step = 0; step < tot_steps ; step++)
        // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
        // checking 4 bounds for each points.
        all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
        all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
        all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
        all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);

            // Fast path: all sites in bounds -- batch 4 SADs per call.
            unsigned int sad_array[4];

            for (j = 0 ; j < x->searches_per_step ; j += 4)
                unsigned char *block_offset[4];

                for (t = 0; t < 4; t++)
                    block_offset[t] = ss[i+t].offset + best_address;

                fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);

                for (t = 0; t < 4; t++, i++)
                    // Add the mv rate cost only if the raw SAD is competitive.
                    if (sad_array[t] < bestsad)
                        this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
                        this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
                        sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
                                                       mvsadcost, sad_per_bit);

                        if (sad_array[t] < bestsad)
                            bestsad = sad_array[t];

            // Scalar fallback: per-point bounds checks.
            for (j = 0 ; j < x->searches_per_step ; j++)
                // Trap illegal vectors
                this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
                this_col_offset = best_mv->as_mv.col + ss[i].mv.col;

                if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
                    (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
                    check_here = ss[i].offset + best_address;
                    thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);

                    if (thissad < bestsad)
                        this_mv.as_mv.row = this_row_offset;
                        this_mv.as_mv.col = this_col_offset;
                        thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                                  mvsadcost, sad_per_bit);

                        if (thissad < bestsad)

        // Re-centre on the winning site; stop when the centre is stable.
        if (best_site != last_site)
            best_mv->as_mv.row += ss[best_site].mv.row;
            best_mv->as_mv.col += ss[best_site].mv.col;
            best_address += ss[best_site].offset;
            last_site = best_site;
        else if (best_address == in_what)

    // Final score: variance + mv rate at 1/8-pel precision.
    this_mv.as_mv.row = best_mv->as_mv.row << 3;
    this_mv.as_mv.col = best_mv->as_mv.col << 3;

    if (bestsad == INT_MAX)

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
           + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Exhaustive full-pel search: evaluates every vector within +/- `distance`
// of ref_mv (clamped to the frame's UMV borders), scoring SAD plus mv rate.
// NOTE(review): excerpt -- braces, bestsad update inside the inner loop and
// the check_here increment are elided.
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
                        int sad_per_bit, int distance,
                        vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    // Result is written straight into the block descriptor's mv.
    int_mv *best_mv = &d->bmi.mv;
    int bestsad = INT_MAX;
    unsigned char *check_here;
    // ref_mv here is already in full-pel units (no >> 3).
    int ref_row = ref_mv->as_mv.row;
    int ref_col = ref_mv->as_mv.col;

    // Raw search window before clamping to the frame limits.
    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;
    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    // Full-pel prediction centre for SAD mv costing.
    fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
    fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->as_mv.row = ref_row;
    best_mv->as_mv.col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
        // Baseline value at the centre
        //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
                              in_what_stride, 0x7fffffff)
                  + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);

    // Apply further limits to prevent us looking using vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    // Raster scan over the clamped window.
    for (r = row_min; r < row_max ; r++)
        this_mv.as_mv.row = r;
        check_here = r * mv_stride + in_what + col_min;

        for (c = col_min; c < col_max; c++)
            // Early-terminating SAD: bails out once it exceeds bestsad.
            thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);

            this_mv.as_mv.col = c;
            thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
                                      mvsadcost, sad_per_bit);

            if (thissad < bestsad)
                best_mv->as_mv.row = r;
                best_mv->as_mv.col = c;
                bestaddress = check_here;

    // Final score: variance + mv rate at 1/8-pel precision.
    this_mv.as_mv.row = best_mv->as_mv.row << 3;
    this_mv.as_mv.col = best_mv->as_mv.col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// vp8_full_search_sadx3: exhaustive full-pel motion search over a
// +/- `distance` window around ref_mv.  The inner column loop is
// accelerated with the 3-candidate SAD helper (fn_ptr->sdx3f); trailing
// columns that do not fit a group of three are handled by the scalar
// sdf path.  Returns the variance (vf) of the best match plus the rate
// cost of its motion vector measured against center_mv.
// NOTE(review): this listing appears to be a lossy extraction -- lines
// holding only braces, loop initialisers (e.g. `c = col_min;`), the
// bestsad update statements and the trailing `int_mv *center_mv`
// parameter are not visible here; comments describe the visible code only.
1397 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1398 int sad_per_bit, int distance,
1399 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1402 unsigned char *what = (*(b->base_src) + b->src);
1403 int what_stride = b->src_stride;
1404 unsigned char *in_what;
1405 int in_what_stride = d->pre_stride;
1406 int mv_stride = d->pre_stride;
1407 unsigned char *bestaddress;
1408 int_mv *best_mv = &d->bmi.mv;
1410 int bestsad = INT_MAX;
1413 unsigned char *check_here;
1414 unsigned int thissad;
1416 int ref_row = ref_mv->as_mv.row;
1417 int ref_col = ref_mv->as_mv.col;
// Raw search bounds before clamping to the UMV border limits below.
1419 int row_min = ref_row - distance;
1420 int row_max = ref_row + distance;
1421 int col_min = ref_col - distance;
1422 int col_max = ref_col + distance;
// Receives three SADs per sdx3f call.
1424 unsigned int sad_array[3];
1426 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; convert to full-pel for SAD costing.
1428 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1429 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1431 // Work out the mid point for the search
1432 in_what = *(d->base_pre) + d->pre;
1433 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1435 best_mv->as_mv.row = ref_row;
1436 best_mv->as_mv.col = ref_col;
1438 // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
1439 if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
1440 (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
1442 // Baseline value at the centre
1443 bestsad = fn_ptr->sdf(what, what_stride,
1444 bestaddress, in_what_stride, 0x7fffffff)
1445 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1448 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1449 if (col_min < x->mv_col_min)
1450 col_min = x->mv_col_min;
1452 if (col_max > x->mv_col_max)
1453 col_max = x->mv_col_max;
1455 if (row_min < x->mv_row_min)
1456 row_min = x->mv_row_min;
1458 if (row_max > x->mv_row_max)
1459 row_max = x->mv_row_max;
1461 for (r = row_min; r < row_max ; r++)
1463 this_mv.as_mv.row = r;
1464 check_here = r * mv_stride + in_what + col_min;
// Vectorised path: three consecutive column candidates per sdx3f call.
1467 while ((c + 2) < col_max)
1471 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1473 for (i = 0; i < 3; i++)
1475 thissad = sad_array[i];
// Cheap pre-test on raw SAD before paying for the MV rate cost,
// then re-test with the cost added.
1477 if (thissad < bestsad)
1479 this_mv.as_mv.col = c;
1480 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1481 mvsadcost, sad_per_bit);
1483 if (thissad < bestsad)
1486 best_mv->as_mv.row = r;
1487 best_mv->as_mv.col = c;
1488 bestaddress = check_here;
// Scalar fall-back for the remaining columns of this row; passing the
// current bestsad lets sdf early-out once it exceeds the best.
1499 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1501 if (thissad < bestsad)
1503 this_mv.as_mv.col = c;
1504 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1505 mvsadcost, sad_per_bit);
1507 if (thissad < bestsad)
1510 best_mv->as_mv.row = r;
1511 best_mv->as_mv.col = c;
1512 bestaddress = check_here;
// Convert the winning full-pel vector back to 1/8-pel units for costing.
1522 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1523 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1525 if (bestsad < INT_MAX)
1526 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1527 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// vp8_full_search_sadx8: same exhaustive full-pel search as
// vp8_full_search_sadx3, but with an additional fast path that evaluates
// eight consecutive column candidates per call via fn_ptr->sdx8f (results
// in a 16-byte-aligned unsigned short array), then three at a time via
// sdx3f, then one at a time via the scalar sdf.
// NOTE(review): lossy extraction -- brace-only lines, loop initialisers,
// bestsad updates and the trailing `int_mv *center_mv` parameter are not
// visible; comments describe the visible code only.
1532 int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1533 int sad_per_bit, int distance,
1534 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1537 unsigned char *what = (*(b->base_src) + b->src);
1538 int what_stride = b->src_stride;
1539 unsigned char *in_what;
1540 int in_what_stride = d->pre_stride;
1541 int mv_stride = d->pre_stride;
1542 unsigned char *bestaddress;
1543 int_mv *best_mv = &d->bmi.mv;
1545 int bestsad = INT_MAX;
1548 unsigned char *check_here;
1549 unsigned int thissad;
1551 int ref_row = ref_mv->as_mv.row;
1552 int ref_col = ref_mv->as_mv.col;
// Raw search bounds before clamping to the UMV border limits below.
1554 int row_min = ref_row - distance;
1555 int row_max = ref_row + distance;
1556 int col_min = ref_col - distance;
1557 int col_max = ref_col + distance;
// sdx8f writes 8 x 16-bit SADs; alignment required by the SIMD helper.
1559 DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
1560 unsigned int sad_array[3];
1562 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; convert to full-pel for SAD costing.
1564 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1565 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1567 // Work out the mid point for the search
1568 in_what = *(d->base_pre) + d->pre;
1569 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1571 best_mv->as_mv.row = ref_row;
1572 best_mv->as_mv.col = ref_col;
1574 // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
1575 if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
1576 (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
1578 // Baseline value at the centre
1579 bestsad = fn_ptr->sdf(what, what_stride,
1580 bestaddress, in_what_stride, 0x7fffffff)
1581 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1584 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1585 if (col_min < x->mv_col_min)
1586 col_min = x->mv_col_min;
1588 if (col_max > x->mv_col_max)
1589 col_max = x->mv_col_max;
1591 if (row_min < x->mv_row_min)
1592 row_min = x->mv_row_min;
1594 if (row_max > x->mv_row_max)
1595 row_max = x->mv_row_max;
1597 for (r = row_min; r < row_max ; r++)
1599 this_mv.as_mv.row = r;
1600 check_here = r * mv_stride + in_what + col_min;
// Fastest path: eight consecutive column candidates per sdx8f call.
1603 while ((c + 7) < col_max)
1607 fn_ptr->sdx8f(what, what_stride, check_here , in_what_stride, sad_array8);
1609 for (i = 0; i < 8; i++)
1611 thissad = (unsigned int)sad_array8[i];
// Cheap pre-test on raw SAD, then re-test with MV rate cost added.
1613 if (thissad < bestsad)
1615 this_mv.as_mv.col = c;
1616 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1617 mvsadcost, sad_per_bit);
1619 if (thissad < bestsad)
1622 best_mv->as_mv.row = r;
1623 best_mv->as_mv.col = c;
1624 bestaddress = check_here;
// Medium path: three candidates per sdx3f call.
1633 while ((c + 2) < col_max)
1637 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1639 for (i = 0; i < 3; i++)
1641 thissad = sad_array[i];
1643 if (thissad < bestsad)
1645 this_mv.as_mv.col = c;
1646 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1647 mvsadcost, sad_per_bit);
1649 if (thissad < bestsad)
1652 best_mv->as_mv.row = r;
1653 best_mv->as_mv.col = c;
1654 bestaddress = check_here;
// Scalar fall-back for the trailing columns of this row.
1665 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1667 if (thissad < bestsad)
1669 this_mv.as_mv.col = c;
1670 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1671 mvsadcost, sad_per_bit);
1673 if (thissad < bestsad)
1676 best_mv->as_mv.row = r;
1677 best_mv->as_mv.col = c;
1678 bestaddress = check_here;
// Convert the winning full-pel vector back to 1/8-pel units for costing.
1687 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1688 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1690 if (bestsad < INT_MAX)
1691 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1692 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// vp8_refining_search_sad: iterative local refinement of ref_mv.  Up to
// `search_range` passes, each pass probes the four cross neighbours
// (up, left, right, down) of the current best position and moves there
// if one improves SAD + MV cost.  ref_mv is updated in place; returns
// variance of the final position plus its MV rate cost.
// NOTE(review): lossy extraction -- the best_site initialisation, the
// bestsad/best_site update inside the inner if, the early-exit for
// `best_site == -1` (presumably `break;`) and the trailing
// `int_mv *center_mv` parameter are not visible here.
1697 int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1698 int error_per_bit, int search_range,
1699 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
// Probe offsets: up, left, right, down (in row/col units).
1702 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1704 short this_row_offset, this_col_offset;
1706 int what_stride = b->src_stride;
1707 int in_what_stride = d->pre_stride;
1708 unsigned char *what = (*(b->base_src) + b->src);
1709 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1710 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1711 unsigned char *check_here;
1712 unsigned int thissad;
1714 unsigned int bestsad = INT_MAX;
1716 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; convert to full-pel for SAD costing.
1719 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1720 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Baseline cost at the starting position.
1722 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1724 for (i=0; i<search_range; i++)
1728 for (j = 0 ; j < 4 ; j++)
1730 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1731 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
// Skip candidates outside the motion vector limits.
1733 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1734 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1736 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
1737 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
// Cheap pre-test on raw SAD, then re-test with MV rate cost added.
1739 if (thissad < bestsad)
1741 this_mv.as_mv.row = this_row_offset;
1742 this_mv.as_mv.col = this_col_offset;
1743 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1745 if (thissad < bestsad)
// No neighbour improved: refinement has converged.
1754 if (best_site == -1)
// Step to the winning neighbour and continue refining from there.
1758 ref_mv->as_mv.row += neighbors[best_site].row;
1759 ref_mv->as_mv.col += neighbors[best_site].col;
1760 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
// Convert the final full-pel vector back to 1/8-pel units for costing.
1764 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1765 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
1767 if (bestsad < INT_MAX)
1768 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1769 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// vp8_refining_search_sadx4: SIMD-accelerated variant of
// vp8_refining_search_sad.  When all four cross neighbours of the current
// position are inside the MV limits (`all_in`), their SADs are computed
// in one fn_ptr->sdx4df call; otherwise it falls back to the per-neighbour
// scalar path with bounds checks.  ref_mv is refined in place.
// NOTE(review): lossy extraction -- the all_in/best_site initialisations,
// the best_site updates, the `break` for `best_site == -1`, and the
// visible `int_mv *center_mv` parameter's enclosing lines are partially
// missing; comments describe the visible code only.
1774 int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
1775 int_mv *ref_mv, int error_per_bit,
1776 int search_range, vp8_variance_fn_ptr_t *fn_ptr,
1777 int *mvcost[2], int_mv *center_mv)
// Probe offsets: up, left, right, down (in row/col units).
1779 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1781 short this_row_offset, this_col_offset;
1783 int what_stride = b->src_stride;
1784 int in_what_stride = d->pre_stride;
1785 unsigned char *what = (*(b->base_src) + b->src);
1786 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1787 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1788 unsigned char *check_here;
1789 unsigned int thissad;
1791 unsigned int bestsad = INT_MAX;
1793 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; convert to full-pel for SAD costing.
1796 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1797 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// Baseline cost at the starting position.
1799 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1801 for (i=0; i<search_range; i++)
// all_in is true only if every one of the four neighbours is strictly
// inside the MV limits, so the vector path can skip per-candidate checks.
1806 all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
1807 all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
1808 all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
1809 all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
1813 unsigned int sad_array[4];
1814 unsigned char *block_offset[4];
// Addresses of the four neighbour candidates, matching neighbors[].
1815 block_offset[0] = best_address - in_what_stride;
1816 block_offset[1] = best_address - 1;
1817 block_offset[2] = best_address + 1;
1818 block_offset[3] = best_address + in_what_stride;
// One call computes all four SADs.
1820 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1822 for (j = 0; j < 4; j++)
// Cheap pre-test on raw SAD, then re-test with MV rate cost added.
1824 if (sad_array[j] < bestsad)
1826 this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
1827 this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
1828 sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1830 if (sad_array[j] < bestsad)
1832 bestsad = sad_array[j];
// Scalar fall-back when a neighbour may lie outside the MV limits.
1840 for (j = 0 ; j < 4 ; j++)
1842 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1843 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
1845 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1846 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1848 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
1849 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1851 if (thissad < bestsad)
1853 this_mv.as_mv.row = this_row_offset;
1854 this_mv.as_mv.col = this_col_offset;
1855 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1857 if (thissad < bestsad)
// No neighbour improved: refinement has converged.
1867 if (best_site == -1)
// Step to the winning neighbour and continue refining from there.
1871 ref_mv->as_mv.row += neighbors[best_site].row;
1872 ref_mv->as_mv.col += neighbors[best_site].col;
1873 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
// Convert the final full-pel vector back to 1/8-pel units for costing.
1877 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1878 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
1880 if (bestsad < INT_MAX)
1881 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1882 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1887 #ifdef ENTROPY_STATS
// print_mode_context (ENTROPY_STATS builds only): dump the gathered
// mode-context statistics as a C table `vp8_mode_contexts[6][4]` into
// a generated file "modecont.c", one probability (0..255) per entry
// derived from the mv_ref_ct counters.
// NOTE(review): lossy extraction -- the fopen NULL check, zero-count
// guards before the divisions, fclose, and brace lines are not visible
// here; presumably they exist in the real file -- confirm before editing.
1888 void print_mode_context(void)
1890 FILE *f = fopen("modecont.c", "w");
1893 fprintf(f, "#include \"entropy.h\"\n");
1894 fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
// One row per mode context j.
1897 for (j = 0; j < 6; j++)
1899 fprintf(f, " { // %d \n", j);
1902 for (i = 0; i < 4; i++)
1906 int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
// Overall probability for mode i across all contexts.
1909 count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
1912 overal_prob = 256 * mv_mode_cts[i][0] / count;
1916 if (overal_prob == 0)
// Context-conditioned probability for mode i in context j.
1920 count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
1923 this_prob = 256 * mv_ref_ct[j][i][0] / count;
1930 fprintf(f, "%5d, ", this_prob);
1931 //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
1932 //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
1935 fprintf(f, " },\n");
1942 /* MV ref count ENTROPY_STATS stats code */
1943 #ifdef ENTROPY_STATS
// Reset the ENTROPY_STATS accumulators (mv_ref_ct and mv_mode_cts)
// that accum_mv_refs() increments during encoding.
1944 void init_mv_ref_counts()
1946 vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
1947 vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
// accum_mv_refs (ENTROPY_STATS builds only): accumulate, per reference-MV
// context ct[], how often the coded mode matched each of the four
// MV-mode decisions (index 0..3), into mv_ref_ct and mv_mode_cts.
// Counter index [..][k][0] vs [..][k][1] distinguishes the two outcomes
// of decision k.
// NOTE(review): lossy extraction -- the `if (m == ...)` / `else` lines
// that select which of these increments execute for a given mode m are
// not visible here; only the increment statements remain.  Do not read
// this as straight-line code.
1950 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
1954 ++mv_ref_ct [ct[0]] [0] [0];
1955 ++mv_mode_cts[0][0];
1959 ++mv_ref_ct [ct[0]] [0] [1];
1960 ++mv_mode_cts[0][1];
1964 ++mv_ref_ct [ct[1]] [1] [0];
1965 ++mv_mode_cts[1][0];
1969 ++mv_ref_ct [ct[1]] [1] [1];
1970 ++mv_mode_cts[1][1];
1974 ++mv_ref_ct [ct[2]] [2] [0];
1975 ++mv_mode_cts[2][0];
1979 ++mv_ref_ct [ct[2]] [2] [1];
1980 ++mv_mode_cts[2][1];
1984 ++mv_ref_ct [ct[3]] [3] [0];
1985 ++mv_mode_cts[3][0];
1989 ++mv_ref_ct [ct[3]] [3] [1];
1990 ++mv_mode_cts[3][1];
1997 #endif/* END MV ref count ENTROPY_STATS stats code */