2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
13 #include "vpx_mem/vpx_mem.h"
// NOTE(review): per-frame MV statistics accumulators. Neither array is
// referenced anywhere in the visible portion of this file — presumably
// updated/read by entropy-stats code elsewhere; confirm before removal.
20 static int mv_ref_ct [31] [4] [2];
21 static int mv_mode_cts  [4] [2];
24 int vp8_mv_bit_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int Weight)
26 // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
27 // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
28 // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
29 // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
30 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] + mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1]) * Weight) >> 7;
33 static int mv_err_cost(int_mv *mv, int_mv *ref, int *mvcost[2], int error_per_bit)
35 return ((mvcost[0][(mv->as_mv.row - ref->as_mv.row) >> 1] +
36 mvcost[1][(mv->as_mv.col - ref->as_mv.col) >> 1])
37 * error_per_bit + 128) >> 8;
40 static int mvsad_err_cost(int_mv *mv, int_mv *ref, int *mvsadcost[2], int error_per_bit)
42 /* Calculate sad error cost on full pixel basis. */
43 return ((mvsadcost[0][(mv->as_mv.row - ref->as_mv.row)] +
44 mvsadcost[1][(mv->as_mv.col - ref->as_mv.col)])
45 * error_per_bit + 128) >> 8;
48 void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
51 int search_site_count = 0;
54 // Generate offsets for 4 search sites per step.
56 x->ss[search_site_count].mv.col = 0;
57 x->ss[search_site_count].mv.row = 0;
58 x->ss[search_site_count].offset = 0;
64 // Compute offsets for search sites.
65 x->ss[search_site_count].mv.col = 0;
66 x->ss[search_site_count].mv.row = -Len;
67 x->ss[search_site_count].offset = -Len * stride;
70 // Compute offsets for search sites.
71 x->ss[search_site_count].mv.col = 0;
72 x->ss[search_site_count].mv.row = Len;
73 x->ss[search_site_count].offset = Len * stride;
76 // Compute offsets for search sites.
77 x->ss[search_site_count].mv.col = -Len;
78 x->ss[search_site_count].mv.row = 0;
79 x->ss[search_site_count].offset = -Len;
82 // Compute offsets for search sites.
83 x->ss[search_site_count].mv.col = Len;
84 x->ss[search_site_count].mv.row = 0;
85 x->ss[search_site_count].offset = Len;
92 x->ss_count = search_site_count;
93 x->searches_per_step = 4;
96 void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
99 int search_site_count = 0;
101 // Generate offsets for 8 search sites per step.
102 Len = MAX_FIRST_STEP;
103 x->ss[search_site_count].mv.col = 0;
104 x->ss[search_site_count].mv.row = 0;
105 x->ss[search_site_count].offset = 0;
111 // Compute offsets for search sites.
112 x->ss[search_site_count].mv.col = 0;
113 x->ss[search_site_count].mv.row = -Len;
114 x->ss[search_site_count].offset = -Len * stride;
117 // Compute offsets for search sites.
118 x->ss[search_site_count].mv.col = 0;
119 x->ss[search_site_count].mv.row = Len;
120 x->ss[search_site_count].offset = Len * stride;
123 // Compute offsets for search sites.
124 x->ss[search_site_count].mv.col = -Len;
125 x->ss[search_site_count].mv.row = 0;
126 x->ss[search_site_count].offset = -Len;
129 // Compute offsets for search sites.
130 x->ss[search_site_count].mv.col = Len;
131 x->ss[search_site_count].mv.row = 0;
132 x->ss[search_site_count].offset = Len;
135 // Compute offsets for search sites.
136 x->ss[search_site_count].mv.col = -Len;
137 x->ss[search_site_count].mv.row = -Len;
138 x->ss[search_site_count].offset = -Len * stride - Len;
141 // Compute offsets for search sites.
142 x->ss[search_site_count].mv.col = Len;
143 x->ss[search_site_count].mv.row = -Len;
144 x->ss[search_site_count].offset = -Len * stride + Len;
147 // Compute offsets for search sites.
148 x->ss[search_site_count].mv.col = -Len;
149 x->ss[search_site_count].mv.row = Len;
150 x->ss[search_site_count].offset = Len * stride - Len;
153 // Compute offsets for search sites.
154 x->ss[search_site_count].mv.col = Len;
155 x->ss[search_site_count].mv.row = Len;
156 x->ss[search_site_count].offset = Len * stride + Len;
164 x->ss_count = search_site_count;
165 x->searches_per_step = 8;
// Helper macros for the iterative sub-pixel search below.  All operate on
// quarter-pel (r,c) coordinates and expand against locals of the enclosing
// function: mvcost, error_per_bit, rr/rc (quarter-pel reference), d, b, z,
// vfp, sse, thismse, besterr, br/bc (current best), distortion, sse1, and
// the minc/maxc/minr/maxr clamp bounds.
// NOTE(review): MVC/DIST/MIN/MAX evaluate their arguments more than once,
// and IFMVCV expands to an unbraced if/else (dangling-else hazard) — safe
// only for the simple arguments used in this file.
169 #define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
170 #define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to predictor base of a motionvector
171 #define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
172 #define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
173 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
174 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
175 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{thismse = DIST(r,c); if((v = (MVC(r,c)+thismse)) < besterr) { besterr = v; br=r; bc=c; *distortion = thismse; *sse1 = sse; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
176 #define MIN(x,y) (((x)<(y))?(x):(y))
177 #define MAX(x,y) (((x)>(y))?(x):(y))
179 //#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
// Iterative sub-pixel motion refinement.  Starting from the full-pel best
// MV, runs up to 'halfiters' half-pel (+/-2 quarter-pel) passes and then up
// to 'quarteriters' quarter-pel (+/-1) passes, each pass probing the four
// compass points and one diagonal chosen by whichdir, via CHECK_BETTER.
// Writes the refined MV (eighth-pel units) to bestmv, the raw variance to
// *distortion and SSE to *sse1, and returns besterr (distortion + MV cost).
// NOTE(review): this extraction is missing structural lines (braces, the
// while(--halfiters) header, switch scaffolding, error_per_bit/sse1
// parameter lines and the return path) relative to upstream libvpx mcomp.c;
// code below is kept byte-identical, comments only added.
181 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
182 int_mv *bestmv, int_mv *ref_mv,
184 const vp8_variance_fn_ptr_t *vfp,
185 int *mvcost[2], int *distortion,
// y = predictor at the full-pel best MV; z = source block.
188 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
189 unsigned char *z = (*(b->base_src) + b->src);
// rr/rc: reference MV in quarter-pel; br/bc: current best in quarter-pel.
191 int rr = ref_mv->as_mv.row >> 1, rc = ref_mv->as_mv.col >> 1;
192 int br = bestmv->as_mv.row << 2, bc = bestmv->as_mv.col << 2;
193 int tr = br, tc = bc;
194 unsigned int besterr = INT_MAX;
195 unsigned int left, right, up, down, diag;
197 unsigned int whichdir;
198 unsigned int halfiters = 4;
199 unsigned int quarteriters = 4;
// Clamp bounds: intersection of the search window and the range that the
// long-MV code (mvlong_width bits) can represent, in quarter-pel units.
202 int minc = MAX(x->mv_col_min << 2, (ref_mv->as_mv.col >> 1) - ((1 << mvlong_width) - 1));
203 int maxc = MIN(x->mv_col_max << 2, (ref_mv->as_mv.col >> 1) + ((1 << mvlong_width) - 1));
204 int minr = MAX(x->mv_row_min << 2, (ref_mv->as_mv.row >> 1) - ((1 << mvlong_width) - 1));
205 int maxr = MIN(x->mv_row_max << 2, (ref_mv->as_mv.row >> 1) + ((1 << mvlong_width) - 1));
// Convert bestmv to eighth-pel units for mv_err_cost / the caller.
208 bestmv->as_mv.row <<= 3;
209 bestmv->as_mv.col <<= 3;
211 // calculate central point error
212 besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
213 *distortion = besterr;
214 besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// --- half-pel passes: probe +/-2 quarter-pel around (tr,tc) ---
216 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
220 CHECK_BETTER(left, tr, tc - 2);
221 CHECK_BETTER(right, tr, tc + 2);
222 CHECK_BETTER(up, tr - 2, tc);
223 CHECK_BETTER(down, tr + 2, tc);
// whichdir picks which diagonal quadrant to probe (0=NW,1=NE,2=SW,3=SE).
225 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
230 CHECK_BETTER(diag, tr - 2, tc - 2);
233 CHECK_BETTER(diag, tr - 2, tc + 2);
236 CHECK_BETTER(diag, tr + 2, tc - 2);
239 CHECK_BETTER(diag, tr + 2, tc + 2);
243 // no reason to check the same one again.
244 if (tr == br && tc == bc)
// --- quarter-pel passes: probe +/-1 quarter-pel around (tr,tc) ---
251 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
253 while (--quarteriters)
255 CHECK_BETTER(left, tr, tc - 1);
256 CHECK_BETTER(right, tr, tc + 1);
257 CHECK_BETTER(up, tr - 1, tc);
258 CHECK_BETTER(down, tr + 1, tc);
260 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
265 CHECK_BETTER(diag, tr - 1, tc - 1);
268 CHECK_BETTER(diag, tr - 1, tc + 1);
271 CHECK_BETTER(diag, tr + 1, tc - 1);
274 CHECK_BETTER(diag, tr + 1, tc + 1);
278 // no reason to check the same one again.
279 if (tr == br && tc == bc)
// Convert best quarter-pel result back to eighth-pel units.
286 bestmv->as_mv.row = br << 1;
287 bestmv->as_mv.col = bc << 1;
// Sanity check: the refined MV must stay within MAX_FULL_PEL_VAL of ref.
289 if ((abs(bestmv->as_mv.col - ref_mv->as_mv.col) > (MAX_FULL_PEL_VAL<<3)) ||
290 (abs(bestmv->as_mv.row - ref_mv->as_mv.row) > (MAX_FULL_PEL_VAL<<3)))
// One-shot sub-pixel refinement: evaluates the four half-pel neighbours of
// the full-pel best MV plus one diagonal, then repeats the pattern at
// quarter-pel resolution around the half-pel winner.  Candidate score =
// sub-pixel variance (thismse) + mv_err_cost; bestmv, *distortion and
// *sse1 track the winner; returns bestmse.
// NOTE(review): this extraction is missing structural lines (braces, the
// startmv/this_mv/whichdir/thismse/sse declarations, the if(<bestmse)
// update blocks, switch scaffolding and the return) relative to upstream
// libvpx mcomp.c; code below is kept byte-identical, comments only added.
303 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
304 int_mv *bestmv, int_mv *ref_mv,
306 const vp8_variance_fn_ptr_t *vfp,
307 int *mvcost[2], int *distortion,
310 int bestmse = INT_MAX;
// y = predictor at the full-pel best MV; z = source block.
313 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
314 unsigned char *z = (*(b->base_src) + b->src);
315 int left, right, up, down, diag;
// Convert to eighth-pel units before costing.
321 bestmv->as_mv.row <<= 3;
322 bestmv->as_mv.col <<= 3;
325 // calculate central point error
326 bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
327 *distortion = bestmse;
328 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// --- half-pel pass ---
330 // go left then right and check error
331 this_mv.as_mv.row = startmv.as_mv.row;
// (col - 8) | 4 == col - 4 when col is a multiple of 8: half-pel left.
332 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
333 thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
334 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
340 *distortion = thismse;
344 this_mv.as_mv.col += 8;
345 thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
346 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
352 *distortion = thismse;
356 // go up then down and check error
357 this_mv.as_mv.col = startmv.as_mv.col;
358 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
359 thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
360 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
366 *distortion = thismse;
370 this_mv.as_mv.row += 8;
371 thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
372 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
378 *distortion = thismse;
383 // now check 1 more diagonal
// whichdir picks the diagonal quadrant (0=NW,1=NE,2=SW,3=SE).
384 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
385 //for(whichdir =0;whichdir<4;whichdir++)
392 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
393 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
394 thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
397 this_mv.as_mv.col += 4;
398 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
399 thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
402 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
403 this_mv.as_mv.row += 4;
404 thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
408 this_mv.as_mv.col += 4;
409 this_mv.as_mv.row += 4;
410 thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
414 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
420 *distortion = thismse;
// --- quarter-pel pass around the half-pel winner ---
427 // time to check quarter pels.
428 if (bestmv->as_mv.row < startmv.as_mv.row)
431 if (bestmv->as_mv.col < startmv.as_mv.col)
438 // go left then right and check error
439 this_mv.as_mv.row = startmv.as_mv.row;
// If startmv has a sub-pel component, step within the same full-pel cell;
// otherwise cross into the previous cell (y - 1, phase 6).
441 if (startmv.as_mv.col & 7)
443 this_mv.as_mv.col = startmv.as_mv.col - 2;
444 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
448 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
449 thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
452 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
458 *distortion = thismse;
462 this_mv.as_mv.col += 4;
463 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
464 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
470 *distortion = thismse;
474 // go up then down and check error
475 this_mv.as_mv.col = startmv.as_mv.col;
477 if (startmv.as_mv.row & 7)
479 this_mv.as_mv.row = startmv.as_mv.row - 2;
480 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
484 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
485 thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
488 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
494 *distortion = thismse;
498 this_mv.as_mv.row += 4;
499 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
500 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
506 *distortion = thismse;
511 // now check 1 more diagonal
512 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
514 // for(whichdir=0;whichdir<4;whichdir++)
// Each diagonal case handles the sub-pel/full-pel boundary for row and
// col independently (same pattern as the axis probes above).
522 if (startmv.as_mv.row & 7)
524 this_mv.as_mv.row -= 2;
526 if (startmv.as_mv.col & 7)
528 this_mv.as_mv.col -= 2;
529 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
533 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
534 thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);;
539 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
541 if (startmv.as_mv.col & 7)
543 this_mv.as_mv.col -= 2;
544 thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
548 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
549 thismse = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
555 this_mv.as_mv.col += 2;
557 if (startmv.as_mv.row & 7)
559 this_mv.as_mv.row -= 2;
560 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
564 this_mv.as_mv.row = (startmv.as_mv.row - 8) | 6;
565 thismse = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.as_mv.col & 7, 6, z, b->src_stride, &sse);
570 this_mv.as_mv.row += 2;
572 if (startmv.as_mv.col & 7)
574 this_mv.as_mv.col -= 2;
575 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
579 this_mv.as_mv.col = (startmv.as_mv.col - 8) | 6;
580 thismse = vfp->svf(y - 1, d->pre_stride, 6, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
585 this_mv.as_mv.col += 2;
586 this_mv.as_mv.row += 2;
587 thismse = vfp->svf(y, d->pre_stride, this_mv.as_mv.col & 7, this_mv.as_mv.row & 7, z, b->src_stride, &sse);
591 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
597 *distortion = thismse;
// Half-pel-only refinement: probes the four half-pel neighbours of the
// full-pel best MV plus one diagonal (score = half-pel variance +
// mv_err_cost), updating bestmv / *distortion / *sse1; returns bestmse.
// NOTE(review): this extraction is missing structural lines (braces,
// startmv/this_mv/whichdir/thismse/sse declarations, the if(<bestmse)
// update blocks, #if/#else scaffolding and the return) relative to
// upstream libvpx mcomp.c; code below is kept byte-identical.
604 int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
605 int_mv *bestmv, int_mv *ref_mv,
607 const vp8_variance_fn_ptr_t *vfp,
608 int *mvcost[2], int *distortion,
611 int bestmse = INT_MAX;
// y = predictor at the full-pel best MV; z = source block.
614 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->as_mv.row) * d->pre_stride + bestmv->as_mv.col;
615 unsigned char *z = (*(b->base_src) + b->src);
616 int left, right, up, down, diag;
// Convert to eighth-pel units before costing.
621 bestmv->as_mv.row <<= 3;
622 bestmv->as_mv.col <<= 3;
625 // calculate central point error
626 bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, sse1);
627 *distortion = bestmse;
628 bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
630 // go left then right and check error
631 this_mv.as_mv.row = startmv.as_mv.row;
632 this_mv.as_mv.col = ((startmv.as_mv.col - 8) | 4);
633 thismse = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
634 left = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
640 *distortion = thismse;
644 this_mv.as_mv.col += 8;
645 thismse = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
646 right = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
652 *distortion = thismse;
656 // go up then down and check error
657 this_mv.as_mv.col = startmv.as_mv.col;
658 this_mv.as_mv.row = ((startmv.as_mv.row - 8) | 4);
659 thismse = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
660 up = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
666 *distortion = thismse;
670 this_mv.as_mv.row += 8;
671 thismse = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
672 down = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
678 *distortion = thismse;
682 // somewhat strangely not doing all the diagonals for half pel is slower than doing them.
684 // now check 1 more diagonal -
685 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
// NOTE(review): the next lines access this_mv.col/.row without the as_mv
// member used everywhere else — upstream this variant sits in a disabled
// (#if 0) branch superseded by the svf_halfpix_hv block below; confirm.
691 this_mv.col = (this_mv.col - 8) | 4;
692 this_mv.row = (this_mv.row - 8) | 4;
693 diag = vfp->svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
697 this_mv.row = (this_mv.row - 8) | 4;
698 diag = vfp->svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
701 this_mv.col = (this_mv.col - 8) | 4;
703 diag = vfp->svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
708 diag = vfp->svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
712 diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// Active diagonal evaluation using the dedicated half-pel hv kernels.
721 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
722 this_mv.as_mv.row = (this_mv.as_mv.row - 8) | 4;
723 thismse = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
724 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
730 *distortion = thismse;
734 this_mv.as_mv.col += 8;
735 thismse = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
736 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
742 *distortion = thismse;
746 this_mv.as_mv.col = (this_mv.as_mv.col - 8) | 4;
747 this_mv.as_mv.row = startmv.as_mv.row + 4;
748 thismse = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
749 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
755 *distortion = thismse;
759 this_mv.as_mv.col += 8;
760 thismse = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
761 diag = thismse + mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
767 *distortion = thismse;
// Helper macros for the integer-pel searches below.  CHECK_BOUNDS sets
// 'all_in' when every point within 'range' of (br,bc) is inside the search
// window (one combined test instead of four per point); CHECK_POINT skips
// an out-of-window candidate inside a loop; CHECK_BETTER accumulates the
// MV-cost only when the raw SAD already beats bestsad.  All expand against
// locals of the enclosing function (x, br, bc, this_mv, fcenter_mv,
// thissad, bestsad, mvsadcost, sad_per_bit).  Comments are kept above the
// macros only — these are backslash-continued definitions.
775 #define CHECK_BOUNDS(range) \
778 all_in &= ((br-range) >= x->mv_row_min);\
779 all_in &= ((br+range) <= x->mv_row_max);\
780 all_in &= ((bc-range) >= x->mv_col_min);\
781 all_in &= ((bc+range) <= x->mv_col_max);\
784 #define CHECK_POINT \
786 if (this_mv.as_mv.col < x->mv_col_min) continue;\
787 if (this_mv.as_mv.col > x->mv_col_max) continue;\
788 if (this_mv.as_mv.row < x->mv_row_min) continue;\
789 if (this_mv.as_mv.row > x->mv_row_max) continue;\
792 #define CHECK_BETTER \
794 if (thissad < bestsad)\
796 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);\
797 if (thissad < bestsad)\
// For each of the 6 hexagon directions, the 3 new points that must be
// checked when the search centre moves one hex step in that direction
// (the other points of the new hexagon were already evaluated).
// NOTE(review): the surrounding brace/semicolon lines appear to be missing
// from this extraction.
805 static const MV next_chkpts[6][3] =
807 {{ -2, 0}, { -1, -2}, {1, -2}},
808 {{ -1, -2}, {1, -2}, {2, 0}},
809 {{1, -2}, {2, 0}, {1, 2}},
810 {{2, 0}, {1, 2}, { -1, 2}},
811 {{1, 2}, { -1, 2}, { -2, 0}},
812 {{ -1, 2}, { -2, 0}, { -1, -2}}
// Hexagon-pattern integer-pel SAD search (upstream: vp8_hex_search).
// Phase 1: evaluate the 6 hex points around the start; on improvement,
// re-centre and evaluate only the 3 new points (next_chkpts) per move,
// up to 127 moves.  Phase 2: refine with the 4 one-away neighbours, up to
// 32 passes.  Result is written to best_mv in full-pel units.
// NOTE(review): the function header lines (name/leading parameters) and
// much structural scaffolding (braces, CHECK_BOUNDS/CHECK_POINT usage,
// sdx4df fast paths, loop headers, declarations of i/j/k/best_site and
// the return) are missing from this extraction; code kept byte-identical.
824 const vp8_variance_fn_ptr_t *vfp,
// Search patterns: the 6-point hexagon and the 4-connected neighbourhood.
830 MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
831 MV neighbors[4] = {{0, -1}, { -1, 0}, {1, 0}, {0, 1}} ;
834 unsigned char *what = (*(b->base_src) + b->src);
835 int what_stride = b->src_stride;
836 int in_what_stride = d->pre_stride;
837 int br = ref_mv->as_mv.row, bc = ref_mv->as_mv.col;
839 unsigned int bestsad = 0x7fffffff;
840 unsigned int thissad;
841 unsigned char *base_offset;
842 unsigned char *this_offset;
// center_mv arrives in eighth-pel units; costing works at full-pel.
848 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
849 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
851 // Work out the start point for the search
852 base_offset = (unsigned char *)(*(d->base_pre) + d->pre);
853 this_offset = base_offset + (br * (d->pre_stride)) + bc;
854 this_mv.as_mv.row = br;
855 this_mv.as_mv.col = bc;
856 bestsad = vfp->sdf( what, what_stride, this_offset,
857 in_what_stride, 0x7fffffff)
858 + mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, sad_per_bit);
// First full hexagon: one branch for the all-in-bounds fast path, one
// with per-point bounds checks.
866 for (i = 0; i < 6; i++)
868 this_mv.as_mv.row = br + hex[i].row;
869 this_mv.as_mv.col = bc + hex[i].col;
870 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
871 thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
876 for (i = 0; i < 6; i++)
878 this_mv.as_mv.row = br + hex[i].row;
879 this_mv.as_mv.col = bc + hex[i].col;
881 this_offset = base_offset + (this_mv.as_mv.row * in_what_stride) + this_mv.as_mv.col;
882 thissad=vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
891 br += hex[best_site].row;
892 bc += hex[best_site].col;
// Repeated re-centring: only the 3 new points per hexagon move.
896 for (j = 1; j < 127; j++)
903 for (i = 0; i < 3; i++)
905 this_mv.as_mv.row = br + next_chkpts[k][i].row;
906 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
907 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
908 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
913 for (i = 0; i < 3; i++)
915 this_mv.as_mv.row = br + next_chkpts[k][i].row;
916 this_mv.as_mv.col = bc + next_chkpts[k][i].col;
918 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
919 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
928 br += next_chkpts[k][best_site].row;
929 bc += next_chkpts[k][best_site].col;
// Keep direction index k in [0,6).
931 if (k >= 12) k -= 12;
932 else if (k >= 6) k -= 6;
936 // check 4 1-away neighbors
938 for (j = 0; j < 32; j++)
945 for (i = 0; i < 4; i++)
947 this_mv.as_mv.row = br + neighbors[i].row;
948 this_mv.as_mv.col = bc + neighbors[i].col;
949 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
950 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
955 for (i = 0; i < 4; i++)
957 this_mv.as_mv.row = br + neighbors[i].row;
958 this_mv.as_mv.col = bc + neighbors[i].col;
960 this_offset = base_offset + (this_mv.as_mv.row * (in_what_stride)) + this_mv.as_mv.col;
961 thissad = vfp->sdf( what, what_stride, this_offset, in_what_stride, bestsad);
970 br += neighbors[best_site].row;
971 bc += neighbors[best_site].col;
975 best_mv->as_mv.row = br;
976 best_mv->as_mv.col = bc;
// Step/diamond integer-pel SAD search using the precomputed x->ss[] site
// table (built by the vp8_init*motion_compensation functions).  At each
// step it probes searches_per_step sites around the current best, moves
// the centre to any winner, and stops early when the centre repeats at
// the starting address.  Returns final variance + MV cost (eighth-pel).
// NOTE(review): structural lines (braces, i/j/step/best_site/last_site/
// tot_steps/ss/this_mv/fcenter_mv declarations, loop bookkeeping and the
// INT_MAX early-return body) are missing from this extraction; code kept
// byte-identical, comments only added.
984 int vp8_diamond_search_sad
994 vp8_variance_fn_ptr_t *fn_ptr,
1001 unsigned char *what = (*(b->base_src) + b->src);
1002 int what_stride = b->src_stride;
1003 unsigned char *in_what;
1004 int in_what_stride = d->pre_stride;
1005 unsigned char *best_address;
1010 int bestsad = INT_MAX;
1014 int ref_row = ref_mv->as_mv.row;
1015 int ref_col = ref_mv->as_mv.col;
1016 int this_row_offset;
1017 int this_col_offset;
1020 unsigned char *check_here;
1023 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv arrives in eighth-pel units; SAD costing works at full-pel.
1025 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1026 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1030 best_mv->as_mv.row = ref_row;
1031 best_mv->as_mv.col = ref_col;
1033 // Work out the start point for the search
1034 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1035 best_address = in_what;
1037 // Check the starting position
1038 bestsad = fn_ptr->sdf(what, what_stride, in_what,
1039 in_what_stride, 0x7fffffff)
1040 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1042 // search_param determines the length of the initial step and hence the number of iterations
1043 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1044 ss = &x->ss[search_param * x->searches_per_step];
1045 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1049 for (step = 0; step < tot_steps ; step++)
1051 for (j = 0 ; j < x->searches_per_step ; j++)
1053 // Trap illegal vectors
1054 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
1055 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
1057 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1058 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
// ss[i].offset already encodes row*stride + col for this site.
1061 check_here = ss[i].offset + best_address;
1062 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
// Two-stage compare: add the MV cost only if the raw SAD already wins.
1064 if (thissad < bestsad)
1066 this_mv.as_mv.row = this_row_offset;
1067 this_mv.as_mv.col = this_col_offset;
1068 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1069 mvsadcost, sad_per_bit);
1071 if (thissad < bestsad)
1082 if (best_site != last_site)
1084 best_mv->as_mv.row += ss[best_site].mv.row;
1085 best_mv->as_mv.col += ss[best_site].mv.col;
1086 best_address += ss[best_site].offset;
1087 last_site = best_site;
1089 else if (best_address == in_what)
// Report the winner in eighth-pel units.
1093 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1094 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1096 if (bestsad == INT_MAX)
1099 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1100 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// SIMD-assisted variant of vp8_diamond_search_sad: when all sites of a
// step are inside the search window, it computes 4 SADs at a time through
// fn_ptr->sdx4df; otherwise it falls back to the scalar per-site loop.
// Same result contract as the scalar version.
// NOTE(review): structural lines (braces, i/j/t/step/best_site/last_site/
// tot_steps/ss/all_in/this_mv/fcenter_mv declarations, loop bookkeeping
// and the INT_MAX early-return body) are missing from this extraction;
// code kept byte-identical, comments only added.
1103 int vp8_diamond_search_sadx4
1113 vp8_variance_fn_ptr_t *fn_ptr,
1120 unsigned char *what = (*(b->base_src) + b->src);
1121 int what_stride = b->src_stride;
1122 unsigned char *in_what;
1123 int in_what_stride = d->pre_stride;
1124 unsigned char *best_address;
1129 int bestsad = INT_MAX;
1133 int ref_row = ref_mv->as_mv.row;
1134 int ref_col = ref_mv->as_mv.col;
1135 int this_row_offset;
1136 int this_col_offset;
1139 unsigned char *check_here;
1140 unsigned int thissad;
1142 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv arrives in eighth-pel units; SAD costing works at full-pel.
1144 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1145 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1148 best_mv->as_mv.row = ref_row;
1149 best_mv->as_mv.col = ref_col;
1151 // Work out the start point for the search
1152 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1153 best_address = in_what;
1155 // Check the starting position
1156 bestsad = fn_ptr->sdf(what, what_stride,
1157 in_what, in_what_stride, 0x7fffffff)
1158 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1160 // search_param determines the length of the initial step and hence the number of iterations
1161 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1162 ss = &x->ss[search_param * x->searches_per_step];
1163 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1167 for (step = 0; step < tot_steps ; step++)
1171 // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
1172 // checking 4 bounds for each points.
// NOTE(review): relies on the first 4 sites of each step covering the
// extreme row/col displacements — true for the tables built above.
1173 all_in &= ((best_mv->as_mv.row + ss[i].mv.row)> x->mv_row_min);
1174 all_in &= ((best_mv->as_mv.row + ss[i+1].mv.row) < x->mv_row_max);
1175 all_in &= ((best_mv->as_mv.col + ss[i+2].mv.col) > x->mv_col_min);
1176 all_in &= ((best_mv->as_mv.col + ss[i+3].mv.col) < x->mv_col_max);
// Fast path: batched 4-wide SAD evaluation.
1180 unsigned int sad_array[4];
1182 for (j = 0 ; j < x->searches_per_step ; j += 4)
1184 unsigned char *block_offset[4];
1186 for (t = 0; t < 4; t++)
1187 block_offset[t] = ss[i+t].offset + best_address;
1189 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1191 for (t = 0; t < 4; t++, i++)
1193 if (sad_array[t] < bestsad)
1195 this_mv.as_mv.row = best_mv->as_mv.row + ss[i].mv.row;
1196 this_mv.as_mv.col = best_mv->as_mv.col + ss[i].mv.col;
1197 sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv,
1198 mvsadcost, sad_per_bit);
1200 if (sad_array[t] < bestsad)
1202 bestsad = sad_array[t];
// Scalar fallback with per-site bounds checks.
1211 for (j = 0 ; j < x->searches_per_step ; j++)
1213 // Trap illegal vectors
1214 this_row_offset = best_mv->as_mv.row + ss[i].mv.row;
1215 this_col_offset = best_mv->as_mv.col + ss[i].mv.col;
1217 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1218 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1220 check_here = ss[i].offset + best_address;
1221 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1223 if (thissad < bestsad)
1225 this_mv.as_mv.row = this_row_offset;
1226 this_mv.as_mv.col = this_col_offset;
1227 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1228 mvsadcost, sad_per_bit);
1230 if (thissad < bestsad)
1241 if (best_site != last_site)
1243 best_mv->as_mv.row += ss[best_site].mv.row;
1244 best_mv->as_mv.col += ss[best_site].mv.col;
1245 best_address += ss[best_site].offset;
1246 last_site = best_site;
1248 else if (best_address == in_what)
// Report the winner in eighth-pel units.
1252 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1253 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1255 if (bestsad == INT_MAX)
1258 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1259 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Exhaustive integer-pel SAD search over a (2*distance)^2 window centred
// on ref_mv, clamped to the macroblock's legal MV range.  Writes the
// winner to d->bmi.mv (full-pel) and returns final variance + MV cost.
// NOTE(review): structural lines (braces, r/c/this_mv/fcenter_mv/thissad
// declarations, the if(thissad < bestsad) brace pairs, check_here++ inner
// advance and the INT_MAX fallback return) are missing from this
// extraction; code kept byte-identical, comments only added.
1262 int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1263 int sad_per_bit, int distance,
1264 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1267 unsigned char *what = (*(b->base_src) + b->src);
1268 int what_stride = b->src_stride;
1269 unsigned char *in_what;
1270 int in_what_stride = d->pre_stride;
1271 int mv_stride = d->pre_stride;
1272 unsigned char *bestaddress;
1273 int_mv *best_mv = &d->bmi.mv;
1275 int bestsad = INT_MAX;
1278 unsigned char *check_here;
1281 int ref_row = ref_mv->as_mv.row;
1282 int ref_col = ref_mv->as_mv.col;
1284 int row_min = ref_row - distance;
1285 int row_max = ref_row + distance;
1286 int col_min = ref_col - distance;
1287 int col_max = ref_col + distance;
1289 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv arrives in eighth-pel units; SAD costing works at full-pel.
1291 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1292 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1294 // Work out the mid point for the search
1295 in_what = *(d->base_pre) + d->pre;
1296 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1298 best_mv->as_mv.row = ref_row;
1299 best_mv->as_mv.col = ref_col;
1301 // Baseline value at the centre
1302 bestsad = fn_ptr->sdf(what, what_stride, bestaddress,
1303 in_what_stride, 0x7fffffff)
1304 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1306 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1307 if (col_min < x->mv_col_min)
1308 col_min = x->mv_col_min;
1310 if (col_max > x->mv_col_max)
1311 col_max = x->mv_col_max;
1313 if (row_min < x->mv_row_min)
1314 row_min = x->mv_row_min;
1316 if (row_max > x->mv_row_max)
1317 row_max = x->mv_row_max;
// Raster scan of the clamped window; SAD kernel early-outs past bestsad.
1319 for (r = row_min; r < row_max ; r++)
1321 this_mv.as_mv.row = r;
1322 check_here = r * mv_stride + in_what + col_min;
1324 for (c = col_min; c < col_max; c++)
1326 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1328 this_mv.as_mv.col = c;
1329 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1330 mvsadcost, sad_per_bit);
1332 if (thissad < bestsad)
1335 best_mv->as_mv.row = r;
1336 best_mv->as_mv.col = c;
1337 bestaddress = check_here;
// Report the winner in eighth-pel units.
1344 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1345 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1347 if (bestsad < INT_MAX)
1348 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1349 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// SIMD-assisted exhaustive search: identical window and result contract to
// vp8_full_search_sad, but each row is scanned 3 columns at a time through
// fn_ptr->sdx3f, with a scalar sdf loop mopping up the remaining columns.
// NOTE(review): structural lines (braces, r/c/i/this_mv/fcenter_mv
// declarations, `c = col_min;` loop init, check_here advances and the
// INT_MAX fallback return) are missing from this extraction; code kept
// byte-identical, comments only added.
1354 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1355 int sad_per_bit, int distance,
1356 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1359 unsigned char *what = (*(b->base_src) + b->src);
1360 int what_stride = b->src_stride;
1361 unsigned char *in_what;
1362 int in_what_stride = d->pre_stride;
1363 int mv_stride = d->pre_stride;
1364 unsigned char *bestaddress;
1365 int_mv *best_mv = &d->bmi.mv;
1367 int bestsad = INT_MAX;
1370 unsigned char *check_here;
1371 unsigned int thissad;
1373 int ref_row = ref_mv->as_mv.row;
1374 int ref_col = ref_mv->as_mv.col;
1376 int row_min = ref_row - distance;
1377 int row_max = ref_row + distance;
1378 int col_min = ref_col - distance;
1379 int col_max = ref_col + distance;
1381 unsigned int sad_array[3];
1383 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv arrives in eighth-pel units; SAD costing works at full-pel.
1385 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1386 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1388 // Work out the mid point for the search
1389 in_what = *(d->base_pre) + d->pre;
1390 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1392 best_mv->as_mv.row = ref_row;
1393 best_mv->as_mv.col = ref_col;
1395 // Baseline value at the centre
1396 bestsad = fn_ptr->sdf(what, what_stride,
1397 bestaddress, in_what_stride, 0x7fffffff)
1398 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1400 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1401 if (col_min < x->mv_col_min)
1402 col_min = x->mv_col_min;
1404 if (col_max > x->mv_col_max)
1405 col_max = x->mv_col_max;
1407 if (row_min < x->mv_row_min)
1408 row_min = x->mv_row_min;
1410 if (row_max > x->mv_row_max)
1411 row_max = x->mv_row_max;
1413 for (r = row_min; r < row_max ; r++)
1415 this_mv.as_mv.row = r;
1416 check_here = r * mv_stride + in_what + col_min;
// Batched path: 3 adjacent column SADs per sdx3f call.
1419 while ((c + 2) < col_max)
1423 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1425 for (i = 0; i < 3; i++)
1427 thissad = sad_array[i];
// Two-stage compare: add the MV cost only if the raw SAD already wins.
1429 if (thissad < bestsad)
1431 this_mv.as_mv.col = c;
1432 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1433 mvsadcost, sad_per_bit);
1435 if (thissad < bestsad)
1438 best_mv->as_mv.row = r;
1439 best_mv->as_mv.col = c;
1440 bestaddress = check_here;
// Scalar tail for the last (col_max - c) columns of the row.
1451 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1453 if (thissad < bestsad)
1455 this_mv.as_mv.col = c;
1456 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1457 mvsadcost, sad_per_bit);
1459 if (thissad < bestsad)
1462 best_mv->as_mv.row = r;
1463 best_mv->as_mv.col = c;
1464 bestaddress = check_here;
// Report the winner in eighth-pel units.
1474 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1475 this_mv.as_mv.col = best_mv->as_mv.col << 3;
1477 if (bestsad < INT_MAX)
1478 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1479 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Full-pel exhaustive search of the +/- 'distance' window around ref_mv,
// accelerated with the 8-wide SAD function fn_ptr->sdx8f(): each row is
// scored eight adjacent columns at a time while at least eight remain,
// then three at a time via sdx3f(), then singly via sdf().  On success
// returns the variance of the best match plus its motion-vector rate
// cost; best_mv (d->bmi.mv) is updated with the winner.
// NOTE(review): this chunk has elided lines (braces, declarations of
// r/c/i/this_mv/fcenter_mv, the "bestsad = thissad;" updates and the
// c/check_here increments are not visible) -- comments below describe
// only the logic that is visible here.
1484 int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1485 int sad_per_bit, int distance,
1486 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
1489 unsigned char *what = (*(b->base_src) + b->src);
1490 int what_stride = b->src_stride;
1491 unsigned char *in_what;
1492 int in_what_stride = d->pre_stride;
1493 int mv_stride = d->pre_stride;
1494 unsigned char *bestaddress;
1495 int_mv *best_mv = &d->bmi.mv;
1497 int bestsad = INT_MAX;
1500 unsigned char *check_here;
1501 unsigned int thissad;
1503 int ref_row = ref_mv->as_mv.row;
1504 int ref_col = ref_mv->as_mv.col;
// Raw search window before clamping against the UMV border limits below.
1506 int row_min = ref_row - distance;
1507 int row_max = ref_row + distance;
1508 int col_min = ref_col - distance;
1509 int col_max = ref_col + distance;
// 16-byte aligned output buffer for the 8-wide SAD function.
1511 DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
1512 unsigned int sad_array[3];
1514 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; >>3 gives the full-pel centre used by
// mvsad_err_cost(), which costs full-pel vector components.
1516 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1517 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
1519 // Work out the mid point for the search
1520 in_what = *(d->base_pre) + d->pre;
1521 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1523 best_mv->as_mv.row = ref_row;
1524 best_mv->as_mv.col = ref_col;
1526 // Baseline value at the centre
1527 bestsad = fn_ptr->sdf(what, what_stride,
1528 bestaddress, in_what_stride, 0x7fffffff)
1529 + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, sad_per_bit);
1531 // Clamp the window so we never test vectors that stretch beyond the UMV border
1532 if (col_min < x->mv_col_min)
1533 col_min = x->mv_col_min;
1535 if (col_max > x->mv_col_max)
1536 col_max = x->mv_col_max;
1538 if (row_min < x->mv_row_min)
1539 row_min = x->mv_row_min;
1541 if (row_max > x->mv_row_max)
1542 row_max = x->mv_row_max;
// Raster scan of the clamped window, one row at a time.
1544 for (r = row_min; r < row_max ; r++)
1546 this_mv.as_mv.row = r;
// Address of the first candidate block of this row in the predictor frame.
1547 check_here = r * mv_stride + in_what + col_min;
// While at least eight columns remain, sdx8f scores check_here .. +7
// in one call, writing unsigned short SADs into sad_array8.
1550 while ((c + 7) < col_max)
1554 fn_ptr->sdx8f(what, what_stride, check_here , in_what_stride, sad_array8);
1556 for (i = 0; i < 8; i++)
1558 thissad = (unsigned int)sad_array8[i];
// Test the raw SAD first; only add the mv cost (and re-test) when the
// raw SAD alone already beats the current best.
1560 if (thissad < bestsad)
1562 this_mv.as_mv.col = c;
1563 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1564 mvsadcost, sad_per_bit);
1566 if (thissad < bestsad)
1569 best_mv->as_mv.row = r;
1570 best_mv->as_mv.col = c;
1571 bestaddress = check_here;
// Middle tail: three-at-a-time via sdx3f while at least three remain.
1580 while ((c + 2) < col_max)
1584 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1586 for (i = 0; i < 3; i++)
1588 thissad = sad_array[i];
1590 if (thissad < bestsad)
1592 this_mv.as_mv.col = c;
1593 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1594 mvsadcost, sad_per_bit);
1596 if (thissad < bestsad)
1599 best_mv->as_mv.row = r;
1600 best_mv->as_mv.col = c;
1601 bestaddress = check_here;
// Final tail: remaining columns scored singly; bestsad is passed to
// sdf() as an early-termination threshold.
1612 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1614 if (thissad < bestsad)
1616 this_mv.as_mv.col = c;
1617 thissad += mvsad_err_cost(&this_mv, &fcenter_mv,
1618 mvsadcost, sad_per_bit);
1620 if (thissad < bestsad)
1623 best_mv->as_mv.row = r;
1624 best_mv->as_mv.col = c;
1625 bestaddress = check_here;
// Convert the winning full-pel vector back to 1/8-pel units.
1634 this_mv.as_mv.row = best_mv->as_mv.row << 3;
1635 this_mv.as_mv.col = best_mv->as_mv.col << 3;
// Return distortion of the best match plus the 1/8-pel mv rate cost.
// NOTE(review): the final vf() argument looks like an output (SSE)
// pointer reusing thissad's storage -- confirm against vp8_variance_fn_ptr_t.
1637 if (bestsad < INT_MAX)
1638 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1639 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Local refinement search: starting at ref_mv, test the four immediate
// full-pel neighbours (up, left, right, down), move the search centre to
// the best improving one, and repeat for up to search_range iterations.
// ref_mv is updated in place with the refined vector; returns variance of
// the refined position plus its motion-vector rate cost.
// NOTE(review): this chunk has elided lines (braces, declarations of
// i/j/best_site/this_mv/fcenter_mv, the "bestsad = thissad; best_site = j;"
// updates, and the loop-exit break after convergence are not visible) --
// comments below describe only the visible logic.
1644 int vp8_refining_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
1645 int error_per_bit, int search_range,
1646 vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2],
// Row/col offsets of the four candidate neighbours per iteration.
1649 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1651 short this_row_offset, this_col_offset;
1653 int what_stride = b->src_stride;
1654 int in_what_stride = d->pre_stride;
1655 unsigned char *what = (*(b->base_src) + b->src);
// Pixel address in the predictor frame corresponding to ref_mv.
1656 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1657 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1658 unsigned char *check_here;
1659 unsigned int thissad;
1661 unsigned int bestsad = INT_MAX;
1663 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; >>3 gives the full-pel centre for costing.
1666 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1667 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// SAD + mv cost of the starting position is the score to beat.
1669 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1671 for (i=0; i<search_range; i++)
1675 for (j = 0 ; j < 4 ; j++)
1677 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1678 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
// Only consider neighbours strictly inside the clamped motion range.
1680 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1681 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1683 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
// bestsad is passed to sdf() as an early-termination threshold.
1684 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
// Raw SAD must win before the mv cost is added and the full score re-tested.
1686 if (thissad < bestsad)
1688 this_mv.as_mv.row = this_row_offset;
1689 this_mv.as_mv.col = this_col_offset;
1690 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1692 if (thissad < bestsad)
// best_site == -1 means no neighbour improved: the search has converged.
1701 if (best_site == -1)
// Otherwise step the centre (and its pixel address) to the winning neighbour.
1705 ref_mv->as_mv.row += neighbors[best_site].row;
1706 ref_mv->as_mv.col += neighbors[best_site].col;
1707 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
// Convert the refined full-pel vector to 1/8-pel units for the return cost.
1711 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1712 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
// NOTE(review): the final vf() argument looks like an output (SSE)
// pointer reusing thissad's storage -- confirm against vp8_variance_fn_ptr_t.
1714 if (bestsad < INT_MAX)
1715 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1716 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
// Local refinement search, 4-wide SAD variant: same iterative
// up/left/right/down refinement as vp8_refining_search_sad(), but when all
// four neighbours lie strictly inside the valid motion range they are
// scored in a single fn_ptr->sdx4df() call; otherwise it falls back to
// per-neighbour sdf() calls with range checks.  ref_mv is refined in
// place; returns variance plus mv rate cost of the final position.
// NOTE(review): this chunk has elided lines (braces, declarations of
// i/j/all_in/best_site/this_mv/fcenter_mv, the all_in initialisation, the
// "bestsad = .../best_site = j;" updates, and the convergence break are
// not visible) -- comments below describe only the visible logic.
1721 int vp8_refining_search_sadx4(MACROBLOCK *x, BLOCK *b, BLOCKD *d,
1722 int_mv *ref_mv, int error_per_bit,
1723 int search_range, vp8_variance_fn_ptr_t *fn_ptr,
1724 int *mvcost[2], int_mv *center_mv)
// Row/col offsets of the four candidate neighbours per iteration.
1726 MV neighbors[4] = {{-1, 0}, {0, -1}, {0, 1}, {1, 0}};
1728 short this_row_offset, this_col_offset;
1730 int what_stride = b->src_stride;
1731 int in_what_stride = d->pre_stride;
1732 unsigned char *what = (*(b->base_src) + b->src);
// Pixel address in the predictor frame corresponding to ref_mv.
1733 unsigned char *best_address = (unsigned char *)(*(d->base_pre) + d->pre +
1734 (ref_mv->as_mv.row * (d->pre_stride)) + ref_mv->as_mv.col);
1735 unsigned char *check_here;
1736 unsigned int thissad;
1738 unsigned int bestsad = INT_MAX;
1740 int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};
// center_mv is in 1/8-pel units; >>3 gives the full-pel centre for costing.
1743 fcenter_mv.as_mv.row = center_mv->as_mv.row >> 3;
1744 fcenter_mv.as_mv.col = center_mv->as_mv.col >> 3;
// SAD + mv cost of the starting position is the score to beat.
1746 bestsad = fn_ptr->sdf(what, what_stride, best_address, in_what_stride, 0x7fffffff) + mvsad_err_cost(ref_mv, &fcenter_mv, mvsadcost, error_per_bit);
1748 for (i=0; i<search_range; i++)
// all_in stays non-zero only if every one of the four neighbours is
// strictly inside the clamped motion range.
1753 all_in &= ((ref_mv->as_mv.row - 1) > x->mv_row_min);
1754 all_in &= ((ref_mv->as_mv.row + 1) < x->mv_row_max);
1755 all_in &= ((ref_mv->as_mv.col - 1) > x->mv_col_min);
1756 all_in &= ((ref_mv->as_mv.col + 1) < x->mv_col_max);
// Fast path: score all four neighbours with a single sdx4df() call.
1760 unsigned int sad_array[4];
1761 unsigned char *block_offset[4];
1762 block_offset[0] = best_address - in_what_stride;
1763 block_offset[1] = best_address - 1;
1764 block_offset[2] = best_address + 1;
1765 block_offset[3] = best_address + in_what_stride;
1767 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1769 for (j = 0; j < 4; j++)
// Raw SAD must win before the mv cost is added and the score re-tested.
1771 if (sad_array[j] < bestsad)
1773 this_mv.as_mv.row = ref_mv->as_mv.row + neighbors[j].row;
1774 this_mv.as_mv.col = ref_mv->as_mv.col + neighbors[j].col;
1775 sad_array[j] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1777 if (sad_array[j] < bestsad)
1779 bestsad = sad_array[j];
// Slow path: score each neighbour individually, skipping out-of-range ones.
1787 for (j = 0 ; j < 4 ; j++)
1789 this_row_offset = ref_mv->as_mv.row + neighbors[j].row;
1790 this_col_offset = ref_mv->as_mv.col + neighbors[j].col;
1792 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1793 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1795 check_here = (neighbors[j].row)*in_what_stride + neighbors[j].col + best_address;
// bestsad is passed to sdf() as an early-termination threshold.
1796 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1798 if (thissad < bestsad)
1800 this_mv.as_mv.row = this_row_offset;
1801 this_mv.as_mv.col = this_col_offset;
1802 thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);
1804 if (thissad < bestsad)
// best_site == -1 means no neighbour improved: the search has converged.
1814 if (best_site == -1)
// Otherwise step the centre (and its pixel address) to the winning neighbour.
1818 ref_mv->as_mv.row += neighbors[best_site].row;
1819 ref_mv->as_mv.col += neighbors[best_site].col;
1820 best_address += (neighbors[best_site].row)*in_what_stride + neighbors[best_site].col;
// Convert the refined full-pel vector to 1/8-pel units for the return cost.
1824 this_mv.as_mv.row = ref_mv->as_mv.row << 3;
1825 this_mv.as_mv.col = ref_mv->as_mv.col << 3;
// NOTE(review): the final vf() argument looks like an output (SSE)
// pointer reusing thissad's storage -- confirm against vp8_variance_fn_ptr_t.
1827 if (bestsad < INT_MAX)
1828 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1829 + mv_err_cost(&this_mv, center_mv, mvcost, x->errorperbit);
1834 #ifdef ENTROPY_STATS
// ENTROPY_STATS only: dump the accumulated mv_ref_ct / mv_mode_cts
// counters as C source -- a "const int vp8_mode_contexts[6][4]" table
// written to modecont.c, one 4-entry row per mv-ref context j.
// NOTE(review): this chunk has elided lines (braces, declarations of
// i/j/overal_prob/this_prob, the fclose(f), and any guards around the
// divisions by count or the fopen() result are not visible here).
1835 void print_mode_context(void)
1837 FILE *f = fopen("modecont.c", "w");
1840 fprintf(f, "#include \"entropy.h\"\n");
1841 fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
1844 for (j = 0; j < 6; j++)
1846 fprintf(f, " { // %d \n", j);
1849 for (i = 0; i < 4; i++)
1853 int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
// Context-independent 256-scaled fraction of the [0] branch for node i.
1856 count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
1859 overal_prob = 256 * mv_mode_cts[i][0] / count;
1863 if (overal_prob == 0)
// Context-conditional 256-scaled fraction of the [0] branch for node i
// under mv-ref context j; this is the value emitted into the table.
1867 count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
1870 this_prob = 256 * mv_ref_ct[j][i][0] / count;
1877 fprintf(f, "%5d, ", this_prob);
1878 //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
1879 //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
1882 fprintf(f, " },\n");
1889 /* MV ref count ENTROPY_STATS stats code */
1890 #ifdef ENTROPY_STATS
// Zero the ENTROPY_STATS accumulators (mv_ref_ct and mv_mode_cts, the
// file-scope counter arrays) before a new statistics-gathering pass.
1891 void init_mv_ref_counts()
1893 vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
1894 vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
// Tally, for prediction mode m and the mv-ref context counts ct[4], which
// branch was taken at each of four mode-decision nodes: mv_ref_ct is
// indexed [context][node][branch] and mv_mode_cts is [node][branch].
// NOTE(review): the if/else chain that selects which pair of counters to
// bump (presumably testing m against the ZEROMV/NEARESTMV/NEARMV/... mode
// values -- confirm) is elided from this chunk; only the increment
// statements for each branch remain visible.
1897 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
// Node 0: branch 0 taken ...
1901 ++mv_ref_ct [ct[0]] [0] [0];
1902 ++mv_mode_cts[0][0];
// ... or branch 1 taken.
1906 ++mv_ref_ct [ct[0]] [0] [1];
1907 ++mv_mode_cts[0][1];
// Node 1: branch 0 / branch 1.
1911 ++mv_ref_ct [ct[1]] [1] [0];
1912 ++mv_mode_cts[1][0];
1916 ++mv_ref_ct [ct[1]] [1] [1];
1917 ++mv_mode_cts[1][1];
// Node 2: branch 0 / branch 1.
1921 ++mv_ref_ct [ct[2]] [2] [0];
1922 ++mv_mode_cts[2][0];
1926 ++mv_ref_ct [ct[2]] [2] [1];
1927 ++mv_mode_cts[2][1];
// Node 3: branch 0 / branch 1.
1931 ++mv_ref_ct [ct[3]] [3] [0];
1932 ++mv_mode_cts[3][0];
1936 ++mv_ref_ct [ct[3]] [3] [1];
1937 ++mv_mode_cts[3][1];
1944 #endif/* END MV ref count ENTROPY_STATS stats code */