/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
13 #include "vpx_mem/vpx_mem.h"
// ENTROPY_STATS counters: per-context MV-reference counts and per-mode
// hit/miss counts, filled by accum_mv_refs() and dumped by
// print_mode_context() further down in this file.
20 static int mv_ref_ct [31] [4] [2];
21 static int mv_mode_cts [4] [2];
// Lookup table mapping a bit-count index to an approximate SAD-domain
// cost; filled by vp8cx_init_mv_bits_sadcost() below.
24 static int mv_bits_sadcost[256];
26 void vp8cx_init_mv_bits_sadcost()
30 for (i = 0; i < 256; i++)
32 mv_bits_sadcost[i] = (int)sqrt(i * 16);
37 int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight)
39 // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
40 // over state the cost of vectors. In addition coding a new vector can have a knock on effect on the
41 // cost of subsequent vectors and the quality of prediction from NEAR and NEAREST for subsequent blocks.
42 // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
43 return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * Weight) >> 7;
46 int vp8_mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
49 //return ((mvcost[0][(mv->row - ref->row)>>1] + mvcost[1][(mv->col - ref->col)>>1] + 128) * error_per_bit) >> 8;
50 //return ( (vp8_mv_bit_cost(mv, ref, mvcost, 100) + 128) * error_per_bit) >> 8;
52 //i = (vp8_mv_bit_cost(mv, ref, mvcost, 100) * error_per_bit + 128) >> 8;
53 return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * error_per_bit + 128) >> 8;
54 //return (vp8_mv_bit_cost(mv, ref, mvcost, 128) * error_per_bit + 128) >> 8;
58 static int mv_bits(MV *mv, MV *ref, int *mvcost[2])
60 // get the estimated number of bits for a motion vector, to be used for costing in SAD based
62 return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col)>> 1]) + 128) >> 8;
// Build the diamond-search site table x->ss for a 4-point pattern:
// the zero-displacement site first, then (per step length Len) the
// up/down/left/right offsets and their buffer-offset equivalents.
// NOTE(review): this chunk is a partial extraction — the Len
// initialization, step loop and search_site_count increments are not
// visible here; commented as-is, code left byte-identical.
65 void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
68 int search_site_count = 0;
71 // Generate offsets for 4 search sites per step.
// Site 0: the current best position itself.
73 x->ss[search_site_count].mv.col = 0;
74 x->ss[search_site_count].mv.row = 0;
75 x->ss[search_site_count].offset = 0;
// Site above: row offset -Len maps to -Len rows in the buffer.
81 // Compute offsets for search sites.
82 x->ss[search_site_count].mv.col = 0;
83 x->ss[search_site_count].mv.row = -Len;
84 x->ss[search_site_count].offset = -Len * stride;
// Site below.
87 // Compute offsets for search sites.
88 x->ss[search_site_count].mv.col = 0;
89 x->ss[search_site_count].mv.row = Len;
90 x->ss[search_site_count].offset = Len * stride;
// Site left.
93 // Compute offsets for search sites.
94 x->ss[search_site_count].mv.col = -Len;
95 x->ss[search_site_count].mv.row = 0;
96 x->ss[search_site_count].offset = -Len;
// Site right.
99 // Compute offsets for search sites.
100 x->ss[search_site_count].mv.col = Len;
101 x->ss[search_site_count].mv.row = 0;
102 x->ss[search_site_count].offset = Len;
// Record the table size and the per-step fan-out for the searchers.
109 x->ss_count = search_site_count;
110 x->searches_per_step = 4;
// Build the search site table x->ss for an 8-point pattern: the four
// axis-aligned offsets plus the four diagonals, per step length Len,
// starting from MAX_FIRST_STEP.
// NOTE(review): partial extraction — the step loop and
// search_site_count increments are not visible here; code left
// byte-identical.
113 void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
116 int search_site_count = 0;
118 // Generate offsets for 8 search sites per step.
119 Len = MAX_FIRST_STEP;
// Site 0: the current best position itself.
120 x->ss[search_site_count].mv.col = 0;
121 x->ss[search_site_count].mv.row = 0;
122 x->ss[search_site_count].offset = 0;
// Up.
128 // Compute offsets for search sites.
129 x->ss[search_site_count].mv.col = 0;
130 x->ss[search_site_count].mv.row = -Len;
131 x->ss[search_site_count].offset = -Len * stride;
// Down.
134 // Compute offsets for search sites.
135 x->ss[search_site_count].mv.col = 0;
136 x->ss[search_site_count].mv.row = Len;
137 x->ss[search_site_count].offset = Len * stride;
// Left.
140 // Compute offsets for search sites.
141 x->ss[search_site_count].mv.col = -Len;
142 x->ss[search_site_count].mv.row = 0;
143 x->ss[search_site_count].offset = -Len;
// Right.
146 // Compute offsets for search sites.
147 x->ss[search_site_count].mv.col = Len;
148 x->ss[search_site_count].mv.row = 0;
149 x->ss[search_site_count].offset = Len;
// Up-left diagonal.
152 // Compute offsets for search sites.
153 x->ss[search_site_count].mv.col = -Len;
154 x->ss[search_site_count].mv.row = -Len;
155 x->ss[search_site_count].offset = -Len * stride - Len;
// Up-right diagonal.
158 // Compute offsets for search sites.
159 x->ss[search_site_count].mv.col = Len;
160 x->ss[search_site_count].mv.row = -Len;
161 x->ss[search_site_count].offset = -Len * stride + Len;
// Down-left diagonal.
164 // Compute offsets for search sites.
165 x->ss[search_site_count].mv.col = -Len;
166 x->ss[search_site_count].mv.row = Len;
167 x->ss[search_site_count].offset = Len * stride - Len;
// Down-right diagonal.
170 // Compute offsets for search sites.
171 x->ss[search_site_count].mv.col = Len;
172 x->ss[search_site_count].mv.row = Len;
173 x->ss[search_site_count].offset = Len * stride + Len;
// Record the table size and the per-step fan-out for the searchers.
181 x->ss_count = search_site_count;
182 x->searches_per_step = 8;
// Helper macros for the iterative sub-pixel search below. They rely on
// locals of the enclosing function (mvcost, rr, rc, error_per_bit, d, b,
// z, svf, sse, besterr, br, bc, minc/maxc/minr/maxr) being in scope.
186 #define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
187 #define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to predictor base of a motionvector
188 #define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
189 #define DIST(r,c) svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error function.
// Guard: only evaluate s when (r,c) is inside the legal MV range,
// otherwise run e (which marks the candidate as unusable).
190 #define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
191 #define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
192 #define CHECK_BETTER(v,r,c) IFMVCV(r,c,{if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
193 #define MIN(x,y) (((x)<(y))?(x):(y))
194 #define MAX(x,y) (((x)>(y))?(x):(y))
// Dead variant kept for reference in the original source:
196 //#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
// Iterative sub-pixel refinement around *bestmv: up to "halfiters"
// rounds at half-pel spacing (step 2 in quarter-pel units) followed by
// up to "quarteriters" rounds at quarter-pel spacing (step 1), each
// round probing left/right/up/down plus the best diagonal via
// CHECK_BETTER. Returns the best error (return path not visible in this
// extraction). NOTE(review): partial extraction — loop bodies, braces
// and the final return are incomplete here; code left byte-identical.
198 int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, vp8_subpixvariance_fn_t svf, vp8_variance_fn_t vf, int *mvcost[2])
// y: predictor pixels at the integer-pel best MV; z: source block.
200 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
201 unsigned char *z = (*(b->base_src) + b->src);
// rr/rc: reference MV in the cost tables' reduced units; br/bc: best
// position in quarter-pel units; tr/tc: the round's centre.
203 int rr = ref_mv->row >> 1, rc = ref_mv->col >> 1;
204 int br = bestmv->row << 2, bc = bestmv->col << 2;
205 int tr = br, tc = bc;
206 unsigned int besterr = INT_MAX;
207 unsigned int left, right, up, down, diag;
209 unsigned int whichdir;
210 unsigned int halfiters = 4;
211 unsigned int quarteriters = 4;
// Clamp the probe window to both the MV range limits and the longest
// codable vector ((1 << mvlong_width) - 1).
213 int minc = MAX(x->mv_col_min << 2, (ref_mv->col >> 1) - ((1 << mvlong_width) - 1));
214 int maxc = MIN(x->mv_col_max << 2, (ref_mv->col >> 1) + ((1 << mvlong_width) - 1));
215 int minr = MAX(x->mv_row_min << 2, (ref_mv->row >> 1) - ((1 << mvlong_width) - 1));
216 int maxr = MIN(x->mv_row_max << 2, (ref_mv->row >> 1) + ((1 << mvlong_width) - 1));
222 // calculate central point error
223 besterr = vf(y, d->pre_stride, z, b->src_stride, &sse);
224 besterr += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// Half-pel rounds (step 2 in quarter-pel units).
226 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
230 CHECK_BETTER(left, tr, tc - 2);
231 CHECK_BETTER(right, tr, tc + 2);
232 CHECK_BETTER(up, tr - 2, tc);
233 CHECK_BETTER(down, tr + 2, tc);
// Pick the quadrant whose diagonal is worth probing.
235 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
240 CHECK_BETTER(diag, tr - 2, tc - 2);
243 CHECK_BETTER(diag, tr - 2, tc + 2);
246 CHECK_BETTER(diag, tr + 2, tc - 2);
249 CHECK_BETTER(diag, tr + 2, tc + 2);
// Converged at this precision when the centre did not move.
253 // no reason to check the same one again.
254 if (tr == br && tc == bc)
// Quarter-pel rounds (step 1).
261 // TODO: Each subsequent iteration checks at least one point in common with the last iteration could be 2 ( if diag selected)
263 while (--quarteriters)
265 CHECK_BETTER(left, tr, tc - 1);
266 CHECK_BETTER(right, tr, tc + 1);
267 CHECK_BETTER(up, tr - 1, tc);
268 CHECK_BETTER(down, tr + 1, tc);
270 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
275 CHECK_BETTER(diag, tr - 1, tc - 1);
278 CHECK_BETTER(diag, tr - 1, tc + 1);
281 CHECK_BETTER(diag, tr + 1, tc - 1);
284 CHECK_BETTER(diag, tr + 1, tc + 1);
288 // no reason to check the same one again.
289 if (tr == br && tc == bc)
// Convert the quarter-pel result back to the MV's storage units.
296 bestmv->row = br << 1;
297 bestmv->col = bc << 1;
// Trap vectors that moved too far from the reference to be codable.
299 if ((abs(bestmv->col - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs(bestmv->row - ref_mv->row) > MAX_FULL_PEL_VAL))
// One-shot sub-pixel search: evaluate the four half-pel neighbours of
// *bestmv plus the most promising diagonal, then repeat the pattern at
// quarter-pel precision around the half-pel winner. Candidate scores
// are sub-pixel variance (svf) plus vp8_mv_err_cost().
// NOTE(review): partial extraction — braces, best-candidate bookkeeping
// and the final return are not visible; code left byte-identical.
312 int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, vp8_subpixvariance_fn_t svf, vp8_variance_fn_t vf, int *mvcost[2])
314 int bestmse = INT_MAX;
318 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
319 unsigned char *z = (*(b->base_src) + b->src);
320 int left, right, up, down, diag;
325 // Trap uncodable vectors
326 if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
338 // calculate central point error
339 bestmse = vf(y, d->pre_stride, z, b->src_stride, &sse);
340 bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// Half-pel pass. (x - 8) | 4 forms "x minus one full pel, plus half".
342 // go left then right and check error
343 this_mv.row = startmv.row;
344 this_mv.col = ((startmv.col - 8) | 4);
345 left = svf(y - 1, d->pre_stride, 4, 0, z, b->src_stride, &sse);
346 left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
355 right = svf(y, d->pre_stride, 4, 0, z, b->src_stride, &sse);
356 right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
364 // go up then down and check error
365 this_mv.col = startmv.col;
366 this_mv.row = ((startmv.row - 8) | 4);
367 up = svf(y - d->pre_stride, d->pre_stride, 0, 4, z, b->src_stride, &sse);
368 up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
377 down = svf(y, d->pre_stride, 0, 4, z, b->src_stride, &sse);
378 down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
387 // now check 1 more diagonal
388 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
389 // whichdir must be 0-4. Therefore, one of the cases below
390 // must run through. However, because there is no default
391 // and diag is not set elsewhere, we get a compile warning
393 //for(whichdir =0;whichdir<4;whichdir++)
// whichdir selects which diagonal predictor pointer/offsets to use.
400 this_mv.col = (this_mv.col - 8) | 4;
401 this_mv.row = (this_mv.row - 8) | 4;
402 diag = svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
406 this_mv.row = (this_mv.row - 8) | 4;
407 diag = svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
410 this_mv.col = (this_mv.col - 8) | 4;
412 diag = svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
417 diag = svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
421 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// Quarter-pel pass around the half-pel winner; the pointer y is first
// re-based on which half-pel neighbour won (logic not fully visible).
432 // time to check quarter pels.
433 if (bestmv->row < startmv.row)
436 if (bestmv->col < startmv.col)
443 // go left then right and check error
444 this_mv.row = startmv.row;
448 this_mv.col = startmv.col - 2;
449 left = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
453 this_mv.col = (startmv.col - 8) | 6;
454 left = svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
457 left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
466 right = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
467 right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
475 // go up then down and check error
476 this_mv.col = startmv.col;
480 this_mv.row = startmv.row - 2;
481 up = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
485 this_mv.row = (startmv.row - 8) | 6;
486 up = svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
489 up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
498 down = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
499 down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
508 // now check 1 more diagonal
509 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
511 // for(whichdir=0;whichdir<4;whichdir++)
// Quarter-pel diagonal candidates, one per whichdir quadrant.
526 diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
530 this_mv.col = (startmv.col - 8) | 6;
531 diag = svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);;
536 this_mv.row = (startmv.row - 8) | 6;
541 diag = svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
545 this_mv.col = (startmv.col - 8) | 6;
546 diag = svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
557 diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
561 this_mv.row = (startmv.row - 8) | 6;
562 diag = svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
572 diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
576 this_mv.col = (startmv.col - 8) | 6;
577 diag = svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);;
584 diag = svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
588 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// Half-pel-only sub-pixel search: probe the four half-pel neighbours of
// *bestmv and all/selected diagonals, scoring with svf + mv_err_cost.
// NOTE(review): partial extraction — braces, winner bookkeeping and the
// final return are not visible; code left byte-identical.
601 int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, vp8_subpixvariance_fn_t svf, vp8_variance_fn_t vf, int *mvcost[2])
603 int bestmse = INT_MAX;
607 unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
608 unsigned char *z = (*(b->base_src) + b->src);
609 int left, right, up, down, diag;
612 // Trap uncodable vectors
613 if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
625 // calculate central point error
626 bestmse = vf(y, d->pre_stride, z, b->src_stride, &sse);
627 bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);
// (x - 8) | 4 forms "x minus one full pel, plus half a pel".
629 // go left then right and check error
630 this_mv.row = startmv.row;
631 this_mv.col = ((startmv.col - 8) | 4);
632 left = svf(y - 1, d->pre_stride, 4, 0, z, b->src_stride, &sse);
633 left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
642 right = svf(y, d->pre_stride, 4, 0, z, b->src_stride, &sse);
643 right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
651 // go up then down and check error
652 this_mv.col = startmv.col;
653 this_mv.row = ((startmv.row - 8) | 4);
654 up = svf(y - d->pre_stride, d->pre_stride, 0, 4, z, b->src_stride, &sse);
655 up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
664 down = svf(y, d->pre_stride, 0, 4, z, b->src_stride, &sse);
665 down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
673 // somewhat strangely not doing all the diagonals for half pel is slower than doing them.
675 // now check 1 more diagonal -
676 whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
// Diagonal selected by whichdir quadrant.
682 this_mv.col = (this_mv.col - 8) | 4;
683 this_mv.row = (this_mv.row - 8) | 4;
684 diag = svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
688 this_mv.row = (this_mv.row - 8) | 4;
689 diag = svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
692 this_mv.col = (this_mv.col - 8) | 4;
694 diag = svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
699 diag = svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
703 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// Remaining (all-diagonals) probes — see comment above about speed.
712 this_mv.col = (this_mv.col - 8) | 4;
713 this_mv.row = (this_mv.row - 8) | 4;
714 diag = svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
715 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
724 diag = svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
725 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
733 this_mv.col = (this_mv.col - 8) | 4;
734 this_mv.row = startmv.row + 4;
735 diag = svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
736 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
745 diag = svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
746 diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// SAD-domain variants of the search macros used by the hex search below.
// NOTE(review): MVC/PRE/ERR/CHECK_BETTER are redefined here — the full
// file presumably #undef's the earlier definitions first; confirm.
759 #define MVC(r,c) (((mvsadcost[0][((r)<<2)-rr] + mvsadcost[1][((c)<<2) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
760 #define PRE(r,c) (*(d->base_pre) + d->pre + (r) * d->pre_stride + (c)) // pointer to predictor base of a motionvector
761 #define DIST(r,c,v) sf( src,src_stride,PRE(r,c),d->pre_stride, v) // returns sad error score.
762 #define ERR(r,c,v) (MVC(r,c)+DIST(r,c,v)) // returns distortion + motion vector cost
763 #define CHECK_BETTER(v,r,c) if ((v = ERR(r,c,besterr)) < besterr) { besterr = v; br=r; bc=c; } // checks if (r,c) has better score than previous best
// For each of the 6 hex directions, the 3 new points to probe after the
// centre moves in that direction (the other 3 were already checked).
764 static const MV next_chkpts[6][3] =
766 {{ -2, 0}, { -1, -2}, {1, -2}},
767 {{ -1, -2}, {1, -2}, {2, 0}},
768 {{1, -2}, {2, 0}, {1, 2}},
769 {{2, 0}, {1, 2}, { -1, 2}},
770 {{1, 2}, { -1, 2}, { -2, 0}},
771 {{ -1, 2}, { -2, 0}, { -1, -2}}
// Hexagon-pattern integer-pel motion search (parameter list is truncated
// in this extraction — presumably vp8_hex_search; confirm against the
// full file): probe the 6 hex points around the start, then repeatedly
// probe the 3 new points in the winning direction (next_chkpts), and
// finish with the 8 immediate neighbours. Returns variance + MV cost of
// the winner. NOTE(review): partial extraction; code left byte-identical.
783 vp8_variance_fn_t vf,
789 MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
790 MV neighbors[8] = { { -1, -1}, { -1, 0}, { -1, 1}, {0, -1}, {0, 1}, {1, -1}, {1, 0}, {1, 1} } ;
792 unsigned char *src = (*(b->base_src) + b->src);
793 int src_stride = b->src_stride;
// rr/rc: reference MV in 1/8-pel storage units; br/bc: start in full pel.
794 int rr = ref_mv->row, rc = ref_mv->col, br = rr >> 3, bc = rc >> 3, tr, tc;
795 unsigned int besterr, thiserr = 0x7fffffff;
// Clamp the start point to the legal MV range.
798 if (bc < x->mv_col_min) bc = x->mv_col_min;
800 if (bc > x->mv_col_max) bc = x->mv_col_max;
802 if (br < x->mv_row_min) br = x->mv_row_min;
804 if (br > x->mv_row_max) br = x->mv_row_max;
809 besterr = ERR(br, bc, thiserr);
// First pass: all 6 hex points, skipping out-of-range candidates.
816 for (i = 0; i < 6; i++)
818 int nr = tr + hex[i].row, nc = tc + hex[i].col;
820 if (nc < x->mv_col_min) continue;
822 if (nc > x->mv_col_max) continue;
824 if (nr < x->mv_row_min) continue;
826 if (nr > x->mv_row_max) continue;
828 //CHECK_BETTER(thiserr,nr,nc);
829 if ((thiserr = ERR(nr, nc, besterr)) < besterr)
// Centre did not move: pattern has converged.
838 if (tr == br && tc == bc)
// Refinement passes: only the 3 new points per winning direction.
841 for (j = 1; j < 127; j++)
847 for (i = 0; i < 3; i++)
849 int nr = tr + next_chkpts[tk][i].row, nc = tc + next_chkpts[tk][i].col;
851 if (nc < x->mv_col_min) continue;
853 if (nc > x->mv_col_max) continue;
855 if (nr < x->mv_row_min) continue;
857 if (nr > x->mv_row_max) continue;
859 //CHECK_BETTER(thiserr,nr,nc);
860 if ((thiserr = ERR(nr, nc, besterr)) < besterr)
864 bc = nc; //k=(tk+5+i)%6;}
// Wrap the direction index back into [0, 6).
867 if (k >= 12) k -= 12;
868 else if (k >= 6) k -= 6;
872 if (tr == br && tc == bc)
876 // check 8 1 away neighbors
881 for (i = 0; i < 8; i++)
883 int nr = tr + neighbors[i].row, nc = tc + neighbors[i].col;
885 if (nc < x->mv_col_min) continue;
887 if (nc > x->mv_col_max) continue;
889 if (nr < x->mv_row_min) continue;
891 if (nr > x->mv_row_max) continue;
893 CHECK_BETTER(thiserr, nr, nc);
// Final score uses the variance function at the winning position.
899 return vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + MVC(br, bc) ;
// Scalar diamond search: starting from ref_mv (full-pel), walk the
// precomputed site table x->ss one step-size at a time, keeping the
// best SAD + MV cost. search_param skips the initial (largest) steps.
// Returns variance + MV cost of the winner, or INT_MAX if no candidate
// was legal. NOTE(review): partial extraction — parameter list, braces
// and best_site bookkeeping are incomplete; code left byte-identical.
909 int vp8_diamond_search_sad
919 vp8_variance_fn_ptr_t *fn_ptr,
926 unsigned char *what = (*(b->base_src) + b->src);
927 int what_stride = b->src_stride;
928 unsigned char *in_what;
929 int in_what_stride = d->pre_stride;
930 unsigned char *best_address;
935 int bestsad = INT_MAX;
// ref MV converted from 1/8-pel storage units to full pel.
939 int ref_row = ref_mv->row >> 3;
940 int ref_col = ref_mv->col >> 3;
945 unsigned char *check_here;
948 // Work out the start point for the search
949 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
950 best_address = in_what;
952 // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
953 if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
954 (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
956 // Check the starting position
957 bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
960 // search_param determines the length of the initial step and hence the number of iterations
961 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
962 ss = &x->ss[search_param * x->searches_per_step];
963 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
966 best_mv->row = ref_row;
967 best_mv->col = ref_col;
971 for (step = 0; step < tot_steps ; step++)
973 for (j = 0 ; j < x->searches_per_step ; j++)
975 // Trap illegal vectors
976 this_row_offset = best_mv->row + ss[i].mv.row;
977 this_col_offset = best_mv->col + ss[i].mv.col;
979 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
980 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
983 check_here = ss[i].offset + best_address;
// Early-exit SAD: passing bestsad lets sdf abort once it exceeds it.
984 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
986 if (thissad < bestsad)
// Only add the MV cost when the raw SAD already beats the best.
988 this_mv.row = this_row_offset << 3;
989 this_mv.col = this_col_offset << 3;
990 thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
992 if (thissad < bestsad)
// Move the centre to the best site found in this step.
1003 if (best_site != last_site)
1005 best_mv->row += ss[best_site].mv.row;
1006 best_mv->col += ss[best_site].mv.col;
1007 best_address += ss[best_site].offset;
1008 last_site = best_site;
1010 else if (best_address == in_what)
1014 this_mv.row = best_mv->row << 3;
1015 this_mv.col = best_mv->col << 3;
1017 if (bestsad == INT_MAX)
// Final score: true variance at the winner plus its (mvcost) rate.
1020 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1021 + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// SIMD-assisted diamond search: identical strategy to
// vp8_diamond_search_sad, but when all 4 candidate sites of a step are
// in range it computes their SADs in one sdx4df call; otherwise it
// falls back to the scalar per-site path.
// NOTE(review): partial extraction — parameter list, braces and
// best_site bookkeeping are incomplete; code left byte-identical.
1024 int vp8_diamond_search_sadx4
1034 vp8_variance_fn_ptr_t *fn_ptr,
1041 unsigned char *what = (*(b->base_src) + b->src);
1042 int what_stride = b->src_stride;
1043 unsigned char *in_what;
1044 int in_what_stride = d->pre_stride;
1045 unsigned char *best_address;
1050 int bestsad = INT_MAX;
1054 int ref_row = ref_mv->row >> 3;
1055 int ref_col = ref_mv->col >> 3;
1056 int this_row_offset;
1057 int this_col_offset;
1060 unsigned char *check_here;
1061 unsigned int thissad;
1063 // Work out the start point for the search
1064 in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
1065 best_address = in_what;
1067 // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
1068 if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
1069 (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
1071 // Check the starting position
1072 bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
1075 // search_param determines the length of the initial step and hence the number of iterations
1076 // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
1077 ss = &x->ss[search_param * x->searches_per_step];
1078 tot_steps = (x->ss_count / x->searches_per_step) - search_param;
1081 best_mv->row = ref_row;
1082 best_mv->col = ref_col;
1086 for (step = 0; step < tot_steps ; step++)
1090 // To know if all neighbor points are within the bounds, 4 bounds checking are enough instead of
1091 // checking 4 bounds for each points.
1092 all_in &= ((best_mv->row + ss[i].mv.row)> x->mv_row_min);
1093 all_in &= ((best_mv->row + ss[i+1].mv.row) < x->mv_row_max);
1094 all_in &= ((best_mv->col + ss[i+2].mv.col) > x->mv_col_min);
1095 all_in &= ((best_mv->col + ss[i+3].mv.col) < x->mv_col_max);
// Fast path: 4 SADs per sdx4df call.
1099 unsigned int sad_array[4];
1101 for (j = 0 ; j < x->searches_per_step ; j += 4)
1103 unsigned char *block_offset[4];
1105 for (t = 0; t < 4; t++)
1106 block_offset[t] = ss[i+t].offset + best_address;
1108 fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);
1110 for (t = 0; t < 4; t++, i++)
1112 if (sad_array[t] < bestsad)
// Only add the MV cost when the raw SAD already beats the best.
1114 this_mv.row = (best_mv->row + ss[i].mv.row) << 3;
1115 this_mv.col = (best_mv->col + ss[i].mv.col) << 3;
1116 sad_array[t] += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
1118 if (sad_array[t] < bestsad)
1120 bestsad = sad_array[t];
// Scalar fallback path (some candidate out of range).
1129 for (j = 0 ; j < x->searches_per_step ; j++)
1131 // Trap illegal vectors
1132 this_row_offset = best_mv->row + ss[i].mv.row;
1133 this_col_offset = best_mv->col + ss[i].mv.col;
1135 if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
1136 (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
1138 check_here = ss[i].offset + best_address;
1139 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1141 if (thissad < bestsad)
1143 this_mv.row = this_row_offset << 3;
1144 this_mv.col = this_col_offset << 3;
1145 thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
1147 if (thissad < bestsad)
// Move the centre to the best site found in this step.
1158 if (best_site != last_site)
1160 best_mv->row += ss[best_site].mv.row;
1161 best_mv->col += ss[best_site].mv.col;
1162 best_address += ss[best_site].offset;
1163 last_site = best_site;
1165 else if (best_address == in_what)
1169 this_mv.row = best_mv->row << 3;
1170 this_mv.col = best_mv->col << 3;
1172 if (bestsad == INT_MAX)
1175 return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
1176 + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
1180 #if !(CONFIG_REALTIME_ONLY)
// Exhaustive full search: evaluate every full-pel position within
// +/- distance of ref_mv (clamped to the legal MV range), scoring
// SAD + MV cost, and return variance + rate cost of the winner.
// NOTE(review): partial extraction — braces and best-candidate updates
// are incomplete; code left byte-identical.
1181 int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
1183 unsigned char *what = (*(b->base_src) + b->src);
1184 int what_stride = b->src_stride;
1185 unsigned char *in_what;
1186 int in_what_stride = d->pre_stride;
1187 int mv_stride = d->pre_stride;
1188 unsigned char *bestaddress;
1189 MV *best_mv = &d->bmi.mv.as_mv;
1191 int bestsad = INT_MAX;
1194 unsigned char *check_here;
1197 int ref_row = ref_mv->row >> 3;
1198 int ref_col = ref_mv->col >> 3;
// Raw search window before clamping to the UMV border below.
1200 int row_min = ref_row - distance;
1201 int row_max = ref_row + distance;
1202 int col_min = ref_col - distance;
1203 int col_max = ref_col + distance;
1205 // Work out the mid point for the search
1206 in_what = *(d->base_pre) + d->pre;
1207 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1209 best_mv->row = ref_row;
1210 best_mv->col = ref_col;
1212 // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
1213 if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
1214 (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
1216 // Baseline value at the centre
1218 //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(vp8_mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
1219 bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
1222 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1223 if (col_min < x->mv_col_min)
1224 col_min = x->mv_col_min;
1226 if (col_max > x->mv_col_max)
1227 col_max = x->mv_col_max;
1229 if (row_min < x->mv_row_min)
1230 row_min = x->mv_row_min;
1232 if (row_max > x->mv_row_max)
1233 row_max = x->mv_row_max;
// Raster scan of the clamped window.
1235 for (r = row_min; r < row_max ; r++)
1237 this_mv.row = r << 3;
1238 check_here = r * mv_stride + in_what + col_min;
1240 for (c = col_min; c < col_max; c++)
1242 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1244 this_mv.col = c << 3;
1245 //thissad += (int)sqrt(vp8_mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
1246 //thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
1247 thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
1249 if (thissad < bestsad)
1254 bestaddress = check_here;
1261 this_mv.row = best_mv->row << 3;
1262 this_mv.col = best_mv->col << 3;
1264 if (bestsad < INT_MAX)
1265 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1266 + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
// SIMD-assisted full search: same exhaustive scan as
// vp8_full_search_sad, but computes 3 adjacent-column SADs per sdx3f
// call, with a scalar sdf tail for the remaining columns of each row.
// NOTE(review): partial extraction — braces and best-candidate updates
// are incomplete; code left byte-identical.
1271 int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
1273 unsigned char *what = (*(b->base_src) + b->src);
1274 int what_stride = b->src_stride;
1275 unsigned char *in_what;
1276 int in_what_stride = d->pre_stride;
1277 int mv_stride = d->pre_stride;
1278 unsigned char *bestaddress;
1279 MV *best_mv = &d->bmi.mv.as_mv;
1281 int bestsad = INT_MAX;
1284 unsigned char *check_here;
1285 unsigned int thissad;
1287 int ref_row = ref_mv->row >> 3;
1288 int ref_col = ref_mv->col >> 3;
1290 int row_min = ref_row - distance;
1291 int row_max = ref_row + distance;
1292 int col_min = ref_col - distance;
1293 int col_max = ref_col + distance;
1295 unsigned int sad_array[3];
1297 // Work out the mid point for the search
1298 in_what = *(d->base_pre) + d->pre;
1299 bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;
1301 best_mv->row = ref_row;
1302 best_mv->col = ref_col;
1304 // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
1305 if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
1306 (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
1308 // Baseline value at the centre
1309 bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
1312 // Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
1313 if (col_min < x->mv_col_min)
1314 col_min = x->mv_col_min;
1316 if (col_max > x->mv_col_max)
1317 col_max = x->mv_col_max;
1319 if (row_min < x->mv_row_min)
1320 row_min = x->mv_row_min;
1322 if (row_max > x->mv_row_max)
1323 row_max = x->mv_row_max;
1325 for (r = row_min; r < row_max ; r++)
1327 this_mv.row = r << 3;
1328 check_here = r * mv_stride + in_what + col_min;
// Vector path: 3 columns at a time while they fit.
1331 while ((c + 3) < col_max)
1335 fn_ptr->sdx3f(what, what_stride, check_here , in_what_stride, sad_array);
1337 for (i = 0; i < 3; i++)
1339 thissad = sad_array[i];
1341 if (thissad < bestsad)
1343 this_mv.col = c << 3;
1344 thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
1346 if (thissad < bestsad)
1351 bestaddress = check_here;
// Scalar tail for the last (col_max - c) columns of the row.
1362 thissad = fn_ptr->sdf(what, what_stride, check_here , in_what_stride, bestsad);
1364 if (thissad < bestsad)
1366 this_mv.col = c << 3;
1367 thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
1369 if (thissad < bestsad)
1374 bestaddress = check_here;
1384 this_mv.row = best_mv->row << 3;
1385 this_mv.col = best_mv->col << 3;
1387 if (bestsad < INT_MAX)
1388 return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
1389 + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
1396 #ifdef ENTROPY_STATS
// ENTROPY_STATS helper: dump the gathered MV mode-context statistics as
// a C table ("modecont.c") of per-context probabilities derived from
// mv_ref_ct / mv_mode_cts.
// NOTE(review): partial extraction — braces, zero-count guards and the
// fclose are not visible; code left byte-identical. fopen's result is
// not visibly checked here — confirm against the full file.
1397 void print_mode_context(void)
1399 FILE *f = fopen("modecont.c", "w");
1402 fprintf(f, "#include \"entropy.h\"\n");
1403 fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
1406 for (j = 0; j < 6; j++)
1408 fprintf(f, " { // %d \n", j);
1411 for (i = 0; i < 4; i++)
1415 int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];
// Overall probability of the mode across all contexts.
1418 count = mv_mode_cts[i][0] + mv_mode_cts[i][1];
1421 overal_prob = 256 * mv_mode_cts[i][0] / count;
1425 if (overal_prob == 0)
// Per-context probability for this (context, mode) pair.
1429 count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];
1432 this_prob = 256 * mv_ref_ct[j][i][0] / count;
1439 fprintf(f, "%5d, ", this_prob);
1440 //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
1441 //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
1444 fprintf(f, " },\n");
1451 /* MV ref count ENTROPY_STATS stats code */
1452 #ifdef ENTROPY_STATS
1453 void init_mv_ref_counts()
1455 vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
1456 vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
// ENTROPY_STATS helper: bump the hit ([...][0]) or miss ([...][1])
// counter for each of the four MV reference modes (indexed 0..3 —
// presumably ZEROMV/NEARESTMV/NEARMV/NEWMV order; confirm), keyed by
// the context counts ct[]. NOTE(review): partial extraction — the
// if/else chain on "m" that selects which pair increments is not
// visible; code left byte-identical.
1459 void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
1463 ++mv_ref_ct [ct[0]] [0] [0];
1464 ++mv_mode_cts[0][0];
1468 ++mv_ref_ct [ct[0]] [0] [1];
1469 ++mv_mode_cts[0][1];
1473 ++mv_ref_ct [ct[1]] [1] [0];
1474 ++mv_mode_cts[1][0];
1478 ++mv_ref_ct [ct[1]] [1] [1];
1479 ++mv_mode_cts[1][1];
1483 ++mv_ref_ct [ct[2]] [2] [0];
1484 ++mv_mode_cts[2][0];
1488 ++mv_ref_ct [ct[2]] [2] [1];
1489 ++mv_mode_cts[2][1];
1493 ++mv_ref_ct [ct[3]] [3] [0];
1494 ++mv_mode_cts[3][0];
1498 ++mv_ref_ct [ct[3]] [3] [1];
1499 ++mv_mode_cts[3][1];
1506 #endif/* END MV ref count ENTROPY_STATS stats code */