/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_config.h"
#include "vp8/common/common.h"
#include "vp8/common/extend.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "segmentation.h"
#include "vp8/common/setupintrarecon.h"
#include "encodeintra.h"
#include "vp8/common/reconinter.h"
#include "pickinter.h"
#include "vp8/common/findnearmv.h"
#include "vp8/common/invtrans.h"
#include "vpx_ports/vpx_timer.h"
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
#include "bitstream.h"
#endif
#include "encodeframe.h"
extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t);
extern void vp8_calc_ref_frame_costs(int *ref_frame_cost,
                                     int prob_intra,
                                     int prob_last,
                                     int prob_garf);
extern void vp8_convert_rfct_to_prob(VP8_COMP *const cpi);
extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
extern void vp8_auto_select_speed(VP8_COMP *cpi);
extern void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
                                      MACROBLOCK *x,
                                      MB_ROW_COMP *mbr_ei,
                                      int mb_row,
                                      int count);
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x );
unsigned int inter_y_modes[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int inter_uv_modes[4] = {0, 0, 0, 0};
unsigned int inter_b_modes[15] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
unsigned int y_modes[5] = {0, 0, 0, 0, 0};
unsigned int uv_modes[4] = {0, 0, 0, 0};
unsigned int b_modes[14] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp8_activity_masking().
 */
#define VP8_ACTIVITY_AVG_MIN (64)

/* This is used as a reference when computing the source variance for the
 * purposes of activity masking.
 * Eventually this should be replaced by custom no-reference routines,
 * which will be faster.
 */
static const unsigned char VP8_VAR_OFFS[16] =
{
    128, 128, 128, 128, 128, 128, 128, 128,
    128, 128, 128, 128, 128, 128, 128, 128
};
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure( VP8_COMP *cpi, MACROBLOCK *x )
{
    unsigned int act;
    unsigned int sse;
    /* TODO: This could also be done over smaller areas (8x8), but that would
     * require extensive changes elsewhere, as lambda is assumed to be fixed
     * over an entire MB in most of the code.
     * Another option is to compute four 8x8 variances, and pick a single
     * lambda using a non-linear combination (e.g., the smallest, or second
     * smallest, etc.).
     */
    act = vp8_variance16x16(x->src.y_buffer,
                            x->src.y_stride, VP8_VAR_OFFS, 0, &sse);
    act = act << 4;

    /* If the region is flat, lower the activity some more. */
    if (act < 8 << 12)
        act = act < 5 << 12 ? act : 5 << 12;

    return act;
}
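
/* Note: because variance is unchanged when a constant is added to every
 * sample, differencing against the flat 128 buffer above still yields the
 * true variance of the source block; VP8_VAR_OFFS simply gives the SIMD
 * variance routines a convenient fixed reference to subtract. */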
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure( VP8_COMP *cpi,
                                          MACROBLOCK *x, int use_dc_pred )
{
    return vp8_encode_intra(cpi, x, use_dc_pred);
}
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
#define ALT_ACT_MEASURE 1
static unsigned int mb_activity_measure( VP8_COMP *cpi, MACROBLOCK *x,
                                         int mb_row, int mb_col)
{
    unsigned int mb_activity;

    if ( ALT_ACT_MEASURE )
    {
        int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);

        // Or use an alternative.
        mb_activity = alt_activity_measure( cpi, x, use_dc_pred );
    }
    else
    {
        // Original activity measure from Tim T's code.
        mb_activity = tt_activity_measure( cpi, x );
    }

    if ( mb_activity < VP8_ACTIVITY_AVG_MIN )
        mb_activity = VP8_ACTIVITY_AVG_MIN;

    return mb_activity;
}
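
/* Note: use_dc_pred evaluates true exactly for MBs on the first row or
 * first column, excluding the top-left MB where both coordinates are zero,
 * i.e. where only one prediction edge is available. */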
// Calculate an "average" mb activity value for the frame
#define ACT_MEDIAN 0
static void calc_av_activity( VP8_COMP *cpi, int64_t activity_sum )
{
#if ACT_MEDIAN
    // Find median: Simple n^2 algorithm for experimentation
    {
        unsigned int median;
        unsigned int i, j;
        unsigned int *sortlist;
        unsigned int tmp;

        // Create a list to sort to
        CHECK_MEM_ERROR(sortlist,
                        vpx_calloc(sizeof(unsigned int),
                                   cpi->common.MBs));

        // Copy map to sort list
        vpx_memcpy( sortlist, cpi->mb_activity_map,
                    sizeof(unsigned int) * cpi->common.MBs );

        // Ripple each value down to its correct position
        for ( i = 1; i < cpi->common.MBs; i ++ )
        {
            for ( j = i; j > 0; j -- )
            {
                if ( sortlist[j] < sortlist[j-1] )
                {
                    // Swap values
                    tmp = sortlist[j-1];
                    sortlist[j-1] = sortlist[j];
                    sortlist[j] = tmp;
                }
                else
                    break;
            }
        }

        // Even number MBs so estimate median as mean of two either side.
        median = ( 1 + sortlist[cpi->common.MBs >> 1] +
                   sortlist[(cpi->common.MBs >> 1) + 1] ) >> 1;

        cpi->activity_avg = median;

        vpx_free(sortlist);
    }
#else
    // Simple mean for now
    cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif

    if (cpi->activity_avg < VP8_ACTIVITY_AVG_MIN)
        cpi->activity_avg = VP8_ACTIVITY_AVG_MIN;

    // Experimental code: return fixed value normalized for several clips
    if ( ALT_ACT_MEASURE )
        cpi->activity_avg = 100000;
}
#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0
#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index( VP8_COMP *cpi, MACROBLOCK *x )
{
    VP8_COMMON *const cm = & cpi->common;
    int mb_row, mb_col;
    int64_t act, a, b;

#if OUTPUT_NORM_ACT_STATS
    FILE *f = fopen("norm_act.stt", "a");
    fprintf(f, "\n%12d\n", cpi->activity_avg );
#endif

    // Reset pointers to start of activity map
    x->mb_activity_ptr = cpi->mb_activity_map;

    // Calculate normalized mb activity number.
    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
    {
        // for each macroblock col in image
        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
        {
            // Read activity from the map
            act = *(x->mb_activity_ptr);

            // Calculate a normalized activity number
            a = act + 4*cpi->activity_avg;
            b = 4*act + cpi->activity_avg;

            if ( b >= a )
                *(x->activity_ptr) = (int)((b + (a>>1))/a) - 1;
            else
                *(x->activity_ptr) = 1 - (int)((a + (b>>1))/b);

#if OUTPUT_NORM_ACT_STATS
            fprintf(f, " %6d", *(x->mb_activity_ptr));
#endif
            // Increment activity map pointers
            x->mb_activity_ptr++;
        }

#if OUTPUT_NORM_ACT_STATS
        fprintf(f, "\n");
#endif
    }

#if OUTPUT_NORM_ACT_STATS
    fclose(f);
#endif
}
#endif
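
/* Worked example for the normalization above: when act == activity_avg,
 * a == b == 5 * activity_avg and the index is 0; as act grows, the index
 * approaches round(b/a) - 1 = +3, and for very flat MBs it approaches
 * 1 - round(a/b) = -3, so the index is confined to roughly [-3, +3]. */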
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map( VP8_COMP *cpi )
{
    MACROBLOCK *const x = & cpi->mb;
    MACROBLOCKD *xd = &x->e_mbd;
    VP8_COMMON *const cm = & cpi->common;

#if ALT_ACT_MEASURE
    YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
    int recon_yoffset;
    int recon_y_stride = new_yv12->y_stride;
#endif

    int mb_row, mb_col;
    unsigned int mb_activity;
    int64_t activity_sum = 0;

    // for each macroblock row in image
    for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
    {
#if ALT_ACT_MEASURE
        // reset above block coeffs
        xd->up_available = (mb_row != 0);
        recon_yoffset = (mb_row * recon_y_stride * 16);
#endif
        // for each macroblock col in image
        for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
        {
#if ALT_ACT_MEASURE
            xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
            xd->left_available = (mb_col != 0);
            recon_yoffset += 16;
#endif
            // Copy current mb to a buffer
            vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

            // measure activity
            mb_activity = mb_activity_measure( cpi, x, mb_row, mb_col );

            // Keep frame sum
            activity_sum += mb_activity;

            // Store MB level activity details.
            *x->mb_activity_ptr = mb_activity;

            // Increment activity map pointer
            x->mb_activity_ptr++;

            // adjust to the next column of source macroblocks
            x->src.y_buffer += 16;
        }

        // adjust to the next row of mbs
        x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;

#if ALT_ACT_MEASURE
        // extend the recon for intra prediction
        vp8_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
                          xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
#endif
    }

    // Calculate an "average" MB activity
    calc_av_activity(cpi, activity_sum);

#if USE_ACT_INDEX
    // Calculate an activity index number of each mb
    calc_activity_index( cpi, x );
#endif
}
// Macroblock activity masking
void vp8_activity_masking(VP8_COMP *cpi, MACROBLOCK *x)
{
#if USE_ACT_INDEX
    x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
    x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
    x->errorperbit += (x->errorperbit == 0);
#else
    int64_t a;
    int64_t b;
    int64_t act = *(x->mb_activity_ptr);

    // Apply the masking to the RD multiplier.
    a = act + (2*cpi->activity_avg);
    b = (2*act) + cpi->activity_avg;

    x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
    x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
    x->errorperbit += (x->errorperbit == 0);
#endif

    // Activity based Zbin adjustment
    adjust_act_zbin(cpi, x);
}
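
/* Worked example for the masking above: the scale factor b/a is bounded
 * between 1/2 (act == 0) and 2 (act >> activity_avg), and equals 1 when
 * act == activity_avg, so activity masking at most halves or doubles the
 * RD multiplier for any one macroblock. */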
void encode_mb_row(VP8_COMP *cpi,
                   VP8_COMMON *cm,
                   int mb_row,
                   MACROBLOCK *x,
                   MACROBLOCKD *xd,
                   TOKENEXTRA **tp,
                   int *segment_counts,
                   int *totalrate)
{
    int mb_col;
    int recon_yoffset, recon_uvoffset;
    int ref_fb_idx = cm->lst_fb_idx;
    int dst_fb_idx = cm->new_fb_idx;
    int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
    int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
    int map_index = (mb_row * cpi->common.mb_cols);

#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
    const int num_part = (1 << cm->multi_token_partition);
    TOKENEXTRA *tp_start = cpi->tok;
    vp8_writer *w;
#endif

#if CONFIG_MULTITHREAD
    const int nsync = cpi->mt_sync_range;
    const int rightmost_col = cm->mb_cols + nsync;
    volatile const int *last_row_current_mb_col;
    volatile int *current_mb_col = &cpi->mt_current_mb_col[mb_row];

    if ((cpi->b_multi_threaded != 0) && (mb_row != 0))
        last_row_current_mb_col = &cpi->mt_current_mb_col[mb_row - 1];
    else
        last_row_current_mb_col = &rightmost_col;
#endif
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
    // Pick the bool coder for this row's token partition.
    w = &cpi->bc[1 + (mb_row % num_part)];
#endif

    // reset above block coeffs
    xd->above_context = cm->above_context;

    xd->up_available = (mb_row != 0);
    recon_yoffset = (mb_row * recon_y_stride * 16);
    recon_uvoffset = (mb_row * recon_uv_stride * 8);

    cpi->tplist[mb_row].start = *tp;

    // Distance of Mb to the top & bottom edges, specified in 1/8th pel
    // units as they are always compared to values that are in 1/8th pel units
    xd->mb_to_top_edge = -((mb_row * 16) << 3);
    xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;

    // Set up limit values for vertical motion vector components
    // to prevent them extending beyond the UMV borders
    x->mv_row_min = -((mb_row * 16) + (VP8BORDERINPIXELS - 16));
    x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16)
                    + (VP8BORDERINPIXELS - 16);

    // Set the mb activity pointer to the start of the row.
    x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
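
    /* Example of the 1/8th pel edge values: for mb_row == 2 the top edge is
     * -((2 * 16) << 3) = -256, i.e. the MB starts 32 whole pixels below the
     * top of the frame. */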
    // for each macroblock col in image
    for (mb_col = 0; mb_col < cm->mb_cols; mb_col++)
    {
#if (CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING)
        *tp = cpi->tok;
#endif
        // Distance of Mb to the left & right edges, specified in
        // 1/8th pel units as they are always compared to values
        // that are in 1/8th pel units
        xd->mb_to_left_edge = -((mb_col * 16) << 3);
        xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;

        // Set up limit values for horizontal motion vector components
        // to prevent them extending beyond the UMV borders
        x->mv_col_min = -((mb_col * 16) + (VP8BORDERINPIXELS - 16));
        x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16)
                        + (VP8BORDERINPIXELS - 16);

        xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
        xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
        xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
        xd->left_available = (mb_col != 0);

        x->rddiv = cpi->RDDIV;
        x->rdmult = cpi->RDMULT;

        // Copy current mb to a buffer
        vp8_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

#if CONFIG_MULTITHREAD
        if (cpi->b_multi_threaded != 0)
        {
            *current_mb_col = mb_col - 1; // set previous MB done

            if ((mb_col & (nsync - 1)) == 0)
            {
                // Spin until the row above is far enough ahead.
                while (mb_col > (*last_row_current_mb_col - nsync))
                {
                    x86_pause_hint();
                    thread_sleep(0);
                }
            }
        }
#endif
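
        /* Note on the synchronization above: each row publishes its progress
         * through cpi->mt_current_mb_col, and a row may only encode a MB once
         * the row above has advanced at least nsync MBs further, so the above
         * and above-right context it depends on is already final. */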
        if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
            vp8_activity_masking(cpi, x);

        // Is segmentation enabled
        // MB level adjustment to quantizer
        if (xd->segmentation_enabled)
        {
            // Code to set segment id in xd->mbmi.segment_id for current MB
            // (with range checking)
            if (cpi->segmentation_map[map_index+mb_col] <= 3)
                xd->mode_info_context->mbmi.segment_id = cpi->segmentation_map[map_index+mb_col];
            else
                xd->mode_info_context->mbmi.segment_id = 0;

            vp8cx_mb_init_quantizer(cpi, x, 1);
        }
        else
            // Set to Segment 0 by default
            xd->mode_info_context->mbmi.segment_id = 0;

        x->active_ptr = cpi->active_map + map_index + mb_col;
        if (cm->frame_type == KEY_FRAME)
        {
            *totalrate += vp8cx_encode_intra_macroblock(cpi, x, tp);
            y_modes[xd->mode_info_context->mbmi.mode] ++;
        }
        else
        {
            *totalrate += vp8cx_encode_inter_macroblock(cpi, x, tp,
                              recon_yoffset, recon_uvoffset, mb_row, mb_col);
            inter_y_modes[xd->mode_info_context->mbmi.mode] ++;

            if (xd->mode_info_context->mbmi.mode == SPLITMV)
            {
                int b;
                for (b = 0; b < x->partition_info->count; b++)
                    inter_b_modes[x->partition_info->bmi[b].mode] ++;
            }

            // Count of last ref frame 0,0 usage
            if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
                (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                cpi->inter_zz_count ++;
            // Special case code for cyclic refresh
            // If cyclic update enabled then copy xd->mbmi.segment_id (which
            // may have been updated based on mode during
            // vp8cx_encode_inter_macroblock()) back into the global
            // segmentation map
            if ((cpi->current_layer == 0) &&
                (cpi->cyclic_refresh_mode_enabled && xd->segmentation_enabled))
            {
                cpi->segmentation_map[map_index+mb_col] = xd->mode_info_context->mbmi.segment_id;

                // If the block has been refreshed mark it as clean (the
                // magnitude of the -ve influences how long it will be before
                // we consider another refresh):
                // Else if it was coded (last frame 0,0) and has not already
                // been refreshed then mark it as a candidate for cleanup next
                // time (marked 0), else mark it as dirty (1).
                if (xd->mode_info_context->mbmi.segment_id)
                    cpi->cyclic_refresh_map[map_index+mb_col] = -1;
                else if ((xd->mode_info_context->mbmi.mode == ZEROMV) &&
                         (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME))
                {
                    if (cpi->cyclic_refresh_map[map_index+mb_col] == 1)
                        cpi->cyclic_refresh_map[map_index+mb_col] = 0;
                }
                else
                    cpi->cyclic_refresh_map[map_index+mb_col] = 1;
            }
        }
        cpi->tplist[mb_row].stop = *tp;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        /* pack tokens for this MB */
        {
            int tok_count = *tp - tp_start;
            pack_tokens(w, tp_start, tok_count);
        }
#endif
        // Increment pointer into gf usage flags structure.
        x->gf_active_ptr++;

        // Increment the activity mask pointers.
        x->mb_activity_ptr++;

        // adjust to the next column of macroblocks
        x->src.y_buffer += 16;
        x->src.u_buffer += 8;
        x->src.v_buffer += 8;

        recon_yoffset += 16;
        recon_uvoffset += 8;

        // Keep track of segment usage
        segment_counts[xd->mode_info_context->mbmi.segment_id] ++;

        // skip to next mb
        xd->mode_info_context++;
        x->partition_info++;
        xd->above_context++;
    }

    // extend the recon for intra prediction
    vp8_extend_mb_row( &cm->yv12_fb[dst_fb_idx],
                       xd->dst.y_buffer + 16,
                       xd->dst.u_buffer + 8,
                       xd->dst.v_buffer + 8);

#if CONFIG_MULTITHREAD
    if (cpi->b_multi_threaded != 0)
        *current_mb_col = rightmost_col;
#endif

    // this is to account for the border
    xd->mode_info_context++;
    x->partition_info++;
}
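
/* Note: the trailing increments above assume the mode info array is
 * allocated with one extra (border) column per MB row; stepping the
 * pointers once more at the end of the row skips that border entry. */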
static void init_encode_frame_mb_context(VP8_COMP *cpi)
{
    MACROBLOCK *const x = & cpi->mb;
    VP8_COMMON *const cm = & cpi->common;
    MACROBLOCKD *const xd = & x->e_mbd;

    // GF active flags data structure
    x->gf_active_ptr = (signed char *)cpi->gf_active_flags;

    // Activity map pointer
    x->mb_activity_ptr = cpi->mb_activity_map;

    x->partition_info = x->pi;

    xd->mode_info_context = cm->mi;
    xd->mode_info_stride = cm->mode_info_stride;

    xd->frame_type = cm->frame_type;

    // reset intra mode contexts
    if (cm->frame_type == KEY_FRAME)
        vp8_init_mbmode_probs(cm);

    // Copy data over into macro block data structures.
    x->src = * cpi->Source;
    xd->pre = cm->yv12_fb[cm->lst_fb_idx];
    xd->dst = cm->yv12_fb[cm->new_fb_idx];

    // set up frame for intra coded blocks
    vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);

    vp8_build_block_offsets(x);

    vp8_setup_block_dptrs(&x->e_mbd);

    vp8_setup_block_ptrs(x);

    xd->mode_info_context->mbmi.mode = DC_PRED;
    xd->mode_info_context->mbmi.uv_mode = DC_PRED;

    xd->left_context = &cm->left_context;

    vp8_zero(cpi->count_mb_ref_frame_usage)
    vp8_zero(cpi->ymode_count)
    vp8_zero(cpi->uv_mode_count)

    vpx_memset(cm->above_context, 0,
               sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);

    // Special case treatment when GF and ARF are not sensible options
    // for reference
    if (cpi->ref_frame_flags == VP8_LAST_FLAG)
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded, 255, 128);
    else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_GOLD_FLAG))
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded, 1, 255);
    else if ((cpi->oxcf.number_of_layers > 1) &&
             (cpi->ref_frame_flags == VP8_ALT_FLAG))
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded, 1, 1);
    else
        vp8_calc_ref_frame_costs(x->ref_frame_cost,
                                 cpi->prob_intra_coded,
                                 cpi->prob_last_coded,
                                 cpi->prob_gf_coded);

    xd->fullpixel_mask = 0xffffffff;
    if (cm->full_pixel)
        xd->fullpixel_mask = 0xfffffff8;
}
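
/* Note: motion vectors are stored in 1/8th pel units, so masking with
 * 0xfffffff8 clears the three fractional bits and forces every MV component
 * to a whole-pixel value when the frame is coded in full-pixel mode. */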
void vp8_encode_frame(VP8_COMP *cpi)
{
    int mb_row;
    MACROBLOCK *const x = & cpi->mb;
    VP8_COMMON *const cm = & cpi->common;
    MACROBLOCKD *const xd = & x->e_mbd;
    TOKENEXTRA *tp = cpi->tok;
    int segment_counts[MAX_MB_SEGMENTS];
    int totalrate = 0;

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    BOOL_CODER * bc = &cpi->bc[1]; // bc[0] is for control partition
    const int num_part = (1 << cm->multi_token_partition);
#endif

    vpx_memset(segment_counts, 0, sizeof(segment_counts));
    if (cpi->compressor_speed == 2)
    {
        if (cpi->oxcf.cpu_used < 0)
            cpi->Speed = -(cpi->oxcf.cpu_used);
        else
            vp8_auto_select_speed(cpi);
    }

    // Functions setup for all frame types so we can use MC in AltRef
    if (!cm->use_bilinear_mc_filter)
    {
        xd->subpixel_predict      = vp8_sixtap_predict4x4;
        xd->subpixel_predict8x4   = vp8_sixtap_predict8x4;
        xd->subpixel_predict8x8   = vp8_sixtap_predict8x8;
        xd->subpixel_predict16x16 = vp8_sixtap_predict16x16;
    }
    else
    {
        xd->subpixel_predict      = vp8_bilinear_predict4x4;
        xd->subpixel_predict8x4   = vp8_bilinear_predict8x4;
        xd->subpixel_predict8x8   = vp8_bilinear_predict8x8;
        xd->subpixel_predict16x16 = vp8_bilinear_predict16x16;
    }
    // Reset frame count of inter 0,0 motion vector usage.
    cpi->inter_zz_count = 0;

    cpi->prediction_error = 0;
    cpi->intra_error = 0;
    cpi->skip_true_count = 0;

    cpi->frame_distortion = 0;
    cpi->last_mb_distortion = 0;

    xd->mode_info_context = cm->mi;

    vp8_zero(cpi->MVcount);
    vp8_zero(cpi->coef_counts);

    vp8cx_frame_init_quantizer(cpi);

    vp8_initialize_rd_consts(cpi,
                             vp8_dc_quant(cm->base_qindex, cm->y1dc_delta_q));

    vp8cx_initialize_me_consts(cpi, cm->base_qindex);

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        // Initialize encode frame context.
        init_encode_frame_mb_context(cpi);

        // Build a frame level activity map
        build_activity_map(cpi);
    }

    // re-init encode frame context.
    init_encode_frame_mb_context(cpi);
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
    {
        int i;
        for (i = 0; i < num_part; i++)
        {
            vp8_start_encode(&bc[i], cpi->partition_d[i + 1],
                             cpi->partition_d_end[i + 1]);
            bc[i].error = &cm->error;
        }
    }
#endif
    {
        struct vpx_usec_timer emr_timer;
        vpx_usec_timer_start(&emr_timer);

#if CONFIG_MULTITHREAD
        if (cpi->b_multi_threaded)
        {
            int i;

            vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);

            for (i = 0; i < cm->mb_rows; i++)
                cpi->mt_current_mb_col[i] = -1;

            for (i = 0; i < cpi->encoding_thread_count; i++)
            {
                sem_post(&cpi->h_event_start_encoding[i]);
            }

            for (mb_row = 0; mb_row < cm->mb_rows;
                 mb_row += (cpi->encoding_thread_count + 1))
            {
                vp8_zero(cm->left_context)

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
                tp = cpi->tok;
#else
                tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
#endif

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                // adjust to the next row of mbs
                x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;

                xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
                x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
                x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;

                if (mb_row == cm->mb_rows - 1)
                {
                    sem_post(&cpi->h_event_end_encoding); /* signal frame encoding end */
                }
            }

            sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */

            for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
            {
                cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
            }
            if (xd->segmentation_enabled)
            {
                int j;

                for (i = 0; i < cpi->encoding_thread_count; i++)
                    for (j = 0; j < 4; j++)
                        segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
            }

            for (i = 0; i < cpi->encoding_thread_count; i++)
                totalrate += cpi->mb_row_ei[i].totalrate;
        }
        else
#endif
        {
            // for each macroblock row in image
            for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
            {
                vp8_zero(cm->left_context)

#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
                tp = cpi->tok;
#endif

                encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);

                // adjust to the next row of mbs
                x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
                x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
                x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
            }

            cpi->tok_count = tp - cpi->tok;
        }
#if CONFIG_REALTIME_ONLY & CONFIG_ONTHEFLY_BITPACKING
        {
            int i;
            for (i = 0; i < num_part; i++)
            {
                vp8_stop_encode(&bc[i]);
                cpi->partition_sz[i+1] = bc[i].pos;
            }
        }
#endif

        vpx_usec_timer_mark(&emr_timer);
        cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
    }
    // Work out the segment probabilities if segmentation is enabled
    if (xd->segmentation_enabled)
    {
        int tot_count;
        int i;

        // Set to defaults
        vpx_memset(xd->mb_segment_tree_probs, 255,
                   sizeof(xd->mb_segment_tree_probs));

        tot_count = segment_counts[0] + segment_counts[1] +
                    segment_counts[2] + segment_counts[3];

        if (tot_count)
        {
            xd->mb_segment_tree_probs[0] =
                ((segment_counts[0] + segment_counts[1]) * 255) / tot_count;

            tot_count = segment_counts[0] + segment_counts[1];
            if (tot_count > 0)
                xd->mb_segment_tree_probs[1] =
                    (segment_counts[0] * 255) / tot_count;

            tot_count = segment_counts[2] + segment_counts[3];
            if (tot_count > 0)
                xd->mb_segment_tree_probs[2] =
                    (segment_counts[2] * 255) / tot_count;

            // Zero probabilities not allowed
            for (i = 0; i < MB_FEATURE_TREE_PROBS; i ++)
                if (xd->mb_segment_tree_probs[i] == 0)
                    xd->mb_segment_tree_probs[i] = 1;
        }
    }
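
    /* Note: the three probabilities form a small binary tree: probs[0]
     * codes segment {0,1} vs {2,3}, probs[1] codes 0 vs 1, and probs[2]
     * codes 2 vs 3. Each is scaled as count * 255 / total, and zero is
     * bumped to one because the bool coder needs probabilities in 1..255. */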
    // 256 rate units to the bit;
    // projected_frame_size in units of BYTES
    cpi->projected_frame_size = totalrate >> 8;

    // Make a note of the percentage MBs coded Intra.
    if (cm->frame_type == KEY_FRAME)
    {
        cpi->this_frame_percent_intra = 100;
    }
    else
    {
        int tot_modes;

        tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
                    + cpi->count_mb_ref_frame_usage[LAST_FRAME]
                    + cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
                    + cpi->count_mb_ref_frame_usage[ALTREF_FRAME];

        if (tot_modes)
            cpi->this_frame_percent_intra =
                cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
    }
    // Rebuild the MV cost tables only if the MV probabilities changed.
    {
        int cnt;
        int flag[2] = {0, 0};

        for (cnt = 0; cnt < MVPcount; cnt++)
            if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
            {
                flag[0] = 1;
                vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
                break;
            }

        for (cnt = 0; cnt < MVPcount; cnt++)
            if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
            {
                flag[1] = 1;
                vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
                break;
            }

        if (flag[0] || flag[1])
            vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
    }
#if !CONFIG_REALTIME_ONLY
    // Adjust the projected reference frame usage probability numbers to
    // reflect what we have just seen. This may be useful when we make
    // multiple iterations of the recode loop rather than continuing to use
    // values from the previous frame.
    if ((cm->frame_type != KEY_FRAME) &&
        ((cpi->oxcf.number_of_layers > 1) ||
         (!cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)))
    {
        vp8_convert_rfct_to_prob(cpi);
    }
#endif
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
    int r, c;
    int i;

    // 16 luma blocks
    for (r = 0; r < 4; r++)
        for (c = 0; c < 4; c++)
            x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;

    // 4 U chroma blocks
    for (r = 0; r < 2; r++)
        for (c = 0; c < 2; c++)
            x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;

    // 4 V chroma blocks
    for (r = 0; r < 2; r++)
        for (c = 0; c < 2; c++)
            x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;

    // second order (Y2) block
    x->block[24].src_diff = x->src_diff + 384;

    for (i = 0; i < 25; i++)
        x->block[i].coeff = x->coeff + i * 16;
}
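
/* Note on the src_diff layout used above: 256 luma difference values come
 * first, the 8x8 U and V planes follow at offsets 256 and 320 (stride 8),
 * and the 4x4 second order (Y2) block sits at offset 384; each block's 16
 * coefficients live at coeff + 16 * block_index. */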
void vp8_build_block_offsets(MACROBLOCK *x)
{
    int block = 0;
    int br, bc;

    vp8_build_block_doffsets(&x->e_mbd);

    // y blocks: source is the 16x16 copy of the current MB in x->thismb
    x->thismb_ptr = &x->thismb[0];
    for (br = 0; br < 4; br++)
    {
        for (bc = 0; bc < 4; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->thismb_ptr;
            this_block->src_stride = 16;
            this_block->src = 4 * br * 16 + 4 * bc;
            ++block;
        }
    }

    // u blocks
    for (br = 0; br < 2; br++)
    {
        for (bc = 0; bc < 2; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.u_buffer;
            this_block->src_stride = x->src.uv_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }

    // v blocks
    for (br = 0; br < 2; br++)
    {
        for (bc = 0; bc < 2; bc++)
        {
            BLOCK *this_block = &x->block[block];
            this_block->base_src = &x->src.v_buffer;
            this_block->src_stride = x->src.uv_stride;
            this_block->src = 4 * br * this_block->src_stride + 4 * bc;
            ++block;
        }
    }
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
    const MACROBLOCKD *xd = & x->e_mbd;
    const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
    const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
    const int is_key = cpi->common.frame_type == KEY_FRAME;

    ++ (is_key ? uv_modes : inter_uv_modes)[uvm];

    if (m == B_PRED)
    {
        unsigned int *const bct = is_key ? b_modes : inter_b_modes;
        int b = 0;

        do
        {
            ++ bct[xd->block[b].bmi.mode];
        }
        while (++b < 16);
    }

    ++cpi->ymode_count[m];
    ++cpi->uv_mode_count[uvm];
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
{
#if USE_ACT_INDEX
    x->act_zbin_adj = *(x->mb_activity_ptr);
#else
    int64_t a;
    int64_t b;
    int64_t act = *(x->mb_activity_ptr);

    // Apply the masking to the RD multiplier.
    a = act + 4*cpi->activity_avg;
    b = 4*act + cpi->activity_avg;

    if ( act > cpi->activity_avg )
        x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
    else
        x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
#endif
}
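
/* Worked example (with the integer rounding above): act == activity_avg
 * gives act_zbin_adj = 0; a very busy MB (act >> activity_avg) tends toward
 * +3 and a very flat one toward -3, since the ratios b/a and a/b are both
 * bounded by 4. */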
int vp8cx_encode_intra_macroblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
    MACROBLOCKD *xd = &x->e_mbd;
    int rate;

    if (cpi->sf.RD && cpi->compressor_speed != 2)
        vp8_rd_pick_intra_mode(cpi, x, &rate);
    else
        vp8_pick_intra_mode(cpi, x, &rate);

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        adjust_act_zbin( cpi, x );
        vp8_update_zbin_extra(cpi, x);
    }

    if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
        vp8_encode_intra4x4mby(x);
    else
        vp8_encode_intra16x16mby(x);

    vp8_encode_intra16x16mbuv(x);

    sum_intra_stats(cpi, x);

    vp8_tokenize_mb(cpi, &x->e_mbd, t);

    if (xd->mode_info_context->mbmi.mode != B_PRED)
        vp8_inverse_transform_mby(xd);

    vp8_dequant_idct_add_uv_block(xd->qcoeff+16*16, xd->dequant_uv,
                                  xd->dst.u_buffer, xd->dst.v_buffer,
                                  xd->dst.uv_stride, xd->eobs+16);
    return rate;
}
extern void vp8_fix_contexts(MACROBLOCKD *x);

int vp8cx_encode_inter_macroblock
(
    VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
    int recon_yoffset, int recon_uvoffset,
    int mb_row, int mb_col
)
{
    MACROBLOCKD *const xd = &x->e_mbd;
    int intra_error = 0;
    int rate;
    int distortion;

    x->skip = 0;

    if (xd->segmentation_enabled)
        x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
    else
        x->encode_breakout = cpi->oxcf.encode_breakout;

#if CONFIG_TEMPORAL_DENOISING
    // Reset the best sse mode/mv for each macroblock.
    x->best_reference_frame = INTRA_FRAME;
    x->best_zeromv_reference_frame = INTRA_FRAME;
    x->best_sse_inter_mode = 0;
    x->best_sse_mv.as_int = 0;
    x->need_to_clamp_best_mvs = 0;
#endif
    if (cpi->sf.RD)
    {
        int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;

        /* Are we using the fast quantizer for the mode selection? */
        if (cpi->sf.use_fastquant_for_pick)
        {
            cpi->mb.quantize_b = vp8_fast_quantize_b;
            cpi->mb.quantize_b_pair = vp8_fast_quantize_b_pair;

            /* the fast quantizer does not use zbin_extra, so
             * do not recalculate */
            cpi->zbin_mode_boost_enabled = 0;
        }

        vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                               &distortion, &intra_error);

        /* switch back to the regular quantizer for the encode */
        if (cpi->sf.improved_quant)
        {
            cpi->mb.quantize_b = vp8_regular_quantize_b;
            cpi->mb.quantize_b_pair = vp8_regular_quantize_b_pair;
        }

        /* restore cpi->zbin_mode_boost_enabled */
        cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
    }
    else
        vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                            &distortion, &intra_error, mb_row, mb_col);
    cpi->prediction_error += distortion;
    cpi->intra_error += intra_error;

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        // Adjust the zbin based on this MB rate.
        adjust_act_zbin( cpi, x );
    }

    // Experimental RD code
    cpi->frame_distortion += distortion;
    cpi->last_mb_distortion = distortion;

    // MB level adjustment to quantizer setup
    if (xd->segmentation_enabled)
    {
        // If cyclic update enabled
        if (cpi->current_layer == 0 && cpi->cyclic_refresh_mode_enabled)
        {
            // Clear segment_id back to 0 if not coded (last frame 0,0)
            if ((xd->mode_info_context->mbmi.segment_id == 1) &&
                ((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
                 (xd->mode_info_context->mbmi.mode != ZEROMV)))
            {
                xd->mode_info_context->mbmi.segment_id = 0;

                /* segment_id changed, so update */
                vp8cx_mb_init_quantizer(cpi, x, 1);
            }
        }
    }
    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = 0;
    if (cpi->zbin_mode_boost_enabled)
    {
        if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME)
        {
            if (xd->mode_info_context->mbmi.mode == ZEROMV)
            {
                if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
                    cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
                else
                    cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
            }
            else if (xd->mode_info_context->mbmi.mode == SPLITMV)
                cpi->zbin_mode_boost = 0;
            else
                cpi->zbin_mode_boost = MV_ZBIN_BOOST;
        }
    }

    /* The fast quantizer doesn't use zbin_extra, only do so with
     * the regular quantizer. */
    if (cpi->sf.improved_quant)
        vp8_update_zbin_extra(cpi, x);
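
    /* Note: a larger zero bin makes the quantizer map more small
     * coefficients to zero. Boosting it for ZEROMV golden/altref blocks
     * biases static areas toward coding no residual at all, which is what
     * suppresses the noise mentioned above. */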
    cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;

    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        vp8_encode_intra16x16mbuv(x);

        if (xd->mode_info_context->mbmi.mode == B_PRED)
            vp8_encode_intra4x4mby(x);
        else
            vp8_encode_intra16x16mby(x);

        sum_intra_stats(cpi, x);
    }
    else
    {
        int ref_fb_idx;

        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
            ref_fb_idx = cpi->common.lst_fb_idx;
        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
            ref_fb_idx = cpi->common.gld_fb_idx;
        else
            ref_fb_idx = cpi->common.alt_fb_idx;

        xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
        xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
        xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

        if (!x->skip)
            vp8_encode_inter16x16(x);
        else
            // Build the prediction only; no residual is coded for a
            // skipped macroblock.
            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                               xd->dst.u_buffer, xd->dst.v_buffer,
                                               xd->dst.y_stride, xd->dst.uv_stride);
    }
    if (!x->skip)
    {
        vp8_tokenize_mb(cpi, xd, t);

        if (xd->mode_info_context->mbmi.mode != B_PRED)
            vp8_inverse_transform_mby(xd);

        vp8_dequant_idct_add_uv_block(xd->qcoeff+16*16, xd->dequant_uv,
                                      xd->dst.u_buffer, xd->dst.v_buffer,
                                      xd->dst.uv_stride, xd->eobs+16);
    }
    else
    {
        /* always set mb_skip_coeff as it is needed by the loopfilter */
        xd->mode_info_context->mbmi.mb_skip_coeff = 1;

        if (cpi->common.mb_no_coeff_skip)
        {
            cpi->skip_true_count ++;
            vp8_fix_contexts(xd);
        }
        else
            vp8_stuff_mb(cpi, xd, t);
    }

    return rate;
}