/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdio.h>
#include <limits.h>

#include "vpx/vpx_encoder.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem_ops.h"

#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_cost.h"
#include "vp9/encoder/vp9_bitstream.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_subexp.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_write_bit_buffer.h"

static struct vp9_token intra_mode_encodings[INTRA_MODES];
static struct vp9_token switchable_interp_encodings[SWITCHABLE_FILTERS];
static struct vp9_token partition_encodings[PARTITION_TYPES];
static struct vp9_token inter_mode_encodings[INTER_MODES];

void vp9_entropy_mode_init() {
  vp9_tokens_from_tree(intra_mode_encodings, vp9_intra_mode_tree);
  vp9_tokens_from_tree(switchable_interp_encodings, vp9_switchable_interp_tree);
  vp9_tokens_from_tree(partition_encodings, vp9_partition_tree);
  vp9_tokens_from_tree(inter_mode_encodings, vp9_inter_mode_tree);
}

static void write_intra_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  vp9_write_token(w, vp9_intra_mode_tree, probs, &intra_mode_encodings[mode]);
}

static void write_inter_mode(vp9_writer *w, PREDICTION_MODE mode,
                             const vp9_prob *probs) {
  assert(is_inter_mode(mode));
  vp9_write_token(w, vp9_inter_mode_tree, probs,
                  &inter_mode_encodings[INTER_OFFSET(mode)]);
}

static void encode_unsigned_max(struct vp9_write_bit_buffer *wb,
                                int data, int max) {
  vp9_wb_write_literal(wb, data, get_unsigned_bits(max));
}

static void prob_diff_update(const vp9_tree_index *tree,
                             vp9_prob probs[/*n - 1*/],
                             const unsigned int counts[/*n - 1*/],
                             int n, vp9_writer *w) {
  int i;
  unsigned int branch_ct[32][2];

  // Assuming max number of probabilities <= 32
  assert(n <= 32);

  vp9_tree_probs_from_distribution(tree, branch_ct, counts);
  for (i = 0; i < n - 1; ++i)
    vp9_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
}
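
// Each branch above goes through vp9_cond_prob_diff_update(), which writes
// a one-bit update flag and, only when the measured bit savings justify it,
// a subexponentially coded difference from the old probability.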

static void write_selected_tx_size(const VP9_COMP *cpi,
                                   TX_SIZE tx_size, BLOCK_SIZE bsize,
                                   vp9_writer *w) {
  const TX_SIZE max_tx_size = max_txsize_lookup[bsize];
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const vp9_prob *const tx_probs = get_tx_probs2(max_tx_size, xd,
                                                 &cpi->common.fc.tx_probs);
  vp9_write(w, tx_size != TX_4X4, tx_probs[0]);
  if (tx_size != TX_4X4 && max_tx_size >= TX_16X16) {
    vp9_write(w, tx_size != TX_8X8, tx_probs[1]);
    if (tx_size != TX_8X8 && max_tx_size >= TX_32X32)
      vp9_write(w, tx_size != TX_16X16, tx_probs[2]);
  }
}
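
// The transform size is coded as a truncated unary string: each bit says
// "larger than the current size", and the string stops early once
// max_tx_size for this block size is reached, so e.g. an 8x8 block never
// spends bits distinguishing 16x16 from 32x32.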

static int write_skip(const VP9_COMP *cpi, int segment_id, const MODE_INFO *mi,
                      vp9_writer *w) {
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  if (vp9_segfeature_active(&cpi->common.seg, segment_id, SEG_LVL_SKIP)) {
    return 1;
  } else {
    const int skip = mi->mbmi.skip;
    vp9_write(w, skip, vp9_get_skip_prob(&cpi->common, xd));
    return skip;
  }
}

static void update_skip_probs(VP9_COMMON *cm, vp9_writer *w) {
  int k;

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_cond_prob_diff_update(w, &cm->fc.skip_probs[k], cm->counts.skip[k]);
}

static void update_switchable_interp_probs(VP9_COMMON *cm, vp9_writer *w) {
  int j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    prob_diff_update(vp9_switchable_interp_tree,
                     cm->fc.switchable_interp_prob[j],
                     cm->counts.switchable_interp[j], SWITCHABLE_FILTERS, w);
}

static void pack_mb_tokens(vp9_writer *w,
                           TOKENEXTRA **tp, const TOKENEXTRA *stop) {
  TOKENEXTRA *p = *tp;

  while (p < stop && p->token != EOSB_TOKEN) {
    const int t = p->token;
    const struct vp9_token *const a = &vp9_coef_encodings[t];
    const vp9_extra_bit *const b = &vp9_extra_bits[t];
    int i = 0;
    int v = a->value;
    int n = a->len;

    /* skip one or two nodes */
    if (p->skip_eob_node) {
      n -= p->skip_eob_node;
      i = 2 * p->skip_eob_node;
    }

    // TODO(jbb): expanding this can lead to big gains. It allows
    // much better branch prediction and would enable us to avoid numerous
    // lookups and compares.

    // If we have a token that's in the constrained set, the coefficient tree
    // is split into two treed writes. The first treed write takes care of the
    // unconstrained nodes. The second treed write takes care of the
    // constrained nodes.
    if (t >= TWO_TOKEN && t < EOB_TOKEN) {
      int len = UNCONSTRAINED_NODES - p->skip_eob_node;
      int bits = v >> (n - len);
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, bits, len, i);
      vp9_write_tree(w, vp9_coef_con_tree,
                     vp9_pareto8_full[p->context_tree[PIVOT_NODE] - 1],
                     v, n - len, 0);
    } else {
      vp9_write_tree(w, vp9_coef_tree, p->context_tree, v, n, i);
    }

    if (b->base_val) {
      const int e = p->extra, l = b->len;

      if (l) {
        const unsigned char *pb = b->prob;
        int v = e >> 1;
        int n = l;  /* number of bits in v, assumed nonzero */
        int i = 0;

        do {
          const int bb = (v >> --n) & 1;
          vp9_write(w, bb, pb[i >> 1]);
          i = b->tree[i + bb];
        } while (n);
      }

      vp9_write_bit(w, e & 1);
    }

    ++p;
  }

  *tp = p + (p->token == EOSB_TOKEN);
}
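
// Note on the split write above: a mid-range token's codeword has a->len
// bits; the top UNCONSTRAINED_NODES bits are coded against the adaptive
// per-context tree, while the remaining low bits are coded against the
// fixed Pareto tail table selected by the pivot-node probability, so the
// tail probabilities never need to be signalled individually.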

static void write_segment_id(vp9_writer *w, const struct segmentation *seg,
                             int segment_id) {
  if (seg->enabled && seg->update_map)
    vp9_write_tree(w, vp9_segment_tree, seg->tree_probs, segment_id, 3, 0);
}

// This function encodes the reference frame
static void write_ref_frames(const VP9_COMP *cpi, vp9_writer *w) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  const int is_compound = has_second_ref(mbmi);
  const int segment_id = mbmi->segment_id;

  // If segment level coding of this signal is disabled...
  // or the segment allows multiple reference frame options
  if (vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
    assert(!is_compound);
    assert(mbmi->ref_frame[0] ==
               vp9_get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME));
  } else {
    // does the feature use compound prediction or not
    // (if not specified at the frame/segment level)
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      vp9_write(w, is_compound, vp9_get_reference_mode_prob(cm, xd));
    } else {
      assert(!is_compound == (cm->reference_mode == SINGLE_REFERENCE));
    }

    if (is_compound) {
      vp9_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
                vp9_get_pred_prob_comp_ref_p(cm, xd));
    } else {
      const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
      vp9_write(w, bit0, vp9_get_pred_prob_single_ref_p1(cm, xd));
      if (bit0) {
        const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
        vp9_write(w, bit1, vp9_get_pred_prob_single_ref_p2(cm, xd));
      }
    }
  }
}
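
// Signalling summary for the above: with compound prediction a single bit
// selects LAST vs GOLDEN as the variable reference (the other reference is
// fixed by the frame's sign-bias setup); with single prediction bit0
// distinguishes LAST from the rest and bit1, when needed, GOLDEN from
// ALTREF.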

static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
                                vp9_writer *w) {
  VP9_COMMON *const cm = &cpi->common;
  const nmv_context *nmvc = &cm->fc.nmvc;
  const MACROBLOCK *const x = &cpi->mb;
  const MACROBLOCKD *const xd = &x->e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const PREDICTION_MODE mode = mbmi->mode;
  const int segment_id = mbmi->segment_id;
  const BLOCK_SIZE bsize = mbmi->sb_type;
  const int allow_hp = cm->allow_high_precision_mv;
  const int is_inter = is_inter_block(mbmi);
  const int is_compound = has_second_ref(mbmi);
  int skip, ref;

  if (seg->update_map) {
    if (seg->temporal_update) {
      const int pred_flag = mbmi->seg_id_predicted;
      vp9_prob pred_prob = vp9_get_pred_prob_seg_id(seg, xd);
      vp9_write(w, pred_flag, pred_prob);
      if (!pred_flag)
        write_segment_id(w, seg, segment_id);
    } else {
      write_segment_id(w, seg, segment_id);
    }
  }

  skip = write_skip(cpi, segment_id, mi, w);

  if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
    vp9_write(w, is_inter, vp9_get_intra_inter_prob(cm, xd));

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
      !(is_inter &&
        (skip || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
    write_selected_tx_size(cpi, mbmi->tx_size, bsize, w);
  }

  if (!is_inter) {
    if (bsize >= BLOCK_8X8) {
      write_intra_mode(w, mode, cm->fc.y_mode_prob[size_group_lookup[bsize]]);
    } else {
      int idx, idy;
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const PREDICTION_MODE b_mode = mi->bmi[idy * 2 + idx].as_mode;
          write_intra_mode(w, b_mode, cm->fc.y_mode_prob[0]);
        }
      }
    }
    write_intra_mode(w, mbmi->uv_mode, cm->fc.uv_mode_prob[mode]);
  } else {
    const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
    const vp9_prob *const inter_probs = cm->fc.inter_mode_probs[mode_ctx];
    write_ref_frames(cpi, w);

    // If segment skip is not enabled code the mode.
    if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
      if (bsize >= BLOCK_8X8) {
        write_inter_mode(w, mode, inter_probs);
        ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(mode)];
      }
    }

    if (cm->interp_filter == SWITCHABLE) {
      const int ctx = vp9_get_pred_context_switchable_interp(xd);
      vp9_write_token(w, vp9_switchable_interp_tree,
                      cm->fc.switchable_interp_prob[ctx],
                      &switchable_interp_encodings[mbmi->interp_filter]);
    } else {
      assert(mbmi->interp_filter == cm->interp_filter);
    }

    if (bsize < BLOCK_8X8) {
      const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
      const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
      int idx, idy;
      for (idy = 0; idy < 2; idy += num_4x4_h) {
        for (idx = 0; idx < 2; idx += num_4x4_w) {
          const int j = idy * 2 + idx;
          const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
          write_inter_mode(w, b_mode, inter_probs);
          ++cm->counts.inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
          if (b_mode == NEWMV) {
            for (ref = 0; ref < 1 + is_compound; ++ref)
              vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                            &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                            nmvc, allow_hp);
          }
        }
      }
    } else {
      if (mode == NEWMV) {
        for (ref = 0; ref < 1 + is_compound; ++ref)
          vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                        &mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                        allow_hp);
      }
    }
  }
}
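
// Per-block syntax order on inter frames, as emitted above: segment id,
// skip flag, intra/inter flag, transform size, then either intra modes or
// reference frames, inter modes, interpolation filter and motion vectors.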

static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO **mi_8x8,
                              vp9_writer *w) {
  const VP9_COMMON *const cm = &cpi->common;
  const MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const struct segmentation *const seg = &cm->seg;
  const MODE_INFO *const mi = mi_8x8[0];
  const MODE_INFO *const above_mi = mi_8x8[-xd->mi_stride];
  const MODE_INFO *const left_mi = xd->left_available ? mi_8x8[-1] : NULL;
  const MB_MODE_INFO *const mbmi = &mi->mbmi;
  const BLOCK_SIZE bsize = mbmi->sb_type;

  if (seg->update_map)
    write_segment_id(w, seg, mbmi->segment_id);

  write_skip(cpi, mbmi->segment_id, mi, w);

  if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT)
    write_selected_tx_size(cpi, mbmi->tx_size, bsize, w);

  if (bsize >= BLOCK_8X8) {
    write_intra_mode(w, mbmi->mode, get_y_mode_probs(mi, above_mi, left_mi, 0));
  } else {
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    int idx, idy;

    for (idy = 0; idy < 2; idy += num_4x4_h) {
      for (idx = 0; idx < 2; idx += num_4x4_w) {
        const int block = idy * 2 + idx;
        write_intra_mode(w, mi->bmi[block].as_mode,
                         get_y_mode_probs(mi, above_mi, left_mi, block));
      }
    }
  }

  write_intra_mode(w, mbmi->uv_mode, vp9_kf_uv_mode_prob[mbmi->mode]);
}

static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
                          vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                          int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  MODE_INFO *m;

  xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
  m = xd->mi[0];

  set_mi_row_col(xd, tile,
                 mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
                 mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
                 cm->mi_rows, cm->mi_cols);
  if (frame_is_intra_only(cm)) {
    write_mb_modes_kf(cpi, xd->mi, w);
  } else {
    pack_inter_mode_mvs(cpi, m, w);
  }

  assert(*tok < tok_end);
  pack_mb_tokens(w, tok, tok_end);
}

static void write_partition(VP9_COMMON *cm, MACROBLOCKD *xd,
                            int hbs, int mi_row, int mi_col,
                            PARTITION_TYPE p, BLOCK_SIZE bsize, vp9_writer *w) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (has_rows && has_cols) {
    vp9_write_token(w, vp9_partition_tree, probs, &partition_encodings[p]);
  } else if (!has_rows && has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
    vp9_write(w, p == PARTITION_SPLIT, probs[1]);
  } else if (has_rows && !has_cols) {
    assert(p == PARTITION_SPLIT || p == PARTITION_VERT);
    vp9_write(w, p == PARTITION_SPLIT, probs[2]);
  } else {
    assert(p == PARTITION_SPLIT);
  }
}
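
// At the right and bottom frame edges only a subset of partitions is legal,
// so the full partition token collapses to a single split/non-split bit, or
// to nothing at all in the corner case where the decoder can infer
// PARTITION_SPLIT.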

static void write_modes_sb(VP9_COMP *cpi,
                           const TileInfo *const tile,
                           vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end,
                           int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;

  const int bsl = b_width_log2(bsize);
  const int bs = (1 << bsl) / 4;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  MODE_INFO *m = cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col];

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = partition_lookup[bsl][m->mbmi.sb_type];
  write_partition(cm, xd, bs, mi_row, mi_col, partition, bsize, w);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        break;
      case PARTITION_HORZ:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_row + bs < cm->mi_rows)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col);
        break;
      case PARTITION_VERT:
        write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col);
        if (mi_col + bs < cm->mi_cols)
          write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs);
        break;
      case PARTITION_SPLIT:
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col + bs,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col,
                       subsize);
        write_modes_sb(cpi, tile, w, tok, tok_end, mi_row + bs, mi_col + bs,
                       subsize);
        break;
      default:
        assert(0);
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
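
// PARTITION_SPLIT recurses into the four quadrants in raster order
// (top-left, top-right, bottom-left, bottom-right), matching the decoder's
// traversal; the partition context is updated only when the block is not
// split further or has reached the minimum 8x8 size.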

static void write_modes(VP9_COMP *cpi,
                        const TileInfo *const tile,
                        vp9_writer *w, TOKENEXTRA **tok, TOKENEXTRA *tok_end) {
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(cpi->mb.e_mbd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col,
                     BLOCK_64X64);
  }
}

static void build_tree_distribution(VP9_COMP *cpi, TX_SIZE tx_size,
                                    vp9_coeff_stats *coef_branch_ct,
                                    vp9_coeff_probs_model *coef_probs) {
  vp9_coeff_count *coef_counts = cpi->coef_counts[tx_size];
  unsigned int (*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
      cpi->common.counts.eob_branch[tx_size];
  int i, j, k, l, m;

  for (i = 0; i < PLANE_TYPES; ++i) {
    for (j = 0; j < REF_TYPES; ++j) {
      for (k = 0; k < COEF_BANDS; ++k) {
        for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
          vp9_tree_probs_from_distribution(vp9_coef_tree,
                                           coef_branch_ct[i][j][k][l],
                                           coef_counts[i][j][k][l]);
          coef_branch_ct[i][j][k][l][0][1] = eob_branch_ct[i][j][k][l] -
                                             coef_branch_ct[i][j][k][l][0][0];
          for (m = 0; m < UNCONSTRAINED_NODES; ++m)
            coef_probs[i][j][k][l][m] = get_binary_prob(
                                            coef_branch_ct[i][j][k][l][m][0],
                                            coef_branch_ct[i][j][k][l][m][1]);
        }
      }
    }
  }
}
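
// The fixup of coef_branch_ct[...][0][1] is needed because the root EOB
// decision is only coded when skip_eob_node is zero, so its "not EOB" count
// must come from the separately tracked eob_branch counters rather than
// from the raw token totals.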

static void update_coef_probs_common(vp9_writer* const bc, VP9_COMP *cpi,
                                     TX_SIZE tx_size,
                                     vp9_coeff_stats *frame_branch_ct,
                                     vp9_coeff_probs_model *new_coef_probs) {
  vp9_coeff_probs_model *old_coef_probs = cpi->common.fc.coef_probs[tx_size];
  const vp9_prob upd = DIFF_UPDATE_PROB;
  const int entropy_nodes_update = UNCONSTRAINED_NODES;
  int i, j, k, l, t;
  switch (cpi->sf.use_fast_coef_updates) {
    case TWO_LOOP: {
      /* dry run to see if there is any update at all needed */
      int savings = 0;
      int update[2] = {0, 0};
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                const vp9_prob oldp = old_coef_probs[i][j][k][l][t];
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                if (s > 0 && newp != oldp)
                  u = 1;
                if (u)
                  savings += s - (int)(vp9_cost_zero(upd));
                else
                  savings -= (int)(vp9_cost_zero(upd));
                update[u]++;
              }
            }
          }
        }
      }

      // printf("Update %d %d, savings %d\n", update[0], update[1], savings);
      /* Is coef updated at all */
      if (update[1] == 0 || savings < 0) {
        vp9_write_bit(bc, 0);
        return;
      }
      vp9_write_bit(bc, 1);
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                const vp9_prob upd = DIFF_UPDATE_PROB;
                int s;
                int u = 0;
                if (t == PIVOT_NODE)
                  s = vp9_prob_diff_update_savings_search_model(
                      frame_branch_ct[i][j][k][l][0],
                      old_coef_probs[i][j][k][l], &newp, upd);
                else
                  s = vp9_prob_diff_update_savings_search(
                      frame_branch_ct[i][j][k][l][t],
                      *oldp, &newp, upd);
                if (s > 0 && newp != *oldp)
                  u = 1;
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      return;
    }

    case ONE_LOOP:
    case ONE_LOOP_REDUCED: {
      const int prev_coef_contexts_to_update =
          cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ?
              COEFF_CONTEXTS >> 1 : COEFF_CONTEXTS;
      const int coef_band_to_update =
          cpi->sf.use_fast_coef_updates == ONE_LOOP_REDUCED ?
              COEF_BANDS >> 1 : COEF_BANDS;
      int updates = 0;
      int noupdates_before_first = 0;
      for (i = 0; i < PLANE_TYPES; ++i) {
        for (j = 0; j < REF_TYPES; ++j) {
          for (k = 0; k < COEF_BANDS; ++k) {
            for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
              // calc probs and branch cts for this frame only
              for (t = 0; t < entropy_nodes_update; ++t) {
                vp9_prob newp = new_coef_probs[i][j][k][l][t];
                vp9_prob *oldp = old_coef_probs[i][j][k][l] + t;
                int s;
                int u = 0;
                if (l >= prev_coef_contexts_to_update ||
                    k >= coef_band_to_update) {
                  u = 0;
                } else {
                  if (t == PIVOT_NODE)
                    s = vp9_prob_diff_update_savings_search_model(
                        frame_branch_ct[i][j][k][l][0],
                        old_coef_probs[i][j][k][l], &newp, upd);
                  else
                    s = vp9_prob_diff_update_savings_search(
                        frame_branch_ct[i][j][k][l][t],
                        *oldp, &newp, upd);
                  if (s > 0 && newp != *oldp)
                    u = 1;
                }
                updates += u;
                if (u == 0 && updates == 0) {
                  noupdates_before_first++;
                  continue;
                }
                if (u == 1 && updates == 1) {
                  int v;
                  // first update
                  vp9_write_bit(bc, 1);
                  for (v = 0; v < noupdates_before_first; ++v)
                    vp9_write(bc, 0, upd);
                }
                vp9_write(bc, u, upd);
                if (u) {
                  /* send/use new probability */
                  vp9_write_prob_diff_update(bc, newp, *oldp);
                  *oldp = newp;
                }
              }
            }
          }
        }
      }
      if (updates == 0) {
        vp9_write_bit(bc, 0);  // no updates
      }
      return;
    }

    default:
      assert(0);
  }
}
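
// Strategy recap: TWO_LOOP first measures the total savings of all
// candidate updates and writes a single "any update at all" bit, letting
// the encoder back out when updates would not pay for their flag bits;
// ONE_LOOP_REDUCED trades some compression for speed by restricting the
// search to the lower coefficient bands and contexts and emitting as it
// goes.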

static void update_coef_probs(VP9_COMP *cpi, vp9_writer* w) {
  const TX_MODE tx_mode = cpi->common.tx_mode;
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  vp9_coeff_stats frame_branch_ct[TX_SIZES][PLANE_TYPES];
  vp9_coeff_probs_model frame_coef_probs[TX_SIZES][PLANE_TYPES];

  vp9_clear_system_state();

  for (tx_size = TX_4X4; tx_size <= TX_32X32; ++tx_size)
    build_tree_distribution(cpi, tx_size, frame_branch_ct[tx_size],
                            frame_coef_probs[tx_size]);

  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    update_coef_probs_common(w, cpi, tx_size, frame_branch_ct[tx_size],
                             frame_coef_probs[tx_size]);
}

static void encode_loopfilter(struct loopfilter *lf,
                              struct vp9_write_bit_buffer *wb) {
  int i;

  // Encode the loop filter level and type
  vp9_wb_write_literal(wb, lf->filter_level, 6);
  vp9_wb_write_literal(wb, lf->sharpness_level, 3);

  // Write out loop filter deltas applied at the MB level based on mode or
  // ref frame (if they are enabled).
  vp9_wb_write_bit(wb, lf->mode_ref_delta_enabled);

  if (lf->mode_ref_delta_enabled) {
    vp9_wb_write_bit(wb, lf->mode_ref_delta_update);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_LF_DELTAS; i++) {
        const int delta = lf->ref_deltas[i];
        const int changed = delta != lf->last_ref_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_ref_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++) {
        const int delta = lf->mode_deltas[i];
        const int changed = delta != lf->last_mode_deltas[i];
        vp9_wb_write_bit(wb, changed);
        if (changed) {
          lf->last_mode_deltas[i] = delta;
          vp9_wb_write_literal(wb, abs(delta) & 0x3F, 6);
          vp9_wb_write_bit(wb, delta < 0);
        }
      }
    }
  }
}
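
// Each delta is resent only when it differs from the previous frame, as a
// 6-bit magnitude plus a sign bit; last_*_deltas is updated in place so the
// "changed" test on the next frame compares against the decoder's current
// state.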

static void write_delta_q(struct vp9_write_bit_buffer *wb, int delta_q) {
  if (delta_q != 0) {
    vp9_wb_write_bit(wb, 1);
    vp9_wb_write_literal(wb, abs(delta_q), 4);
    vp9_wb_write_bit(wb, delta_q < 0);
  } else {
    vp9_wb_write_bit(wb, 0);
  }
}

static void encode_quantization(VP9_COMMON *cm,
                                struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
  write_delta_q(wb, cm->y_dc_delta_q);
  write_delta_q(wb, cm->uv_dc_delta_q);
  write_delta_q(wb, cm->uv_ac_delta_q);
}

static void encode_segmentation(VP9_COMP *cpi,
                                struct vp9_write_bit_buffer *wb) {
  int i, j;

  struct segmentation *seg = &cpi->common.seg;

  vp9_wb_write_bit(wb, seg->enabled);
  if (!seg->enabled)
    return;

  // Segmentation map
  vp9_wb_write_bit(wb, seg->update_map);
  if (seg->update_map) {
    // Select the coding strategy (temporal or spatial)
    vp9_choose_segmap_coding_method(cpi);
    // Write out probabilities used to decode unpredicted macro-block segments
    for (i = 0; i < SEG_TREE_PROBS; i++) {
      const int prob = seg->tree_probs[i];
      const int update = prob != MAX_PROB;
      vp9_wb_write_bit(wb, update);
      if (update)
        vp9_wb_write_literal(wb, prob, 8);
    }

    // Write out the chosen coding method.
    vp9_wb_write_bit(wb, seg->temporal_update);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++) {
        const int prob = seg->pred_probs[i];
        const int update = prob != MAX_PROB;
        vp9_wb_write_bit(wb, update);
        if (update)
          vp9_wb_write_literal(wb, prob, 8);
      }
    }
  }

  // Segmentation data
  vp9_wb_write_bit(wb, seg->update_data);
  if (seg->update_data) {
    vp9_wb_write_bit(wb, seg->abs_delta);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int active = vp9_segfeature_active(seg, i, j);
        vp9_wb_write_bit(wb, active);
        if (active) {
          const int data = vp9_get_segdata(seg, i, j);
          const int data_max = vp9_seg_feature_data_max(j);

          if (vp9_is_segfeature_signed(j)) {
            encode_unsigned_max(wb, abs(data), data_max);
            vp9_wb_write_bit(wb, data < 0);
          } else {
            encode_unsigned_max(wb, data, data_max);
          }
        }
      }
    }
  }
}
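
// Probabilities the decoder does not receive here default to MAX_PROB, so
// each tree/prediction probability costs one flag bit when unchanged and
// nine bits (flag plus 8-bit literal) when updated.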

static void encode_txfm_probs(VP9_COMMON *cm, vp9_writer *w) {
  // Mode
  vp9_write_literal(w, MIN(cm->tx_mode, ALLOW_32X32), 2);
  if (cm->tx_mode >= ALLOW_32X32)
    vp9_write_bit(w, cm->tx_mode == TX_MODE_SELECT);

  // Probabilities
  if (cm->tx_mode == TX_MODE_SELECT) {
    int i, j;
    unsigned int ct_8x8p[TX_SIZES - 3][2];
    unsigned int ct_16x16p[TX_SIZES - 2][2];
    unsigned int ct_32x32p[TX_SIZES - 1][2];

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_8x8(cm->counts.tx.p8x8[i], ct_8x8p);
      for (j = 0; j < TX_SIZES - 3; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p8x8[i][j], ct_8x8p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_16x16(cm->counts.tx.p16x16[i], ct_16x16p);
      for (j = 0; j < TX_SIZES - 2; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p16x16[i][j],
                                  ct_16x16p[j]);
    }

    for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
      tx_counts_to_branch_counts_32x32(cm->counts.tx.p32x32[i], ct_32x32p);
      for (j = 0; j < TX_SIZES - 1; j++)
        vp9_cond_prob_diff_update(w, &cm->fc.tx_probs.p32x32[i][j],
                                  ct_32x32p[j]);
    }
  }
}

static void write_interp_filter(INTERP_FILTER filter,
                                struct vp9_write_bit_buffer *wb) {
  const int filter_to_literal[] = { 1, 0, 2, 3 };

  vp9_wb_write_bit(wb, filter == SWITCHABLE);
  if (filter != SWITCHABLE)
    vp9_wb_write_literal(wb, filter_to_literal[filter], 2);
}

static void fix_interp_filter(VP9_COMMON *cm) {
  if (cm->interp_filter == SWITCHABLE) {
    // Check to see if only one of the filters is actually used
    int count[SWITCHABLE_FILTERS];
    int i, j, c = 0;
    for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
      count[i] = 0;
      for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
        count[i] += cm->counts.switchable_interp[j][i];
      c += (count[i] > 0);
    }
    if (c == 1) {
      // Only one filter is used. So set the filter at frame level
      for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
        if (count[i]) {
          cm->interp_filter = i;
          break;
        }
      }
    }
  }
}

static void write_tile_info(VP9_COMMON *cm, struct vp9_write_bit_buffer *wb) {
  int min_log2_tile_cols, max_log2_tile_cols, ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  ones = cm->log2_tile_cols - min_log2_tile_cols;
  while (ones--)
    vp9_wb_write_bit(wb, 1);

  if (cm->log2_tile_cols < max_log2_tile_cols)
    vp9_wb_write_bit(wb, 0);

  // rows
  vp9_wb_write_bit(wb, cm->log2_tile_rows != 0);
  if (cm->log2_tile_rows != 0)
    vp9_wb_write_bit(wb, cm->log2_tile_rows != 1);
}
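
// Example of the column coding above: with min_log2_tile_cols == 0,
// max_log2_tile_cols == 6 and cm->log2_tile_cols == 2, the writer emits
// bits 1, 1, 0: a unary count of extra column doublings, terminated by a
// zero while more doublings are still possible.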

static int get_refresh_mask(VP9_COMP *cpi) {
  if (!cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
      cpi->rc.is_src_frame_alt_ref && !cpi->use_svc) {
    // Preserve the previously existing golden frame and update the frame in
    // the alt ref slot instead. This is highly specific to the use of
    // alt-ref as a forward reference, and this needs to be generalized as
    // other uses are implemented (like RTC/temporal scaling)
    //
    // gld_fb_idx and alt_fb_idx need to be swapped for future frames, but
    // that happens in vp9_encoder.c:update_reference_frames() so that it can
    // be done outside of the recode loop.
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->alt_fb_idx);
  } else {
    int arf_idx = cpi->alt_fb_idx;
    if ((cpi->pass == 2) && cpi->multi_arf_allowed) {
      const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
      arf_idx = gf_group->arf_update_idx[gf_group->index];
    }
    return (cpi->refresh_last_frame << cpi->lst_fb_idx) |
           (cpi->refresh_golden_frame << cpi->gld_fb_idx) |
           (cpi->refresh_alt_ref_frame << arf_idx);
  }
}

static size_t encode_tiles(VP9_COMP *cpi, uint8_t *data_ptr) {
  VP9_COMMON *const cm = &cpi->common;
  vp9_writer residual_bc;

  int tile_row, tile_col;
  TOKENEXTRA *tok[4][1 << 6], *tok_end;
  size_t total_size = 0;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;

  vpx_memset(cm->above_seg_context, 0, sizeof(*cm->above_seg_context) *
             mi_cols_aligned_to_sb(cm->mi_cols));

  tok[0][0] = cpi->tok;
  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    if (tile_row)
      tok[tile_row][0] = tok[tile_row - 1][tile_cols - 1] +
                         cpi->tok_count[tile_row - 1][tile_cols - 1];

    for (tile_col = 1; tile_col < tile_cols; tile_col++)
      tok[tile_row][tile_col] = tok[tile_row][tile_col - 1] +
                                cpi->tok_count[tile_row][tile_col - 1];
  }

  for (tile_row = 0; tile_row < tile_rows; tile_row++) {
    for (tile_col = 0; tile_col < tile_cols; tile_col++) {
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, tile_col);
      tok_end = tok[tile_row][tile_col] + cpi->tok_count[tile_row][tile_col];

      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1)
        vp9_start_encode(&residual_bc, data_ptr + total_size + 4);
      else
        vp9_start_encode(&residual_bc, data_ptr + total_size);

      write_modes(cpi, &tile, &residual_bc, &tok[tile_row][tile_col], tok_end);
      assert(tok[tile_row][tile_col] == tok_end);
      vp9_stop_encode(&residual_bc);
      if (tile_col < tile_cols - 1 || tile_row < tile_rows - 1) {
        // size of this tile
        mem_put_be32(data_ptr + total_size, residual_bc.pos);
        total_size += 4;
      }

      total_size += residual_bc.pos;
    }
  }

  return total_size;
}
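
// Every tile except the last is prefixed with its compressed size as a
// 4-byte big-endian value (the mem_put_be32() above); this is what allows
// a decoder to locate tile boundaries and decode tiles in parallel.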

static void write_display_size(const VP9_COMMON *cm,
                               struct vp9_write_bit_buffer *wb) {
  const int scaling_active = cm->width != cm->display_width ||
                             cm->height != cm->display_height;
  vp9_wb_write_bit(wb, scaling_active);
  if (scaling_active) {
    vp9_wb_write_literal(wb, cm->display_width - 1, 16);
    vp9_wb_write_literal(wb, cm->display_height - 1, 16);
  }
}

static void write_frame_size(const VP9_COMMON *cm,
                             struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, cm->width - 1, 16);
  vp9_wb_write_literal(wb, cm->height - 1, 16);

  write_display_size(cm, wb);
}

static void write_frame_size_with_refs(VP9_COMP *cpi,
                                       struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;
  int found = 0;

  MV_REFERENCE_FRAME ref_frame;
  for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
    YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, ref_frame);
    found = cm->width == cfg->y_crop_width &&
            cm->height == cfg->y_crop_height;

    // Set "found" to 0 for temporal svc and for spatial svc key frame
    if (cpi->use_svc &&
        (cpi->svc.number_spatial_layers == 1 ||
         cpi->svc.layer_context[cpi->svc.spatial_layer_id].is_key_frame)) {
      found = 0;
    }
    vp9_wb_write_bit(wb, found);
    if (found) {
      break;
    }
  }

  if (!found) {
    vp9_wb_write_literal(wb, cm->width - 1, 16);
    vp9_wb_write_literal(wb, cm->height - 1, 16);
  }

  write_display_size(cm, wb);
}

static void write_sync_code(struct vp9_write_bit_buffer *wb) {
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_0, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_1, 8);
  vp9_wb_write_literal(wb, VP9_SYNC_CODE_2, 8);
}

static void write_profile(BITSTREAM_PROFILE profile,
                          struct vp9_write_bit_buffer *wb) {
  switch (profile) {
    case PROFILE_0:
      vp9_wb_write_literal(wb, 0, 2);
      break;
    case PROFILE_1:
      vp9_wb_write_literal(wb, 2, 2);
      break;
    case PROFILE_2:
      vp9_wb_write_literal(wb, 1, 2);
      break;
    case PROFILE_3:
      vp9_wb_write_literal(wb, 6, 3);
      break;
    default:
      assert(0);
  }
}
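
// The literals look shuffled because vp9_wb_write_literal() emits bits
// MSB-first while decoders read profile_low_bit before profile_high_bit:
// "00" -> profile 0, "10" -> profile 1, "01" -> profile 2, and "110" ->
// profile 3 followed by a reserved zero bit.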

static void write_uncompressed_header(VP9_COMP *cpi,
                                      struct vp9_write_bit_buffer *wb) {
  VP9_COMMON *const cm = &cpi->common;

  vp9_wb_write_literal(wb, VP9_FRAME_MARKER, 2);

  write_profile(cm->profile, wb);

  vp9_wb_write_bit(wb, 0);  // show_existing_frame
  vp9_wb_write_bit(wb, cm->frame_type);
  vp9_wb_write_bit(wb, cm->show_frame);
  vp9_wb_write_bit(wb, cm->error_resilient_mode);

  if (cm->frame_type == KEY_FRAME) {
    const COLOR_SPACE cs = UNKNOWN;
    write_sync_code(wb);
    if (cm->profile > PROFILE_1) {
      assert(cm->bit_depth > BITS_8);
      vp9_wb_write_bit(wb, cm->bit_depth - BITS_10);
    }
    vp9_wb_write_literal(wb, cs, 3);
    if (cs != SRGB) {
      vp9_wb_write_bit(wb, 0);  // 0: [16, 235] (i.e. xvYCC), 1: [0, 255]
      if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
        vp9_wb_write_bit(wb, cm->subsampling_x);
        vp9_wb_write_bit(wb, cm->subsampling_y);
        vp9_wb_write_bit(wb, 0);  // unused
      }
    } else {
      assert(cm->profile == PROFILE_1 || cm->profile == PROFILE_3);
      vp9_wb_write_bit(wb, 0);  // unused
    }

    write_frame_size(cm, wb);
  } else {
    if (!cm->show_frame)
      vp9_wb_write_bit(wb, cm->intra_only);

    if (!cm->error_resilient_mode)
      vp9_wb_write_literal(wb, cm->reset_frame_context, 2);

    if (cm->intra_only) {
      write_sync_code(wb);

      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      write_frame_size(cm, wb);
    } else {
      MV_REFERENCE_FRAME ref_frame;
      vp9_wb_write_literal(wb, get_refresh_mask(cpi), REF_FRAMES);
      for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
        vp9_wb_write_literal(wb, get_ref_frame_idx(cpi, ref_frame),
                             REF_FRAMES_LOG2);
        vp9_wb_write_bit(wb, cm->ref_frame_sign_bias[ref_frame]);
      }

      write_frame_size_with_refs(cpi, wb);

      vp9_wb_write_bit(wb, cm->allow_high_precision_mv);

      fix_interp_filter(cm);
      write_interp_filter(cm->interp_filter, wb);
    }
  }

  if (!cm->error_resilient_mode) {
    vp9_wb_write_bit(wb, cm->refresh_frame_context);
    vp9_wb_write_bit(wb, cm->frame_parallel_decoding_mode);
  }

  vp9_wb_write_literal(wb, cm->frame_context_idx, FRAME_CONTEXTS_LOG2);

  encode_loopfilter(&cm->lf, wb);
  encode_quantization(cm, wb);
  encode_segmentation(cpi, wb);

  write_tile_info(cm, wb);
}

static size_t write_compressed_header(VP9_COMP *cpi, uint8_t *data) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_writer header_bc;

  vp9_start_encode(&header_bc, data);

  if (xd->lossless)
    cm->tx_mode = ONLY_4X4;
  else
    encode_txfm_probs(cm, &header_bc);

  update_coef_probs(cpi, &header_bc);
  update_skip_probs(cm, &header_bc);

  if (!frame_is_intra_only(cm)) {
    int i;

    for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
      prob_diff_update(vp9_inter_mode_tree, cm->fc.inter_mode_probs[i],
                       cm->counts.inter_mode[i], INTER_MODES, &header_bc);

    vp9_zero(cm->counts.inter_mode);

    if (cm->interp_filter == SWITCHABLE)
      update_switchable_interp_probs(cm, &header_bc);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                cm->counts.intra_inter[i]);

    if (cm->allow_comp_inter_inter) {
      const int use_compound_pred = cm->reference_mode != SINGLE_REFERENCE;
      const int use_hybrid_pred = cm->reference_mode == REFERENCE_MODE_SELECT;

      vp9_write_bit(&header_bc, use_compound_pred);
      if (use_compound_pred) {
        vp9_write_bit(&header_bc, use_hybrid_pred);
        if (use_hybrid_pred)
          for (i = 0; i < COMP_INTER_CONTEXTS; i++)
            vp9_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      cm->counts.comp_inter[i]);
      }
    }

    if (cm->reference_mode != COMPOUND_REFERENCE) {
      for (i = 0; i < REF_CONTEXTS; i++) {
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                  cm->counts.single_ref[i][0]);
        vp9_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                  cm->counts.single_ref[i][1]);
      }
    }

    if (cm->reference_mode != SINGLE_REFERENCE)
      for (i = 0; i < REF_CONTEXTS; i++)
        vp9_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                  cm->counts.comp_ref[i]);

    for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
      prob_diff_update(vp9_intra_mode_tree, cm->fc.y_mode_prob[i],
                       cm->counts.y_mode[i], INTRA_MODES, &header_bc);

    for (i = 0; i < PARTITION_CONTEXTS; ++i)
      prob_diff_update(vp9_partition_tree, fc->partition_prob[i],
                       cm->counts.partition[i], PARTITION_TYPES, &header_bc);

    vp9_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc);
  }

  vp9_stop_encode(&header_bc);
  assert(header_bc.pos <= 0xffff);

  return header_bc.pos;
}

void vp9_pack_bitstream(VP9_COMP *cpi, uint8_t *dest, size_t *size) {
  uint8_t *data = dest;
  size_t first_part_size, uncompressed_hdr_size;
  struct vp9_write_bit_buffer wb = {data, 0};
  struct vp9_write_bit_buffer saved_wb;

  write_uncompressed_header(cpi, &wb);
  saved_wb = wb;
  vp9_wb_write_literal(&wb, 0, 16);  // don't know in advance first part. size

  uncompressed_hdr_size = vp9_rb_bytes_written(&wb);
  data += uncompressed_hdr_size;

  vp9_compute_update_table();

  vp9_clear_system_state();

  first_part_size = write_compressed_header(cpi, data);
  data += first_part_size;
  // TODO(jbb): Figure out what to do if first_part_size > 16 bits.
  vp9_wb_write_literal(&saved_wb, (int)first_part_size, 16);

  data += encode_tiles(cpi, data);

  *size = data - dest;
}
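
// Resulting frame layout: [uncompressed header] [16-bit first-partition
// size, back-filled through saved_wb] [compressed header] [tile data, with
// each non-final tile preceded by its 4-byte size].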