/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <stdlib.h>  // qsort()

#include "./vp9_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_dthread.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_reader.h"
#include "vp9/decoder/vp9_thread.h"
static int read_be32(const uint8_t *p) {
  return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}
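// Tile sizes are stored in the bitstream as 32-bit big-endian values;
// read_be32() is the helper get_tile() uses below to parse them.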
static int is_compound_reference_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;

  return 0;
}
static void setup_compound_reference(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}
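// Compound prediction pairs the two references that share a sign bias (the
// "variable" refs) and fixes the remaining, opposite-direction reference as
// the second predictor; the sign-bias comparisons above select that split.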
static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);
}

static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}

static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}
static void read_tx_mode_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}
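// The per-context tx-size trees grow with the largest transform allowed:
// with TX_SIZES == 4 the p8x8 node carries 1 probability, p16x16 carries 2
// and p32x32 carries 3, which is where the TX_SIZES - 3/2/1 bounds come from.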
static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static REFERENCE_MODE read_reference_mode(VP9_COMMON *cm, vp9_reader *r) {
  if (is_compound_reference_allowed(cm)) {
    REFERENCE_MODE mode = vp9_read_bit(r);
    if (mode)
      mode += vp9_read_bit(r);
    setup_compound_reference(cm);
    return mode;
  } else {
    return SINGLE_REFERENCE;
  }
}
static void read_reference_mode_probs(VP9_COMMON *cm, vp9_reader *r) {
  int i;
  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; i++) {
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &cm->fc.single_ref_prob[i][1]);
    }

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; i++)
      vp9_diff_update_prob(r, &cm->fc.comp_ref_prob[i]);
}
static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, NMV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}
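// Updated MV probabilities are sent as 7-bit literals; (literal << 1) | 1
// maps them onto the odd values 1..255, the precision at which NMV
// probabilities are represented.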
static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}
static void setup_plane_dequants(VP9_COMMON *cm, MACROBLOCKD *xd, int q_index) {
  int i;
  xd->plane[0].dequant = cm->y_dequant[q_index];

  for (i = 1; i < MAX_MB_PLANE; i++)
    xd->plane[i].dequant = cm->uv_dequant[q_index];
}
// Allocate storage for each tile column.
// TODO(jzern): when max_threads <= 1 the same storage could be used for each
// tile.
static void alloc_tile_storage(VP9D_COMP *pbi, int tile_rows, int tile_cols) {
  VP9_COMMON *const cm = &pbi->common;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  int i, tile_row, tile_col;

  CHECK_MEM_ERROR(cm, pbi->mi_streams,
                  vpx_realloc(pbi->mi_streams, tile_rows * tile_cols *
                              sizeof(*pbi->mi_streams)));
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo tile;
      vp9_tile_init(&tile, cm, tile_row, tile_col);
      pbi->mi_streams[tile_row * tile_cols + tile_col] =
          &cm->mi[tile.mi_row_start * cm->mode_info_stride
                  + tile.mi_col_start];
    }
  }

  // 2 contexts per 'mi unit', so that we have one context per 4x4 txfm
  // block where mi unit size is 8x8.
  CHECK_MEM_ERROR(cm, pbi->above_context[0],
                  vpx_realloc(pbi->above_context[0],
                              sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
                              2 * aligned_mi_cols));
  for (i = 1; i < MAX_MB_PLANE; ++i) {
    pbi->above_context[i] = pbi->above_context[0] +
                            i * sizeof(*pbi->above_context[0]) *
                            2 * aligned_mi_cols;
  }

  // This is sized based on the entire frame. Each tile operates within its
  // column dimensions.
  CHECK_MEM_ERROR(cm, pbi->above_seg_context,
                  vpx_realloc(pbi->above_seg_context,
                              sizeof(*pbi->above_seg_context) *
                              aligned_mi_cols));
}
static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
                                    TX_SIZE tx_size, uint8_t *dst, int stride,
                                    int eob) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  if (eob > 0) {
    TX_TYPE tx_type;
    const int plane_type = pd->plane_type;
    int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
    switch (tx_size) {
      case TX_4X4:
        tx_type = get_tx_type_4x4(plane_type, xd, block);
        if (tx_type == DCT_DCT)
          xd->itxm_add(dqcoeff, dst, stride, eob);
        else
          vp9_iht4x4_16_add(dqcoeff, dst, stride, tx_type);
        break;
      case TX_8X8:
        tx_type = get_tx_type_8x8(plane_type, xd);
        vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_16X16:
        tx_type = get_tx_type_16x16(plane_type, xd);
        vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
        break;
      case TX_32X32:
        tx_type = DCT_DCT;
        vp9_idct32x32_add(dqcoeff, dst, stride, eob);
        break;
      default:
        assert(0 && "Invalid transform size");
    }
    if (eob == 1) {
      vpx_memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        vpx_memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
      else if (tx_size == TX_32X32 && eob <= 34)
        vpx_memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      else
        vpx_memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
    }
  }
}
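// Note on the vpx_memset() calls above: only the coefficients that could have
// been populated for the given eob are cleared (2 values for a lone DC term,
// a few rows for small eob values, the whole block otherwise), which keeps the
// shared dqcoeff buffer clean for the next block at minimal cost.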
struct intra_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
};

static void predict_and_reconstruct_intra_block(int plane, int block,
                                                BLOCK_SIZE plane_bsize,
                                                TX_SIZE tx_size, void *arg) {
  struct intra_args *const args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi_8x8[0];
  const MB_PREDICTION_MODE mode = (plane == 0)
          ? ((mi->mbmi.sb_type < BLOCK_8X8) ? mi->bmi[block].as_mode
                                            : mi->mbmi.mode)
          : mi->mbmi.uv_mode;
  int x, y;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];

  vp9_predict_intra_block(xd, block >> (tx_size << 1),
                          b_width_log2(plane_bsize), tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride,
                          x, y, plane);

  if (!mi->mbmi.skip) {
    const int eob = vp9_decode_block_tokens(cm, xd, plane, block,
                                            plane_bsize, x, y, tx_size,
                                            args->r);
    inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride,
                            eob);
  }
}
struct inter_args {
  VP9_COMMON *cm;
  MACROBLOCKD *xd;
  vp9_reader *r;
  int *eobtotal;
};

static void reconstruct_inter_block(int plane, int block,
                                    BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg) {
  struct inter_args *args = arg;
  VP9_COMMON *const cm = args->cm;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int x, y, eob;

  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  eob = vp9_decode_block_tokens(cm, xd, plane, block, plane_bsize, x, y,
                                tx_size, args->r);
  inverse_transform_block(xd, plane, block, tx_size,
                          &pd->dst.buf[4 * y * pd->dst.stride + 4 * x],
                          pd->dst.stride, eob);
  *args->eobtotal += eob;
}
static void set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                        const TileInfo *const tile,
                        BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  const int offset = mi_row * cm->mode_info_stride + mi_col;
  const int tile_offset = tile->mi_row_start * cm->mode_info_stride +
                          tile->mi_col_start;
  int x, y;

  xd->mi_8x8 = cm->mi_grid_visible + offset;
  xd->prev_mi_8x8 = cm->prev_mi_grid_visible + offset;
  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->last_mi = cm->prev_mi ? xd->prev_mi_8x8[0] : NULL;

  xd->mi_8x8[0] = xd->mi_stream + offset - tile_offset;
  xd->mi_8x8[0]->mbmi.sb_type = bsize;
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x)
      xd->mi_8x8[y * cm->mode_info_stride + x] = xd->mi_8x8[0];

  set_skip_context(xd, xd->above_context, xd->left_context, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units.
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  setup_dst_planes(xd, get_frame_new_buffer(cm), mi_row, mi_col);
}
static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                    int idx, int mi_row, int mi_col) {
  MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
  RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
  xd->block_refs[idx] = ref_buffer;
  if (!vp9_is_valid_scale(&ref_buffer->sf))
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid scale factors");
  setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col, &ref_buffer->sf);
  xd->corrupted |= ref_buffer->buf->corrupted;
}
static void decode_modes_b(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                           const TileInfo *const tile,
                           int mi_row, int mi_col,
                           vp9_reader *r, BLOCK_SIZE bsize) {
  const int less8x8 = bsize < BLOCK_8X8;
  MB_MODE_INFO *mbmi;

  set_offsets(cm, xd, tile, bsize, mi_row, mi_col);
  vp9_read_mode_info(cm, xd, tile, mi_row, mi_col, r);

  if (less8x8)
    bsize = BLOCK_8X8;

  // Has to be called after set_offsets
  mbmi = &xd->mi_8x8[0]->mbmi;

  if (mbmi->skip) {
    reset_skip_context(xd, bsize);
  } else {
    if (cm->seg.enabled)
      setup_plane_dequants(cm, xd, vp9_get_qindex(&cm->seg, mbmi->segment_id,
                                                  cm->base_qindex));
  }

  if (!is_inter_block(mbmi)) {
    struct intra_args arg = { cm, xd, r };
    vp9_foreach_transformed_block(xd, bsize,
                                  predict_and_reconstruct_intra_block, &arg);
  } else {
    set_ref(cm, xd, 0, mi_row, mi_col);
    if (has_second_ref(mbmi))
      set_ref(cm, xd, 1, mi_row, mi_col);

    xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter);

    vp9_dec_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);

    if (!mbmi->skip) {
      int eobtotal = 0;
      struct inter_args arg = { cm, xd, r, &eobtotal };
      vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
      if (!less8x8 && eobtotal == 0)
        mbmi->skip = 1;  // skip loopfilter
    }
  }

  xd->corrupted |= vp9_reader_has_error(r);
}
static PARTITION_TYPE read_partition(VP9_COMMON *cm, MACROBLOCKD *xd, int hbs,
                                     int mi_row, int mi_col, BLOCK_SIZE bsize,
                                     vp9_reader *r) {
  const int ctx = partition_plane_context(xd->above_seg_context,
                                          xd->left_seg_context,
                                          mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(cm, ctx);
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;
  PARTITION_TYPE p;

  if (has_rows && has_cols)
    p = vp9_read_tree(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (!cm->frame_parallel_decoding_mode)
    ++cm->counts.partition[ctx][p];

  return p;
}
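// At the bottom/right frame edge only the partitions that keep all blocks
// inside the picture are legal, so a single bit chooses between HORZ/VERT and
// SPLIT; when the block is past both edges the partition is forced to SPLIT.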
static void decode_modes_sb(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                            const TileInfo *const tile,
                            int mi_row, int mi_col,
                            vp9_reader* r, BLOCK_SIZE bsize) {
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = read_partition(cm, xd, hbs, mi_row, mi_col, bsize, r);
  subsize = get_subsize(bsize, partition);
  if (subsize < BLOCK_8X8) {
    decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
        break;
      case PARTITION_HORZ:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
        if (mi_row + hbs < cm->mi_rows)
          decode_modes_b(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
        break;
      case PARTITION_VERT:
        decode_modes_b(cm, xd, tile, mi_row, mi_col, r, subsize);
        if (mi_col + hbs < cm->mi_cols)
          decode_modes_b(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
        break;
      case PARTITION_SPLIT:
        decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, subsize);
        decode_modes_sb(cm, xd, tile, mi_row, mi_col + hbs, r, subsize);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col, r, subsize);
        decode_modes_sb(cm, xd, tile, mi_row + hbs, mi_col + hbs, r, subsize);
        break;
      default:
        assert(0 && "Invalid partition type");
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd->above_seg_context, xd->left_seg_context,
                             mi_row, mi_col, subsize, bsize);
}
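// decode_modes_sb() is entered once per 64x64 superblock (see decode_tile()
// and tile_worker_hook()) and recurses down the partition tree, handling leaf
// blocks in decode_modes_b().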
static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vp9_reader *r) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vp9_reader_init(r, data, read_size))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}
static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vp9_reader *r) {
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < PLANE_TYPES; ++i)
      for (j = 0; j < REF_TYPES; ++j)
        for (k = 0; k < COEF_BANDS; ++k)
          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
            for (m = 0; m < UNCONSTRAINED_NODES; ++m)
              vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
                            vp9_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}
static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
    return;

  // Segmentation map update
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;

    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }

        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}
static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}

static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int old = *delta_q;
  *delta_q = vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
  return old != *delta_q;
}
static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

  xd->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;
}
static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) {
  const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH,
                                              EIGHTTAP,
                                              EIGHTTAP_SHARP,
                                              BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_filter[vp9_rb_read_literal(rb, 2)];
}
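// The 2-bit filter literal in the bitstream is not in INTERP_FILTER enum
// order, hence the explicit literal_to_filter[] mapping; when the preceding
// bit is set the filter is signalled per-block as SWITCHABLE instead.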
static void read_frame_size(struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  const int w = vp9_rb_read_literal(rb, 16) + 1;
  const int h = vp9_rb_read_literal(rb, 16) + 1;
  *width = w;
  *height = h;
}

static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(rb, &cm->display_width, &cm->display_height);
}
static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    // Change in frame size.
    // TODO(agrange) Don't test width/height, check overall size.
    if (width > cm->width || height > cm->height) {
      // Rescale frame buffers only if they're not big enough already.
      if (vp9_resize_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  if (vp9_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height,
          cm->subsampling_x, cm->subsampling_y, VP9_DEC_BORDER_IN_PIXELS,
          &cm->frame_bufs[cm->new_fb_idx].raw_frame_buffer, cm->get_fb_cb,
          cm->cb_priv)) {
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  }
}
static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  int width, height;
  read_frame_size(rb, &width, &height);
  apply_frame_size(pbi, width, height);
  setup_display_size(&pbi->common, rb);
}

static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;

  int width, height;
  int found = 0, i;
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
      width = buf->y_crop_width;
      height = buf->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(rb, &width, &height);

  if (width <= 0 || height <= 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  apply_frame_size(pbi, width, height);
  setup_display_size(cm, rb);
}
static void setup_tile_context(VP9D_COMP *const pbi, MACROBLOCKD *const xd,
                               int tile_row, int tile_col) {
  int i;
  const int tile_cols = 1 << pbi->common.log2_tile_cols;
  xd->mi_stream = pbi->mi_streams[tile_row * tile_cols + tile_col];

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    xd->above_context[i] = pbi->above_context[i];
  }
  // see note in alloc_tile_storage().
  xd->above_seg_context = pbi->above_seg_context;
}
static void decode_tile(VP9D_COMP *pbi, const TileInfo *const tile,
                        vp9_reader *r) {
  const int num_threads = pbi->oxcf.max_threads;
  VP9_COMMON *const cm = &pbi->common;
  int mi_row, mi_col;
  MACROBLOCKD *xd = &pbi->mb;

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    lf_data->frame_buffer = get_frame_new_buffer(cm);
    lf_data->cm = cm;
    lf_data->xd = pbi->mb;
    lf_data->stop = 0;
    lf_data->y_only = 0;
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    // the SB.
    vp9_zero(xd->left_context);
    vp9_zero(xd->left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(cm, xd, tile, mi_row, mi_col, r, BLOCK_64X64);
    }

    if (pbi->do_loopfilter_inline) {
      const int lf_start = mi_row - MI_BLOCK_SIZE;
      LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

      // delay the loopfilter by 1 macroblock row.
      if (lf_start < 0) continue;

      // decoding has completed: finish up the loop filter in this thread.
      if (mi_row + MI_BLOCK_SIZE >= tile->mi_row_end) continue;

      vp9_worker_sync(&pbi->lf_worker);
      lf_data->start = lf_start;
      lf_data->stop = mi_row;
      if (num_threads > 1) {
        vp9_worker_launch(&pbi->lf_worker);
      } else {
        vp9_worker_execute(&pbi->lf_worker);
      }
    }
  }

  if (pbi->do_loopfilter_inline) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

    vp9_worker_sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    vp9_worker_execute(&pbi->lf_worker);
  }
}
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}
// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static size_t get_tile(const uint8_t *const data_end,
                       int is_last,
                       struct vpx_internal_error_info *error_info,
                       const uint8_t **data) {
  size_t size;

  if (!is_last) {
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

    size = read_be32(*data);
    *data += 4;

    if (size > (size_t)(data_end - *data))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile size");
  } else {
    size = data_end - *data;
  }
  return size;
}
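// Every tile except the last is prefixed with a 4-byte big-endian length
// (parsed with read_be32()); the last tile simply spans the rest of the
// frame's data buffer.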
typedef struct TileBuffer {
  const uint8_t *data;
  size_t size;
  int col;  // only used with multi-threaded decoding
} TileBuffer;
static const uint8_t *decode_tiles(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const uint8_t *end = NULL;
  vp9_reader r;

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE * 2 * aligned_cols);

  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_cols);

  // Load tile data into tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int last_tile = tile_row == tile_rows - 1 &&
                            tile_col == tile_cols - 1;
      const size_t size = get_tile(data_end, last_tile, &cm->error, &data);
      TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      buf->data = data;
      buf->size = size;
      data += size;
    }
  }

  // Decode tiles using data from tile_buffers
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const int col = pbi->oxcf.inv_tile_order ? tile_cols - tile_col - 1
                                               : tile_col;
      const int last_tile = tile_row == tile_rows - 1 &&
                            col == tile_cols - 1;
      const TileBuffer *const buf = &tile_buffers[tile_row][col];
      TileInfo tile;

      vp9_tile_init(&tile, cm, tile_row, col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error, &r);
      setup_tile_context(pbi, xd, tile_row, col);
      decode_tile(pbi, &tile, &r);

      if (last_tile)
        end = vp9_reader_find_end(&r);
    }
  }

  return end;
}
static void setup_tile_macroblockd(TileWorkerData *const tile_data) {
  MACROBLOCKD *xd = &tile_data->xd;
  struct macroblockd_plane *const pd = xd->plane;
  int i;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    pd[i].dqcoeff = tile_data->dqcoeff[i];
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));
  }
}
static int tile_worker_hook(void *arg1, void *arg2) {
  TileWorkerData *const tile_data = (TileWorkerData*)arg1;
  const TileInfo *const tile = (TileInfo*)arg2;
  int mi_row, mi_col;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(tile_data->xd.left_context);
    vp9_zero(tile_data->xd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_modes_sb(tile_data->cm, &tile_data->xd, tile,
                      mi_row, mi_col, &tile_data->bit_reader, BLOCK_64X64);
    }
  }
  return !tile_data->xd.corrupted;
}
// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer*)a;
  const TileBuffer *const buf2 = (const TileBuffer*)b;
  if (buf1->size < buf2->size) {
    return 1;
  } else if (buf1->size == buf2->size) {
    return 0;
  } else {
    return -1;
  }
}
static const uint8_t *decode_tiles_mt(VP9D_COMP *pbi, const uint8_t *data) {
  VP9_COMMON *const cm = &pbi->common;
  const uint8_t *bit_reader_end = NULL;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = MIN(pbi->oxcf.max_threads & ~1, tile_cols);
  TileBuffer tile_buffers[1 << 6];
  int n;
  int final_worker = -1;

  assert(tile_cols <= (1 << 6));
  assert(tile_rows == 1);

  if (num_workers > pbi->num_tile_workers) {
    int i;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_realloc(pbi->tile_workers,
                                num_workers * sizeof(*pbi->tile_workers)));
    for (i = pbi->num_tile_workers; i < num_workers; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      ++pbi->num_tile_workers;

      vp9_worker_init(worker);
      CHECK_MEM_ERROR(cm, worker->data1,
                      vpx_memalign(32, sizeof(TileWorkerData)));
      CHECK_MEM_ERROR(cm, worker->data2, vpx_malloc(sizeof(TileInfo)));
      if (i < num_workers - 1 && !vp9_worker_reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Reset tile decoding hook
  for (n = 0; n < pbi->num_tile_workers; ++n) {
    pbi->tile_workers[n].hook = (VP9WorkerHook)tile_worker_hook;
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pbi->above_context[0], 0,
             sizeof(*pbi->above_context[0]) * MAX_MB_PLANE *
             2 * aligned_mi_cols);
  vpx_memset(pbi->above_seg_context, 0,
             sizeof(*pbi->above_seg_context) * aligned_mi_cols);

  // Load tile data into tile_buffers
  for (n = 0; n < tile_cols; ++n) {
    const size_t size =
        get_tile(data_end, n == tile_cols - 1, &cm->error, &data);
    TileBuffer *const buf = &tile_buffers[n];
    buf->data = data;
    buf->size = size;
    buf->col = n;
    data += size;
  }

  // Sort the buffers based on size in descending order.
  qsort(tile_buffers, tile_cols, sizeof(tile_buffers[0]), compare_tile_buffers);

  // Rearrange the tile buffers such that per-tile group the largest, and
  // presumably the most difficult, tile will be decoded in the main thread.
  // This should help minimize the number of instances where the main thread is
  // waiting for a worker to complete.
  {
    int group_start = 0;
    while (group_start < tile_cols) {
      const TileBuffer largest = tile_buffers[group_start];
      const int group_end = MIN(group_start + num_workers, tile_cols) - 1;
      memmove(tile_buffers + group_start, tile_buffers + group_start + 1,
              (group_end - group_start) * sizeof(tile_buffers[0]));
      tile_buffers[group_end] = largest;
      group_start = group_end + 1;
    }
  }

  n = 0;
  while (n < tile_cols) {
    int i;
    for (i = 0; i < num_workers && n < tile_cols; ++i) {
      VP9Worker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      TileBuffer *const buf = &tile_buffers[n];

      tile_data->cm = cm;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      vp9_tile_init(tile, tile_data->cm, 0, buf->col);

      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader);
      setup_tile_context(pbi, &tile_data->xd, 0, buf->col);
      setup_tile_macroblockd(tile_data);

      worker->had_error = 0;
      if (i == num_workers - 1 || n == tile_cols - 1) {
        vp9_worker_execute(worker);
      } else {
        vp9_worker_launch(worker);
      }

      if (buf->col == tile_cols - 1) {
        final_worker = i;
      }

      ++n;
    }

    for (; i > 0; --i) {
      VP9Worker *const worker = &pbi->tile_workers[i - 1];
      pbi->mb.corrupted |= !vp9_worker_sync(worker);
    }
    if (final_worker > -1) {
      TileWorkerData *const tile_data =
          (TileWorkerData*)pbi->tile_workers[final_worker].data1;
      bit_reader_end = vp9_reader_find_end(&tile_data->bit_reader);
      final_worker = -1;
    }
  }

  return bit_reader_end;
}
static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  if (vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_0 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_1 ||
      vp9_rb_read_literal(rb, 8) != VP9_SYNC_CODE_2) {
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame sync code");
  }
}
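// The three sync-code bytes checked above (VP9_SYNC_CODE_0/1/2, i.e.
// 0x49 0x83 0x42) must open every key frame and intra-only frame header.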
static void error_handler(void *data, size_t bit_offset) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}
#define RESERVED \
  if (vp9_rb_read_bit(rb)) \
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
                       "Reserved bit must be unset")
static size_t read_uncompressed_header(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  size_t sz;
  int i;

  cm->last_frame_type = cm->frame_type;

  if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->version = vp9_rb_read_bit(rb);
  RESERVED;

  cm->show_existing_frame = vp9_rb_read_bit(rb);
  if (cm->show_existing_frame) {
    // Show an existing frame directly.
    const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
    ref_cnt_fb(cm->frame_bufs, &cm->new_fb_idx, frame_to_show);
    pbi->refresh_frame_flags = 0;
    cm->lf.filter_level = 0;
    return 0;
  }

  cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
  cm->show_frame = vp9_rb_read_bit(rb);
  cm->error_resilient_mode = vp9_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    check_sync_code(cm, rb);

    cm->color_space = vp9_rb_read_literal(rb, 3);  // colorspace
    if (cm->color_space != SRGB) {
      vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
      if (cm->version == 1) {
        cm->subsampling_x = vp9_rb_read_bit(rb);
        cm->subsampling_y = vp9_rb_read_bit(rb);
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        cm->subsampling_y = cm->subsampling_x = 1;
      }
    } else {
      if (cm->version == 1) {
        cm->subsampling_y = cm->subsampling_x = 0;
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "RGB not supported in profile 0");
      }
    }

    pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;

    for (i = 0; i < REFS_PER_FRAME; ++i) {
      cm->frame_refs[i].idx = cm->new_fb_idx;
      cm->frame_refs[i].buf = get_frame_new_buffer(cm);
    }

    setup_frame_size(pbi, rb);
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      check_sync_code(cm, rb);

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
      setup_frame_size(pbi, rb);
    } else {
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);

      for (i = 0; i < REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
        const int idx = cm->ref_frame_map[ref];
        cm->frame_refs[i].idx = idx;
        cm->frame_refs[i].buf = &cm->frame_bufs[idx].buf;
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(pbi, rb);

      cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->interp_filter = read_interp_filter(rb);

      for (i = 0; i < REFS_PER_FRAME; ++i) {
        RefBuffer *const ref_buf = &cm->frame_refs[i];
        vp9_setup_scale_factors_for_frame(&ref_buf->sf,
                                          ref_buf->buf->y_crop_width,
                                          ref_buf->buf->y_crop_height,
                                          cm->width, cm->height);
        if (vp9_is_scaled(&ref_buf->sf))
          vp9_extend_frame_borders(ref_buf->buf,
                                   cm->subsampling_x, cm->subsampling_y);
      }
    }
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // This flag will be overridden by the call to vp9_setup_past_independence
  // below, forcing the use of context 0 for those frame types.
  cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);

  setup_tile_info(cm, rb);
  sz = vp9_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}
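// read_uncompressed_header() returns the size of the compressed header (the
// 16-bit "first partition size" field read above); a return value of 0 means
// a show_existing_frame header, which carries no coefficient data.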
static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  FRAME_CONTEXT *const fc = &cm->fc;
  vp9_reader r;
  int k;

  if (vp9_reader_init(&r, data, partition_size))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
  if (cm->tx_mode == TX_MODE_SELECT)
    read_tx_mode_probs(&fc->tx_probs, &r);
  read_coef_probs(fc, cm->tx_mode, &r);

  for (k = 0; k < SKIP_CONTEXTS; ++k)
    vp9_diff_update_prob(&r, &fc->skip_probs[k]);

  if (!frame_is_intra_only(cm)) {
    nmv_context *const nmvc = &fc->nmvc;
    int i, j;

    read_inter_mode_probs(fc, &r);

    if (cm->interp_filter == SWITCHABLE)
      read_switchable_interp_probs(fc, &r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);

    cm->reference_mode = read_reference_mode(cm, &r);
    read_reference_mode_probs(cm, &r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);

    for (j = 0; j < PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);

    read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
  }

  return vp9_reader_has_error(&r);
}
void vp9_init_dequantizer(VP9_COMMON *cm) {
  int q;

  for (q = 0; q < QINDEX_RANGE; q++) {
    cm->y_dequant[q][0] = vp9_dc_quant(q, cm->y_dc_delta_q);
    cm->y_dequant[q][1] = vp9_ac_quant(q, 0);

    cm->uv_dequant[q][0] = vp9_dc_quant(q, cm->uv_dc_delta_q);
    cm->uv_dequant[q][1] = vp9_ac_quant(q, cm->uv_ac_delta_q);
  }
}
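// Note: y_dequant[q][1] is built with an AC delta of 0 because the luma AC
// quantizer carries no delta in the VP9 header; only Y DC, UV DC and UV AC
// deltas are coded (see setup_quantization()).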
#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else  // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
  FRAME_COUNTS zero_counts;
  vp9_zero(zero_counts);
  assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
  assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
                 sizeof(cm->counts.y_mode)));
  assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
                 sizeof(cm->counts.uv_mode)));
  assert(!memcmp(cm->counts.partition, zero_counts.partition,
                 sizeof(cm->counts.partition)));
  assert(!memcmp(cm->counts.coef, zero_counts.coef,
                 sizeof(cm->counts.coef)));
  assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                 sizeof(cm->counts.eob_branch)));
  assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
                 sizeof(cm->counts.switchable_interp)));
  assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
                 sizeof(cm->counts.inter_mode)));
  assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
                 sizeof(cm->counts.intra_inter)));
  assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
                 sizeof(cm->counts.comp_inter)));
  assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
                 sizeof(cm->counts.single_ref)));
  assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
                 sizeof(cm->counts.comp_ref)));
  assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
  assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif  // NDEBUG
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
  int i;
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  const uint8_t *data = pbi->source;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;

  struct vp9_read_bit_buffer rb = { data, data_end, 0, cm, error_handler };
  const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
  const int keyframe = cm->frame_type == KEY_FRAME;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
  xd->cur_buf = new_fb;

  if (!first_partition_size) {
    // showing a frame directly
    *p_data_end = data + 1;
    return 0;
  }

  if (!pbi->decoded_key_frame && !keyframe)
    return -1;

  data += vp9_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  pbi->do_loopfilter_inline =
      (cm->log2_tile_rows | cm->log2_tile_cols) == 0 && cm->lf.filter_level;
  if (pbi->do_loopfilter_inline && pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, vpx_malloc(sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VP9WorkerHook)vp9_loop_filter_worker;
    if (pbi->oxcf.max_threads > 1 && !vp9_worker_reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  alloc_tile_storage(pbi, tile_rows, tile_cols);

  xd->mode_info_stride = cm->mode_info_stride;

  setup_plane_dequants(cm, xd, cm->base_qindex);
  vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);

  cm->fc = cm->frame_contexts[cm->frame_context_idx];
  vp9_zero(cm->counts);
  for (i = 0; i < MAX_MB_PLANE; ++i)
    vpx_memset(xd->plane[i].dqcoeff, 0, 64 * 64 * sizeof(int16_t));

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);

  // TODO(jzern): remove frame_parallel_decoding_mode restriction for
  // single-frame tile decoding.
  if (pbi->oxcf.max_threads > 1 && tile_rows == 1 && tile_cols > 1 &&
      cm->frame_parallel_decoding_mode) {
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size);
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size);
  }

  new_fb->corrupted |= xd->corrupted;

  if (!pbi->decoded_key_frame) {
    if (keyframe && !new_fb->corrupted)
      pbi->decoded_key_frame = 1;
    else
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "A stream must start with a complete key frame");
  }

  if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
    vp9_adapt_coef_probs(cm);

    if (!frame_is_intra_only(cm)) {
      vp9_adapt_mode_probs(cm);
      vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
    }
  } else {
    debug_check_frame_counts(cm);
  }

  if (cm->refresh_frame_context)
    cm->frame_contexts[cm->frame_context_idx] = cm->fc;

  return 0;
}