/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>
#include <stdlib.h>  // qsort()

#include "./vp9_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_util/vpx_thread.h"

#include "vp9/common/vp9_alloccommon.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_thread_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#include "vp9/decoder/vp9_reader.h"

#define MAX_VP9_HEADER_SIZE 80

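// Compound prediction is allowed only when the references available to a
// frame point in opposite temporal directions, i.e. their sign biases
// differ. The two helpers below check that condition and pick which
// reference stays fixed and which two may vary in compound modes.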
static int is_compound_reference_allowed(const VP9_COMMON *cm) {
  int i;
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])
      return 1;

  return 0;
}

static void setup_compound_reference_mode(VP9_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
          cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
                 cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  } else {
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
  }
}

static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);
}

static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}

static TX_MODE read_tx_mode(vp9_reader *r) {
  TX_MODE tx_mode = vp9_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vp9_read_bit(r);
  return tx_mode;
}

static void read_tx_mode_probs(struct tx_probs *tx_probs, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp9_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp9_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp9_diff_update_prob(r, &tx_probs->p32x32[i][j]);
}

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp9_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
}

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp9_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
}

static REFERENCE_MODE read_frame_reference_mode(const VP9_COMMON *cm,
                                                vp9_reader *r) {
  if (is_compound_reference_allowed(cm)) {
    return vp9_read_bit(r) ? (vp9_read_bit(r) ? REFERENCE_MODE_SELECT
                                              : COMPOUND_REFERENCE)
                           : SINGLE_REFERENCE;
  } else {
    return SINGLE_REFERENCE;
  }
}

static void read_frame_reference_mode_probs(VP9_COMMON *cm, vp9_reader *r) {
  FRAME_CONTEXT *const fc = cm->fc;
  int i;

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i) {
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][0]);
      vp9_diff_update_prob(r, &fc->single_ref_prob[i][1]);
    }

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i)
      vp9_diff_update_prob(r, &fc->comp_ref_prob[i]);
}

static void update_mv_probs(vp9_prob *p, int n, vp9_reader *r) {
  int i;
  for (i = 0; i < n; ++i)
    if (vp9_read(r, MV_UPDATE_PROB))
      p[i] = (vp9_read_literal(r, 7) << 1) | 1;
}

static void read_mv_probs(nmv_context *ctx, int allow_hp, vp9_reader *r) {
  int i, j;

  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);
  }

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);
  }

  if (allow_hp) {
    for (i = 0; i < 2; ++i) {
      nmv_component *const comp_ctx = &ctx->comps[i];
      update_mv_probs(&comp_ctx->class0_hp, 1, r);
      update_mv_probs(&comp_ctx->hp, 1, r);
    }
  }
}

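// Applies the inverse transform selected by tx_size/tx_type and adds the
// result onto the prediction in 'dst'. Afterwards only the dequantized
// coefficients that could actually have been written (bounded by 'eob')
// are cleared, which is cheaper than zeroing the whole buffer per block.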
static void inverse_transform_block(MACROBLOCKD* xd, int plane, int block,
                                    TX_SIZE tx_size, uint8_t *dst, int stride,
                                    int eob) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  if (eob > 0) {
    TX_TYPE tx_type = DCT_DCT;
    tran_low_t *const dqcoeff = pd->dqcoeff;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      if (xd->lossless) {
        vp9_highbd_iwht4x4_add(dqcoeff, dst, stride, eob, xd->bd);
      } else {
        const PLANE_TYPE plane_type = pd->plane_type;
        switch (tx_size) {
          case TX_4X4:
            tx_type = get_tx_type_4x4(plane_type, xd, block);
            vp9_highbd_iht4x4_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
            break;
          case TX_8X8:
            tx_type = get_tx_type(plane_type, xd);
            vp9_highbd_iht8x8_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
            break;
          case TX_16X16:
            tx_type = get_tx_type(plane_type, xd);
            vp9_highbd_iht16x16_add(tx_type, dqcoeff, dst, stride, eob, xd->bd);
            break;
          case TX_32X32:
            vp9_highbd_idct32x32_add(dqcoeff, dst, stride, eob, xd->bd);
            break;
          default:
            assert(0 && "Invalid transform size");
        }
      }
    } else {
      if (xd->lossless) {
        vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
      } else {
        const PLANE_TYPE plane_type = pd->plane_type;
        switch (tx_size) {
          case TX_4X4:
            tx_type = get_tx_type_4x4(plane_type, xd, block);
            vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
            break;
          case TX_8X8:
            tx_type = get_tx_type(plane_type, xd);
            vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
            break;
          case TX_16X16:
            tx_type = get_tx_type(plane_type, xd);
            vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
            break;
          case TX_32X32:
            vp9_idct32x32_add(dqcoeff, dst, stride, eob);
            break;
          default:
            assert(0 && "Invalid transform size");
        }
      }
    }
#else
    if (xd->lossless) {
      vp9_iwht4x4_add(dqcoeff, dst, stride, eob);
    } else {
      const PLANE_TYPE plane_type = pd->plane_type;
      switch (tx_size) {
        case TX_4X4:
          tx_type = get_tx_type_4x4(plane_type, xd, block);
          vp9_iht4x4_add(tx_type, dqcoeff, dst, stride, eob);
          break;
        case TX_8X8:
          tx_type = get_tx_type(plane_type, xd);
          vp9_iht8x8_add(tx_type, dqcoeff, dst, stride, eob);
          break;
        case TX_16X16:
          tx_type = get_tx_type(plane_type, xd);
          vp9_iht16x16_add(tx_type, dqcoeff, dst, stride, eob);
          break;
        case TX_32X32:
          vp9_idct32x32_add(dqcoeff, dst, stride, eob);
          break;
        default:
          assert(0 && "Invalid transform size");
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH

    if (eob == 1) {
      memset(dqcoeff, 0, 2 * sizeof(dqcoeff[0]));
    } else {
      if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
        memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
      else if (tx_size == TX_32X32 && eob <= 34)
        memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      else
        memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));
    }
  }
}

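// Called once per transform block of an intra-coded block: runs spatial
// prediction for the block and, unless the block is coded as skipped,
// decodes its tokens and applies the inverse transform on top of the
// prediction.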
static void predict_and_reconstruct_intra_block(int plane, int block,
                                                BLOCK_SIZE plane_bsize,
                                                TX_SIZE tx_size, void *arg) {
  struct intra_args *const args = (struct intra_args *)arg;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mi[0];
  const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block)
                                            : mi->mbmi.uv_mode;
  int x, y;
  uint8_t *dst;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  dst = &pd->dst.buf[4 * y * pd->dst.stride + 4 * x];

  vp9_predict_intra_block(xd, block >> (tx_size << 1),
                          b_width_log2_lookup[plane_bsize], tx_size, mode,
                          dst, pd->dst.stride, dst, pd->dst.stride,
                          x, y, plane);

  if (!mi->mbmi.skip) {
    const int eob = vp9_decode_block_tokens(xd, plane, block,
                                            plane_bsize, x, y, tx_size,
                                            args->r, args->seg_id);
    inverse_transform_block(xd, plane, block, tx_size, dst, pd->dst.stride,
                            eob);
  }
}

static void reconstruct_inter_block(int plane, int block,
                                    BLOCK_SIZE plane_bsize,
                                    TX_SIZE tx_size, void *arg) {
  struct inter_args *args = (struct inter_args *)arg;
  MACROBLOCKD *const xd = args->xd;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  int x, y, eob;
  txfrm_block_to_raster_xy(plane_bsize, tx_size, block, &x, &y);
  eob = vp9_decode_block_tokens(xd, plane, block, plane_bsize,
                                x, y, tx_size, args->r, args->seg_id);
  inverse_transform_block(xd, plane, block, tx_size,
                          &pd->dst.buf[4 * y * pd->dst.stride + 4 * x],
                          pd->dst.stride, eob);
  *args->eobtotal += eob;
}

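// When a prediction block reads outside the reference frame, the border is
// synthesized by clamping: rows above/below the frame repeat the first/last
// valid row, and pixels left/right of the frame repeat that row's edge
// pixel.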
static void build_mc_border(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            int x, int y, int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint8_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w) left = b_w;
    if (x + b_w > w) right = x + b_w - w;
    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left)
      memset(dst, ref_row[0], left);

    if (copy)
      memcpy(dst + left, ref_row + x + left, copy);

    if (right)
      memset(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h)
      ref_row += src_stride;
  } while (--b_h);
}

#if CONFIG_VP9_HIGHBITDEPTH
static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                 uint16_t *dst, int dst_stride,
                                 int x, int y, int b_w, int b_h,
                                 int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w) left = b_w;
    if (x + b_w > w) right = x + b_w - w;
    if (right > b_w) right = b_w;

    copy = b_w - left - right;

    if (left)
      vpx_memset16(dst, ref_row[0], left);

    if (copy)
      memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));

    if (right)
      vpx_memset16(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h)
      ref_row += src_stride;
  } while (--b_h);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               int border_offset,
                               uint8_t *const dst, int dst_buf_stride,
                               int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf,
                               MACROBLOCKD *xd,
                               int w, int h, int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]);
  const uint8_t *buf_ptr;

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w,
                         x0, y0, b_w, b_h, frame_width, frame_height);
    buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset;
  } else {
    build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w,
                    x0, y0, b_w, b_h, frame_width, frame_height);
    buf_ptr = ((uint8_t *)mc_buf_high) + border_offset;
  }

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                         subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
  } else {
    inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
  }
}
#else
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               int border_offset,
                               uint8_t *const dst, int dst_buf_stride,
                               int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf,
                               int w, int h, int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
  const uint8_t *buf_ptr;

  build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w,
                  x0, y0, b_w, b_h, frame_width, frame_height);
  buf_ptr = mc_buf + border_offset;

  inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                  subpel_y, sf, w, h, ref, kernel, xs, ys);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

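// Builds the inter prediction for one block of one plane: computes the
// (possibly scaled) reference position and subpel offsets, waits for the
// needed reference rows in frame-parallel decoding, extends the border when
// the block reads outside the reference frame, and finally runs the subpel
// interpolation kernel.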
static void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
                                       int plane, int bw, int bh, int x,
                                       int y, int w, int h, int mi_x, int mi_y,
                                       const InterpKernel *kernel,
                                       const struct scale_factors *sf,
                                       struct buf_2d *pre_buf,
                                       struct buf_2d *dst_buf, const MV* mv,
                                       RefCntBuffer *ref_frame_buf,
                                       int is_scaled, int ref) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
  MV32 scaled_mv;
  int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height,
      buf_stride, subpel_x, subpel_y;
  uint8_t *ref_frame, *buf_ptr;

  // Get reference frame pointer, width and height.
  if (plane == 0) {
    frame_width = ref_frame_buf->buf.y_crop_width;
    frame_height = ref_frame_buf->buf.y_crop_height;
    ref_frame = ref_frame_buf->buf.y_buffer;
  } else {
    frame_width = ref_frame_buf->buf.uv_crop_width;
    frame_height = ref_frame_buf->buf.uv_crop_height;
    ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
                           : ref_frame_buf->buf.v_buffer;
  }

  if (is_scaled) {
    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh,
                                               pd->subsampling_x,
                                               pd->subsampling_y);
    // Co-ordinate of containing block to pixel precision.
    int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
    int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = (x_start + x) << SUBPEL_BITS;
    y0_16 = (y_start + y) << SUBPEL_BITS;

    // Co-ordinate of current block in reference frame
    // to 1/16th pixel precision.
    x0_16 = sf->scale_value_x(x0_16, sf);
    y0_16 = sf->scale_value_y(y0_16, sf);

    // Map the top left corner of the block into the reference frame.
    x0 = sf->scale_value_x(x_start + x, sf);
    y0 = sf->scale_value_y(y_start + y, sf);

    // Scale the MV and incorporate the sub-pixel offset of the block
    // in the reference frame.
    scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
    xs = sf->x_step_q4;
    ys = sf->y_step_q4;
  } else {
    // Co-ordinate of containing block to pixel precision.
    x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
    y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = x0 << SUBPEL_BITS;
    y0_16 = y0 << SUBPEL_BITS;

    scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y));
    scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x));
    xs = ys = 16;
  }
  subpel_x = scaled_mv.col & SUBPEL_MASK;
  subpel_y = scaled_mv.row & SUBPEL_MASK;

  // Calculate the top left corner of the best matching block in the
  // reference frame.
  x0 += scaled_mv.col >> SUBPEL_BITS;
  y0 += scaled_mv.row >> SUBPEL_BITS;
  x0_16 += scaled_mv.col;
  y0_16 += scaled_mv.row;

  // Get reference block pointer.
  buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
  buf_stride = pre_buf->stride;

  // Do border extension if there is motion or the
  // width/height is not a multiple of 8 pixels.
  if (is_scaled || scaled_mv.col || scaled_mv.row ||
      (frame_width & 0x7) || (frame_height & 0x7)) {
    int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;

    // Get reference block bottom right horizontal coordinate.
    int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
    int x_pad = 0, y_pad = 0;

    if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
      x0 -= VP9_INTERP_EXTEND - 1;
      x1 += VP9_INTERP_EXTEND;
      x_pad = 1;
    }

    if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
      y0 -= VP9_INTERP_EXTEND - 1;
      y1 += VP9_INTERP_EXTEND;
      y_pad = 1;
    }

    // Wait until reference block is ready. Pad 7 more pixels as last 7
    // pixels of each superblock row can be changed by next superblock row.
    if (pbi->frame_parallel_decode)
      vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                           MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));

    // Skip border extension if block is inside the frame.
    if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
        y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
      // Extend the border.
      const uint8_t *const buf_ptr1 = ref_frame + y0 * buf_stride + x0;
      const int b_w = x1 - x0 + 1;
      const int b_h = y1 - y0 + 1;
      const int border_offset = y_pad * 3 * b_w + x_pad * 3;

      extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
                         frame_width, frame_height, border_offset,
                         dst, dst_buf->stride,
                         subpel_x, subpel_y,
                         kernel, sf,
#if CONFIG_VP9_HIGHBITDEPTH
                         xd,
#endif
                         w, h, ref, xs, ys);
      return;
    }
  } else {
    // Wait until reference block is ready. Pad 7 more pixels as last 7
    // pixels of each superblock row can be changed by next superblock row.
    if (pbi->frame_parallel_decode) {
      const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
      vp9_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                           MAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
    }
  }
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                         subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
  } else {
    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
  }
#else
  inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                  subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
}

static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
                                          MACROBLOCKD *xd,
                                          int mi_row, int mi_col,
                                          BLOCK_SIZE bsize) {
  int plane;
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  const MODE_INFO *mi = xd->mi[0];
  const InterpKernel *kernel = vp9_filter_kernels[mi->mbmi.interp_filter];
  const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
  const int is_compound = has_second_ref(&mi->mbmi);

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize,
                                                        &xd->plane[plane]);
    struct macroblockd_plane *const pd = &xd->plane[plane];
    struct buf_2d *const dst_buf = &pd->dst;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];

    const int bw = 4 * num_4x4_w;
    const int bh = 4 * num_4x4_h;
    int ref;

    for (ref = 0; ref < 1 + is_compound; ++ref) {
      const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
      struct buf_2d *const pre_buf = &pd->pre[ref];
      const int idx = xd->block_refs[ref]->idx;
      BufferPool *const pool = pbi->common.buffer_pool;
      RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
      const int is_scaled = vp9_is_scaled(sf);

      if (sb_type < BLOCK_8X8) {
        int i = 0, x, y;
        assert(bsize == BLOCK_8X8);
        for (y = 0; y < num_4x4_h; ++y) {
          for (x = 0; x < num_4x4_w; ++x) {
            const MV mv = average_split_mvs(pd, mi, ref, i++);
            dec_build_inter_predictors(pbi, xd, plane, bw, bh,
                                       4 * x, 4 * y, 4, 4, mi_x, mi_y, kernel,
                                       sf, pre_buf, dst_buf, &mv,
                                       ref_frame_buf, is_scaled, ref);
          }
        }
      } else {
        const MV mv = mi->mbmi.mv[ref].as_mv;
        dec_build_inter_predictors(pbi, xd, plane, bw, bh,
                                   0, 0, bw, bh, mi_x, mi_y, kernel,
                                   sf, pre_buf, dst_buf, &mv, ref_frame_buf,
                                   is_scaled, ref);
      }
    }
  }
}

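// Points xd->mi at the mode-info grid entry for this block, replicates the
// top-left MODE_INFO pointer across every 8x8 unit the block covers, and
// configures the destination planes and frame-edge distances for this
// position.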
static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col) {
  const int bw = num_8x8_blocks_wide_lookup[bsize];
  const int bh = num_8x8_blocks_high_lookup[bsize];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  const int offset = mi_row * cm->mi_stride + mi_col;
  int x, y;
  const TileInfo *const tile = &xd->tile;

  xd->mi = cm->mi_grid_visible + offset;
  xd->mi[0] = &cm->mi[offset];
  xd->mi[0]->mbmi.sb_type = bsize;
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x) {
      xd->mi[y * cm->mi_stride + x] = xd->mi[0];
    }

  set_skip_context(xd, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
  return &xd->mi[0]->mbmi;
}

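// Decodes a single coding block: reads its mode info, then either intra
// predicts and reconstructs each transform block, or builds the inter
// prediction for the whole block and reconstructs the residual when the
// block is not skipped.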
static void decode_block(VP9Decoder *const pbi, MACROBLOCKD *const xd,
                         int mi_row, int mi_col,
                         vp9_reader *r, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &pbi->common;
  const int less8x8 = bsize < BLOCK_8X8;
  MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col);

  if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
    const BLOCK_SIZE uv_subsize =
        ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
    if (uv_subsize == BLOCK_INVALID)
      vpx_internal_error(xd->error_info,
                         VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");
  }

  vp9_read_mode_info(pbi, xd, mi_row, mi_col, r);

  if (less8x8)
    bsize = BLOCK_8X8;

  if (mbmi->skip) {
    reset_skip_context(xd, bsize);
  }

  if (!is_inter_block(mbmi)) {
    struct intra_args arg = {xd, r, mbmi->segment_id};
    vp9_foreach_transformed_block(xd, bsize,
                                  predict_and_reconstruct_intra_block, &arg);
  } else {
    // Prediction
    dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col, bsize);

    // Reconstruction
    if (!mbmi->skip) {
      int eobtotal = 0;
      struct inter_args arg = {xd, r, &eobtotal, mbmi->segment_id};
      vp9_foreach_transformed_block(xd, bsize, reconstruct_inter_block, &arg);
      if (!less8x8 && eobtotal == 0)
        mbmi->skip = 1;  // skip loopfilter
    }
  }

  xd->corrupted |= vp9_reader_has_error(r);
}

static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize, vp9_reader *r,
                                     int has_rows, int has_cols) {
  const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  const vp9_prob *const probs = get_partition_probs(xd, ctx);
  FRAME_COUNTS *counts = xd->counts;
  PARTITION_TYPE p;

  if (has_rows && has_cols)
    p = (PARTITION_TYPE)vp9_read_tree(r, vp9_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vp9_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vp9_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;
  else
    p = PARTITION_SPLIT;

  if (counts)
    ++counts->partition[ctx][p];

  return p;
}

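// Recursively walks the partition tree from 64x64 superblocks down to 8x8,
// decoding one block for NONE/HORZ/VERT partitions and recursing on all
// four quadrants for SPLIT. The second block of HORZ/VERT partitions is
// decoded only when it lies inside the frame.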
static void decode_partition(VP9Decoder *const pbi, MACROBLOCKD *const xd,
                             int mi_row, int mi_col,
                             vp9_reader* r, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &pbi->common;
  const int hbs = num_8x8_blocks_wide_lookup[bsize] / 2;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  partition = read_partition(xd, mi_row, mi_col, bsize, r, has_rows, has_cols);
  subsize = get_subsize(bsize, partition);
  if (bsize == BLOCK_8X8) {
    decode_block(pbi, xd, mi_row, mi_col, r, subsize);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        decode_block(pbi, xd, mi_row, mi_col, r, subsize);
        break;
      case PARTITION_HORZ:
        decode_block(pbi, xd, mi_row, mi_col, r, subsize);
        if (has_rows)
          decode_block(pbi, xd, mi_row + hbs, mi_col, r, subsize);
        break;
      case PARTITION_VERT:
        decode_block(pbi, xd, mi_row, mi_col, r, subsize);
        if (has_cols)
          decode_block(pbi, xd, mi_row, mi_col + hbs, r, subsize);
        break;
      case PARTITION_SPLIT:
        decode_partition(pbi, xd, mi_row, mi_col, r, subsize);
        decode_partition(pbi, xd, mi_row, mi_col + hbs, r, subsize);
        decode_partition(pbi, xd, mi_row + hbs, mi_col, r, subsize);
        decode_partition(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize);
        break;
      default:
        assert(0 && "Invalid partition type");
    }
  }

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                size_t read_size,
                                struct vpx_internal_error_info *error_info,
                                vp9_reader *r,
                                vpx_decrypt_cb decrypt_cb,
                                void *decrypt_state) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vp9_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}

static void read_coef_probs_common(vp9_coeff_probs_model *coef_probs,
                                   vp9_reader *r) {
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < PLANE_TYPES; ++i)
      for (j = 0; j < REF_TYPES; ++j)
        for (k = 0; k < COEF_BANDS; ++k)
          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
            for (m = 0; m < UNCONSTRAINED_NODES; ++m)
              vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
                            vp9_reader *r) {
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  TX_SIZE tx_size;
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
}

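// Parses the segmentation syntax: whether the segment map and/or per-segment
// feature data are updated this frame, the tree and prediction probabilities
// used to code the map, and the feature values themselves.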
static void setup_segmentation(struct segmentation *seg,
                               struct vp9_read_bit_buffer *rb) {
  int i, j;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vp9_rb_read_bit(rb);
  if (!seg->enabled)
    return;

  // Segmentation map update
  seg->update_map = vp9_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                               : MAX_PROB;

    seg->temporal_update = vp9_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vp9_rb_read_bit(rb) ? vp9_rb_read_literal(rb, 8)
                                                 : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  seg->update_data = vp9_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(seg, i, j, data);
      }
    }
  }
}

static void setup_loopfilter(struct loopfilter *lf,
                             struct vp9_read_bit_buffer *rb) {
  lf->filter_level = vp9_rb_read_literal(rb, 6);
  lf->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vp9_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vp9_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->ref_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          lf->mode_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}

static INLINE int read_delta_q(struct vp9_read_bit_buffer *rb) {
  return vp9_rb_read_bit(rb) ? vp9_rb_read_signed_literal(rb, 4) : 0;
}

static void setup_quantization(VP9_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vp9_read_bit_buffer *rb) {
  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  cm->y_dc_delta_q = read_delta_q(rb);
  cm->uv_dc_delta_q = read_delta_q(rb);
  cm->uv_ac_delta_q = read_delta_q(rb);
  cm->dequant_bit_depth = cm->bit_depth;
  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

#if CONFIG_VP9_HIGHBITDEPTH
  xd->bd = (int)cm->bit_depth;
#endif
}

static void setup_segmentation_dequant(VP9_COMMON *const cm) {
  // Build y/uv dequant values based on segmentation.
  if (cm->seg.enabled) {
    int i;
    for (i = 0; i < MAX_SEGMENTS; ++i) {
      const int qindex = vp9_get_qindex(&cm->seg, i, cm->base_qindex);
      cm->y_dequant[i][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q,
                                         cm->bit_depth);
      cm->y_dequant[i][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
      cm->uv_dequant[i][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q,
                                          cm->bit_depth);
      cm->uv_dequant[i][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q,
                                          cm->bit_depth);
    }
  } else {
    const int qindex = cm->base_qindex;
    // When segmentation is disabled, only the first value is used.  The
    // remaining are don't cares.
    cm->y_dequant[0][0] = vp9_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
    cm->y_dequant[0][1] = vp9_ac_quant(qindex, 0, cm->bit_depth);
    cm->uv_dequant[0][0] = vp9_dc_quant(qindex, cm->uv_dc_delta_q,
                                        cm->bit_depth);
    cm->uv_dequant[0][1] = vp9_ac_quant(qindex, cm->uv_ac_delta_q,
                                        cm->bit_depth);
  }
}

static INTERP_FILTER read_interp_filter(struct vp9_read_bit_buffer *rb) {
  const INTERP_FILTER literal_to_filter[] = { EIGHTTAP_SMOOTH,
                                              EIGHTTAP,
                                              EIGHTTAP_SHARP,
                                              BILINEAR };
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : literal_to_filter[vp9_rb_read_literal(rb, 2)];
}

static void setup_display_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    vp9_read_frame_size(rb, &cm->display_width, &cm->display_height);
}

static void resize_mv_buffer(VP9_COMMON *cm) {
  vpx_free(cm->cur_frame->mvs);
  cm->cur_frame->mi_rows = cm->mi_rows;
  cm->cur_frame->mi_cols = cm->mi_cols;
  cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
                                            sizeof(*cm->cur_frame->mvs));
}

static void resize_context_buffers(VP9_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
#endif
  if (cm->width != width || cm->height != height) {
    const int new_mi_rows =
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
    const int new_mi_cols =
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;

    // Allocations in vp9_alloc_context_buffers() depend on individual
    // dimensions as well as the overall size.
    if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
      if (vp9_alloc_context_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate context buffers");
    } else {
      vp9_set_mb_mi(cm, width, height);
    }
    vp9_init_context_buffers(cm);
    cm->width = width;
    cm->height = height;
  }
  if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
      cm->mi_cols > cm->cur_frame->mi_cols) {
    resize_mv_buffer(cm);
  }
}

static void setup_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int width, height;
  BufferPool *const pool = cm->buffer_pool;
  vp9_read_frame_size(rb, &width, &height);
  resize_context_buffers(cm, width, height);
  setup_display_size(cm, rb);

  lock_buffer_pool(pool);
  if (vp9_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height,
          cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
          cm->use_highbitdepth,
#endif
          VP9_DEC_BORDER_IN_PIXELS,
          cm->byte_alignment,
          &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
          pool->cb_priv)) {
    unlock_buffer_pool(pool);
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  }
  unlock_buffer_pool(pool);

  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
  pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
  pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
}

static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
                                          int ref_xss, int ref_yss,
                                          vpx_bit_depth_t this_bit_depth,
                                          int this_xss, int this_yss) {
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
         ref_yss == this_yss;
}

static void setup_frame_size_with_refs(VP9_COMMON *cm,
                                       struct vp9_read_bit_buffer *rb) {
  int width, height;
  int found = 0, i;
  int has_valid_ref_frame = 0;
  BufferPool *const pool = cm->buffer_pool;
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
      width = buf->y_crop_width;
      height = buf->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    vp9_read_frame_size(rb, &width, &height);

  if (width <= 0 || height <= 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid frame size");

  // Check to make sure at least one of frames that this frame references
  // has valid dimensions.
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
                                                ref_frame->buf->y_crop_height,
                                                width, height);
  }
  if (!has_valid_ref_frame)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame has invalid size");
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    if (!valid_ref_frame_img_fmt(
            ref_frame->buf->bit_depth,
            ref_frame->buf->subsampling_x,
            ref_frame->buf->subsampling_y,
            cm->bit_depth,
            cm->subsampling_x,
            cm->subsampling_y))
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "Referenced frame has incompatible color format");
  }

  resize_context_buffers(cm, width, height);
  setup_display_size(cm, rb);

  lock_buffer_pool(pool);
  if (vp9_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height,
          cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
          cm->use_highbitdepth,
#endif
          VP9_DEC_BORDER_IN_PIXELS,
          cm->byte_alignment,
          &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
          pool->cb_priv)) {
    unlock_buffer_pool(pool);
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  }
  unlock_buffer_pool(pool);

  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
  pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
  pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
}

static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp9_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  // columns
  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vp9_rb_read_bit(rb))
    cm->log2_tile_cols++;

  if (cm->log2_tile_cols > 6)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid number of tile columns");

  // rows
  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);
}

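// Every tile except the last is preceded by a 4-byte big-endian size field;
// the last tile simply runs to the end of the packet. The helpers below
// split the tile-data partition into per-tile buffers.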
typedef struct TileBuffer {
  const uint8_t *data;
  size_t size;
  int col;  // only used with multi-threaded decoding
} TileBuffer;

// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
                            int is_last,
                            struct vpx_internal_error_info *error_info,
                            const uint8_t **data,
                            vpx_decrypt_cb decrypt_cb, void *decrypt_state,
                            TileBuffer *buf) {
  size_t size;

  if (!is_last) {
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

    if (decrypt_cb) {
      uint8_t be_data[4];
      decrypt_cb(decrypt_state, *data, be_data, 4);
      size = mem_get_be32(be_data);
    } else {
      size = mem_get_be32(*data);
    }
    *data += 4;

    if (size > (size_t)(data_end - *data))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile size");
  } else {
    size = data_end - *data;
  }

  buf->data = *data;
  buf->size = size;

  *data += size;
}

static void get_tile_buffers(VP9Decoder *pbi,
                             const uint8_t *data, const uint8_t *data_end,
                             int tile_cols, int tile_rows,
                             TileBuffer (*tile_buffers)[1 << 6]) {
  int r, c;

  for (r = 0; r < tile_rows; ++r) {
    for (c = 0; c < tile_cols; ++c) {
      const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
      TileBuffer *const buf = &tile_buffers[r][c];
      buf->col = c;
      get_tile_buffer(data_end, is_last, &pbi->common.error, &data,
                      pbi->decrypt_cb, pbi->decrypt_state, buf);
    }
  }
}

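// Single-threaded tile decoding. The decoder walks each superblock row
// across all tile columns, optionally handing completed rows to the
// loop-filter worker so filtering overlaps with decoding of the next row.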
static const uint8_t *decode_tiles(VP9Decoder *pbi,
                                   const uint8_t *data,
                                   const uint8_t *data_end) {
  VP9_COMMON *const cm = &pbi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  int mi_row, mi_col;
  TileData *tile_data = NULL;

  if (cm->lf.filter_level && !cm->skip_loop_filter &&
      pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
                    vpx_memalign(32, sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VPxWorkerHook)vp9_loop_filter_worker;
    if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");
    }
  }

  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    // Be sure to sync as we might be resuming after a failed frame decode.
    winterface->sync(&pbi->lf_worker);
    vp9_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
                               pbi->mb.plane);
  }

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(cm->above_context, 0,
         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * aligned_cols);

  get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);

  if (pbi->tile_data == NULL ||
      (tile_cols * tile_rows) != pbi->total_tiles) {
    vpx_free(pbi->tile_data);
    CHECK_MEM_ERROR(
        cm,
        pbi->tile_data,
        vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data))));
    pbi->total_tiles = tile_rows * tile_cols;
  }

  // Load all tile information into tile_data.
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      tile_data = pbi->tile_data + tile_cols * tile_row + tile_col;
      tile_data->cm = cm;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      tile_data->xd.counts = cm->frame_parallel_decoding_mode ?
                             NULL : &cm->counts;
      vp9_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader, pbi->decrypt_cb,
                          pbi->decrypt_state);
      vp9_init_macroblockd(cm, &tile_data->xd);
    }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    TileInfo tile;
    vp9_tile_set_row(&tile, cm, tile_row);
    for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
         mi_row += MI_BLOCK_SIZE) {
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        const int col = pbi->inv_tile_order ?
                        tile_cols - tile_col - 1 : tile_col;
        tile_data = pbi->tile_data + tile_cols * tile_row + col;
        vp9_tile_set_col(&tile, tile_data->cm, col);
        vp9_zero(tile_data->xd.left_context);
        vp9_zero(tile_data->xd.left_seg_context);
        for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
             mi_col += MI_BLOCK_SIZE) {
          decode_partition(pbi, &tile_data->xd, mi_row, mi_col,
                           &tile_data->bit_reader, BLOCK_64X64);
        }
        pbi->mb.corrupted |= tile_data->xd.corrupted;
        if (pbi->mb.corrupted)
          vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                             "Failed to decode tile data");
      }
      // Loopfilter one row.
      if (cm->lf.filter_level && !cm->skip_loop_filter) {
        const int lf_start = mi_row - MI_BLOCK_SIZE;
        LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

        // delay the loopfilter by 1 macroblock row.
        if (lf_start < 0) continue;

        // decoding has completed: finish up the loop filter in this thread.
        if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;

        winterface->sync(&pbi->lf_worker);
        lf_data->start = lf_start;
        lf_data->stop = mi_row;
        if (pbi->max_threads > 1) {
          winterface->launch(&pbi->lf_worker);
        } else {
          winterface->execute(&pbi->lf_worker);
        }
      }
      // After loopfiltering, the last 7 row pixels in each superblock row may
      // still be changed by the longest loopfilter of the next superblock
      // row.
      if (pbi->frame_parallel_decode)
        vp9_frameworker_broadcast(pbi->cur_buf,
                                  mi_row << MI_BLOCK_SIZE_LOG2);
    }
  }

  // Loopfilter remaining rows in the frame.
  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    winterface->sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    winterface->execute(&pbi->lf_worker);
  }

  // Get last tile data.
  tile_data = pbi->tile_data + tile_cols * tile_rows - 1;

  if (pbi->frame_parallel_decode)
    vp9_frameworker_broadcast(pbi->cur_buf, INT_MAX);
  return vp9_reader_find_end(&tile_data->bit_reader);
}

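// Entry point run on each tile worker thread. Decode errors long-jump back
// here so the worker can mark its tile corrupt and return failure instead
// of tearing down the process.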
static int tile_worker_hook(TileWorkerData *const tile_data,
                            const TileInfo *const tile) {
  int mi_row, mi_col;

  if (setjmp(tile_data->error_info.jmp)) {
    tile_data->error_info.setjmp = 0;
    tile_data->xd.corrupted = 1;
    return 0;
  }

  tile_data->error_info.setjmp = 1;
  tile_data->xd.error_info = &tile_data->error_info;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp9_zero(tile_data->xd.left_context);
    vp9_zero(tile_data->xd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_partition(tile_data->pbi, &tile_data->xd,
                       mi_row, mi_col, &tile_data->bit_reader,
                       BLOCK_64X64);
    }
  }
  return !tile_data->xd.corrupted;
}

// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer*)a;
  const TileBuffer *const buf2 = (const TileBuffer*)b;
  return (int)(buf2->size - buf1->size);
}

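// Multi-threaded tile decoding for streams with a single tile row. Workers
// are launched in batches of num_workers; see the buffer rearrangement
// below for how tiles are assigned to threads.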
static const uint8_t *decode_tiles_mt(VP9Decoder *pbi,
                                      const uint8_t *data,
                                      const uint8_t *data_end) {
  VP9_COMMON *const cm = &pbi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  const uint8_t *bit_reader_end = NULL;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = MIN(pbi->max_threads & ~1, tile_cols);
  TileBuffer tile_buffers[1][1 << 6];
  int i, n;
  int final_worker = -1;

  assert(tile_cols <= (1 << 6));
  assert(tile_rows == 1);

  // TODO(jzern): See if we can remove the restriction of passing in max
  // threads to the decoder.
  if (pbi->num_tile_workers == 0) {
    const int num_threads = pbi->max_threads & ~1;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
    // Ensure tile data offsets will be properly aligned. This may fail on
    // platforms without DECLARE_ALIGNED().
    assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
    CHECK_MEM_ERROR(cm, pbi->tile_worker_data,
                    vpx_memalign(32, num_threads *
                                 sizeof(*pbi->tile_worker_data)));
    CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
                    vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
    for (i = 0; i < num_threads; ++i) {
      VPxWorker *const worker = &pbi->tile_workers[i];
      ++pbi->num_tile_workers;

      winterface->init(worker);
      if (i < num_threads - 1 && !winterface->reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");
      }
    }
  }

  // Reset tile decoding hook
  for (n = 0; n < num_workers; ++n) {
    VPxWorker *const worker = &pbi->tile_workers[n];
    winterface->sync(worker);
    worker->hook = (VPxWorkerHook)tile_worker_hook;
    worker->data1 = &pbi->tile_worker_data[n];
    worker->data2 = &pbi->tile_worker_info[n];
  }

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(cm->above_context, 0,
         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * aligned_mi_cols);

  // Load tile data into tile_buffers
  get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);

  // Sort the buffers based on size in descending order.
  qsort(tile_buffers[0], tile_cols, sizeof(tile_buffers[0][0]),
        compare_tile_buffers);

  // Rearrange the tile buffers such that per-tile group the largest, and
  // presumably the most difficult, tile will be decoded in the main thread.
  // This should help minimize the number of instances where the main thread is
  // waiting for a worker to complete.
  {
    int group_start = 0;
    while (group_start < tile_cols) {
      const TileBuffer largest = tile_buffers[0][group_start];
      const int group_end = MIN(group_start + num_workers, tile_cols) - 1;
      memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1,
              (group_end - group_start) * sizeof(tile_buffers[0][0]));
      tile_buffers[0][group_end] = largest;
      group_start = group_end + 1;
    }
  }

  // Initialize thread frame counts.
  if (!cm->frame_parallel_decoding_mode) {
    for (i = 0; i < num_workers; ++i) {
      TileWorkerData *const tile_data =
          (TileWorkerData*)pbi->tile_workers[i].data1;
      vp9_zero(tile_data->counts);
    }
  }

  n = 0;
  while (n < tile_cols) {
    for (i = 0; i < num_workers && n < tile_cols; ++i) {
      VPxWorker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      TileBuffer *const buf = &tile_buffers[0][n];

      tile_data->pbi = pbi;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      tile_data->xd.counts = cm->frame_parallel_decoding_mode ?
                             0 : &tile_data->counts;
      vp9_tile_init(tile, cm, 0, buf->col);
      vp9_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader, pbi->decrypt_cb,
                          pbi->decrypt_state);
      vp9_init_macroblockd(cm, &tile_data->xd);

      worker->had_error = 0;
      if (i == num_workers - 1 || n == tile_cols - 1) {
        winterface->execute(worker);
      } else {
        winterface->launch(worker);
      }

      if (buf->col == tile_cols - 1) {
        final_worker = i;
      }

      ++n;
    }

    for (; i > 0; --i) {
      VPxWorker *const worker = &pbi->tile_workers[i - 1];
      // TODO(jzern): The tile may have specific error data associated with
      // its vpx_internal_error_info which could be propagated to the main info
      // in cm. Additionally once the threads have been synced and an error is
      // detected, there's no point in continuing to decode tiles.
      pbi->mb.corrupted |= !winterface->sync(worker);
    }
    if (final_worker > -1) {
      TileWorkerData *const tile_data =
          (TileWorkerData*)pbi->tile_workers[final_worker].data1;
      bit_reader_end = vp9_reader_find_end(&tile_data->bit_reader);
      final_worker = -1;
    }

    // Accumulate thread frame counts.
    if (n >= tile_cols && !cm->frame_parallel_decoding_mode) {
      for (i = 0; i < num_workers; ++i) {
        TileWorkerData *const tile_data =
            (TileWorkerData*)pbi->tile_workers[i].data1;
        vp9_accumulate_frame_counts(cm, &tile_data->counts, 1);
      }
    }
  }

  return bit_reader_end;
}

static void error_handler(void *data) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}

static void read_bitdepth_colorspace_sampling(
    VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  if (cm->profile >= PROFILE_2) {
    cm->bit_depth = vp9_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
#if CONFIG_VP9_HIGHBITDEPTH
    cm->use_highbitdepth = 1;
#endif
  } else {
    cm->bit_depth = VPX_BITS_8;
#if CONFIG_VP9_HIGHBITDEPTH
    cm->use_highbitdepth = 0;
#endif
  }
  cm->color_space = vp9_rb_read_literal(rb, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      cm->subsampling_x = vp9_rb_read_bit(rb);
      cm->subsampling_y = vp9_rb_read_bit(rb);
      if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "4:2:0 color not supported in profile 1 or 3");
      if (vp9_rb_read_bit(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Reserved bit set");
    } else {
      cm->subsampling_y = cm->subsampling_x = 1;
    }
  } else {
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
      // 4:2:2 or 4:4:0 chroma sampling is not allowed.
      cm->subsampling_y = cm->subsampling_x = 0;
      if (vp9_rb_read_bit(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Reserved bit set");
    } else {
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "4:4:4 color not supported in profile 0 or 2");
    }
  }
}

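// Parses the uncompressed frame header: frame marker and profile, the
// show-existing-frame shortcut, key-frame vs. inter-frame syntax (sync code,
// bit depth/color space, frame size, reference setup), then loop filter,
// quantizer, segmentation and tile parameters. Returns the size of the
// compressed header that follows, or 0 for show-existing-frame.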
static size_t read_uncompressed_header(VP9Decoder *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  BufferPool *const pool = cm->buffer_pool;
  RefCntBuffer *const frame_bufs = pool->frame_bufs;
  int i, mask, ref_index = 0;
  size_t sz;

  cm->last_frame_type = cm->frame_type;
  cm->last_intra_only = cm->intra_only;

  if (vp9_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->profile = vp9_read_profile(rb);

  if (cm->profile >= MAX_PROFILES)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Unsupported bitstream profile");

  cm->show_existing_frame = vp9_rb_read_bit(rb);
  if (cm->show_existing_frame) {
    // Show an existing frame directly.
    const int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
    lock_buffer_pool(pool);
    if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
      unlock_buffer_pool(pool);
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "Buffer %d does not contain a decoded frame",
                         frame_to_show);
    }

    ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
    unlock_buffer_pool(pool);
    pbi->refresh_frame_flags = 0;
    cm->lf.filter_level = 0;
    cm->show_frame = 1;

    if (pbi->frame_parallel_decode) {
      for (i = 0; i < REF_FRAMES; ++i)
        cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
    }
    return 0;
  }

  cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
  cm->show_frame = vp9_rb_read_bit(rb);
  cm->error_resilient_mode = vp9_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    if (!vp9_read_sync_code(rb))
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "Invalid frame sync code");

    read_bitdepth_colorspace_sampling(cm, rb);
    pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;

    for (i = 0; i < REFS_PER_FRAME; ++i) {
      cm->frame_refs[i].idx = INVALID_IDX;
      cm->frame_refs[i].buf = NULL;
    }

    setup_frame_size(cm, rb);
    if (pbi->need_resync) {
      memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
      pbi->need_resync = 0;
    }
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      if (!vp9_read_sync_code(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Invalid frame sync code");
      if (cm->profile > PROFILE_0) {
        read_bitdepth_colorspace_sampling(cm, rb);
      } else {
        // NOTE: The intra-only frame header does not include the specification
        // of either the color format or color sub-sampling in profile 0. VP9
        // specifies that the default color format should be YUV 4:2:0 in this
        // case (normative).
        cm->color_space = VPX_CS_BT_601;
        cm->subsampling_y = cm->subsampling_x = 1;
        cm->bit_depth = VPX_BITS_8;
#if CONFIG_VP9_HIGHBITDEPTH
        cm->use_highbitdepth = 0;
#endif
      }

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
      setup_frame_size(cm, rb);
      if (pbi->need_resync) {
        memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
        pbi->need_resync = 0;
      }
    } else if (pbi->need_resync != 1) {  /* Skip if need resync */
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, REF_FRAMES);
      for (i = 0; i < REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, REF_FRAMES_LOG2);
        const int idx = cm->ref_frame_map[ref];
        RefBuffer *const ref_frame = &cm->frame_refs[i];
        ref_frame->idx = idx;
        ref_frame->buf = &frame_bufs[idx].buf;
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(cm, rb);

      cm->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->interp_filter = read_interp_filter(rb);

      for (i = 0; i < REFS_PER_FRAME; ++i) {
        RefBuffer *const ref_buf = &cm->frame_refs[i];
#if CONFIG_VP9_HIGHBITDEPTH
        vp9_setup_scale_factors_for_frame(&ref_buf->sf,
                                          ref_buf->buf->y_crop_width,
                                          ref_buf->buf->y_crop_height,
                                          cm->width, cm->height,
                                          cm->use_highbitdepth);
#else
        vp9_setup_scale_factors_for_frame(&ref_buf->sf,
                                          ref_buf->buf->y_crop_width,
                                          ref_buf->buf->y_crop_height,
                                          cm->width, cm->height);
#endif
      }
    }
  }
#if CONFIG_VP9_HIGHBITDEPTH
  get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
#endif
  get_frame_new_buffer(cm)->color_space = cm->color_space;

  if (pbi->need_resync) {
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Keyframe / intra-only frame required to reset decoder"
                       " state");
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  // This flag will be overridden by the call to vp9_setup_past_independence
  // below, forcing the use of context 0 for those frame types.
  cm->frame_context_idx = vp9_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);

  // Generate next_ref_frame_map.
  lock_buffer_pool(pool);
  for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
    if (mask & 1) {
      cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
      ++frame_bufs[cm->new_fb_idx].ref_count;
    } else {
      cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
    }
    // Current thread holds the reference frame.
    if (cm->ref_frame_map[ref_index] >= 0)
      ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
    ++ref_index;
  }

  for (; ref_index < REF_FRAMES; ++ref_index) {
    cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
    // Current thread holds the reference frame.
    if (cm->ref_frame_map[ref_index] >= 0)
      ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
  }
  unlock_buffer_pool(pool);
  pbi->hold_ref_buf = 1;

  if (frame_is_intra_only(cm) || cm->error_resilient_mode)
    vp9_setup_past_independence(cm);

  setup_loopfilter(&cm->lf, rb);
  setup_quantization(cm, &pbi->mb, rb);
  setup_segmentation(&cm->seg, rb);
  setup_segmentation_dequant(cm);

  setup_tile_info(cm, rb);
  sz = vp9_rb_read_literal(rb, 16);

  if (sz == 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid header size");

  return sz;
}

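// Parses the compressed (arithmetic-coded) header, which carries the
// forward probability updates for this frame: transform mode, coefficient,
// skip, inter mode, interpolation filter, reference mode, intra mode,
// partition and motion vector probabilities.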
1836 static int read_compressed_header(VP9Decoder *pbi, const uint8_t *data,
1837 size_t partition_size) {
1838 VP9_COMMON *const cm = &pbi->common;
1839 MACROBLOCKD *const xd = &pbi->mb;
1840 FRAME_CONTEXT *const fc = cm->fc;
1844 if (vp9_reader_init(&r, data, partition_size, pbi->decrypt_cb,
1845 pbi->decrypt_state))
1846 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
1847 "Failed to allocate bool decoder 0");
1849 cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
1850 if (cm->tx_mode == TX_MODE_SELECT)
1851 read_tx_mode_probs(&fc->tx_probs, &r);
1852 read_coef_probs(fc, cm->tx_mode, &r);
1854 for (k = 0; k < SKIP_CONTEXTS; ++k)
1855 vp9_diff_update_prob(&r, &fc->skip_probs[k]);
1857 if (!frame_is_intra_only(cm)) {
1858 nmv_context *const nmvc = &fc->nmvc;
1861 read_inter_mode_probs(fc, &r);
    if (cm->interp_filter == SWITCHABLE)
      read_switchable_interp_probs(fc, &r);

    for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
      vp9_diff_update_prob(&r, &fc->intra_inter_prob[i]);

    cm->reference_mode = read_frame_reference_mode(cm, &r);
    if (cm->reference_mode != SINGLE_REFERENCE)
      setup_compound_reference_mode(cm);
    read_frame_reference_mode_probs(cm, &r);

    for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
      for (i = 0; i < INTRA_MODES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->y_mode_prob[j][i]);

    for (j = 0; j < PARTITION_CONTEXTS; ++j)
      for (i = 0; i < PARTITION_TYPES - 1; ++i)
        vp9_diff_update_prob(&r, &fc->partition_prob[j][i]);

    read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
  }

  return vp9_reader_has_error(&r);
}

#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else  // !NDEBUG
// Counts should only be incremented when frame_parallel_decoding_mode and
// error_resilient_mode are disabled.
static void debug_check_frame_counts(const VP9_COMMON *const cm) {
  FRAME_COUNTS zero_counts;
  vp9_zero(zero_counts);
  assert(cm->frame_parallel_decoding_mode || cm->error_resilient_mode);
  assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
                 sizeof(cm->counts.y_mode)));
  assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
                 sizeof(cm->counts.uv_mode)));
  assert(!memcmp(cm->counts.partition, zero_counts.partition,
                 sizeof(cm->counts.partition)));
  assert(!memcmp(cm->counts.coef, zero_counts.coef,
                 sizeof(cm->counts.coef)));
  assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
                 sizeof(cm->counts.eob_branch)));
  assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
                 sizeof(cm->counts.switchable_interp)));
  assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
                 sizeof(cm->counts.inter_mode)));
  assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
                 sizeof(cm->counts.intra_inter)));
  assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
                 sizeof(cm->counts.comp_inter)));
  assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
                 sizeof(cm->counts.single_ref)));
  assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
                 sizeof(cm->counts.comp_ref)));
  assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
  assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
  assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
}
#endif  // NDEBUG

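// Prepares the bit reader for the uncompressed header. If a decrypt callback
// is installed, up to MAX_VP9_HEADER_SIZE bytes are first decrypted into the
// caller-provided scratch buffer and parsed from there.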
static struct vp9_read_bit_buffer *init_read_bit_buffer(
    VP9Decoder *pbi,
    struct vp9_read_bit_buffer *rb,
    const uint8_t *data,
    const uint8_t *data_end,
    uint8_t clear_data[MAX_VP9_HEADER_SIZE]) {
  rb->bit_offset = 0;
  rb->error_handler = error_handler;
  rb->error_handler_data = &pbi->common;
  if (pbi->decrypt_cb) {
    const int n = (int)MIN(MAX_VP9_HEADER_SIZE, data_end - data);
    pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
    rb->bit_buffer = clear_data;
    rb->bit_buffer_end = clear_data + n;
  } else {
    rb->bit_buffer = data;
    rb->bit_buffer_end = data_end;
  }
  return rb;
}

//------------------------------------------------------------------------------

int vp9_read_sync_code(struct vp9_read_bit_buffer *const rb) {
  return vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_0 &&
         vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_1 &&
         vp9_rb_read_literal(rb, 8) == VP9_SYNC_CODE_2;
}

void vp9_read_frame_size(struct vp9_read_bit_buffer *rb,
                         int *width, int *height) {
  *width = vp9_rb_read_literal(rb, 16) + 1;
  *height = vp9_rb_read_literal(rb, 16) + 1;
}

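// Profile is coded in two raw bits, low bit first. If both bits are set, a
// third (reserved) bit is read and added, so only a zero third bit yields
// the valid profile 3.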
BITSTREAM_PROFILE vp9_read_profile(struct vp9_read_bit_buffer *rb) {
  int profile = vp9_rb_read_bit(rb);
  profile |= vp9_rb_read_bit(rb) << 1;
  if (profile > 2)
    profile += vp9_rb_read_bit(rb);
  return (BITSTREAM_PROFILE) profile;
}

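// Top-level frame decode: parse the uncompressed header, then the compressed
// (first-partition) header, decode the tile data, and finally adapt the
// probability contexts when the bitstream allows it.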
void vp9_decode_frame(VP9Decoder *pbi,
                      const uint8_t *data, const uint8_t *data_end,
                      const uint8_t **p_data_end) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  struct vp9_read_bit_buffer rb;
  int context_updated = 0;
  uint8_t clear_data[MAX_VP9_HEADER_SIZE];
  const size_t first_partition_size = read_uncompressed_header(pbi,
      init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int tile_cols = 1 << cm->log2_tile_cols;
  YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
  xd->cur_buf = new_fb;

  if (!first_partition_size) {
    // Showing an existing frame directly; no tile data follows the header.
    *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
    return;
  }

  data += vp9_rb_bytes_read(&rb);
  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
                           cm->width == cm->last_width &&
                           cm->height == cm->last_height &&
                           !cm->last_intra_only &&
                           cm->last_show_frame &&
                           (cm->last_frame_type != KEY_FRAME);

  vp9_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);

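  // Load this frame's entropy context from the bank selected by
  // frame_context_idx in the uncompressed header.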
  *cm->fc = cm->frame_contexts[cm->frame_context_idx];
  if (!cm->fc->initialized)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Uninitialized entropy context.");

  vp9_zero(cm->counts);

  xd->corrupted = 0;
  new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
  if (new_fb->corrupted)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Decode failed. Frame data header is corrupted.");

  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    vp9_loop_filter_frame_init(cm, cm->lf.filter_level);
  }

  // If encoded in frame parallel mode, frame context is ready after decoding
  // the frame header.
  if (pbi->frame_parallel_decode && cm->frame_parallel_decoding_mode) {
    VPxWorker *const worker = pbi->frame_worker_owner;
    FrameWorkerData *const frame_worker_data = worker->data1;
    if (cm->refresh_frame_context) {
      context_updated = 1;
      cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
    }
    vp9_frameworker_lock_stats(worker);
    pbi->cur_buf->row = -1;
    pbi->cur_buf->col = -1;
    frame_worker_data->frame_context_ready = 1;
    // Signal the main thread that context is ready.
    vp9_frameworker_signal_stats(worker);
    vp9_frameworker_unlock_stats(worker);
  }

  if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
    // Multi-threaded tile decoder
    *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
    if (!xd->corrupted) {
      if (!cm->skip_loop_filter) {
        // If multiple threads are used to decode tiles, then we use those
        // threads to do parallel loopfiltering.
        vp9_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
                                 cm->lf.filter_level, 0, 0, pbi->tile_workers,
                                 pbi->num_tile_workers, &pbi->lf_row_sync);
      }
    } else {
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "Decode failed. Frame data is corrupted.");
    }
  } else {
    *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
  }

  if (!xd->corrupted) {
    if (!cm->error_resilient_mode && !cm->frame_parallel_decoding_mode) {
      vp9_adapt_coef_probs(cm);

      if (!frame_is_intra_only(cm)) {
        vp9_adapt_mode_probs(cm);
        vp9_adapt_mv_probs(cm, cm->allow_high_precision_mv);
      }
    } else {
      debug_check_frame_counts(cm);
    }
  } else {
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Decode failed. Frame data is corrupted.");
  }

  // In non-frame-parallel mode, update the frame context here.
  if (cm->refresh_frame_context && !context_updated)
    cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
}