/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <stdlib.h>  // qsort()

#include "./vp10_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_scale_rtcd.h"

#include "vpx_dsp/bitreader_buffer.h"
#include "vpx_dsp/bitreader.h"
#include "vpx_dsp/vpx_dsp_common.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
#include "vpx_ports/mem_ops.h"
#include "vpx_scale/vpx_scale.h"
#include "vpx_util/vpx_thread.h"

#include "vp10/common/alloccommon.h"
#include "vp10/common/common.h"
#include "vp10/common/entropy.h"
#include "vp10/common/entropymode.h"
#include "vp10/common/idct.h"
#include "vp10/common/thread_common.h"
#include "vp10/common/pred_common.h"
#include "vp10/common/quant_common.h"
#include "vp10/common/reconintra.h"
#include "vp10/common/reconinter.h"
#include "vp10/common/seg_common.h"
#include "vp10/common/tile_common.h"

#include "vp10/decoder/decodeframe.h"
#include "vp10/decoder/detokenize.h"
#include "vp10/decoder/decodemv.h"
#include "vp10/decoder/decoder.h"
#include "vp10/decoder/dsubexp.h"

#define MAX_VP9_HEADER_SIZE 80
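
// Compound (two-reference) prediction is only allowed when the reference
// frames do not all share the same temporal direction, i.e. when at least
// one reference has a sign bias different from LAST_FRAME's.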
static int is_compound_reference_allowed(const VP10_COMMON *cm) {
  for (i = 1; i < REFS_PER_FRAME; ++i)
    if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1])

static void setup_compound_reference_mode(VP10_COMMON *cm) {
  if (cm->ref_frame_sign_bias[LAST_FRAME] ==
      cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
    cm->comp_fixed_ref = ALTREF_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = GOLDEN_FRAME;
  } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
             cm->ref_frame_sign_bias[ALTREF_FRAME]) {
    cm->comp_fixed_ref = GOLDEN_FRAME;
    cm->comp_var_ref[0] = LAST_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;
    cm->comp_fixed_ref = LAST_FRAME;
    cm->comp_var_ref[0] = GOLDEN_FRAME;
    cm->comp_var_ref[1] = ALTREF_FRAME;

static int read_is_valid(const uint8_t *start, size_t len, const uint8_t *end) {
  return len != 0 && len <= (size_t)(end - start);

static int decode_unsigned_max(struct vpx_read_bit_buffer *rb, int max) {
  const int data = vpx_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;

static TX_MODE read_tx_mode(struct vpx_read_bit_buffer *rb) {
  return vpx_rb_read_bit(rb) ? TX_MODE_SELECT : vpx_rb_read_literal(rb, 2);

static TX_MODE read_tx_mode(vpx_reader *r) {
  TX_MODE tx_mode = vpx_read_literal(r, 2);
  if (tx_mode == ALLOW_32X32)
    tx_mode += vpx_read_bit(r);

static void read_tx_mode_probs(struct tx_probs *tx_probs, vpx_reader *r) {
  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 3; ++j)
      vp10_diff_update_prob(r, &tx_probs->p8x8[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 2; ++j)
      vp10_diff_update_prob(r, &tx_probs->p16x16[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZES - 1; ++j)
      vp10_diff_update_prob(r, &tx_probs->p32x32[i][j]);

static void read_switchable_interp_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
  for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
    for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
      vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);

static void read_inter_mode_probs(FRAME_CONTEXT *fc, vpx_reader *r) {
  for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
    for (j = 0; j < INTER_MODES - 1; ++j)
      vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);

static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
  if (is_compound_reference_allowed(cm)) {
    return vpx_read_bit(r) ? (vpx_read_bit(r) ? REFERENCE_MODE_SELECT
                                              : COMPOUND_REFERENCE)
    return SINGLE_REFERENCE;

static void read_frame_reference_mode_probs(VP10_COMMON *cm, vpx_reader *r) {
  FRAME_CONTEXT *const fc = cm->fc;

  if (cm->reference_mode == REFERENCE_MODE_SELECT)
    for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
      vp10_diff_update_prob(r, &fc->comp_inter_prob[i]);

  if (cm->reference_mode != COMPOUND_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i) {
      vp10_diff_update_prob(r, &fc->single_ref_prob[i][0]);
      vp10_diff_update_prob(r, &fc->single_ref_prob[i][1]);

  if (cm->reference_mode != SINGLE_REFERENCE)
    for (i = 0; i < REF_CONTEXTS; ++i)
      vp10_diff_update_prob(r, &fc->comp_ref_prob[i]);
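
// Each motion vector probability is updated only when a bit coded with
// MV_UPDATE_PROB allows it; the new value is read as a 7-bit literal and
// mapped to an odd 8-bit probability via (value << 1) | 1.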
static void update_mv_probs(vpx_prob *p, int n, vpx_reader *r) {
  for (i = 0; i < n; ++i)
    if (vpx_read(r, MV_UPDATE_PROB))
      p[i] = (vpx_read_literal(r, 7) << 1) | 1;

static void read_mv_probs(nmv_context *ctx, int allow_hp, vpx_reader *r) {
  update_mv_probs(ctx->joints, MV_JOINTS - 1, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->sign, 1, r);
    update_mv_probs(comp_ctx->classes, MV_CLASSES - 1, r);
    update_mv_probs(comp_ctx->class0, CLASS0_SIZE - 1, r);
    update_mv_probs(comp_ctx->bits, MV_OFFSET_BITS, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    for (j = 0; j < CLASS0_SIZE; ++j)
      update_mv_probs(comp_ctx->class0_fp[j], MV_FP_SIZE - 1, r);
    update_mv_probs(comp_ctx->fp, 3, r);

  for (i = 0; i < 2; ++i) {
    nmv_component *const comp_ctx = &ctx->comps[i];
    update_mv_probs(&comp_ctx->class0_hp, 1, r);
    update_mv_probs(&comp_ctx->hp, 1, r);
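
// Applies the inverse transform for an inter-coded block and adds the
// result to the prediction in 'dst'. Lossless blocks use the 4x4 WHT
// instead of the DCT. Afterwards only the dequantized coefficients that
// could have been written (bounded by 'eob') are cleared, which keeps the
// memset cheap for mostly-zero blocks.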
static void inverse_transform_block_inter(MACROBLOCKD* xd, int plane,
                                          const TX_SIZE tx_size,
                                          uint8_t *dst, int stride,
                                          int eob, int block) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  TX_TYPE tx_type = get_tx_type(pd->plane_type, xd, block);
  tran_low_t *const dqcoeff = pd->dqcoeff;
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
                                     tx_type, xd->lossless ?
                                         vp10_highbd_iwht4x4_add :
                                         vp10_highbd_idct4x4_add);
        vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
        vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
        vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
        assert(0 && "Invalid transform size");
#endif  // CONFIG_VP9_HIGHBITDEPTH
        vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
                              xd->lossless ? vp10_iwht4x4_add :
        vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
        vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
        vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
        assert(0 && "Invalid transform size");
#if CONFIG_VP9_HIGHBITDEPTH
#endif  // CONFIG_VP9_HIGHBITDEPTH

    if (tx_size <= TX_16X16 && eob <= 10)
      memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
    else if (tx_size == TX_32X32 && eob <= 34)
      memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));

static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
                                          const TX_TYPE tx_type,
                                          const TX_SIZE tx_size,
                                          uint8_t *dst, int stride,
  struct macroblockd_plane *const pd = &xd->plane[plane];
  tran_low_t *const dqcoeff = pd->dqcoeff;
#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
        vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
                                     tx_type, xd->lossless ?
                                         vp10_highbd_iwht4x4_add :
                                         vp10_highbd_idct4x4_add);
        vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
        vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
        vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
        assert(0 && "Invalid transform size");
#endif  // CONFIG_VP9_HIGHBITDEPTH
        vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
                              xd->lossless ? vp10_iwht4x4_add :
        vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
        vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
        vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
        assert(0 && "Invalid transform size");
#if CONFIG_VP9_HIGHBITDEPTH
#endif  // CONFIG_VP9_HIGHBITDEPTH

    if (tx_type == DCT_DCT && tx_size <= TX_16X16 && eob <= 10)
      memset(dqcoeff, 0, 4 * (4 << tx_size) * sizeof(dqcoeff[0]));
    else if (tx_size == TX_32X32 && eob <= 34)
      memset(dqcoeff, 0, 256 * sizeof(dqcoeff[0]));
      memset(dqcoeff, 0, (16 << (tx_size << 1)) * sizeof(dqcoeff[0]));

static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
                                                MB_MODE_INFO *const mbmi,
  struct macroblockd_plane *const pd = &xd->plane[plane];
  PREDICTION_MODE mode = (plane == 0) ? mbmi->mode : mbmi->uv_mode;
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  int block_idx = (row << 1) + col;
  dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];

  if (mbmi->sb_type < BLOCK_8X8)
      mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;

  vp10_predict_intra_block(xd, pd->n4_wl, tx_size, mode,
                           dst, pd->dst.stride, dst, pd->dst.stride,

    TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
    const scan_order *sc = get_scan(tx_size, tx_type);
    const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
                                             r, mbmi->segment_id);
    inverse_transform_block_intra(xd, plane, tx_type, tx_size,
                                  dst, pd->dst.stride, eob);

static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r,
                                   MB_MODE_INFO *const mbmi, int plane,
                                   int row, int col, TX_SIZE tx_size) {
  struct macroblockd_plane *const pd = &xd->plane[plane];
  PLANE_TYPE plane_type = (plane == 0) ? PLANE_TYPE_Y : PLANE_TYPE_UV;
  int block_idx = (row << 1) + col;
  TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
  const scan_order *sc = get_scan(tx_size, tx_type);
  const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,

  inverse_transform_block_inter(xd, plane, tx_size,
                                &pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
                                pd->dst.stride, eob, block_idx);
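
// Builds a padded copy of a reference block whose source area extends
// outside the reference frame: pixels to the left/right of the frame are
// replicated from the first/last column and rows above/below from the
// nearest valid row.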
static void build_mc_border(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            int x, int y, int b_w, int b_h, int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint8_t *ref_row = src - x - y * src_stride;

    ref_row += (h - 1) * src_stride;
    ref_row += y * src_stride;

    int left = x < 0 ? -x : 0;

      copy = b_w - left - right;

      memset(dst, ref_row[0], left);

      memcpy(dst + left, ref_row + x + left, copy);

      memset(dst + left + copy, ref_row[w - 1], right);

      ref_row += src_stride;

#if CONFIG_VP9_HIGHBITDEPTH
static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                 uint16_t *dst, int dst_stride,
                                 int x, int y, int b_w, int b_h,
  // Get a pointer to the start of the real data for this row.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *ref_row = src - x - y * src_stride;

    ref_row += (h - 1) * src_stride;
    ref_row += y * src_stride;

    int left = x < 0 ? -x : 0;

      copy = b_w - left - right;

      vpx_memset16(dst, ref_row[0], left);

      memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));

      vpx_memset16(dst + left + copy, ref_row[w - 1], right);

      ref_row += src_stride;
#endif  // CONFIG_VP9_HIGHBITDEPTH

#if CONFIG_VP9_HIGHBITDEPTH
static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               uint8_t *const dst, int dst_buf_stride,
                               int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf,
                               int w, int h, int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint16_t, mc_buf_high[80 * 2 * 80 * 2]);
  const uint8_t *buf_ptr;

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_build_mc_border(buf_ptr1, pre_buf_stride, mc_buf_high, b_w,
                         x0, y0, b_w, b_h, frame_width, frame_height);
    buf_ptr = CONVERT_TO_BYTEPTR(mc_buf_high) + border_offset;
    build_mc_border(buf_ptr1, pre_buf_stride, (uint8_t *)mc_buf_high, b_w,
                    x0, y0, b_w, b_h, frame_width, frame_height);
    buf_ptr = ((uint8_t *)mc_buf_high) + border_offset;

  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                         subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
    inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);

static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                               int x0, int y0, int b_w, int b_h,
                               int frame_width, int frame_height,
                               uint8_t *const dst, int dst_buf_stride,
                               int subpel_x, int subpel_y,
                               const InterpKernel *kernel,
                               const struct scale_factors *sf,
                               int w, int h, int ref, int xs, int ys) {
  DECLARE_ALIGNED(16, uint8_t, mc_buf[80 * 2 * 80 * 2]);
  const uint8_t *buf_ptr;

  build_mc_border(buf_ptr1, pre_buf_stride, mc_buf, b_w,
                  x0, y0, b_w, b_h, frame_width, frame_height);
  buf_ptr = mc_buf + border_offset;

  inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x,
                  subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
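
// Decoder-side inter prediction for one plane. In addition to the normal
// sub-pel interpolation this path has to cope with scaled references,
// references whose borders are not (yet) extended, and, in frame-parallel
// mode, references that are still being decoded, in which case it waits
// for the required rows before reading them.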
static void dec_build_inter_predictors(VP10Decoder *const pbi, MACROBLOCKD *xd,
                                       int plane, int bw, int bh, int x,
                                       int y, int w, int h, int mi_x, int mi_y,
                                       const InterpKernel *kernel,
                                       const struct scale_factors *sf,
                                       struct buf_2d *pre_buf,
                                       struct buf_2d *dst_buf, const MV* mv,
                                       RefCntBuffer *ref_frame_buf,
                                       int is_scaled, int ref) {
  VP10_COMMON *const cm = &pbi->common;
  struct macroblockd_plane *const pd = &xd->plane[plane];
  uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
  int xs, ys, x0, y0, x0_16, y0_16, frame_width, frame_height,
      buf_stride, subpel_x, subpel_y;
  uint8_t *ref_frame, *buf_ptr;

  // Get reference frame pointer, width and height.
    frame_width = ref_frame_buf->buf.y_crop_width;
    frame_height = ref_frame_buf->buf.y_crop_height;
    ref_frame = ref_frame_buf->buf.y_buffer;
    frame_width = ref_frame_buf->buf.uv_crop_width;
    frame_height = ref_frame_buf->buf.uv_crop_height;
    ref_frame = plane == 1 ? ref_frame_buf->buf.u_buffer
                           : ref_frame_buf->buf.v_buffer;

    const MV mv_q4 = clamp_mv_to_umv_border_sb(xd, mv, bw, bh,

    // Co-ordinate of containing block to pixel precision.
    int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
    int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = (x_start + x) << SUBPEL_BITS;
    y0_16 = (y_start + y) << SUBPEL_BITS;

    // Co-ordinate of current block in reference frame
    // to 1/16th pixel precision.
    x0_16 = sf->scale_value_x(x0_16, sf);
    y0_16 = sf->scale_value_y(y0_16, sf);

    // Map the top left corner of the block into the reference frame.
    x0 = sf->scale_value_x(x_start + x, sf);
    y0 = sf->scale_value_y(y_start + y, sf);

    // Scale the MV and incorporate the sub-pixel offset of the block
    // in the reference frame.
    scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);

    // Co-ordinate of containing block to pixel precision.
    x0 = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x)) + x;
    y0 = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y)) + y;

    // Co-ordinate of the block to 1/16th pixel precision.
    x0_16 = x0 << SUBPEL_BITS;
    y0_16 = y0 << SUBPEL_BITS;

    scaled_mv.row = mv->row * (1 << (1 - pd->subsampling_y));
    scaled_mv.col = mv->col * (1 << (1 - pd->subsampling_x));

  subpel_x = scaled_mv.col & SUBPEL_MASK;
  subpel_y = scaled_mv.row & SUBPEL_MASK;

  // Calculate the top left corner of the best matching block in the
  x0 += scaled_mv.col >> SUBPEL_BITS;
  y0 += scaled_mv.row >> SUBPEL_BITS;
  x0_16 += scaled_mv.col;
  y0_16 += scaled_mv.row;

  // Get reference block pointer.
  buf_ptr = ref_frame + y0 * pre_buf->stride + x0;
  buf_stride = pre_buf->stride;

  // Do border extension if there is motion or the
  // width/height is not a multiple of 8 pixels.
  if (is_scaled || scaled_mv.col || scaled_mv.row ||
      (frame_width & 0x7) || (frame_height & 0x7)) {
    int y1 = ((y0_16 + (h - 1) * ys) >> SUBPEL_BITS) + 1;

    // Get reference block bottom right horizontal coordinate.
    int x1 = ((x0_16 + (w - 1) * xs) >> SUBPEL_BITS) + 1;
    int x_pad = 0, y_pad = 0;

    if (subpel_x || (sf->x_step_q4 != SUBPEL_SHIFTS)) {
      x0 -= VP9_INTERP_EXTEND - 1;
      x1 += VP9_INTERP_EXTEND;

    if (subpel_y || (sf->y_step_q4 != SUBPEL_SHIFTS)) {
      y0 -= VP9_INTERP_EXTEND - 1;
      y1 += VP9_INTERP_EXTEND;

    // Wait until reference block is ready. Pad 7 more pixels as last 7
    // pixels of each superblock row can be changed by next superblock row.
    if (cm->frame_parallel_decode)
      vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                            VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
    // Extend the border only if part of the (filter-extended) reference
    // block lies outside the reference frame.
    if (x0 < 0 || x0 > frame_width - 1 || x1 < 0 || x1 > frame_width - 1 ||
        y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
      // Extend the border.
      const uint8_t *const buf_ptr1 = ref_frame + y0 * buf_stride + x0;
      const int b_w = x1 - x0 + 1;
      const int b_h = y1 - y0 + 1;
      const int border_offset = y_pad * 3 * b_w + x_pad * 3;

      extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
                         frame_width, frame_height, border_offset,
                         dst, dst_buf->stride,
#if CONFIG_VP9_HIGHBITDEPTH

    // Wait until reference block is ready. Pad 7 more pixels as last 7
    // pixels of each superblock row can be changed by next superblock row.
    if (cm->frame_parallel_decode) {
      const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
      vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                            VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));

#if CONFIG_VP9_HIGHBITDEPTH
  if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
    high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                         subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
  inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                  subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH

static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
                                          int mi_row, int mi_col) {
  const int mi_x = mi_col * MI_SIZE;
  const int mi_y = mi_row * MI_SIZE;
  const MODE_INFO *mi = xd->mi[0];
  const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
  const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
  const int is_compound = has_second_ref(&mi->mbmi);

  for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
    struct macroblockd_plane *const pd = &xd->plane[plane];
    struct buf_2d *const dst_buf = &pd->dst;
    const int num_4x4_w = pd->n4_w;
    const int num_4x4_h = pd->n4_h;

    const int n4w_x4 = 4 * num_4x4_w;
    const int n4h_x4 = 4 * num_4x4_h;

    for (ref = 0; ref < 1 + is_compound; ++ref) {
      const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
      struct buf_2d *const pre_buf = &pd->pre[ref];
      const int idx = xd->block_refs[ref]->idx;
      BufferPool *const pool = pbi->common.buffer_pool;
      RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
      const int is_scaled = vp10_is_scaled(sf);

      if (sb_type < BLOCK_8X8) {
        const PARTITION_TYPE bp = BLOCK_8X8 - sb_type;
        const int have_vsplit = bp != PARTITION_HORZ;
        const int have_hsplit = bp != PARTITION_VERT;
        const int num_4x4_w = 2 >> ((!have_vsplit) | pd->subsampling_x);
        const int num_4x4_h = 2 >> ((!have_hsplit) | pd->subsampling_y);
        const int pw = 8 >> (have_vsplit | pd->subsampling_x);
        const int ph = 8 >> (have_hsplit | pd->subsampling_y);

        for (y = 0; y < num_4x4_h; ++y) {
          for (x = 0; x < num_4x4_w; ++x) {
            const MV mv = average_split_mvs(pd, mi, ref, y * 2 + x);
            dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4,
                                       4 * x, 4 * y, pw, ph, mi_x, mi_y, kernel,
                                       sf, pre_buf, dst_buf, &mv,
                                       ref_frame_buf, is_scaled, ref);

        const MV mv = mi->mbmi.mv[ref].as_mv;
        dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4,
                                   0, 0, n4w_x4, n4h_x4, mi_x, mi_y, kernel,
                                   sf, pre_buf, dst_buf, &mv, ref_frame_buf,

static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi,
                                         int n4_wl, int n4_hl) {
  // get minimum log2 num4x4s dimension
  const int x = VPXMIN(n4_wl, n4_hl);
  return VPXMIN(mbmi->tx_size, x);
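
// Clears the above/left entropy contexts of every plane for the current
// block, so a skipped block leaves neutral coding contexts for its
// neighbors.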
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
  for (i = 0; i < MAX_MB_PLANE; i++) {
    struct macroblockd_plane *const pd = &xd->plane[i];
    memset(pd->above_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_w);
    memset(pd->left_context, 0, sizeof(ENTROPY_CONTEXT) * pd->n4_h);

static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
  for (i = 0; i < MAX_MB_PLANE; i++) {
    xd->plane[i].n4_w = (bw << 1) >> xd->plane[i].subsampling_x;
    xd->plane[i].n4_h = (bh << 1) >> xd->plane[i].subsampling_y;
    xd->plane[i].n4_wl = bwl - xd->plane[i].subsampling_x;
    xd->plane[i].n4_hl = bhl - xd->plane[i].subsampling_y;

static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
                                 BLOCK_SIZE bsize, int mi_row, int mi_col,
                                 int bw, int bh, int x_mis, int y_mis,
  const int offset = mi_row * cm->mi_stride + mi_col;
  const TileInfo *const tile = &xd->tile;

  xd->mi = cm->mi_grid_visible + offset;
  xd->mi[0] = &cm->mi[offset];
  // TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of
  // passing bsize from decode_partition().
  xd->mi[0]->mbmi.sb_type = bsize;
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x) {
      xd->mi[y * cm->mi_stride + x] = xd->mi[0];

  set_plane_n4(xd, bw, bh, bwl, bhl);

  set_skip_context(xd, mi_row, mi_col);

  // Distance of Mb to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units
  set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);

  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
  return &xd->mi[0]->mbmi;
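
// Decodes one block: reads its mode info, then either predicts and
// reconstructs intra blocks transform-by-transform, or builds the inter
// predictor for the whole block and adds the decoded residual on top.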
static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
                         int mi_row, int mi_col,
                         vpx_reader *r, BLOCK_SIZE bsize,
  VP10_COMMON *const cm = &pbi->common;
  const int less8x8 = bsize < BLOCK_8X8;
  const int bw = 1 << (bwl - 1);
  const int bh = 1 << (bhl - 1);
  const int x_mis = VPXMIN(bw, cm->mi_cols - mi_col);
  const int y_mis = VPXMIN(bh, cm->mi_rows - mi_row);
  MB_MODE_INFO *mbmi = set_offsets(cm, xd, bsize, mi_row, mi_col,
                                   bw, bh, x_mis, y_mis, bwl, bhl);

  if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
    const BLOCK_SIZE uv_subsize =
        ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
    if (uv_subsize == BLOCK_INVALID)
      vpx_internal_error(xd->error_info,
                         VPX_CODEC_CORRUPT_FRAME, "Invalid block size.");

  vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);

    dec_reset_skip_context(xd);

  if (!is_inter_block(mbmi)) {
    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const TX_SIZE tx_size =
          plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
      const int num_4x4_w = pd->n4_w;
      const int num_4x4_h = pd->n4_h;
      const int step = (1 << tx_size);

      const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ?
          0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
      const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ?
          0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));

      for (row = 0; row < max_blocks_high; row += step)
        for (col = 0; col < max_blocks_wide; col += step)
          predict_and_reconstruct_intra_block(xd, r, mbmi, plane,

    dec_build_inter_predictors_sb(pbi, xd, mi_row, mi_col);

    for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
      const struct macroblockd_plane *const pd = &xd->plane[plane];
      const TX_SIZE tx_size =
          plane ? dec_get_uv_tx_size(mbmi, pd->n4_wl, pd->n4_hl)
      const int num_4x4_w = pd->n4_w;
      const int num_4x4_h = pd->n4_h;
      const int step = (1 << tx_size);

      const int max_blocks_wide = num_4x4_w + (xd->mb_to_right_edge >= 0 ?
          0 : xd->mb_to_right_edge >> (5 + pd->subsampling_x));
      const int max_blocks_high = num_4x4_h + (xd->mb_to_bottom_edge >= 0 ?
          0 : xd->mb_to_bottom_edge >> (5 + pd->subsampling_y));

      for (row = 0; row < max_blocks_high; row += step)
        for (col = 0; col < max_blocks_wide; col += step)
          eobtotal += reconstruct_inter_block(xd, r, mbmi, plane, row, col,

    if (!less8x8 && eobtotal == 0)
#if CONFIG_MISC_FIXES
      mbmi->has_no_coeffs = 1;  // skip loopfilter
      mbmi->skip = 1;  // skip loopfilter

  xd->corrupted |= vpx_reader_has_error(r);

static INLINE int dec_partition_plane_context(const MACROBLOCKD *xd,
                                              int mi_row, int mi_col,
  const PARTITION_CONTEXT *above_ctx = xd->above_seg_context + mi_col;
  const PARTITION_CONTEXT *left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
  int above = (*above_ctx >> bsl) & 1, left = (*left_ctx >> bsl) & 1;

  return (left * 2 + above) + bsl * PARTITION_PLOFFSET;

static INLINE void dec_update_partition_context(MACROBLOCKD *xd,
                                                int mi_row, int mi_col,
  PARTITION_CONTEXT *const above_ctx = xd->above_seg_context + mi_col;
  PARTITION_CONTEXT *const left_ctx = xd->left_seg_context + (mi_row & MI_MASK);
  // Update the partition context at the end nodes: set the partition bits
  // of block sizes larger than the current one to one, and the partition
  // bits of smaller block sizes to zero.
  memset(above_ctx, partition_context_lookup[subsize].above, bw);
  memset(left_ctx, partition_context_lookup[subsize].left, bw);
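
// Reads the partition type for the current block. When the block extends
// past the right or bottom frame edge only the partitions that keep the
// coded samples inside the frame are possible, so fewer symbols need to
// be read.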
static PARTITION_TYPE read_partition(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     int has_rows, int has_cols, int bsl) {
  const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
  const vpx_prob *const probs = get_partition_probs(xd, ctx);
  FRAME_COUNTS *counts = xd->counts;

  if (has_rows && has_cols)
    p = (PARTITION_TYPE)vpx_read_tree(r, vp10_partition_tree, probs);
  else if (!has_rows && has_cols)
    p = vpx_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
  else if (has_rows && !has_cols)
    p = vpx_read(r, probs[2]) ? PARTITION_SPLIT : PARTITION_VERT;

    ++counts->partition[ctx][p];

// TODO(slavarnway): eliminate bsize and subsize in future commits
static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
                             int mi_row, int mi_col,
                             vpx_reader* r, BLOCK_SIZE bsize, int n4x4_l2) {
  VP10_COMMON *const cm = &pbi->common;
  const int n8x8_l2 = n4x4_l2 - 1;
  const int num_8x8_wh = 1 << n8x8_l2;
  const int hbs = num_8x8_wh >> 1;
  PARTITION_TYPE partition;
  const int has_rows = (mi_row + hbs) < cm->mi_rows;
  const int has_cols = (mi_col + hbs) < cm->mi_cols;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)

  partition = read_partition(xd, mi_row, mi_col, r, has_rows, has_cols,
  subsize = subsize_lookup[partition][bsize];  // get_subsize(bsize, partition);

    // calculate bmode block dimensions (log 2)
    xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
    xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
    decode_block(pbi, xd, mi_row, mi_col, r, subsize, 1, 1);

        decode_block(pbi, xd, mi_row, mi_col, r, subsize, n4x4_l2, n4x4_l2);

        decode_block(pbi, xd, mi_row, mi_col, r, subsize, n4x4_l2, n8x8_l2);
          decode_block(pbi, xd, mi_row + hbs, mi_col, r, subsize, n4x4_l2,

        decode_block(pbi, xd, mi_row, mi_col, r, subsize, n8x8_l2, n4x4_l2);
          decode_block(pbi, xd, mi_row, mi_col + hbs, r, subsize, n8x8_l2,

      case PARTITION_SPLIT:
        decode_partition(pbi, xd, mi_row, mi_col, r, subsize, n8x8_l2);
        decode_partition(pbi, xd, mi_row, mi_col + hbs, r, subsize, n8x8_l2);
        decode_partition(pbi, xd, mi_row + hbs, mi_col, r, subsize, n8x8_l2);
        decode_partition(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize,

        assert(0 && "Invalid partition type");

  // update partition context
  if (bsize >= BLOCK_8X8 &&
      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT))
    dec_update_partition_context(xd, mi_row, mi_col, subsize, num_8x8_wh);

static void setup_token_decoder(const uint8_t *data,
                                const uint8_t *data_end,
                                struct vpx_internal_error_info *error_info,
                                vpx_decrypt_cb decrypt_cb,
                                void *decrypt_state) {
  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vpx_reader_init(r, data, read_size, decrypt_cb, decrypt_state))
    vpx_internal_error(error_info, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);

static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
  if (vpx_read_bit(r))
    for (i = 0; i < PLANE_TYPES; ++i)
      for (j = 0; j < REF_TYPES; ++j)
        for (k = 0; k < COEF_BANDS; ++k)
          for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
            for (m = 0; m < UNCONSTRAINED_NODES; ++m)
              vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);

static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode,
  const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
  for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
    read_coef_probs_common(fc->coef_probs[tx_size], r);
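
// Reads the segmentation syntax from the uncompressed header: whether
// segmentation is enabled, whether the segment map and/or the per-segment
// feature data are updated this frame, and the feature values themselves.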
static void setup_segmentation(VP10_COMMON *const cm,
                               struct vpx_read_bit_buffer *rb) {
  struct segmentation *const seg = &cm->seg;

  seg->update_map = 0;
  seg->update_data = 0;

  seg->enabled = vpx_rb_read_bit(rb);

  // Segmentation map update
  if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
    seg->update_map = 1;
    seg->update_map = vpx_rb_read_bit(rb);
  if (seg->update_map) {
    for (i = 0; i < SEG_TREE_PROBS; i++)
      seg->tree_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8)

    if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
      seg->temporal_update = 0;
      seg->temporal_update = vpx_rb_read_bit(rb);
    if (seg->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = vpx_rb_read_bit(rb) ? vpx_rb_read_literal(rb, 8)
      for (i = 0; i < PREDICTION_PROBS; i++)
        seg->pred_probs[i] = MAX_PROB;

  // Segmentation data update
  seg->update_data = vpx_rb_read_bit(rb);
  if (seg->update_data) {
    seg->abs_delta = vpx_rb_read_bit(rb);

    vp10_clearall_segfeatures(seg);

    for (i = 0; i < MAX_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        const int feature_enabled = vpx_rb_read_bit(rb);
        if (feature_enabled) {
          vp10_enable_segfeature(seg, i, j);
          data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j));
          if (vp10_is_segfeature_signed(j))
            data = vpx_rb_read_bit(rb) ? -data : data;
        vp10_set_segdata(seg, i, j, data);

static void setup_loopfilter(struct loopfilter *lf,
                             struct vpx_read_bit_buffer *rb) {
  lf->filter_level = vpx_rb_read_literal(rb, 6);
  lf->sharpness_level = vpx_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  lf->mode_ref_delta_update = 0;

  lf->mode_ref_delta_enabled = vpx_rb_read_bit(rb);
  if (lf->mode_ref_delta_enabled) {
    lf->mode_ref_delta_update = vpx_rb_read_bit(rb);
    if (lf->mode_ref_delta_update) {
      for (i = 0; i < MAX_REF_FRAMES; i++)
        if (vpx_rb_read_bit(rb))
          lf->ref_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vpx_rb_read_bit(rb))
          lf->mode_deltas[i] = vpx_rb_read_inv_signed_literal(rb, 6);

static INLINE int read_delta_q(struct vpx_read_bit_buffer *rb) {
  return vpx_rb_read_bit(rb) ? vpx_rb_read_inv_signed_literal(rb, 4) : 0;

static void setup_quantization(VP10_COMMON *const cm, MACROBLOCKD *const xd,
                               struct vpx_read_bit_buffer *rb) {
  cm->base_qindex = vpx_rb_read_literal(rb, QINDEX_BITS);
  cm->y_dc_delta_q = read_delta_q(rb);
  cm->uv_dc_delta_q = read_delta_q(rb);
  cm->uv_ac_delta_q = read_delta_q(rb);
  cm->dequant_bit_depth = cm->bit_depth;
  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;
#if CONFIG_VP9_HIGHBITDEPTH
  xd->bd = (int)cm->bit_depth;

static void setup_segmentation_dequant(VP10_COMMON *const cm) {
  // Build y/uv dequant values based on segmentation.
  if (cm->seg.enabled) {
    for (i = 0; i < MAX_SEGMENTS; ++i) {
      const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
      cm->y_dequant[i][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q,
      cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
      cm->uv_dequant[i][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q,
      cm->uv_dequant[i][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q,
    const int qindex = cm->base_qindex;
    // When segmentation is disabled, only the first value is used. The
    // remaining are don't cares.
    cm->y_dequant[0][0] = vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
    cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
    cm->uv_dequant[0][0] = vp10_dc_quant(qindex, cm->uv_dc_delta_q,
    cm->uv_dequant[0][1] = vp10_ac_quant(qindex, cm->uv_ac_delta_q,

static INTERP_FILTER read_interp_filter(struct vpx_read_bit_buffer *rb) {
  return vpx_rb_read_bit(rb) ? SWITCHABLE : vpx_rb_read_literal(rb, 2);

static void setup_display_size(VP10_COMMON *cm,
                               struct vpx_read_bit_buffer *rb) {
  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vpx_rb_read_bit(rb))
    vp10_read_frame_size(rb, &cm->display_width, &cm->display_height);

static void resize_mv_buffer(VP10_COMMON *cm) {
  vpx_free(cm->cur_frame->mvs);
  cm->cur_frame->mi_rows = cm->mi_rows;
  cm->cur_frame->mi_cols = cm->mi_cols;
  cm->cur_frame->mvs = (MV_REF *)vpx_calloc(cm->mi_rows * cm->mi_cols,
                                            sizeof(*cm->cur_frame->mvs));

static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
  if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Dimensions of %dx%d beyond allowed size of %dx%d.",
                       width, height, DECODE_WIDTH_LIMIT, DECODE_HEIGHT_LIMIT);
  if (cm->width != width || cm->height != height) {
    const int new_mi_rows =
        ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
    const int new_mi_cols =
        ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;

    // Allocations in vp10_alloc_context_buffers() depend on individual
    // dimensions as well as the overall size.
    if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
      if (vp10_alloc_context_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate context buffers");
      vp10_set_mb_mi(cm, width, height);
    vp10_init_context_buffers(cm);
    cm->height = height;

  if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
      cm->mi_cols > cm->cur_frame->mi_cols) {
    resize_mv_buffer(cm);

static void setup_frame_size(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
  BufferPool *const pool = cm->buffer_pool;
  vp10_read_frame_size(rb, &width, &height);
  resize_context_buffers(cm, width, height);
  setup_display_size(cm, rb);

  lock_buffer_pool(pool);
  if (vpx_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height,
          cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
          cm->use_highbitdepth,
          VP9_DEC_BORDER_IN_PIXELS,
          &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
    unlock_buffer_pool(pool);
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  unlock_buffer_pool(pool);

  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
  pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
  pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
  pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;

static INLINE int valid_ref_frame_img_fmt(vpx_bit_depth_t ref_bit_depth,
                                          int ref_xss, int ref_yss,
                                          vpx_bit_depth_t this_bit_depth,
                                          int this_xss, int this_yss) {
  return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
         ref_yss == this_yss;

static void setup_frame_size_with_refs(VP10_COMMON *cm,
                                       struct vpx_read_bit_buffer *rb) {
  int has_valid_ref_frame = 0;
  BufferPool *const pool = cm->buffer_pool;
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    if (vpx_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *const buf = cm->frame_refs[i].buf;
      width = buf->y_crop_width;
      height = buf->y_crop_height;

    vp10_read_frame_size(rb, &width, &height);

  if (width <= 0 || height <= 0)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid frame size");

  // Check to make sure at least one of frames that this frame references
  // has valid dimensions.
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    has_valid_ref_frame |= valid_ref_frame_size(ref_frame->buf->y_crop_width,
                                                ref_frame->buf->y_crop_height,
  if (!has_valid_ref_frame)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame has invalid size");
  for (i = 0; i < REFS_PER_FRAME; ++i) {
    RefBuffer *const ref_frame = &cm->frame_refs[i];
    if (!valid_ref_frame_img_fmt(
            ref_frame->buf->bit_depth,
            ref_frame->buf->subsampling_x,
            ref_frame->buf->subsampling_y,
      vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                         "Referenced frame has incompatible color format");

  resize_context_buffers(cm, width, height);
  setup_display_size(cm, rb);

  lock_buffer_pool(pool);
  if (vpx_realloc_frame_buffer(
          get_frame_new_buffer(cm), cm->width, cm->height,
          cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
          cm->use_highbitdepth,
          VP9_DEC_BORDER_IN_PIXELS,
          &pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer, pool->get_fb_cb,
    unlock_buffer_pool(pool);
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate frame buffer");
  unlock_buffer_pool(pool);

  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
  pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
  pool->frame_bufs[cm->new_fb_idx].buf.bit_depth = (unsigned int)cm->bit_depth;
  pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
  pool->frame_bufs[cm->new_fb_idx].buf.color_range = cm->color_range;
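
// Reads the tile layout: log2 of the tile-column count is coded as unary
// increments above the minimum allowed for this frame width, and the
// tile-row count uses at most two bits.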
static void setup_tile_info(VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
  int min_log2_tile_cols, max_log2_tile_cols, max_ones;
  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);

  max_ones = max_log2_tile_cols - min_log2_tile_cols;
  cm->log2_tile_cols = min_log2_tile_cols;
  while (max_ones-- && vpx_rb_read_bit(rb))
    cm->log2_tile_cols++;

  if (cm->log2_tile_cols > 6)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Invalid number of tile columns");

  cm->log2_tile_rows = vpx_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vpx_rb_read_bit(rb);

typedef struct TileBuffer {
  const uint8_t *data;
  int col;  // only used with multi-threaded decoding

// Reads the next tile returning its size and adjusting '*data' accordingly
// based on 'is_last'.
static void get_tile_buffer(const uint8_t *const data_end,
                            struct vpx_internal_error_info *error_info,
                            const uint8_t **data,
                            vpx_decrypt_cb decrypt_cb, void *decrypt_state,
    if (!read_is_valid(*data, 4, data_end))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile length");

      decrypt_cb(decrypt_state, *data, be_data, 4);
      size = mem_get_be32(be_data);
      size = mem_get_be32(*data);

    if (size > (size_t)(data_end - *data))
      vpx_internal_error(error_info, VPX_CODEC_CORRUPT_FRAME,
                         "Truncated packet or corrupt tile size");
    size = data_end - *data;

static void get_tile_buffers(VP10Decoder *pbi,
                             const uint8_t *data, const uint8_t *data_end,
                             int tile_cols, int tile_rows,
                             TileBuffer (*tile_buffers)[1 << 6]) {
  for (r = 0; r < tile_rows; ++r) {
    for (c = 0; c < tile_cols; ++c) {
      const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
      TileBuffer *const buf = &tile_buffers[r][c];
      get_tile_buffer(data_end, is_last, &pbi->common.error, &data,
                      pbi->decrypt_cb, pbi->decrypt_state, buf);

static const uint8_t *decode_tiles(VP10Decoder *pbi,
                                   const uint8_t *data,
                                   const uint8_t *data_end) {
  VP10_COMMON *const cm = &pbi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  TileBuffer tile_buffers[4][1 << 6];
  int tile_row, tile_col;
  TileData *tile_data = NULL;

  if (cm->lf.filter_level && !cm->skip_loop_filter &&
      pbi->lf_worker.data1 == NULL) {
    CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
                    vpx_memalign(32, sizeof(LFWorkerData)));
    pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker;
    if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
      vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                         "Loop filter thread creation failed");

  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    // Be sure to sync as we might be resuming after a failed frame decode.
    winterface->sync(&pbi->lf_worker);
    vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,

  assert(tile_rows <= 4);
  assert(tile_cols <= (1 << 6));

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(cm->above_context, 0,
         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_cols);

  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * aligned_cols);

  get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);

  if (pbi->tile_data == NULL ||
      (tile_cols * tile_rows) != pbi->total_tiles) {
    vpx_free(pbi->tile_data);
                    vpx_memalign(32, tile_cols * tile_rows * (sizeof(*pbi->tile_data))));
    pbi->total_tiles = tile_rows * tile_cols;

  // Load all tile information into tile_data.
  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      const TileBuffer *const buf = &tile_buffers[tile_row][tile_col];
      tile_data = pbi->tile_data + tile_cols * tile_row + tile_col;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      tile_data->xd.counts =
          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ?
      vp10_zero(tile_data->dqcoeff);
      vp10_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader, pbi->decrypt_cb,
                          pbi->decrypt_state);
      vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    vp10_tile_set_row(&tile, cm, tile_row);
    for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
         mi_row += MI_BLOCK_SIZE) {
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        const int col = pbi->inv_tile_order ?
            tile_cols - tile_col - 1 : tile_col;
        tile_data = pbi->tile_data + tile_cols * tile_row + col;
        vp10_tile_set_col(&tile, tile_data->cm, col);
        vp10_zero(tile_data->xd.left_context);
        vp10_zero(tile_data->xd.left_seg_context);
        for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
             mi_col += MI_BLOCK_SIZE) {
          decode_partition(pbi, &tile_data->xd, mi_row,
                           mi_col, &tile_data->bit_reader, BLOCK_64X64, 4);
        pbi->mb.corrupted |= tile_data->xd.corrupted;
        if (pbi->mb.corrupted)
          vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                             "Failed to decode tile data");

      // Loopfilter one row.
      if (cm->lf.filter_level && !cm->skip_loop_filter) {
        const int lf_start = mi_row - MI_BLOCK_SIZE;
        LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;

        // delay the loopfilter by 1 macroblock row.
        if (lf_start < 0) continue;

        // decoding has completed: finish up the loop filter in this thread.
        if (mi_row + MI_BLOCK_SIZE >= cm->mi_rows) continue;

        winterface->sync(&pbi->lf_worker);
        lf_data->start = lf_start;
        lf_data->stop = mi_row;
        if (pbi->max_threads > 1) {
          winterface->launch(&pbi->lf_worker);
          winterface->execute(&pbi->lf_worker);

      // After loopfiltering, the last 7 row pixels in each superblock row may
      // still be changed by the longest loopfilter of the next superblock
      if (cm->frame_parallel_decode)
        vp10_frameworker_broadcast(pbi->cur_buf,
                                   mi_row << MI_BLOCK_SIZE_LOG2);

  // Loopfilter remaining rows in the frame.
  if (cm->lf.filter_level && !cm->skip_loop_filter) {
    LFWorkerData *const lf_data = (LFWorkerData*)pbi->lf_worker.data1;
    winterface->sync(&pbi->lf_worker);
    lf_data->start = lf_data->stop;
    lf_data->stop = cm->mi_rows;
    winterface->execute(&pbi->lf_worker);

  // Get last tile data.
  tile_data = pbi->tile_data + tile_cols * tile_rows - 1;

  if (cm->frame_parallel_decode)
    vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX);
  return vpx_reader_find_end(&tile_data->bit_reader);

static int tile_worker_hook(TileWorkerData *const tile_data,
                            const TileInfo *const tile) {
  if (setjmp(tile_data->error_info.jmp)) {
    tile_data->error_info.setjmp = 0;
    tile_data->xd.corrupted = 1;

  tile_data->error_info.setjmp = 1;
  tile_data->xd.error_info = &tile_data->error_info;

  for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    vp10_zero(tile_data->xd.left_context);
    vp10_zero(tile_data->xd.left_seg_context);
    for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
         mi_col += MI_BLOCK_SIZE) {
      decode_partition(tile_data->pbi, &tile_data->xd,
                       mi_row, mi_col, &tile_data->bit_reader,
  return !tile_data->xd.corrupted;

// sorts in descending order
static int compare_tile_buffers(const void *a, const void *b) {
  const TileBuffer *const buf1 = (const TileBuffer*)a;
  const TileBuffer *const buf2 = (const TileBuffer*)b;
  return (int)(buf2->size - buf1->size);
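
// Multi-threaded tile decoding. Tiles are sorted by size (a rough proxy
// for decoding cost) and distributed so that the largest tile of each
// group is decoded by the calling thread, which reduces the time spent
// waiting on workers.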
static const uint8_t *decode_tiles_mt(VP10Decoder *pbi,
                                      const uint8_t *data,
                                      const uint8_t *data_end) {
  VP10_COMMON *const cm = &pbi->common;
  const VPxWorkerInterface *const winterface = vpx_get_worker_interface();
  const uint8_t *bit_reader_end = NULL;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  const int num_workers = VPXMIN(pbi->max_threads & ~1, tile_cols);
  TileBuffer tile_buffers[1][1 << 6];
  int final_worker = -1;

  assert(tile_cols <= (1 << 6));
  assert(tile_rows == 1);

  // TODO(jzern): See if we can remove the restriction of passing in max
  // threads to the decoder.
  if (pbi->num_tile_workers == 0) {
    const int num_threads = pbi->max_threads & ~1;
    CHECK_MEM_ERROR(cm, pbi->tile_workers,
                    vpx_malloc(num_threads * sizeof(*pbi->tile_workers)));
    // Ensure tile data offsets will be properly aligned. This may fail on
    // platforms without DECLARE_ALIGNED().
    assert((sizeof(*pbi->tile_worker_data) % 16) == 0);
    CHECK_MEM_ERROR(cm, pbi->tile_worker_data,
                    vpx_memalign(32, num_threads *
                                 sizeof(*pbi->tile_worker_data)));
    CHECK_MEM_ERROR(cm, pbi->tile_worker_info,
                    vpx_malloc(num_threads * sizeof(*pbi->tile_worker_info)));
    for (i = 0; i < num_threads; ++i) {
      VPxWorker *const worker = &pbi->tile_workers[i];
      ++pbi->num_tile_workers;

      winterface->init(worker);
      if (i < num_threads - 1 && !winterface->reset(worker)) {
        vpx_internal_error(&cm->error, VPX_CODEC_ERROR,
                           "Tile decoder thread creation failed");

  // Reset tile decoding hook
  for (n = 0; n < num_workers; ++n) {
    VPxWorker *const worker = &pbi->tile_workers[n];
    winterface->sync(worker);
    worker->hook = (VPxWorkerHook)tile_worker_hook;
    worker->data1 = &pbi->tile_worker_data[n];
    worker->data2 = &pbi->tile_worker_info[n];

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(cm->above_context, 0,
         sizeof(*cm->above_context) * MAX_MB_PLANE * 2 * aligned_mi_cols);
  memset(cm->above_seg_context, 0,
         sizeof(*cm->above_seg_context) * aligned_mi_cols);

  // Load tile data into tile_buffers
  get_tile_buffers(pbi, data, data_end, tile_cols, tile_rows, tile_buffers);

  // Sort the buffers based on size in descending order.
  qsort(tile_buffers[0], tile_cols, sizeof(tile_buffers[0][0]),
        compare_tile_buffers);

  // Rearrange the tile buffers such that per-tile group the largest, and
  // presumably the most difficult, tile will be decoded in the main thread.
  // This should help minimize the number of instances where the main thread is
  // waiting for a worker to complete.
    int group_start = 0;
    while (group_start < tile_cols) {
      const TileBuffer largest = tile_buffers[0][group_start];
      const int group_end = VPXMIN(group_start + num_workers, tile_cols) - 1;
      memmove(tile_buffers[0] + group_start, tile_buffers[0] + group_start + 1,
              (group_end - group_start) * sizeof(tile_buffers[0][0]));
      tile_buffers[0][group_end] = largest;
      group_start = group_end + 1;

  // Initialize thread frame counts.
  if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
    for (i = 0; i < num_workers; ++i) {
      TileWorkerData *const tile_data =
          (TileWorkerData*)pbi->tile_workers[i].data1;
      vp10_zero(tile_data->counts);

  while (n < tile_cols) {
    for (i = 0; i < num_workers && n < tile_cols; ++i) {
      VPxWorker *const worker = &pbi->tile_workers[i];
      TileWorkerData *const tile_data = (TileWorkerData*)worker->data1;
      TileInfo *const tile = (TileInfo*)worker->data2;
      TileBuffer *const buf = &tile_buffers[0][n];

      tile_data->pbi = pbi;
      tile_data->xd = pbi->mb;
      tile_data->xd.corrupted = 0;
      tile_data->xd.counts =
          cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD ?
              &tile_data->counts : NULL;
      vp10_zero(tile_data->dqcoeff);
      vp10_tile_init(tile, cm, 0, buf->col);
      vp10_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
      setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                          &tile_data->bit_reader, pbi->decrypt_cb,
                          pbi->decrypt_state);
      vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);

      worker->had_error = 0;
      if (i == num_workers - 1 || n == tile_cols - 1) {
        winterface->execute(worker);
        winterface->launch(worker);

      if (buf->col == tile_cols - 1) {

    for (; i > 0; --i) {
      VPxWorker *const worker = &pbi->tile_workers[i - 1];
      // TODO(jzern): The tile may have specific error data associated with
      // its vpx_internal_error_info which could be propagated to the main info
      // in cm. Additionally once the threads have been synced and an error is
      // detected, there's no point in continuing to decode tiles.
      pbi->mb.corrupted |= !winterface->sync(worker);
    if (final_worker > -1) {
      TileWorkerData *const tile_data =
          (TileWorkerData*)pbi->tile_workers[final_worker].data1;
      bit_reader_end = vpx_reader_find_end(&tile_data->bit_reader);

    // Accumulate thread frame counts.
    if (n >= tile_cols &&
        cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
      for (i = 0; i < num_workers; ++i) {
        TileWorkerData *const tile_data =
            (TileWorkerData*)pbi->tile_workers[i].data1;
        vp10_accumulate_frame_counts(cm, &tile_data->counts, 1);

  return bit_reader_end;

static void error_handler(void *data) {
  VP10_COMMON *const cm = (VP10_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");

static void read_bitdepth_colorspace_sampling(
    VP10_COMMON *cm, struct vpx_read_bit_buffer *rb) {
  if (cm->profile >= PROFILE_2) {
    cm->bit_depth = vpx_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
#if CONFIG_VP9_HIGHBITDEPTH
    cm->use_highbitdepth = 1;
    cm->bit_depth = VPX_BITS_8;
#if CONFIG_VP9_HIGHBITDEPTH
    cm->use_highbitdepth = 0;
  cm->color_space = vpx_rb_read_literal(rb, 3);
  if (cm->color_space != VPX_CS_SRGB) {
    // [16,235] (including xvycc) vs [0,255] range
    cm->color_range = vpx_rb_read_bit(rb);
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      cm->subsampling_x = vpx_rb_read_bit(rb);
      cm->subsampling_y = vpx_rb_read_bit(rb);
      if (cm->subsampling_x == 1 && cm->subsampling_y == 1)
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "4:2:0 color not supported in profile 1 or 3");
      if (vpx_rb_read_bit(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Reserved bit set");
      cm->subsampling_y = cm->subsampling_x = 1;
    if (cm->profile == PROFILE_1 || cm->profile == PROFILE_3) {
      // Note if colorspace is SRGB then 4:4:4 chroma sampling is assumed.
      // 4:2:2 or 4:4:0 chroma sampling is not allowed.
      cm->subsampling_y = cm->subsampling_x = 0;
      if (vpx_rb_read_bit(rb))
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "Reserved bit set");
      vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                         "4:4:4 color not supported in profile 0 or 2");
1800 static size_t read_uncompressed_header(VP10Decoder *pbi,
1801 struct vpx_read_bit_buffer *rb) {
1802 VP10_COMMON *const cm = &pbi->common;
1803 #if CONFIG_MISC_FIXES
1804 MACROBLOCKD *const xd = &pbi->mb;
1806 BufferPool *const pool = cm->buffer_pool;
1807 RefCntBuffer *const frame_bufs = pool->frame_bufs;
1808 int i, mask, ref_index = 0;
1811 cm->last_frame_type = cm->frame_type;
1812 cm->last_intra_only = cm->intra_only;
1814 if (vpx_rb_read_literal(rb, 2) != VP9_FRAME_MARKER)
1815 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1816 "Invalid frame marker");
1818 cm->profile = vp10_read_profile(rb);
1819 #if CONFIG_VP9_HIGHBITDEPTH
1820 if (cm->profile >= MAX_PROFILES)
1821 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1822 "Unsupported bitstream profile");
1824 if (cm->profile >= PROFILE_2)
1825 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1826 "Unsupported bitstream profile");
1829 cm->show_existing_frame = vpx_rb_read_bit(rb);
1830 if (cm->show_existing_frame) {
1831 // Show an existing frame directly.
1832 const int frame_to_show = cm->ref_frame_map[vpx_rb_read_literal(rb, 3)];
1833 lock_buffer_pool(pool);
1834 if (frame_to_show < 0 || frame_bufs[frame_to_show].ref_count < 1) {
1835 unlock_buffer_pool(pool);
1836 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1837 "Buffer %d does not contain a decoded frame",
1841 ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
1842 unlock_buffer_pool(pool);
1843 pbi->refresh_frame_flags = 0;
1844 cm->lf.filter_level = 0;
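// A directly shown frame refreshes no reference slots and is never loop
// filtered; the zero header size reported to the caller is what
// vp10_decode_frame() uses to detect this case.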
1847 if (cm->frame_parallel_decode) {
1848 for (i = 0; i < REF_FRAMES; ++i)
1849 cm->next_ref_frame_map[i] = cm->ref_frame_map[i];
1854 cm->frame_type = (FRAME_TYPE) vpx_rb_read_bit(rb);
1855 cm->show_frame = vpx_rb_read_bit(rb);
1856 cm->error_resilient_mode = vpx_rb_read_bit(rb);
1858 if (cm->frame_type == KEY_FRAME) {
1859 if (!vp10_read_sync_code(rb))
1860 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1861 "Invalid frame sync code");
1863 read_bitdepth_colorspace_sampling(cm, rb);
1864 pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
1866 for (i = 0; i < REFS_PER_FRAME; ++i) {
1867 cm->frame_refs[i].idx = INVALID_IDX;
1868 cm->frame_refs[i].buf = NULL;
1871 setup_frame_size(cm, rb);
1872 if (pbi->need_resync) {
1873 memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
1874 pbi->need_resync = 0;
1877 cm->intra_only = cm->show_frame ? 0 : vpx_rb_read_bit(rb);
1879 if (cm->error_resilient_mode) {
1880 cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
1882 #if CONFIG_MISC_FIXES
1883 if (cm->intra_only) {
1884 cm->reset_frame_context =
1885 vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL
1886 : RESET_FRAME_CONTEXT_CURRENT;
1888 cm->reset_frame_context =
1889 vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_CURRENT
1890 : RESET_FRAME_CONTEXT_NONE;
1891 if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT)
1892 cm->reset_frame_context =
1893 vpx_rb_read_bit(rb) ? RESET_FRAME_CONTEXT_ALL
1894 : RESET_FRAME_CONTEXT_CURRENT;
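// In the non-CONFIG_MISC_FIXES path the reset mode is coded as a raw 2-bit
// value and mapped through the table below: 0 and 1 select NONE, 2 selects
// CURRENT and 3 selects ALL.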
1897 static const RESET_FRAME_CONTEXT_MODE reset_frame_context_conv_tbl[4] = {
1898 RESET_FRAME_CONTEXT_NONE, RESET_FRAME_CONTEXT_NONE,
1899 RESET_FRAME_CONTEXT_CURRENT, RESET_FRAME_CONTEXT_ALL
1902 cm->reset_frame_context =
1903 reset_frame_context_conv_tbl[vpx_rb_read_literal(rb, 2)];
1907 if (cm->intra_only) {
1908 if (!vp10_read_sync_code(rb))
1909 vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
1910 "Invalid frame sync code");
1911 if (cm->profile > PROFILE_0) {
1912 read_bitdepth_colorspace_sampling(cm, rb);
1914 // NOTE: The intra-only frame header does not include the specification
1915 // of either the color format or color sub-sampling in profile 0. VP9
1916 // specifies that the default color format should be YUV 4:2:0 in this
1917 // case (normative).
1918 cm->color_space = VPX_CS_BT_601;
1919 cm->color_range = 0;
1920 cm->subsampling_y = cm->subsampling_x = 1;
1921 cm->bit_depth = VPX_BITS_8;
1922 #if CONFIG_VP9_HIGHBITDEPTH
1923 cm->use_highbitdepth = 0;
1927 pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
1928 setup_frame_size(cm, rb);
1929 if (pbi->need_resync) {
1930 memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
1931 pbi->need_resync = 0;
1933 } else if (pbi->need_resync != 1) { /* Skip if need resync */
1934 pbi->refresh_frame_flags = vpx_rb_read_literal(rb, REF_FRAMES);
1935 for (i = 0; i < REFS_PER_FRAME; ++i) {
1936 const int ref = vpx_rb_read_literal(rb, REF_FRAMES_LOG2);
1937 const int idx = cm->ref_frame_map[ref];
1938 RefBuffer *const ref_frame = &cm->frame_refs[i];
1939 ref_frame->idx = idx;
1940 ref_frame->buf = &frame_bufs[idx].buf;
1941 cm->ref_frame_sign_bias[LAST_FRAME + i] = vpx_rb_read_bit(rb);
1944 setup_frame_size_with_refs(cm, rb);
1946 cm->allow_high_precision_mv = vpx_rb_read_bit(rb);
1947 cm->interp_filter = read_interp_filter(rb);
1949 for (i = 0; i < REFS_PER_FRAME; ++i) {
1950 RefBuffer *const ref_buf = &cm->frame_refs[i];
1951 #if CONFIG_VP9_HIGHBITDEPTH
1952 vp10_setup_scale_factors_for_frame(&ref_buf->sf,
1953 ref_buf->buf->y_crop_width,
1954 ref_buf->buf->y_crop_height,
1955 cm->width, cm->height,
1956 cm->use_highbitdepth);
1958 vp10_setup_scale_factors_for_frame(&ref_buf->sf,
1959 ref_buf->buf->y_crop_width,
1960 ref_buf->buf->y_crop_height,
1961 cm->width, cm->height);
1966 #if CONFIG_VP9_HIGHBITDEPTH
1967 get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
1969 get_frame_new_buffer(cm)->color_space = cm->color_space;
1970 get_frame_new_buffer(cm)->color_range = cm->color_range;
1972 if (pbi->need_resync) {
1973 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
1974 "Keyframe / intra-only frame required to reset decoder"
1978 if (!cm->error_resilient_mode) {
1979 cm->refresh_frame_context =
1980 vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
1981 : REFRESH_FRAME_CONTEXT_OFF;
1982 if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
1983 cm->refresh_frame_context =
1984 vpx_rb_read_bit(rb) ? REFRESH_FRAME_CONTEXT_FORWARD
1985 : REFRESH_FRAME_CONTEXT_BACKWARD;
1986 #if !CONFIG_MISC_FIXES
1988 vpx_rb_read_bit(rb); // parallel decoding mode flag
1992 cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF;
1995 // This flag will be overridden by the call to vp10_setup_past_independence
1996 // below, forcing the use of context 0 for those frame types.
1997 cm->frame_context_idx = vpx_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
1999 // Generate next_ref_frame_map.
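// Each set bit of refresh_frame_flags points the corresponding reference
// slot at the new frame buffer; cleared bits keep the previous mapping.
// A reference count is taken on every buffer that stays mapped so it
// remains valid while this thread holds it (see pbi->hold_ref_buf below).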
2000 lock_buffer_pool(pool);
2001 for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
2003 cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
2004 ++frame_bufs[cm->new_fb_idx].ref_count;
2006 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
2008 // Current thread holds the reference frame.
2009 if (cm->ref_frame_map[ref_index] >= 0)
2010 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
2014 for (; ref_index < REF_FRAMES; ++ref_index) {
2015 cm->next_ref_frame_map[ref_index] = cm->ref_frame_map[ref_index];
2016 // Current thread holds the reference frame.
2017 if (cm->ref_frame_map[ref_index] >= 0)
2018 ++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
2020 unlock_buffer_pool(pool);
2021 pbi->hold_ref_buf = 1;
2023 if (frame_is_intra_only(cm) || cm->error_resilient_mode)
2024 vp10_setup_past_independence(cm);
2026 setup_loopfilter(&cm->lf, rb);
2027 setup_quantization(cm, &pbi->mb, rb);
2028 setup_segmentation(cm, rb);
2029 setup_segmentation_dequant(cm);
2030 #if CONFIG_MISC_FIXES
2031 cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(rb);
2034 setup_tile_info(cm, rb);
2035 sz = vpx_rb_read_literal(rb, 16);
2038 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
2039 "Invalid header size");
2044 static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
2045 size_t partition_size) {
2046 VP10_COMMON *const cm = &pbi->common;
2047 #if !CONFIG_MISC_FIXES
2048 MACROBLOCKD *const xd = &pbi->mb;
2050 FRAME_CONTEXT *const fc = cm->fc;
2054 if (vpx_reader_init(&r, data, partition_size, pbi->decrypt_cb,
2055 pbi->decrypt_state))
2056 vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
2057 "Failed to allocate bool decoder 0");
2059 #if !CONFIG_MISC_FIXES
2060 cm->tx_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
2062 if (cm->tx_mode == TX_MODE_SELECT)
2063 read_tx_mode_probs(&fc->tx_probs, &r);
2064 read_coef_probs(fc, cm->tx_mode, &r);
2066 for (k = 0; k < SKIP_CONTEXTS; ++k)
2067 vp10_diff_update_prob(&r, &fc->skip_probs[k]);
2069 if (!frame_is_intra_only(cm)) {
2070 nmv_context *const nmvc = &fc->nmvc;
2073 read_inter_mode_probs(fc, &r);
2075 if (cm->interp_filter == SWITCHABLE)
2076 read_switchable_interp_probs(fc, &r);
2078 for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
2079 vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
2081 cm->reference_mode = read_frame_reference_mode(cm, &r);
2082 if (cm->reference_mode != SINGLE_REFERENCE)
2083 setup_compound_reference_mode(cm);
2084 read_frame_reference_mode_probs(cm, &r);
2086 for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
2087 for (i = 0; i < INTRA_MODES - 1; ++i)
2088 vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
2090 for (j = 0; j < PARTITION_CONTEXTS; ++j)
2091 for (i = 0; i < PARTITION_TYPES - 1; ++i)
2092 vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
2094 read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
2097 return vpx_reader_has_error(&r);
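// Two forms of debug_check_frame_counts follow: a no-op macro, and a full
// checking function that asserts every symbol count is still zero when the
// frame context is not being adapted from counts; the build configuration
// selects which form is used.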
2101 #define debug_check_frame_counts(cm) (void)0
2103 // Counts should only be incremented when frame_parallel_decoding_mode and
2104 // error_resilient_mode are disabled.
2105 static void debug_check_frame_counts(const VP10_COMMON *const cm) {
2106 FRAME_COUNTS zero_counts;
2107 vp10_zero(zero_counts);
2108 assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
2109 cm->error_resilient_mode);
2110 assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
2111 sizeof(cm->counts.y_mode)));
2112 assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
2113 sizeof(cm->counts.uv_mode)));
2114 assert(!memcmp(cm->counts.partition, zero_counts.partition,
2115 sizeof(cm->counts.partition)));
2116 assert(!memcmp(cm->counts.coef, zero_counts.coef,
2117 sizeof(cm->counts.coef)));
2118 assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
2119 sizeof(cm->counts.eob_branch)));
2120 assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
2121 sizeof(cm->counts.switchable_interp)));
2122 assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode,
2123 sizeof(cm->counts.inter_mode)));
2124 assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
2125 sizeof(cm->counts.intra_inter)));
2126 assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
2127 sizeof(cm->counts.comp_inter)));
2128 assert(!memcmp(cm->counts.single_ref, zero_counts.single_ref,
2129 sizeof(cm->counts.single_ref)));
2130 assert(!memcmp(cm->counts.comp_ref, zero_counts.comp_ref,
2131 sizeof(cm->counts.comp_ref)));
2132 assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
2133 assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
2134 assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
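// Sets up the raw bit buffer over the frame header. If a decrypt callback
// is installed, up to MAX_VP9_HEADER_SIZE bytes are first decrypted into
// the caller-provided clear_data scratch buffer and the reader is pointed
// at that copy instead of the coded data.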
2138 static struct vpx_read_bit_buffer *init_read_bit_buffer(
2140 struct vpx_read_bit_buffer *rb,
2141 const uint8_t *data,
2142 const uint8_t *data_end,
2143 uint8_t clear_data[MAX_VP9_HEADER_SIZE]) {
2145 rb->error_handler = error_handler;
2146 rb->error_handler_data = &pbi->common;
2147 if (pbi->decrypt_cb) {
2148 const int n = (int)VPXMIN(MAX_VP9_HEADER_SIZE, data_end - data);
2149 pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
2150 rb->bit_buffer = clear_data;
2151 rb->bit_buffer_end = clear_data + n;
2153 rb->bit_buffer = data;
2154 rb->bit_buffer_end = data_end;
2159 //------------------------------------------------------------------------------
2161 int vp10_read_sync_code(struct vpx_read_bit_buffer *const rb) {
2162 return vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 &&
2163 vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 &&
2164 vpx_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
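// Frame dimensions are coded minus one, so each 16-bit field covers the
// range 1..65536.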
2167 void vp10_read_frame_size(struct vpx_read_bit_buffer *rb,
2168 int *width, int *height) {
2169 *width = vpx_rb_read_literal(rb, 16) + 1;
2170 *height = vpx_rb_read_literal(rb, 16) + 1;
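// The profile is coded low bit first. Two-bit values 0-2 map directly to
// profiles 0-2; when both bits are set, the extra bit distinguishes
// profile 3 from a reserved value that read_uncompressed_header() rejects
// as an unsupported profile.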
2173 BITSTREAM_PROFILE vp10_read_profile(struct vpx_read_bit_buffer *rb) {
2174 int profile = vpx_rb_read_bit(rb);
2175 profile |= vpx_rb_read_bit(rb) << 1;
2177 profile += vpx_rb_read_bit(rb);
2178 return (BITSTREAM_PROFILE) profile;
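// Top-level frame decode: parse the uncompressed header, read the
// compressed header, decode the tile data (multi-threaded when several
// tile columns and worker threads are available), apply the loop filter,
// and finally adapt probabilities and update the frame context when
// backward updates are enabled.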
2181 void vp10_decode_frame(VP10Decoder *pbi,
2182 const uint8_t *data, const uint8_t *data_end,
2183 const uint8_t **p_data_end) {
2184 VP10_COMMON *const cm = &pbi->common;
2185 MACROBLOCKD *const xd = &pbi->mb;
2186 struct vpx_read_bit_buffer rb;
2187 int context_updated = 0;
2188 uint8_t clear_data[MAX_VP9_HEADER_SIZE];
2189 const size_t first_partition_size = read_uncompressed_header(pbi,
2190 init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
2191 const int tile_rows = 1 << cm->log2_tile_rows;
2192 const int tile_cols = 1 << cm->log2_tile_cols;
2193 YV12_BUFFER_CONFIG *const new_fb = get_frame_new_buffer(cm);
2194 xd->cur_buf = new_fb;
2196 if (!first_partition_size) {
2197 // Showing an existing frame directly; there is no frame data to decode.
2198 *p_data_end = data + (cm->profile <= PROFILE_2 ? 1 : 2);
2202 data += vpx_rb_bytes_read(&rb);
2203 if (!read_is_valid(data, first_partition_size, data_end))
2204 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
2205 "Truncated packet or corrupt header length");
2207 cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
2208 cm->width == cm->last_width &&
2209 cm->height == cm->last_height &&
2210 !cm->last_intra_only &&
2211 cm->last_show_frame &&
2212 (cm->last_frame_type != KEY_FRAME);
2214 vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
2216 *cm->fc = cm->frame_contexts[cm->frame_context_idx];
2217 if (!cm->fc->initialized)
2218 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
2219 "Uninitialized entropy context.");
2221 vp10_zero(cm->counts);
2224 new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
2225 if (new_fb->corrupted)
2226 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
2227 "Decode failed. Frame data header is corrupted.");
2229 if (cm->lf.filter_level && !cm->skip_loop_filter) {
2230 vp10_loop_filter_frame_init(cm, cm->lf.filter_level);
2233 // If encoded in frame parallel mode, frame context is ready after decoding
2234 // the frame header.
2235 if (cm->frame_parallel_decode &&
2236 cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
2237 VPxWorker *const worker = pbi->frame_worker_owner;
2238 FrameWorkerData *const frame_worker_data = worker->data1;
2239 if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
2240 context_updated = 1;
2241 cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
2243 vp10_frameworker_lock_stats(worker);
2244 pbi->cur_buf->row = -1;
2245 pbi->cur_buf->col = -1;
2246 frame_worker_data->frame_context_ready = 1;
2247 // Signal the main thread that context is ready.
2248 vp10_frameworker_signal_stats(worker);
2249 vp10_frameworker_unlock_stats(worker);
2252 if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
2253 // Multi-threaded tile decoder
2254 *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
2255 if (!xd->corrupted) {
2256 if (!cm->skip_loop_filter) {
2257 // If multiple threads are used to decode tiles, then we use those
2258 // threads to do parallel loopfiltering.
2259 vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
2260 cm->lf.filter_level, 0, 0, pbi->tile_workers,
2261 pbi->num_tile_workers, &pbi->lf_row_sync);
2264 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
2265 "Decode failed. Frame data is corrupted.");
2269 *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
2272 if (!xd->corrupted) {
2273 if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
2274 vp10_adapt_coef_probs(cm);
2276 if (!frame_is_intra_only(cm)) {
2277 vp10_adapt_mode_probs(cm);
2278 vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
2281 debug_check_frame_counts(cm);
2284 vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
2285 "Decode failed. Frame data is corrupted.");
2288 // Non-frame-parallel decode: update the frame context here.
2289 if (cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_OFF &&
2291 cm->frame_contexts[cm->frame_context_idx] = *cm->fc;