int dst;
int dst_stride;
- int eob;
+ char *eob;
union b_mode_info bmi;
} BLOCKD;
void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b,
int pitch)
{
- if (b->eob > 1)
+ if (*b->eob > 1)
{
IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->predictor, pitch,
*(b->base_dst) + b->dst, b->dst_stride);
}
}
-
-
-
{
x->block[r].qcoeff = x->qcoeff + r * 16;
x->block[r].dqcoeff = x->dqcoeff + r * 16;
+ x->block[r].eob = x->eobs + r;
}
}
int postprocess;
int max_threads;
int error_concealment;
- int input_partition;
+ int input_fragments;
} VP8D_CONFIG;
typedef enum
{
}
}
+ mbmi->need_to_clamp_mvs = 0;
do /* for each subset j */
{
int_mv leftmv, abovemv;
blockmv = sub_mv_ref(bc, prob, abovemv, leftmv, best_mv, mvc);
- mbmi->need_to_clamp_mvs = vp8_check_mv_bounds(&blockmv,
+ mbmi->need_to_clamp_mvs |= vp8_check_mv_bounds(&blockmv,
mb_to_left_edge,
mb_to_right_edge,
mb_to_top_edge,
return size;
}
-static void setup_token_decoder_partition_input(VP8D_COMP *pbi)
+static int read_is_valid(const unsigned char *start,
+ size_t len,
+ const unsigned char *end)
{
- vp8_reader *bool_decoder = &pbi->bc2;
- int part_idx = 1;
- int num_token_partitions;
+ return (start + len > start && start + len <= end);
+}
- TOKEN_PARTITION multi_token_partition =
- (TOKEN_PARTITION)vp8_read_literal(&pbi->bc, 2);
- if (!vp8dx_bool_error(&pbi->bc))
- pbi->common.multi_token_partition = multi_token_partition;
- num_token_partitions = 1 << pbi->common.multi_token_partition;
- if (num_token_partitions + 1 > pbi->num_partitions)
- vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME,
- "Partitions missing");
- assert(vp8dx_bool_error(&pbi->bc) ||
- multi_token_partition == pbi->common.multi_token_partition);
- if (pbi->num_partitions > 2)
+static unsigned int read_available_partition_size(
+ VP8D_COMP *pbi,
+ const unsigned char *token_part_sizes,
+ const unsigned char *fragment_start,
+ const unsigned char *first_fragment_end,
+ const unsigned char *fragment_end,
+ int i,
+ int num_part)
+{
+ VP8_COMMON* pc = &pbi->common;
+ const unsigned char *partition_size_ptr = token_part_sizes + i * 3;
+ unsigned int partition_size;
+ ptrdiff_t bytes_left = fragment_end - fragment_start;
+ /* Calculate the length of this partition. The last partition
+ * size is implicit. If the partition size can't be read, then
+ * either use the remaining data in the buffer (for EC mode)
+ * or throw an error.
+ */
+ if (i < num_part - 1)
{
- CHECK_MEM_ERROR(pbi->mbc, vpx_malloc((pbi->num_partitions - 1) *
- sizeof(vp8_reader)));
- bool_decoder = pbi->mbc;
+ if (read_is_valid(partition_size_ptr, 3, first_fragment_end))
+ partition_size = read_partition_size(partition_size_ptr);
+ else if (pbi->ec_active)
+ partition_size = bytes_left;
+ else
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated partition size data");
}
+ else
+ partition_size = bytes_left;
- for (; part_idx < pbi->num_partitions; ++part_idx)
+ /* Validate the calculated partition length. If the buffer
+ * described by the partition can't be fully read, then restrict
+ * it to the portion that can be (for EC mode) or throw an error.
+ */
+ if (!read_is_valid(fragment_start, partition_size, fragment_end))
{
- if (vp8dx_start_decode(bool_decoder,
- pbi->partitions[part_idx],
- pbi->partition_sizes[part_idx]))
- vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate bool decoder %d",
- part_idx);
-
- bool_decoder++;
+ if (pbi->ec_active)
+ partition_size = bytes_left;
+ else
+ vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt partition "
+ "%d length", i + 1);
}
-
-#if CONFIG_MULTITHREAD
- /* Clamp number of decoder threads */
- if (pbi->decoding_thread_count > pbi->num_partitions - 1)
- pbi->decoding_thread_count = pbi->num_partitions - 1;
-#endif
-}
-
-
-static int read_is_valid(const unsigned char *start,
- size_t len,
- const unsigned char *end)
-{
- return (start + len > start && start + len <= end);
+ return partition_size;
}
static void setup_token_decoder(VP8D_COMP *pbi,
- const unsigned char *cx_data)
+ const unsigned char* token_part_sizes)
{
- int num_part;
- int i;
- VP8_COMMON *pc = &pbi->common;
- const unsigned char *user_data_end = pbi->Source + pbi->source_sz;
- vp8_reader *bool_decoder;
- const unsigned char *partition;
+ vp8_reader *bool_decoder = &pbi->bc2;
+ int fragment_idx, partition_idx;
+ int num_token_partitions;
+ const unsigned char *first_fragment_end = pbi->fragments[0] +
+ pbi->fragment_sizes[0];
- /* Parse number of token partitions to use */
- const TOKEN_PARTITION multi_token_partition =
+ TOKEN_PARTITION multi_token_partition =
(TOKEN_PARTITION)vp8_read_literal(&pbi->bc, 2);
- /* Only update the multi_token_partition field if we are sure the value
- * is correct. */
- if (!pbi->ec_active || !vp8dx_bool_error(&pbi->bc))
- pc->multi_token_partition = multi_token_partition;
-
- num_part = 1 << pc->multi_token_partition;
-
- /* Set up pointers to the first partition */
- partition = cx_data;
- bool_decoder = &pbi->bc2;
-
- if (num_part > 1)
+ if (!vp8dx_bool_error(&pbi->bc))
+ pbi->common.multi_token_partition = multi_token_partition;
+ num_token_partitions = 1 << pbi->common.multi_token_partition;
+ if (num_token_partitions > 1)
{
- CHECK_MEM_ERROR(pbi->mbc, vpx_malloc(num_part * sizeof(vp8_reader)));
+ CHECK_MEM_ERROR(pbi->mbc, vpx_malloc(num_token_partitions *
+ sizeof(vp8_reader)));
bool_decoder = pbi->mbc;
- partition += 3 * (num_part - 1);
}
- for (i = 0; i < num_part; i++)
+ /* Check for partitions within the fragments and unpack the fragments
+ * so that each fragment pointer points to its corresponding partition. */
+ for (fragment_idx = 0; fragment_idx < pbi->num_fragments; ++fragment_idx)
{
- const unsigned char *partition_size_ptr = cx_data + i * 3;
- ptrdiff_t partition_size, bytes_left;
-
- bytes_left = user_data_end - partition;
-
- /* Calculate the length of this partition. The last partition
- * size is implicit. If the partition size can't be read, then
- * either use the remaining data in the buffer (for EC mode)
- * or throw an error.
- */
- if (i < num_part - 1)
+ unsigned int fragment_size = pbi->fragment_sizes[fragment_idx];
+ const unsigned char *fragment_end = pbi->fragments[fragment_idx] +
+ fragment_size;
+ /* Special case for handling the first partition since we have already
+ * read its size. */
+ if (fragment_idx == 0)
{
- if (read_is_valid(partition_size_ptr, 3, user_data_end))
- partition_size = read_partition_size(partition_size_ptr);
- else if (pbi->ec_active)
- partition_size = bytes_left;
- else
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated partition size data");
+ /* Size of first partition + token partition sizes element */
+ ptrdiff_t ext_first_part_size = token_part_sizes -
+ pbi->fragments[0] + 3 * (num_token_partitions - 1);
+ fragment_size -= ext_first_part_size;
+ if (fragment_size > 0)
+ {
+ pbi->fragment_sizes[0] = ext_first_part_size;
+ /* The fragment contains an additional partition. Move to
+ * next. */
+ fragment_idx++;
+ pbi->fragments[fragment_idx] = pbi->fragments[0] +
+ pbi->fragment_sizes[0];
+ }
}
- else
- partition_size = bytes_left;
-
- /* Validate the calculated partition length. If the buffer
- * described by the partition can't be fully read, then restrict
- * it to the portion that can be (for EC mode) or throw an error.
- */
- if (!read_is_valid(partition, partition_size, user_data_end))
+ /* Split the chunk into partitions read from the bitstream */
+ while (fragment_size > 0)
{
- if (pbi->ec_active)
- partition_size = bytes_left;
- else
- vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
- "Truncated packet or corrupt partition "
- "%d length", i + 1);
+ ptrdiff_t partition_size = read_available_partition_size(
+ pbi,
+ token_part_sizes,
+ pbi->fragments[fragment_idx],
+ first_fragment_end,
+ fragment_end,
+ fragment_idx - 1,
+ num_token_partitions);
+ pbi->fragment_sizes[fragment_idx] = partition_size;
+ fragment_size -= partition_size;
+ assert(fragment_idx <= num_token_partitions);
+ if (fragment_size > 0)
+ {
+ /* The fragment contains an additional partition.
+ * Move to next. */
+ fragment_idx++;
+ pbi->fragments[fragment_idx] =
+ pbi->fragments[fragment_idx - 1] + partition_size;
+ }
}
+ }
+
+ pbi->num_fragments = num_token_partitions + 1;
- if (vp8dx_start_decode(bool_decoder, partition, partition_size))
- vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
- "Failed to allocate bool decoder %d", i + 1);
+ for (partition_idx = 1; partition_idx < pbi->num_fragments; ++partition_idx)
+ {
+ if (vp8dx_start_decode(bool_decoder,
+ pbi->fragments[partition_idx],
+ pbi->fragment_sizes[partition_idx]))
+ vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,
+ "Failed to allocate bool decoder %d",
+ partition_idx);
- /* Advance to the next partition */
- partition += partition_size;
bool_decoder++;
}
#if CONFIG_MULTITHREAD
/* Clamp number of decoder threads */
- if (pbi->decoding_thread_count > num_part - 1)
- pbi->decoding_thread_count = num_part - 1;
+ if (pbi->decoding_thread_count > num_token_partitions - 1)
+ pbi->decoding_thread_count = num_token_partitions - 1;
#endif
}
-
static void stop_token_decoder(VP8D_COMP *pbi)
{
VP8_COMMON *pc = &pbi->common;
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
- const unsigned char *data = (const unsigned char *)pbi->Source;
- const unsigned char *data_end = data + pbi->source_sz;
+ const unsigned char *data = pbi->fragments[0];
+ const unsigned char *data_end = data + pbi->fragment_sizes[0];
ptrdiff_t first_partition_length_in_bytes;
int mb_row;
int corrupt_tokens = 0;
int prev_independent_partitions = pbi->independent_partitions;
- if (pbi->input_partition)
- {
- data = pbi->partitions[0];
- data_end = data + pbi->partition_sizes[0];
- }
-
/* start with no corruption of current frame */
xd->corrupted = 0;
pc->yv12_fb[pc->new_fb_idx].corrupted = 0;
}
}
- if (pbi->input_partition)
- {
- setup_token_decoder_partition_input(pbi);
- }
- else
- {
- setup_token_decoder(pbi, data + first_partition_length_in_bytes);
- }
+ setup_token_decoder(pbi, data + first_partition_length_in_bytes);
+
xd->current_bc = &pbi->bc2;
/* Read the default quantizers. */
pbi->decoded_key_frame = 0;
- pbi->input_partition = oxcf->input_partition;
+ pbi->input_fragments = oxcf->input_fragments;
+ pbi->num_fragments = 0;
/* Independent partitions is activated when a frame updates the
* token probability table to have equal probabilities over the
pbi->common.error.error_code = VPX_CODEC_OK;
- if (pbi->input_partition && !(source == NULL && size == 0))
+ if (pbi->num_fragments == 0)
{
- /* Store a pointer to this partition and return. We haven't
+ /* New frame, reset fragment pointers and sizes */
+ vpx_memset(pbi->fragments, 0, sizeof(pbi->fragments));
+ vpx_memset(pbi->fragment_sizes, 0, sizeof(pbi->fragment_sizes));
+ }
+ if (pbi->input_fragments && !(source == NULL && size == 0))
+ {
+ /* Store a pointer to this fragment and return. We haven't
* received the complete frame yet, so we will wait with decoding.
*/
- assert(pbi->num_partitions < MAX_PARTITIONS);
- pbi->partitions[pbi->num_partitions] = source;
- pbi->partition_sizes[pbi->num_partitions] = size;
- pbi->source_sz += size;
- pbi->num_partitions++;
- if (pbi->num_partitions > (1 << EIGHT_PARTITION) + 1)
+ assert(pbi->num_fragments < MAX_PARTITIONS);
+ pbi->fragments[pbi->num_fragments] = source;
+ pbi->fragment_sizes[pbi->num_fragments] = size;
+ pbi->num_fragments++;
+ if (pbi->num_fragments > (1 << EIGHT_PARTITION) + 1)
{
pbi->common.error.error_code = VPX_CODEC_UNSUP_BITSTREAM;
pbi->common.error.setjmp = 0;
- pbi->num_partitions = 0;
+ pbi->num_fragments = 0;
return -1;
}
return 0;
}
- else
+
+ if (!pbi->input_fragments)
{
- if (!pbi->input_partition)
- {
- pbi->Source = source;
- pbi->source_sz = size;
- }
- else
- {
- assert(pbi->common.multi_token_partition <= EIGHT_PARTITION);
- if (pbi->num_partitions == 0)
- {
- pbi->num_partitions = 1;
- pbi->partitions[0] = NULL;
- pbi->partition_sizes[0] = 0;
- }
- while (pbi->num_partitions < (1 << pbi->common.multi_token_partition) + 1)
- {
- // Reset all missing partitions
- pbi->partitions[pbi->num_partitions] =
- pbi->partitions[pbi->num_partitions - 1] +
- pbi->partition_sizes[pbi->num_partitions - 1];
- pbi->partition_sizes[pbi->num_partitions] = 0;
- pbi->num_partitions++;
- }
- }
+ pbi->fragments[0] = source;
+ pbi->fragment_sizes[0] = size;
+ pbi->num_fragments = 1;
+ }
+ assert(pbi->common.multi_token_partition <= EIGHT_PARTITION);
+ if (pbi->num_fragments == 0)
+ {
+ pbi->num_fragments = 1;
+ pbi->fragments[0] = NULL;
+ pbi->fragment_sizes[0] = 0;
+ }
- if (pbi->source_sz == 0)
+ if (pbi->num_fragments <= 1 && pbi->fragment_sizes[0] == 0)
+ {
+ /* This is used to signal that we are missing frames.
+ * We do not know if the missing frame(s) were supposed to update
+ * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ */
+ cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
+
+ /* If error concealment is disabled we won't signal missing frames
+ * to the decoder.
+ */
+ if (!pbi->ec_active)
{
- /* This is used to signal that we are missing frames.
- * We do not know if the missing frame(s) was supposed to update
- * any of the reference buffers, but we act conservative and
- * mark only the last buffer as corrupted.
- */
- cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
-
- /* If error concealment is disabled we won't signal missing frames to
- * the decoder.
- */
- if (!pbi->ec_active)
- {
- /* Signal that we have no frame to show. */
- cm->show_frame = 0;
+ /* Signal that we have no frame to show. */
+ cm->show_frame = 0;
- pbi->num_partitions = 0;
+ pbi->num_fragments = 0;
- /* Nothing more to do. */
- return 0;
- }
+ /* Nothing more to do. */
+ return 0;
}
+ }
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_push_neon(dx_store_reg);
- }
+ {
+ vp8_push_neon(dx_store_reg);
+ }
#endif
- cm->new_fb_idx = get_free_fb (cm);
+ cm->new_fb_idx = get_free_fb (cm);
- if (setjmp(pbi->common.error.jmp))
- {
+ if (setjmp(pbi->common.error.jmp))
+ {
#if HAVE_ARMV7
#if CONFIG_RUNTIME_CPU_DETECT
- if (cm->rtcd.flags & HAS_NEON)
+ if (cm->rtcd.flags & HAS_NEON)
#endif
- {
- vp8_pop_neon(dx_store_reg);
- }
+ {
+ vp8_pop_neon(dx_store_reg);
+ }
#endif
- pbi->common.error.setjmp = 0;
-
- pbi->num_partitions = 0;
+ pbi->common.error.setjmp = 0;
- /* We do not know if the missing frame(s) was supposed to update
- * any of the reference buffers, but we act conservative and
- * mark only the last buffer as corrupted.
- */
- cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
+ pbi->num_fragments = 0;
- if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
- cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
- return -1;
- }
+ /* We do not know if the missing frame(s) were supposed to update
+ * any of the reference buffers, but we act conservatively and
+ * mark only the last buffer as corrupted.
+ */
+ cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
- pbi->common.error.setjmp = 1;
+ if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
+ cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+ return -1;
}
+ pbi->common.error.setjmp = 1;
+
retcode = vp8_decode_frame(pbi);
if (retcode < 0)
#endif
pbi->common.error.error_code = VPX_CODEC_ERROR;
pbi->common.error.setjmp = 0;
- pbi->num_partitions = 0;
+ pbi->num_fragments = 0;
if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0)
cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
return retcode;
#endif
pbi->common.error.error_code = VPX_CODEC_ERROR;
pbi->common.error.setjmp = 0;
- pbi->num_partitions = 0;
+ pbi->num_fragments = 0;
return -1;
}
} else
#endif
pbi->common.error.error_code = VPX_CODEC_ERROR;
pbi->common.error.setjmp = 0;
- pbi->num_partitions = 0;
+ pbi->num_fragments = 0;
return -1;
}
/* swap the mode infos to storage for future error concealment */
if (pbi->ec_enabled && pbi->common.prev_mi)
{
- const MODE_INFO* tmp = pbi->common.prev_mi;
+ MODE_INFO* tmp = pbi->common.prev_mi;
int row, col;
pbi->common.prev_mi = pbi->common.mi;
pbi->common.mi = tmp;
pbi->ready_for_new_data = 0;
pbi->last_time_stamp = time_stamp;
- pbi->num_partitions = 0;
- pbi->source_sz = 0;
+ pbi->num_fragments = 0;
#if 0
{
VP8D_CONFIG oxcf;
- const unsigned char *Source;
- unsigned int source_sz;
- const unsigned char *partitions[MAX_PARTITIONS];
- unsigned int partition_sizes[MAX_PARTITIONS];
- unsigned int num_partitions;
+ const unsigned char *fragments[MAX_PARTITIONS];
+ unsigned int fragment_sizes[MAX_PARTITIONS];
+ unsigned int num_fragments;
#if CONFIG_MULTITHREAD
/* variable for threading */
#endif
int ec_enabled;
int ec_active;
- int input_partition;
+ int input_fragments;
int decoded_key_frame;
int independent_partitions;
int frame_corrupt_residual;
bne loop
; PART 2: check position for eob...
+ ldr r11, [sp, #0] ; restore BLOCKD pointer
mov lr, #0 ; init eob
cmp r1, #0 ; coeffs after quantization?
- ldr r11, [sp, #0] ; restore BLOCKD pointer
+ ldr r12, [r11, #vp8_blockd_eob]
beq end ; skip eob calculations if all zero
ldr r0, [r11, #vp8_blockd_qcoeff]
mov lr, #1 ; rc=0, i=0
end
- str lr, [r11, #vp8_blockd_eob]
+ strb lr, [r12]
ldmfd sp!, {r1, r4-r11, pc}
ENDP
vmovl.u16 q0, d0
vmovl.u16 q10, d20
-
vmax.u32 d0, d0, d1
vmax.u32 d20, d20, d21
vpmax.u32 d0, d0, d0
vpmax.u32 d20, d20, d20
- add r4, r2, #vp8_blockd_eob
- add r5, r3, #vp8_blockd_eob
+ ldr r4, [r2, #vp8_blockd_eob]
+ ldr r5, [r3, #vp8_blockd_eob]
- vst1.32 {d0[0]}, [r4@32]
- vst1.32 {d20[0]}, [r5@32]
+ vst1.8 {d0[0]}, [r4] ; store eob
+ vst1.8 {d20[0]}, [r5] ; store eob
vldmia sp!, {q4-q7}
ldmfd sp!, {r4-r9}
vshr.s16 q12, #1 ; right shift 1 after vqdmulh
vshr.s16 q13, #1
+ ldr r5, [r1, #vp8_blockd_eob]
+
orr r2, r2, r3 ; check if all zero (step 4)
cmp r2, #0 ; check if all zero (step 5)
beq zero_output ; check if all zero (step 6)
vst1.s16 {q2, q3}, [r7@128] ; store dqcoeff = x * Dequant
- add r4, r1, #vp8_blockd_eob
- vst1.32 {d0[0]}, [r4@32]
+ vst1.8 {d0[0]}, [r5] ; store eob
ldmfd sp!, {r4-r7}
bx lr
zero_output
- str r2, [r1, #vp8_blockd_eob]
+ strb r2, [r5] ; store eob
vst1.s16 {q0, q1}, [r6@128] ; qcoeff = 0
vst1.s16 {q0, q1}, [r7@128] ; dqcoeff = 0
&x->e_mbd.block[i], &x->e_mbd.block[i+1]);
if (has_2nd_order)
- x->quantize_b(&x->block[i], &x->e_mbd.block[i]);
+ x->quantize_b(&x->block[24], &x->e_mbd.block[24]);
}
RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby)(&x->e_mbd);
- ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src), x->e_mbd.predictor, b->src_stride);
+ ENCODEMB_INVOKE(&rtcd->encodemb, submby)(x->src_diff, *(b->base_src),
+ x->e_mbd.predictor, b->src_stride);
vp8_transform_intra_mby(x);
qcoeff_ptr = d->qcoeff;
dqcoeff_ptr = d->dqcoeff;
i0 = !type;
- eob = d->eob;
+ eob = *d->eob;
/* Now set up a Viterbi trellis to evaluate alternative roundings. */
rdmult = mb->rdmult * err_mult;
}
final_eob++;
- d->eob = final_eob;
- *a = *l = (d->eob != !type);
+ *a = *l = (final_eob != !type);
+ *d->eob = (char)final_eob;
+}
+static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type,
+ ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
+{
+ int sum=0;
+ int i;
+ BLOCKD *bd = &x->block[24];
+
+ if(bd->dequant[0]>=35 && bd->dequant[1]>=35)
+ return;
+
+ for(i=0;i<(*bd->eob);i++)
+ {
+ int coef = bd->dqcoeff[vp8_default_zig_zag1d[i]];
+ sum+= (coef>=0)?coef:-coef;
+ if(sum>=35)
+ return;
+ }
+ /**************************************************************************
+ Our inverse Hadamard transform is effectively a weighted sum of all 16
+ inputs, with each weight either +1 or -1. It has a last-stage scaling of
+ (sum+3)>>3, and the dc-only idct is (dc+4)>>3. So if all the sums are
+ between -35 and 29, the output after the inverse wht and idct will be all
+ zero. A sum of absolute values smaller than 35 guarantees that all 16
+ different (+1/-1) weighted sums in the wht fall between -35 and +35.
+ **************************************************************************/
+ if(sum < 35)
+ {
+ for(i=0;i<(*bd->eob);i++)
+ {
+ int rc = vp8_default_zig_zag1d[i];
+ bd->qcoeff[rc]=0;
+ bd->dqcoeff[rc]=0;
+ }
+ *bd->eob = 0;
+ *a = *l = (*bd->eob != !type);
+ }
}
static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
int b;
int type;
int has_2nd_order;
+
ENTROPY_CONTEXT_PLANES t_above, t_left;
ENTROPY_CONTEXT *ta;
ENTROPY_CONTEXT *tl;
b=24;
optimize_b(x, b, PLANE_TYPE_Y2,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
}
for (b = 0; b < 16; b++)
{
optimize_b(x, b, type,
- ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
}
b=24;
optimize_b(x, b, PLANE_TYPE_Y2,
ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+ check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
+ ta + vp8_block2above[b], tl + vp8_block2left[b]);
}
}
{
BLOCKD *b = &x->block[i];
- if (b->eob > 1)
+ if (*b->eob > 1)
{
IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->predictor, 16,
*(b->base_dst) + b->dst, b->dst_stride);
{
BLOCKD *b = &x->block[i];
- if (b->eob > 1)
+ if (*b->eob > 1)
{
IDCT_INVOKE(rtcd, idct16)(b->dqcoeff, b->predictor, 8,
*(b->base_dst) + b->dst, b->dst_stride);
}
}
}
- d->eob = eob + 1;
+ *d->eob = (char)(eob + 1);
}
#else
eob = i; // last nonzero coeffs
}
}
- d->eob = eob + 1;
+ *d->eob = (char)(eob + 1);
}
#endif
}
}
- d->eob = eob + 1;
+ *d->eob = (char)(eob + 1);
}
/* Perform regular quantization, with unbiased rounding and no zero bin. */
}
}
- d->eob = eob + 1;
+ *d->eob = (char)(eob + 1);
}
#else
}
}
- d->eob = eob + 1;
+ *d->eob = (char)(eob + 1);
}
#endif
vp8cx_init_quantizer(cpi);
}
-
static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
{
int c = !type; /* start at coef 0, unless Y with Y2 */
- int eob = b->eob;
+ int eob = (int)(*b->eob);
int pt ; /* surrounding block/prev coef predictor */
int cost = 0;
short *qcoeff_ptr = b->qcoeff;
// store everything needed to come back to this!!
for (i = 0; i < 16; i++)
{
- BLOCKD *bd = &x->e_mbd.block[i];
-
bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
bsi->modes[i] = x->partition_info->bmi[i].mode;
- bsi->eobs[i] = bd->eob;
+ bsi->eobs[i] = x->e_mbd.eobs[i];
}
}
}
BLOCKD *bd = &x->e_mbd.block[i];
bd->bmi.mv.as_int = bsi.mvs[i].as_int;
- bd->eob = bsi.eobs[i];
+ *bd->eob = bsi.eobs[i];
}
*returntotrate = bsi.r;
for (i = 0; i <= 24; i++)
{
- tteob += x->e_mbd.block[i].eob;
+ tteob += x->e_mbd.eobs[i];
}
if (tteob == 0)
ENTROPY_CONTEXT * a;
ENTROPY_CONTEXT * l;
int band, rc, v, token;
+ int eob;
b = x->block + 24;
qcoeff_ptr = b->qcoeff;
a = (ENTROPY_CONTEXT *)x->above_context + 8;
l = (ENTROPY_CONTEXT *)x->left_context + 8;
-
+ eob = x->eobs[24];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- if(!b->eob)
+ if(!eob)
{
/* c = band for this case */
t->Token = DCT_EOB_TOKEN;
t++;
c = 1;
- for (; c < b->eob; c++)
+ for (; c < eob; c++)
{
rc = vp8_default_zig_zag1d[c];
band = vp8_coef_bands[c];
c = type ? 0 : 1;
- if(c >= b->eob)
+ if(c >= *b->eob)
{
/* c = band for this case */
t->Token = DCT_EOB_TOKEN;
t++;
c++;
- for (; c < b->eob; c++)
+ for (; c < *b->eob; c++)
{
rc = vp8_default_zig_zag1d[c];
band = vp8_coef_bands[c];
VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
- if(!b->eob)
+ if(!(*b->eob))
{
/* c = band for this case */
t->Token = DCT_EOB_TOKEN;
t++;
c = 1;
- for (; c < b->eob; c++)
+ for (; c < *b->eob; c++)
{
rc = vp8_default_zig_zag1d[c];
band = vp8_coef_bands[c];
if (has_y2_block)
{
for (i = 0; i < 16; i++)
- skip &= (x->block[i].eob < 2);
+ skip &= (x->eobs[i] < 2);
}
for (; i < 24 + has_y2_block; i++)
- skip &= (!x->block[i].eob);
+ skip &= (!x->eobs[i]);
return skip;
}
movdqa [rdi], xmm0 ; store dqcoeff
movdqa [rdi + 16], xmm1
+ mov rcx, [rsi + vp8_blockd_eob]
+
; select the last value (in zig_zag order) for EOB
pcmpeqw xmm2, xmm6
pcmpeqw xmm3, xmm6
pmaxsw xmm2, xmm3
movd eax, xmm2
and eax, 0xff
- mov [rsi + vp8_blockd_eob], eax
+
+ mov BYTE PTR [rcx], al ; store eob
; begin epilog
add rsp, stack_size
pmaxsw xmm1, xmm5
+ mov rcx, [rsi + vp8_blockd_eob]
+
; now down to 8
pshufd xmm5, xmm1, 00001110b
movd eax, xmm1
and eax, 0xff
- mov [rsi + vp8_blockd_eob], eax
+
+ mov BYTE PTR [rcx], al ; store eob
; begin epilog
%if ABI_IS_32BIT
movdqa [rdi], xmm0
movdqa [rdi + 16], xmm1
+ mov rcx, [rsi + vp8_blockd_eob]
+
; select the last value (in zig_zag order) for EOB
pxor xmm6, xmm6
pcmpeqw xmm4, xmm6
add eax, 1
and eax, edi
- mov [rsi + vp8_blockd_eob], eax
+ mov BYTE PTR [rcx], al ; store eob
; begin epilog
%if ABI_IS_32BIT
movdqa [rcx], xmm2 ;store dqcoeff
movdqa [rcx + 16], xmm3 ;store dqcoeff
+ mov rcx, [rsi + vp8_blockd_eob]
+
sub edi, edx ;check for all zeros in bit mask
sar edi, 31 ;0 or -1
add eax, 1
and eax, edi ;if the bit mask was all zero,
;then eob = 0
- mov [rsi + vp8_blockd_eob], eax
+ mov BYTE PTR [rcx], al ;store eob
; begin epilog
%if ABI_IS_32BIT
short *dqcoeff_ptr = d->dqcoeff;
short *dequant_ptr = d->dequant;
- d->eob = vp8_fast_quantize_b_impl_mmx(
- coeff_ptr,
- zbin_ptr,
- qcoeff_ptr,
- dequant_ptr,
- scan_mask,
-
- round_ptr,
- quant_ptr,
- dqcoeff_ptr
- );
+ *d->eob = (char)vp8_fast_quantize_b_impl_mmx(
+ coeff_ptr,
+ zbin_ptr,
+ qcoeff_ptr,
+ dequant_ptr,
+ scan_mask,
+
+ round_ptr,
+ quant_ptr,
+ dqcoeff_ptr
+ );
}
int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
priv->cx_data_sz = priv->cfg.g_w * priv->cfg.g_h * 3 / 2 * 2;
- if (priv->cx_data_sz < 4096) priv->cx_data_sz = 4096;
+ if (priv->cx_data_sz < 32768) priv->cx_data_sz = 32768;
priv->cx_data = malloc(priv->cx_data_sz);
oxcf.max_threads = ctx->cfg.threads;
oxcf.error_concealment =
(ctx->base.init_flags & VPX_CODEC_USE_ERROR_CONCEALMENT);
- oxcf.input_partition =
- (ctx->base.init_flags & VPX_CODEC_USE_INPUT_PARTITION);
+ oxcf.input_fragments =
+ (ctx->base.init_flags & VPX_CODEC_USE_INPUT_FRAGMENTS);
optr = vp8dx_create_decompressor(&oxcf);
"WebM Project VP8 Decoder" VERSION_STRING,
VPX_CODEC_INTERNAL_ABI_VERSION,
VPX_CODEC_CAP_DECODER | VP8_CAP_POSTPROC | VP8_CAP_ERROR_CONCEALMENT |
- VPX_CODEC_CAP_INPUT_PARTITION,
+ VPX_CODEC_CAP_INPUT_FRAGMENTS,
/* vpx_codec_caps_t caps; */
vp8_init, /* vpx_codec_init_fn_t init; */
vp8_destroy, /* vpx_codec_destroy_fn_t destroy; */
else if ((flags & VPX_CODEC_USE_ERROR_CONCEALMENT) &&
!(iface->caps & VPX_CODEC_CAP_ERROR_CONCEALMENT))
res = VPX_CODEC_INCAPABLE;
- else if ((flags & VPX_CODEC_USE_INPUT_PARTITION) &&
- !(iface->caps & VPX_CODEC_CAP_INPUT_PARTITION))
+ else if ((flags & VPX_CODEC_USE_INPUT_FRAGMENTS) &&
+ !(iface->caps & VPX_CODEC_CAP_INPUT_FRAGMENTS))
res = VPX_CODEC_INCAPABLE;
else if (!(iface->caps & VPX_CODEC_CAP_DECODER))
res = VPX_CODEC_INCAPABLE;
#define VPX_CODEC_CAP_POSTPROC 0x40000 /**< Can postprocess decoded frame */
#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000 /**< Can conceal errors due to
packet loss */
-#define VPX_CODEC_CAP_INPUT_PARTITION 0x100000 /**< Can receive encoded frames
- one partition at a time */
+#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000 /**< Can receive encoded frames
+ one fragment at a time */
/*! \brief Initialization-time Feature Enabling
*
#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded
frames */
-#define VPX_CODEC_USE_INPUT_PARTITION 0x40000 /**< The input frame should be
+#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000 /**< The input frame should be
passed to the decoder one
- partition at a time */
+ fragment at a time */
/*!\brief Stream properties
*
* generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
* time stamp) order. Frames produced will always be in PTS (presentation
* time stamp) order.
- * If the decoder is configured with VPX_CODEC_USE_INPUT_PARTITION enabled,
- * data and data_sz must contain at most one encoded partition. When no more
- * data is available, this function should be called with NULL as data and 0
- * as data_sz. The memory passed to this function must be available until
- * the frame has been decoded.
+ * If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
+ * data and data_sz can contain a fragment of the encoded frame. Fragment #n
+ * must contain at least partition #n, but can also contain subsequent
+ * partitions (#n+1 - #n+i), and if so, fragments #n+1, .., #n+i must be
+ * empty. When no more data is available, this function should be called
+ * with NULL as data and 0 as data_sz. The memory passed to this function
+ * must be available until the frame has been decoded.
*
* \param[in] ctx Pointer to this instance's context
* \param[in] data Pointer to this block of new coded data. If
"\rPass %d/%d frame %4d/%-4d %7"PRId64"B %7lub/f %7"PRId64"b/s"
" %7lu %s (%.2f fps)\033[K", pass + 1,
arg_passes, frames_in, frames_out, nbytes,
- (unsigned long)(nbytes * 8 / frames_in),
- nbytes * 8 *(int64_t)arg_framerate.num / arg_framerate.den / frames_in,
+ frames_in ? (unsigned long)(nbytes * 8 / frames_in) : 0,
+ frames_in ? nbytes * 8 *(int64_t)arg_framerate.num / arg_framerate.den / frames_in : 0,
cx_time > 9999999 ? cx_time / 1000 : cx_time,
cx_time > 9999999 ? "ms" : "us",
- (float)frames_in * 1000000.0 / (float)cx_time);
+ cx_time > 0 ? (float)frames_in * 1000000.0 / (float)cx_time : 0);
if ( (show_psnr) && (psnr_count>0) )
{