optimize_b(x, b, type,
ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
+ tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
}
// 8x8 always has a 2nd order Haar block
optimize_b(x, b, PLANE_TYPE_UV,
ta + vp8_block2above_8x8[b], tl + vp8_block2left_8x8[b],
rtcd, TX_8X8);
- *(ta + vp8_block2above_8x8[b] + 1) = *(ta + vp8_block2above_8x8[b]);
- *(tl + vp8_block2left_8x8[b] + 1) = *(tl + vp8_block2left_8x8[b]);
+ ta[vp8_block2above_8x8[b] + 1] = ta[vp8_block2above_8x8[b]];
+ tl[vp8_block2left_8x8[b] + 1] = tl[vp8_block2left_8x8[b]];
}
}
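
The edits above, and the rest of this patch, are a mechanical style cleanup: every *(ptr + offset) access is rewritten as ptr[offset]. C defines the subscript form as exactly the same expression, so no behaviour changes. A minimal standalone sketch of the equivalence, using a hypothetical array and offset in place of the encoder's ENTROPY_CONTEXT rows and vp8_block2above_8x8[] lookups:

    #include <assert.h>

    /* Hypothetical stand-in for one of the encoder's above-context rows. */
    static unsigned char ta[8];

    int main(void) {
      const int off = 2;               /* plays the role of vp8_block2above_8x8[b] */
      ta[off] = 5;

      /* Old form: explicit pointer arithmetic plus dereference. */
      *(ta + off + 1) = *(ta + off);

      /* New form: array subscripting; ta[off] is defined as *(ta + off),
       * so this is the same store. */
      ta[off + 1] = ta[off];

      assert(ta[off + 1] == ta[off]);
      return 0;
    }
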
// compute quantization mse of 8x8 block
distortion = vp8_block_error_c((x->block + idx)->coeff,
(xd->block + idx)->dqcoeff, 64);
- ta0 = *(a + vp8_block2above_8x8[idx]);
- tl0 = *(l + vp8_block2left_8x8 [idx]);
+ ta0 = a[vp8_block2above_8x8[idx]];
+ tl0 = l[vp8_block2left_8x8[idx]];
rate_t = cost_coeffs(x, xd->block + idx, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_8X8);
distortion += vp8_block_error_c((x->block + ib + 5)->coeff,
(xd->block + ib + 5)->dqcoeff, 16);
- ta0 = *(a + vp8_block2above[ib]);
- ta1 = *(a + vp8_block2above[ib + 1]);
- tl0 = *(l + vp8_block2left[ib]);
- tl1 = *(l + vp8_block2left[ib + 4]);
+ ta0 = a[vp8_block2above[ib]];
+ ta1 = a[vp8_block2above[ib + 1]];
+ tl0 = l[vp8_block2left[ib]];
+ tl1 = l[vp8_block2left[ib + 4]];
rate_t = cost_coeffs(x, xd->block + ib, PLANE_TYPE_Y_WITH_DC,
&ta0, &tl0, TX_4X4);
rate_t += cost_coeffs(x, xd->block + ib + 1, PLANE_TYPE_Y_WITH_DC,
vp8_encode_intra8x8(IF_RTCD(&cpi->rtcd), x, ib);
if (xd->mode_info_context->mbmi.txfm_size == TX_8X8) {
- *(a + vp8_block2above_8x8[idx]) = besta0;
- *(a + vp8_block2above_8x8[idx] + 1) = besta1;
- *(l + vp8_block2left_8x8 [idx]) = bestl0;
- *(l + vp8_block2left_8x8 [idx] + 1) = bestl1;
+ a[vp8_block2above_8x8[idx]] = besta0;
+ a[vp8_block2above_8x8[idx] + 1] = besta1;
+ l[vp8_block2left_8x8[idx]] = bestl0;
+ l[vp8_block2left_8x8[idx] + 1] = bestl1;
} else {
- *(a + vp8_block2above[ib]) = besta0;
- *(a + vp8_block2above[ib + 1]) = besta1;
- *(l + vp8_block2left[ib]) = bestl0;
- *(l + vp8_block2left[ib + 4]) = bestl1;
+ a[vp8_block2above[ib]] = besta0;
+ a[vp8_block2above[ib + 1]] = besta1;
+ l[vp8_block2left[ib]] = bestl0;
+ l[vp8_block2left[ib + 4]] = bestl1;
}
return best_rd;
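
The recurring "+ 1" copies in these hunks follow from how the entropy context is laid out: the above/left rows (ta/tl, A/L) hold one entry per 4x4 column or row, so a block coded with an 8x8 transform must also propagate its context into the neighbouring 4x4 slot. A minimal sketch of that update, with a hypothetical context array and an assumed stand-in for vp8_block2above_8x8[]:

    /* One above-context entry per 4x4 luma column of the macroblock. */
    static unsigned char ctx_above[4];

    /* Assumed stand-in for vp8_block2above_8x8[]: each 8x8 luma block
     * starts at 4x4 column 0 or 2. */
    static const int block2above_8x8[4] = { 0, 2, 0, 2 };

    static void update_above_ctx_8x8(int b, unsigned char coded) {
      const int c = block2above_8x8[b];
      ctx_above[c] = coded;            /* context written while coding the block */
      ctx_above[c + 1] = ctx_above[c]; /* an 8x8 spans two 4x4 columns */
    }
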
tokenize1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
A, L, cpi, dry_run);
- for (b = 1; b < 16; b++) {
- *(A + vp8_block2above[b]) = *(A);
- *(L + vp8_block2left[b] ) = *(L);
- }
+ A[1] = A[2] = A[3] = A[0];
+ L[1] = L[2] = L[3] = L[0];
for (b = 16; b < 24; b += 4) {
tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b], cpi, dry_run);
- *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
if (xd->mode_info_context->mbmi.mode == I8X8_PRED ||
xd->mode_info_context->mbmi.mode == SPLITMV) {
tokenize1st_order_b_8x8(xd, xd->block + b, t, PLANE_TYPE_UV,
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b], cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
}
} else {
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
for (b = 16; b < 24; b += 4) {
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
if (dry_run)
*t = t_backup;
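
The loop removals on the 16x16 paths (above in the tokenizer, below in the stuffing code) rest on the same layout: a 16x16 transform covers all four 4x4 columns and rows of the luma plane, and the 4x4 lookup tables map the sixteen luma blocks onto above/left entries 0..3 only, so replicating entry 0 into entries 1..3 is equivalent to the old sixteen-iteration loop. A hedged sketch of that equivalence, using an assumed copy of the 4x4 above mapping:

    #include <assert.h>
    #include <string.h>

    /* Assumed copy of the 4x4 above-context mapping for the 16 luma blocks:
     * four 4x4 columns repeated down each row of the macroblock. */
    static const unsigned char block2above[16] = {
      0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
    };

    int main(void) {
      unsigned char loop_ctx[4] = { 7, 1, 2, 3 };
      unsigned char direct_ctx[4];
      int b;

      memcpy(direct_ctx, loop_ctx, sizeof(loop_ctx));

      /* Old form: walk all 16 luma blocks and copy entry 0 into whichever
       * column (0..3) each block maps to. */
      for (b = 1; b < 16; b++)
        loop_ctx[block2above[b]] = loop_ctx[0];

      /* New form: replicate entry 0 into entries 1..3 directly. */
      direct_ctx[1] = direct_ctx[2] = direct_ctx[3] = direct_ctx[0];

      assert(memcmp(loop_ctx, direct_ctx, sizeof(loop_ctx)) == 0);
      return 0;
    }
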
stuff1st_order_b_16x16(xd, xd->block, t, PLANE_TYPE_Y_WITH_DC,
A, L, cpi, dry_run);
- for (i = 1; i < 16; i++) {
- *(A + vp8_block2above[i]) = *(A);
- *(L + vp8_block2left[i]) = *(L);
- }
+ A[1] = A[2] = A[3] = A[0];
+ L[1] = L[2] = L[3] = L[0];
for (b = 16; b < 24; b += 4) {
stuff1st_order_buv_8x8(xd, xd->block + b, t,
A + vp8_block2above[b],
L + vp8_block2left[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b]+1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b]+1 ) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
vpx_memset(&A[8], 0, sizeof(A[8]));
vpx_memset(&L[8], 0, sizeof(L[8]));
A + vp8_block2above_8x8[b],
L + vp8_block2left_8x8[b],
cpi, dry_run);
- *(A + vp8_block2above_8x8[b] + 1) = *(A + vp8_block2above_8x8[b]);
- *(L + vp8_block2left_8x8[b] + 1) = *(L + vp8_block2left_8x8[b]);
+ A[vp8_block2above_8x8[b] + 1] = A[vp8_block2above_8x8[b]];
+ L[vp8_block2left_8x8[b] + 1] = L[vp8_block2left_8x8[b]];
}
for (b = 16; b < 24; b++)