/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
12 #include "vpx_config.h"
13 #include "vp8/common/idct.h"
15 #include "vp8/common/reconintra.h"
16 #include "vp8/common/reconintra4x4.h"
18 #include "vp8/common/invtrans.h"
19 #include "vp8/common/recon.h"
21 #include "encodeintra.h"
24 #if CONFIG_RUNTIME_CPU_DETECT
25 #define IF_RTCD(x) (x)
27 #define IF_RTCD(x) NULL
30 int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
34 int intra_pred_var = 0;
39 const VP8_ENCODER_RTCD *rtcd = IF_RTCD(&cpi->rtcd);
41 x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
42 x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
43 x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
45 vp8_encode_intra16x16mby(rtcd, x);
47 vp8_inverse_transform_mby(&x->e_mbd, IF_RTCD(&cpi->common.rtcd));
51 for (i = 0; i < 16; i++)
53 x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
54 vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
58 intra_pred_var = VARIANCE_INVOKE(&cpi->rtcd.variance, getmbss)(x->src_diff);
60 return intra_pred_var;
63 void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
64 MACROBLOCK *x, int ib)
66 BLOCKD *b = &x->e_mbd.block[ib];
67 BLOCK *be = &x->block[ib];
69 RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
70 (*(b->base_dst) + b->dst, b->dst_stride,
71 b->bmi.as_mode, b->predictor, 16);
73 ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
75 x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
81 IDCT_INVOKE(IF_RTCD(&rtcd->common->idct), idct16)(b->dqcoeff,
82 b->predictor, 16, *(b->base_dst) + b->dst, b->dst_stride);
86 IDCT_INVOKE(IF_RTCD(&rtcd->common->idct), idct1_scalar_add)
87 (b->dqcoeff[0], b->predictor, 16, *(b->base_dst) + b->dst,
92 void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
96 MACROBLOCKD *x = &mb->e_mbd;
97 vp8_intra_prediction_down_copy(x);
99 for (i = 0; i < 16; i++)
100 vp8_encode_intra4x4block(rtcd, mb, i);
104 void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
106 BLOCK *b = &x->block[0];
107 MACROBLOCKD *xd = &x->e_mbd;
109 RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mby_s)(&x->e_mbd);
111 ENCODEMB_INVOKE(&rtcd->encodemb, submby) (x->src_diff, *(b->base_src),
112 b->src_stride, xd->dst.y_buffer, xd->dst.y_stride);
114 vp8_transform_intra_mby(x);
119 vp8_optimize_mby(x, rtcd);
122 void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
124 MACROBLOCKD *xd = &x->e_mbd;
126 RECON_INVOKE(&rtcd->common->recon, build_intra_predictors_mbuv_s)(&x->e_mbd);
128 ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer,
129 x->src.v_buffer, x->src.uv_stride, xd->dst.u_buffer,
130 xd->dst.v_buffer, xd->dst.uv_stride);
132 vp8_transform_mbuv(x);
134 vp8_quantize_mbuv(x);
137 vp8_optimize_mbuv(x, rtcd);