void ff_vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
void ff_vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, DCTELEM *block/*align 16*/);
+/* 1/2^n downscaling functions from imgconvert.c */
+void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
+void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
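A minimal usage sketch for these helpers (the function half_plane and the names plane, stride, w, h are hypothetical, not part of the patch). Note that the width/height arguments give the destination dimensions, as the mpegvideo.c call sites further down also show:

    #include "dsputil.h"

    /* downscale a packed grayscale plane to half size in each dimension */
    uint8_t *half_plane(const uint8_t *plane, int stride, int w, int h)
    {
        uint8_t *half = av_malloc((w / 2) * (h / 2));
        if (!half)
            return NULL;
        /* dst, dst stride, src, src stride, dst width, dst height */
        ff_shrink22(half, w / 2, plane, stride, w / 2, h / 2);
        return half;
    }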
+
/* minimum alignment rules ;)
If you notice errors in the alignment requirements, need more alignment for some asm code on some CPU,
or need to use a function with less strictly aligned data, then send a mail to the ffmpeg-dev list, ...
void (*inner_add_yblock)(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
void (*prefetch)(void *mem, int stride, int h);
+
+ void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
} DSPContext;
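The new shrink[] table is indexed by the log2 of the per-axis downscale factor. Its initialization is not part of this excerpt; presumably dsputil_init() wires it up along these lines:

    c->shrink[0] = ff_img_copy_plane; /* 1:1 copy   */
    c->shrink[1] = ff_shrink22;       /* 2x2 -> 1x1 */
    c->shrink[2] = ff_shrink44;       /* 4x4 -> 1x1 */
    c->shrink[3] = ff_shrink88;       /* 8x8 -> 1x1 */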
void dsputil_static_init(void);
return dst_pix_fmt;
}
-static void img_copy_plane(uint8_t *dst, int dst_wrap,
+void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
const uint8_t *src, int src_wrap,
int width, int height)
{
break;
}
bwidth = (width * bits + 7) >> 3;
- img_copy_plane(dst->data[0], dst->linesize[0],
+ ff_img_copy_plane(dst->data[0], dst->linesize[0],
src->data[0], src->linesize[0],
bwidth, height);
break;
h >>= pf->y_chroma_shift;
}
bwidth = (w * pf->depth + 7) >> 3;
- img_copy_plane(dst->data[i], dst->linesize[i],
+ ff_img_copy_plane(dst->data[i], dst->linesize[i],
src->data[i], src->linesize[i],
bwidth, h);
}
break;
case FF_PIXEL_PALETTE:
- img_copy_plane(dst->data[0], dst->linesize[0],
+ ff_img_copy_plane(dst->data[0], dst->linesize[0],
src->data[0], src->linesize[0],
width, height);
/* copy the palette */
- img_copy_plane(dst->data[1], dst->linesize[1],
+ ff_img_copy_plane(dst->data[1], dst->linesize[1],
src->data[1], src->linesize[1],
4, 256);
break;
}
/* 2x2 -> 1x1 */
-static void shrink22(uint8_t *dst, int dst_wrap,
+void ff_shrink22(uint8_t *dst, int dst_wrap,
const uint8_t *src, int src_wrap,
int width, int height)
{
}
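The body of ff_shrink22 is elided here; each destination pixel is the rounded average of a 2x2 source block, conceptually (s1 and s2 standing for two consecutive source rows, x for the destination column):

    for (x = 0; x < width; x++)
        dst[x] = (s1[2*x] + s1[2*x + 1] + s2[2*x] + s2[2*x + 1] + 2) >> 2;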
/* 4x4 -> 1x1 */
-static void shrink44(uint8_t *dst, int dst_wrap,
+void ff_shrink44(uint8_t *dst, int dst_wrap,
const uint8_t *src, int src_wrap,
int width, int height)
{
}
}
+/* 8x8 -> 1x1 */
+void ff_shrink88(uint8_t *dst, int dst_wrap,
+                 const uint8_t *src, int src_wrap,
+                 int width, int height)
+{
+    int w, i;
+
+    for(;height > 0; height--) {
+        for(w = width;w > 0; w--) {
+            int tmp=0;
+            /* sum the 64 pixels of one 8x8 source block */
+            for(i=0; i<8; i++){
+                tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
+                src += src_wrap;
+            }
+            /* rounded average of the 64 samples */
+            *(dst++) = (tmp + 32)>>6;
+            /* step right to the next 8x8 block in the same band of rows */
+            src += 8 - 8*src_wrap;
+        }
+        /* advance the source by 8 rows and rewind to the left edge */
+        src += 8*src_wrap - 8*width;
+        dst += dst_wrap - width;
+    }
+}
+
static void grow21_line(uint8_t *dst, const uint8_t *src,
int width)
{
uint8_t *d;
if (dst_pix->color_type == FF_COLOR_YUV_JPEG) {
- img_copy_plane(dst->data[0], dst->linesize[0],
+ ff_img_copy_plane(dst->data[0], dst->linesize[0],
src->data[0], src->linesize[0],
dst_width, dst_height);
} else {
if (is_yuv_planar(src_pix) &&
dst_pix_fmt == PIX_FMT_GRAY8) {
if (src_pix->color_type == FF_COLOR_YUV_JPEG) {
- img_copy_plane(dst->data[0], dst->linesize[0],
+ ff_img_copy_plane(dst->data[0], dst->linesize[0],
src->data[0], src->linesize[0],
dst_width, dst_height);
} else {
YUV444 format */
switch(xy_shift) {
case 0x00:
- resize_func = img_copy_plane;
+ resize_func = ff_img_copy_plane;
break;
case 0x10:
resize_func = shrink21;
resize_func = shrink12;
break;
case 0x11:
- resize_func = shrink22;
+ resize_func = ff_shrink22;
break;
case 0x22:
- resize_func = shrink44;
+ resize_func = ff_shrink44;
break;
case 0xf0:
resize_func = grow21;
goto no_chroma_filter;
}
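For context (these imgconvert.c lines are unchanged, and the computation itself is not shown in this excerpt): xy_shift packs the horizontal and vertical chroma-shift differences between destination and source into one nibble each, so 0x11 selects the 2x2 shrink, 0x22 the 4x4 shrink, and 0xf0 (an x_shift of -1) a horizontal grow. A sketch of the assumed packing:

    x_shift  = dst_pix->x_chroma_shift - src_pix->x_chroma_shift;
    y_shift  = dst_pix->y_chroma_shift - src_pix->y_chroma_shift;
    xy_shift = ((x_shift & 0xf) << 4) | (y_shift & 0xf);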
- img_copy_plane(dst->data[0], dst->linesize[0],
+ ff_img_copy_plane(dst->data[0], dst->linesize[0],
src->data[0], src->linesize[0],
dst_width, dst_height);
int i, j, out_size, p_lambda, b_lambda, lambda2;
int outbuf_size= s->width * s->height; //FIXME
uint8_t *outbuf= av_malloc(outbuf_size);
- ImgReSampleContext *resample;
int64_t best_rd= INT64_MAX;
int best_b_count= -1;
+ assert(scale>=0 && scale <=3);
+
// emms_c();
p_lambda= s->last_lambda_for[P_TYPE]; //s->next_picture_ptr->quality;
b_lambda= s->last_lambda_for[B_TYPE]; //p_lambda *ABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
if (avcodec_open(c, codec) < 0)
return -1;
- resample= img_resample_init(c->width, c->height, s->width, s->height); //FIXME use sws
-
for(i=0; i<s->max_b_frames+2; i++){
int ysize= c->width*c->height;
int csize= (c->width/2)*(c->height/2);
input[i].linesize[1]=
input[i].linesize[2]= c->width/2;
- if(!i || s->input_picture[i-1])
- img_resample(resample, (AVPicture*)&input[i],
- (AVPicture*)&pre_input);
+ if(!i || s->input_picture[i-1]){
+ s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0], pre_input.data[0], pre_input.linesize[0], c->width, c->height);
+ s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1], pre_input.data[1], pre_input.linesize[1], c->width>>1, c->height>>1);
+ s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2], pre_input.data[2], pre_input.linesize[2], c->width>>1, c->height>>1);
+ }
}
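The scratch encoder context c is configured elsewhere in this function (not visible in this excerpt); the shrink[scale] calls above rely on its dimensions being the input dimensions reduced by the same power of two, and the assert(scale>=0 && scale <=3) above guarantees a valid index into the shrink[] table. The assumed relationship:

    c->width  = s->width  >> scale; /* shrink[scale] divides each axis by 1 << scale */
    c->height = s->height >> scale;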
for(j=0; j<s->max_b_frames+1; j++){
av_freep(&outbuf);
avcodec_close(c);
av_freep(&c);
- img_resample_close(resample);
for(i=0; i<s->max_b_frames+2; i++){
av_freep(&input[i].data[0]);