return (get_bits_count(&a->gb)+31)/32*4;
}
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
ASV1Context * const a = avctx->priv_data;
AVFrame *pict = data;
return size*4;
}
+#endif /* CONFIG_ENCODERS */
static void common_init(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
return 0;
}
+#ifdef CONFIG_ENCODERS
static int encode_init(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
int i;
return 0;
}
+#endif
static int decode_end(AVCodecContext *avctx){
ASV1Context * const a = avctx->priv_data;
return 0;
}
+#ifdef CONFIG_ENCODERS
static int dv_encode_mt(AVCodecContext *avctx, void* sl)
{
DVVideoContext *s = avctx->priv_data;
&s->sys->video_place[slice*5]);
return 0;
}
+#endif /* CONFIG_ENCODERS */
+#ifdef CONFIG_DECODERS
/* NOTE: exactly one frame must be given (120000 bytes for NTSC,
144000 bytes for PAL - or twice those for 50Mbps) */
static int dvvideo_decode_frame(AVCodecContext *avctx,
return s->sys->frame_size;
}
+#endif /* CONFIG_DECODERS */
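/* Editor's sketch (not part of the patch above): the NOTE preceding
 * dvvideo_decode_frame() implies a simple buffer-size check. This is a
 * minimal illustration using only the constants quoted in that comment;
 * dv_frame_size_ok() is a hypothetical helper, not an FFmpeg function. */
static inline int dv_frame_size_ok(int buf_size)
{
    return buf_size == 120000 ||   /* 25Mbps NTSC           */
           buf_size == 144000 ||   /* 25Mbps PAL            */
           buf_size == 240000 ||   /* 50Mbps NTSC (doubled) */
           buf_size == 288000;     /* 50Mbps PAL  (doubled) */
}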
static inline int dv_write_pack(enum dv_pack_type pack_id, DVVideoContext *c, uint8_t* buf)
};
#endif // CONFIG_DVVIDEO_ENCODER
+#ifdef CONFIG_DVVIDEO_DECODER
AVCodec dvvideo_decoder = {
"dvvideo",
CODEC_TYPE_VIDEO,
CODEC_CAP_DR1,
NULL
};
+#endif /* CONFIG_DVVIDEO_DECODER */
return ret;
}
+#ifdef CONFIG_ENCODERS
static inline int encode_line(FFV1Context *s, int w, int_fast16_t *sample[2], int plane_index, int bits){
PlaneContext * const p= &s->plane[plane_index];
RangeCoder * const c= &s->c;
for(i=0; i<5; i++)
write_quant_table(c, f->quant_table[i]);
}
+#endif /* CONFIG_ENCODERS */
static int common_init(AVCodecContext *avctx){
FFV1Context *s = avctx->priv_data;
return 0;
}
+#ifdef CONFIG_ENCODERS
static int encode_init(AVCodecContext *avctx)
{
FFV1Context *s = avctx->priv_data;
return 0;
}
+#endif /* CONFIG_ENCODERS */
static void clear_state(FFV1Context *f){
}
}
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
FFV1Context *f = avctx->priv_data;
RangeCoder * const c= &f->c;
return used_count + (put_bits_count(&f->pb)+7)/8;
}
}
+#endif /* CONFIG_ENCODERS */
static int common_end(AVCodecContext *avctx){
FFV1Context *s = avctx->priv_data;
return g726_iterate(c, i);
}
+#ifdef CONFIG_ENCODERS
static int16_t g726_encode(G726Context* c, int16_t sig)
{
uint8_t i;
g726_iterate(c, i);
return i;
}
+#endif
/* Interfacing with libavcodec */
return 0;
}
+#ifdef CONFIG_ENCODERS
static int g726_encode_frame(AVCodecContext *avctx,
uint8_t *dst, int buf_size, void *data)
{
return put_bits_count(&pb)>>3;
}
+#endif
static int g726_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
static inline void mpeg4_encode_block(MpegEncContext * s, DCTELEM * block,
int n, int dc, uint8_t *scan_table,
PutBitContext *dc_pb, PutBitContext *ac_pb);
+static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
+ uint8_t *scan_table);
#endif
static int h263_decode_motion(MpegEncContext * s, int pred, int fcode);
static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr);
static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
int n, int coded, int intra, int rvlc);
-static int mpeg4_get_block_length(MpegEncContext * s, DCTELEM * block, int n, int intra_dc,
- uint8_t *scan_table);
-static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr);
#ifdef CONFIG_ENCODERS
+static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr);
static void mpeg4_encode_visual_object_header(MpegEncContext * s);
static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number);
#endif //CONFIG_ENCODERS
}
}
+#ifdef CONFIG_ENCODERS
static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
{
int x, y, wrap, a, c, pred_dc, scale;
*dc_val_ptr = &dc_val[x + y * wrap];
return pred_dc;
}
+#endif /* CONFIG_ENCODERS */
static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
return 0;
}
+#ifdef CONFIG_ENCODERS
static void generate_len_table(uint8_t *dst, uint64_t *stats, int size){
uint64_t counts[2*size];
int up[2*size];
if(i==size) break;
}
}
+#endif /* CONFIG_ENCODERS */
static int read_huffman_tables(HYuvContext *s, uint8_t *src, int length){
GetBitContext gb;
return 0;
}
+#ifdef CONFIG_DECODERS
static int decode_init(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
return 0;
}
+#endif /* CONFIG_DECODERS */
+#ifdef CONFIG_ENCODERS
static int store_table(HYuvContext *s, uint8_t *len, uint8_t *buf){
int i;
int index= 0;
return 0;
}
+#endif /* CONFIG_ENCODERS */
static void decode_422_bitstream(HYuvContext *s, int count){
int i;
}
}
+#ifdef CONFIG_ENCODERS
static int encode_422_bitstream(HYuvContext *s, int count){
int i;
}
return 0;
}
+#endif /* CONFIG_ENCODERS */
static void decode_bgr_bitstream(HYuvContext *s, int count){
int i;
}
}
+#ifdef CONFIG_DECODERS
static void draw_slice(HYuvContext *s, int y){
int h, cy;
int offset[4];
return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
+#endif
static int common_end(HYuvContext *s){
int i;
return 0;
}
+#ifdef CONFIG_DECODERS
static int decode_end(AVCodecContext *avctx)
{
HYuvContext *s = avctx->priv_data;
return 0;
}
+#endif /* CONFIG_DECODERS */
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
HYuvContext *s = avctx->priv_data;
AVFrame *pict = data;
return 0;
}
+#endif /* CONFIG_ENCODERS */
+#ifdef CONFIG_DECODERS
AVCodec huffyuv_decoder = {
"huffyuv",
CODEC_TYPE_VIDEO,
CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
NULL
};
+#endif /* CONFIG_DECODERS */
#ifdef CONFIG_ENCODERS
}
}
+#ifdef CONFIG_ENCODERS
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
long i=0;
}
}
}
+#endif /* CONFIG_ENCODERS */
#define PREFETCH(name, op) \
void name(void *mem, int stride, int h){\
-
+#ifdef CONFIG_DECODERS
/*
*
* Decode a frame
/* always report that the buffer was completely consumed */
return buf_size;
}
+#endif /* CONFIG_DECODERS */
-
-
+#ifdef CONFIG_ENCODERS
/*
*
* Encode a frame
return c->zstream.total_out;
#endif
}
+#endif /* CONFIG_ENCODERS */
-
-
+#ifdef CONFIG_DECODERS
/*
*
* Init lcl decoder
return 0;
}
+#endif /* CONFIG_DECODERS */
-
-
+#ifdef CONFIG_ENCODERS
/*
*
* Init lcl encoder
return 0;
#endif
}
+#endif /* CONFIG_ENCODERS */
-
-
+#ifdef CONFIG_DECODERS
/*
*
* Uninit lcl decoder
return 0;
}
+#endif /* CONFIG_DECODERS */
-
-
+#ifdef CONFIG_ENCODERS
/*
*
* Uninit lcl encoder
return 0;
}
+#endif /* CONFIG_ENCODERS */
+#ifdef CONFIG_MSZH_DECODER
AVCodec mszh_decoder = {
"mszh",
CODEC_TYPE_VIDEO,
decode_frame,
CODEC_CAP_DR1,
};
+#endif /* CONFIG_MSZH_DECODER */
-
+#ifdef CONFIG_ZLIB_DECODER
AVCodec zlib_decoder = {
"zlib",
CODEC_TYPE_VIDEO,
decode_frame,
CODEC_CAP_DR1,
};
+#endif /* CONFIG_ZLIB_DECODER */
#ifdef CONFIG_ENCODERS
return buf_size;
}
-
+#ifdef CONFIG_MP3ADU_DECODER
static int decode_frame_adu(AVCodecContext * avctx,
void *data, int *data_size,
uint8_t * buf, int buf_size)
*data_size = out_size;
return buf_size;
}
+#endif /* CONFIG_MP3ADU_DECODER */
-
+#ifdef CONFIG_MP3ON4_DECODER
/* Next 3 arrays are indexed by channel config number (passed via codecdata) */
static int mp3Frames[16] = {0,1,1,2,3,3,4,5,2}; /* number of mp3 decoder instances */
static int mp3Channels[16] = {0,1,2,3,4,5,6,8,4}; /* total output channels */
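/* Editor's sketch (not part of the patch above): how the per-config tables
 * would typically be consulted. mp3on4_config() is a hypothetical helper and
 * the accepted range 1..8 is inferred from the initializers shown above. */
static int mp3on4_config(int chan_config, int *frames, int *channels)
{
    if (chan_config < 1 || chan_config > 8)
        return -1;                            /* unpopulated configurations */
    *frames   = mp3Frames[chan_config];       /* mp3 decoder instances to run */
    *channels = mp3Channels[chan_config];     /* total output channels */
    return 0;
}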
*data_size = out_size;
return buf_size;
}
+#endif /* CONFIG_MP3ON4_DECODER */
-
+#ifdef CONFIG_MP2_DECODER
AVCodec mp2_decoder =
{
"mp2",
decode_frame,
CODEC_CAP_PARSE_ONLY,
};
-
+#endif /* CONFIG_MP2_DECODER */
+#ifdef CONFIG_MP3_DECODER
AVCodec mp3_decoder =
{
"mp3",
decode_frame,
CODEC_CAP_PARSE_ONLY,
};
-
+#endif /* CONFIG_MP3_DECODER */
+#ifdef CONFIG_MP3ADU_DECODER
AVCodec mp3adu_decoder =
{
"mp3adu",
decode_frame_adu,
CODEC_CAP_PARSE_ONLY,
};
-
+#endif /* CONFIG_MP3ADU_DECODER */
+#ifdef CONFIG_MP3ON4_DECODER
AVCodec mp3on4_decoder =
{
"mp3on4",
decode_frame_mp3on4,
0
};
+#endif /* CONFIG_MP3ON4_DECODER */
dst->type= FF_BUFFER_TYPE_COPY;
}
+#ifdef CONFIG_ENCODERS
static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src){
int i;
}
}
}
+#endif
/**
* allocates a Picture
//STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
}
+#ifdef CONFIG_ENCODERS
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src){
#define COPY(a) dst->a= src->a
COPY(pict_type);
COPY(partitioned_frame); //FIXME don't set in encode_header
#undef COPY
}
+#endif
/**
* sets the given MpegEncContext to common defaults (same for encoding and decoding).
emms_c();
}
-#endif //CONFIG_ENCODERS
-
static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
const int intra= s->mb_intra;
int i;
}
}
-#ifdef CONFIG_ENCODERS
-
static int dct_quantize_trellis_c(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow){
static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr);
static int msmpeg4_decode_motion(MpegEncContext * s,
int *mx_ptr, int *my_ptr);
-static void msmpeg4v2_encode_motion(MpegEncContext * s, int val);
static void init_h263_dc_for_msmpeg4(void);
static inline void msmpeg4_memsetw(short *tab, int val, int n);
#ifdef CONFIG_ENCODERS
+static void msmpeg4v2_encode_motion(MpegEncContext * s, int val);
static int get_size_of_code(MpegEncContext * s, RLTable *rl, int last, int run, int level, int intra);
#endif //CONFIG_ENCODERS
static int msmpeg4v12_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
tab[i] = val;
}
+#ifdef CONFIG_ENCODERS
static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
{
int range, bit_size, sign, code, bits;
}
}
}
+#endif
/* this is identical to h263 except that its range is multiplied by 2 */
static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
return ((*b)[-4]<<24) + ((*b)[-3]<<16) + ((*b)[-2]<<8) + (*b)[-1];
}
+#ifdef CONFIG_ENCODERS
static void put32(uint8_t **b, unsigned int v){
*(*b)++= v>>24;
*(*b)++= v>>16;
*(*b)++= v>>8;
*(*b)++= v;
}
+#endif
static const uint8_t pngsig[8] = {137, 80, 78, 71, 13, 10, 26, 10};
}
}
+#ifdef CONFIG_ENCODERS
static void png_get_interlaced_row(uint8_t *dst, int row_size,
int bits_per_pixel, int pass,
const uint8_t *src, int width)
break;
}
}
+#endif
/* XXX: optimize */
/* NOTE: 'dst' can be equal to 'last' */
}
}
+#ifdef CONFIG_ENCODERS
static void convert_from_rgba32(uint8_t *dst, const uint8_t *src, int width)
{
uint8_t *d;
d += 4;
}
}
+#endif /* CONFIG_ENCODERS */
+#ifdef CONFIG_DECODERS
static void convert_to_rgba32(uint8_t *dst, const uint8_t *src, int width)
{
int j;
ret = -1;
goto the_end;
}
+#endif /* CONFIG_DECODERS */
+#ifdef CONFIG_ENCODERS
static void png_write_chunk(uint8_t **f, uint32_t tag,
const uint8_t *buf, int length)
{
}
return 0;
}
+#endif /* CONFIG_ENCODERS */
static int common_init(AVCodecContext *avctx){
PNGContext *s = avctx->priv_data;
return 0;
}
+#ifdef CONFIG_ENCODERS
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
PNGContext *s = avctx->priv_data;
AVFrame *pict = data;
ret = -1;
goto the_end;
}
+#endif /* CONFIG_ENCODERS */
+#ifdef CONFIG_PNG_DECODER
AVCodec png_decoder = {
"png",
CODEC_TYPE_VIDEO,
0 /*CODEC_CAP_DR1*/ /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
NULL
};
+#endif /* CONFIG_PNG_DECODER */
#ifdef CONFIG_PNG_ENCODER
AVCodec png_encoder = {
}
/* RAW Encoder Implementation */
-
+#ifdef CONFIG_RAWVIDEO_ENCODER
static int raw_init_encoder(AVCodecContext *avctx)
{
avctx->coded_frame = (AVFrame *)avctx->priv_data;
avctx->height, frame, buf_size);
}
-#ifdef CONFIG_RAWVIDEO_ENCODER
AVCodec rawvideo_encoder = {
"rawvideo",
CODEC_TYPE_VIDEO,
return x;
}
+#ifdef CONFIG_ENCODERS
// Heavily modified Levinson-Durbin algorithm which
// copes better with quantization, and calculates the
// actual whitened result as it goes.
av_free(state);
}
+#endif /* CONFIG_ENCODERS */
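/* Editor's sketch (not part of the patch above): for reference, the plain
 * textbook Levinson-Durbin recursion that the "heavily modified" comment in
 * sonic.c refers to. The function name, double-precision types and the sign
 * convention are illustrative assumptions, not taken from sonic.c. */
static void levinson_durbin_ref(const double *autoc, int order, double *lpc)
{
    int i, j;
    double err = autoc[0];                  /* prediction error at order 0 */

    for (i = 0; i < order; i++) {
        /* reflection coefficient for order i+1 */
        double k = autoc[i + 1];
        for (j = 0; j < i; j++)
            k -= lpc[j] * autoc[i - j];
        k /= err;

        lpc[i] = k;
        /* symmetric in-place update of the lower-order coefficients */
        for (j = 0; j < i / 2; j++) {
            double tmp      = lpc[j];
            lpc[j]         -= k * lpc[i - 1 - j];
            lpc[i - 1 - j] -= k * tmp;
        }
        if (i & 1)
            lpc[i / 2] -= k * lpc[i / 2];

        err *= 1.0 - k * k;
    }
}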
static int samplerate_table[] =
{ 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };
}
#endif //CONFIG_ENCODERS
+#ifdef CONFIG_DECODERS
static int sonic_decode_init(AVCodecContext *avctx)
{
SonicContext *s = avctx->priv_data;
return (get_bits_count(&gb)+7)/8;
}
+#endif /* CONFIG_DECODERS */
#ifdef CONFIG_ENCODERS
AVCodec sonic_encoder = {
}
#endif
+#ifdef CONFIG_DECODERS
static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
uint8_t seed;
int i;
MPV_common_end(s);
return 0;
}
+#endif /* CONFIG_DECODERS */
+#ifdef CONFIG_ENCODERS
static void svq1_write_header(SVQ1Context *s, int frame_type)
{
int i;
return best_score;
}
-#ifdef CONFIG_ENCODERS
static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
int width, int height, int src_stride, int stride)
#endif //CONFIG_ENCODERS
+#ifdef CONFIG_DECODERS
AVCodec svq1_decoder = {
"svq1",
CODEC_TYPE_VIDEO,
.flush= ff_mpeg_flush,
.pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};
+#endif /* CONFIG_DECODERS */
#ifdef CONFIG_ENCODERS