/* XXX: pb because no interleaving */
for(i=0;i<nb_frames;i++) {
if (enc->codec_id != CODEC_ID_RAWVIDEO) {
- AVVideoFrame big_picture;
+ AVFrame big_picture;
- memset(&big_picture, 0, sizeof(AVVideoFrame));
+ memset(&big_picture, 0, sizeof(AVFrame));
*(AVPicture*)&big_picture= *final_picture;
/* handles sameq here. This is not correct because it may
total_size += frame_size;
if (enc->codec_type == CODEC_TYPE_VIDEO) {
frame_number = ost->frame_number;
- fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_picture->quality);
+ fprintf(fvstats, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality);
if (enc->flags&CODEC_FLAG_PSNR)
- fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_picture->error[0]/(enc->width*enc->height*255.0*255.0)));
+ fprintf(fvstats, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
fprintf(fvstats,"f_size= %6d ", frame_size);
/* compute pts value */
avg_bitrate = (double)(total_size * 8) / ti1 / 1000.0;
fprintf(fvstats, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
(double)total_size / 1024, ti1, bitrate, avg_bitrate);
- fprintf(fvstats,"type= %s\n", enc->coded_picture->key_frame == 1 ? "I" : "P");
+ fprintf(fvstats,"type= %s\n", enc->coded_frame->key_frame == 1 ? "I" : "P");
}
}
enc = &ost->st->codec;
if (vid && enc->codec_type == CODEC_TYPE_VIDEO) {
sprintf(buf + strlen(buf), "q=%2.1f ",
- enc->coded_picture->quality);
+ enc->coded_frame->quality);
}
if (!vid && enc->codec_type == CODEC_TYPE_VIDEO) {
frame_number = ost->frame_number;
sprintf(buf + strlen(buf), "frame=%5d q=%2.1f ",
- frame_number, enc->coded_picture ? enc->coded_picture->quality : 0);
+ frame_number, enc->coded_frame ? enc->coded_frame->quality : 0);
if (enc->flags&CODEC_FLAG_PSNR)
- sprintf(buf + strlen(buf), "PSNR= %6.2f ", psnr(enc->coded_picture->error[0]/(enc->width*enc->height*255.0*255.0)));
+ sprintf(buf + strlen(buf), "PSNR= %6.2f ", psnr(enc->coded_frame->error[0]/(enc->width*enc->height*255.0*255.0)));
vid = 1;
}
/* compute min output value */
ist->st->codec.height);
ret = len;
} else {
- AVVideoFrame big_picture;
+ AVFrame big_picture;
data_size = (ist->st->codec.width * ist->st->codec.height * 3) / 2;
ret = avcodec_decode_video(&ist->st->codec,
/* we use the codec indication because it is
more accurate than the demux flags */
pkt->flags = 0;
- if (st->codec.coded_picture->key_frame)
+ if (st->codec.coded_frame->key_frame)
pkt->flags |= PKT_FLAG_KEY;
return 0;
}
codec = &ctx->streams[pkt.stream_index]->codec;
}
- codec->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
+ codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
#ifdef PJSG
if (codec->codec_type == CODEC_TYPE_AUDIO) {
};
avctx->frame_size = AC3_FRAME_SIZE;
- avctx->key_frame = 1; /* always key frame */
/* number of channels */
if (channels < 1 || channels > 6)
}
ac3_crc_init();
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
return 0;
}
return output_frame_end(s);
}
+/* Free the coded_frame allocated in AC3_encode_init(). */
+static int AC3_encode_close(AVCodecContext *avctx)
+{
+    av_freep(&avctx->coded_frame);
+    return 0;  /* close callbacks return an int; falling off the end is UB */
+}
+
#if 0
/*************************************************************************/
/* TEST */
sizeof(AC3EncodeContext),
AC3_encode_init,
AC3_encode_frame,
+ AC3_encode_close,
NULL,
};
return -1;
break;
}
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
return 0;
}
static int adpcm_encode_close(AVCodecContext *avctx)
{
- /* nothing to free */
+ av_freep(&avctx->coded_frame);
+
return 0;
}
default:
return -1;
}
- avctx->key_frame = 1;
return dst - frame;
}
AVCodecContext *c= NULL;
int i, out_size, size, x, y, outbuf_size;
FILE *f;
- AVVideoFrame *picture;
+ AVFrame *picture;
UINT8 *outbuf, *picture_buf;
printf("Video encoding\n");
}
c= avcodec_alloc_context();
- picture= avcodec_alloc_picture();
+ picture= avcodec_alloc_frame();
/* put sample parameters */
c->bit_rate = 400000;
AVCodecContext *c= NULL;
int frame, size, got_picture, len;
FILE *f;
- AVVideoFrame *picture;
+ AVFrame *picture;
UINT8 inbuf[INBUF_SIZE], *inbuf_ptr;
char buf[1024];
}
c= avcodec_alloc_context();
- picture= avcodec_alloc_picture();
+ picture= avcodec_alloc_frame();
if(codec->capabilities&CODEC_CAP_TRUNCATED)
c->flags|= CODEC_FLAG_TRUNCATED; /* we dont send complete frames */
#define LIBAVCODEC_VERSION_INT 0x000406
#define LIBAVCODEC_VERSION "0.4.6"
-#define LIBAVCODEC_BUILD 4644
-#define LIBAVCODEC_BUILD_STR "4644"
+#define LIBAVCODEC_BUILD 4645
+#define LIBAVCODEC_BUILD_STR "4645"
enum CodecID {
CODEC_ID_NONE,
#define FRAME_RATE_BASE 10000
-#define FF_COMMON_PICTURE \
+#define FF_COMMON_FRAME \
uint8_t *data[4];\
int linesize[4];\
/**\
#define FF_B_TYPE 3 // Bi-dir predicted
#define FF_S_TYPE 4 // S(GMC)-VOP MPEG4
-typedef struct AVVideoFrame {
- FF_COMMON_PICTURE
-} AVVideoFrame;
+typedef struct AVFrame {
+ FF_COMMON_FRAME
+} AVFrame;
typedef struct AVCodecContext {
/**
previous encoded frame */
/**
- * 1 -> keyframe, 0-> not (this if for audio only, for video, AVVideoFrame.key_frame should be used)
- * encoding: set by lavc (for the outputed bitstream, not the input frame)
- * decoding: set by lavc (for the decoded bitstream, not the displayed frame)
- */
- int key_frame;
-
- /**
* number of frames the decoded output will be delayed relative to
* the encoded input
* encoding: set by lavc.
* encoding: unused
* decoding: set by lavc, user can override
*/
- int (*get_buffer)(struct AVCodecContext *c, AVVideoFrame *pic);
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
/**
* called to release buffers which where allocated with get_buffer.
* encoding: unused
* decoding: set by lavc, user can override
*/
- void (*release_buffer)(struct AVCodecContext *c, AVVideoFrame *pic);
+ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
/**
* is 1 if the decoded stream contains b frames, 0 otherwise
* encoding: set by lavc
* decoding: set by lavc
*/
- AVVideoFrame *coded_picture;
+ AVFrame *coded_frame;
/**
* debug
void avcodec_get_context_defaults(AVCodecContext *s);
AVCodecContext *avcodec_alloc_context(void);
-AVVideoFrame *avcodec_alloc_picture(void);
+AVFrame *avcodec_alloc_frame(void);
-int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic);
-void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic);
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
int avcodec_decode_audio(AVCodecContext *avctx, INT16 *samples,
int *frame_size_ptr,
UINT8 *buf, int buf_size);
-int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
UINT8 *buf, int buf_size);
int avcodec_parse_frame(AVCodecContext *avctx, UINT8 **pdata,
int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
const short *samples);
int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
- const AVVideoFrame *pict);
+ const AVFrame *pict);
int avcodec_close(AVCodecContext *avctx);
int sampling_411; /* 0 = 420, 1 = 411 */
int width, height;
UINT8 *current_picture[3]; /* picture structure */
- AVVideoFrame picture;
+ AVFrame picture;
int linesize[3];
DCTELEM block[5*6][64] __align8;
UINT8 dv_zigzag[2][64];
emms_c();
/* return image */
- *data_size = sizeof(AVVideoFrame);
- *(AVVideoFrame*)data= s->picture;
+ *data_size = sizeof(AVFrame);
+ *(AVFrame*)data= s->picture;
avctx->release_buffer(avctx, &s->picture);
{
MpegEncContext *s = avctx->priv_data;
int ret,i;
- AVVideoFrame *pict = data;
+ AVFrame *pict = data;
float new_aspect;
#ifdef PRINT_FRAME_TIME
}
#endif
if(s->pict_type==B_TYPE || s->low_delay){
- *pict= *(AVVideoFrame*)&s->current_picture;
+ *pict= *(AVFrame*)&s->current_picture;
} else {
- *pict= *(AVVideoFrame*)&s->last_picture;
+ *pict= *(AVFrame*)&s->last_picture;
}
/* Return the Picture timestamp as the frame number */
/* dont output the last pic after seeking */
if(s->last_picture.data[0] || s->low_delay)
- *data_size = sizeof(AVVideoFrame);
+ *data_size = sizeof(AVFrame);
#ifdef PRINT_FRAME_TIME
printf("%Ld\n", rdtsc()-time);
#endif
uint8_t len[3][256];
uint32_t bits[3][256];
VLC vlc[3];
- AVVideoFrame picture;
+ AVFrame picture;
uint8_t __align8 bitstream_buffer[1024*1024*3]; //FIXME dynamic alloc or some other solution
DSPContext dsp;
}HYuvContext;
width= s->width= avctx->width;
height= s->height= avctx->height;
- avctx->coded_picture= &s->picture;
+ avctx->coded_frame= &s->picture;
s->bgr32=1;
assert(width && height);
avctx->stats_out= av_mallocz(1024*10);
s->version=2;
- avctx->coded_picture= &s->picture;
+ avctx->coded_frame= &s->picture;
s->picture.pict_type= FF_I_TYPE;
s->picture.key_frame= 1;
const int width2= s->width>>1;
const int height= s->height;
int fake_ystride, fake_ustride, fake_vstride;
- AVVideoFrame * const p= &s->picture;
+ AVFrame * const p= &s->picture;
- AVVideoFrame *picture = data;
+ AVFrame *picture = data;
*data_size = 0;
avctx->release_buffer(avctx, p);
- *data_size = sizeof(AVVideoFrame);
+ *data_size = sizeof(AVFrame);
return (get_bits_count(&s->gb)+7)>>3;
}
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
HYuvContext *s = avctx->priv_data;
- AVVideoFrame *pict = data;
+ AVFrame *pict = data;
const int width= s->width;
const int width2= s->width>>1;
const int height= s->height;
const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
- AVVideoFrame * const p= &s->picture;
+ AVFrame * const p= &s->picture;
int i, size;
init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
* DECODE_SLICE_EOP if the end of the picture is reached
*/
static int mpeg_decode_slice(AVCodecContext *avctx,
- AVVideoFrame *pict,
+ AVFrame *pict,
int start_code,
UINT8 *buf, int buf_size)
{
MPV_frame_end(s);
if (s->pict_type == B_TYPE || s->low_delay) {
- *pict= *(AVVideoFrame*)&s->current_picture;
+ *pict= *(AVFrame*)&s->current_picture;
} else {
s->picture_number++;
/* latency of 1 frame for I and P frames */
if (s->picture_number == 1) {
return DECODE_SLICE_OK;
} else {
- *pict= *(AVVideoFrame*)&s->last_picture;
+ *pict= *(AVFrame*)&s->last_picture;
}
}
return DECODE_SLICE_EOP;
Mpeg1Context *s = avctx->priv_data;
UINT8 *buf_end, *buf_ptr, *buf_start;
int len, start_code_found, ret, code, start_code, input_size;
- AVVideoFrame *picture = data;
+ AVFrame *picture = data;
MpegEncContext *s2 = &s->mpeg_enc_ctx;
dprintf("fill_buffer\n");
/* special case for last picture */
if (buf_size == 0) {
if (s2->picture_number > 0) {
- *picture= *(AVVideoFrame*)&s2->next_picture;
+ *picture= *(AVFrame*)&s2->next_picture;
- *data_size = sizeof(AVVideoFrame);
+ *data_size = sizeof(AVFrame);
}
return 0;
}
s->freq = freq;
s->bit_rate = bitrate * 1000;
avctx->frame_size = MPA_FRAME_SIZE;
- avctx->key_frame = 1; /* always key frame */
/* encoding freq */
s->lsf = 0;
total_quant_bits[i] = 12 * v;
}
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
return 0;
}
return pbBufPtr(&s->pb) - s->pb.buf;
}
+/* Free the coded_frame allocated in MPA_encode_init(). */
+static int MPA_encode_close(AVCodecContext *avctx)
+{
+    av_freep(&avctx->coded_frame);
+    return 0;  /* close callbacks return an int; falling off the end is UB */
+}
AVCodec mp2_encoder = {
"mp2",
sizeof(MpegAudioContext),
MPA_encode_init,
MPA_encode_frame,
+ MPA_encode_close,
NULL,
};
assert(!pic->data[0]);
- r= s->avctx->get_buffer(s->avctx, (AVVideoFrame*)pic);
+ r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
if(r<0 || !pic->age || !pic->type || !pic->data[0]){
fprintf(stderr, "get_buffer() failed (%d %d %d %X)\n", r, pic->age, pic->type, (int)pic->data[0]);
int i;
if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
- s->avctx->release_buffer(s->avctx, (AVVideoFrame*)pic);
+ s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
}
av_freep(&pic->mb_var);
CHECKED_ALLOCZ(s->edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
- s->avctx->coded_picture= (AVVideoFrame*)&s->current_picture;
+ s->avctx->coded_frame= (AVFrame*)&s->current_picture;
if (s->encoding) {
int mv_table_size= (s->mb_width+2)*(s->mb_height+2);
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
int i;
- AVVideoFrame *pic;
+ AVFrame *pic;
s->mb_skiped = 0;
//printf("%8X %d %d %X %X\n", s->picture[i].data[0], s->picture[i].type, i, s->next_picture.data[0], s->last_picture.data[0]);
if(s->picture[i].data[0] == s->last_picture.data[0]){
// s->picture[i].reference=0;
- avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+ avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
break;
}
}
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] && s->picture[i].data[0] != s->next_picture.data[0] && s->picture[i].reference){
fprintf(stderr, "releasing zombie picture\n");
- avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+ avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
}
}
}
if(!s->encoding){
i= find_unused_picture(s, 0);
- pic= (AVVideoFrame*)&s->picture[i];
+ pic= (AVFrame*)&s->picture[i];
pic->reference= s->pict_type != B_TYPE;
pic->coded_picture_number= s->current_picture.coded_picture_number+1;
/* release non refernce frames */
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/)
- s->avctx->release_buffer(s->avctx, (AVVideoFrame*)&s->picture[i]);
+ s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
}
}
}
-static int load_input_picture(MpegEncContext *s, AVVideoFrame *pic_arg){
- AVVideoFrame *pic;
+static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
+ AVFrame *pic;
int i;
const int encoding_delay= s->max_b_frames;
int direct=1;
if(direct){
i= find_unused_picture(s, 1);
- pic= (AVVideoFrame*)&s->picture[i];
+ pic= (AVFrame*)&s->picture[i];
pic->reference= 1;
for(i=0; i<4; i++){
}else{
i= find_unused_picture(s, 0);
- pic= (AVVideoFrame*)&s->picture[i];
+ pic= (AVFrame*)&s->picture[i];
pic->reference= 1;
alloc_picture(s, (Picture*)pic, 0);
unsigned char *buf, int buf_size, void *data)
{
MpegEncContext *s = avctx->priv_data;
- AVVideoFrame *pic_arg = data;
+ AVFrame *pic_arg = data;
int i;
init_put_bits(&s->pb, buf, buf_size, NULL, NULL);
} ScanTable;
typedef struct Picture{
- FF_COMMON_PICTURE
+ FF_COMMON_FRAME
int mb_var_sum; /* sum of MB variance for current frame */
int mc_mb_var_sum; /* motion compensated MB variance for current frame */
int oggvorbis_init_encoder(vorbis_info *vi, AVCodecContext *avccontext) {
- if(avccontext->quality) /* VBR requested */
+ if(avccontext->coded_frame->quality) /* VBR requested */
return vorbis_encode_init_vbr(vi, avccontext->channels,
- avccontext->sample_rate, (float)avccontext->quality / 1000) ;
+ avccontext->sample_rate, (float)avccontext->coded_frame->quality / 1000) ;
return vorbis_encode_init(vi, avccontext->channels,
avccontext->sample_rate, -1, avccontext->bit_rate, -1) ;
vorbis_block_init(&context->vd, &context->vb) ;
avccontext->frame_size = OGGVORBIS_FRAME_SIZE ;
+
+ avccontext->coded_frame= avcodec_alloc_frame();
+ avccontext->coded_frame->key_frame= 1;
return 0 ;
}
vorbis_block_clear(&context->vb);
vorbis_dsp_clear(&context->vd);
vorbis_info_clear(&context->vi);
+
+ av_freep(&avccontext->coded_frame);
return 0 ;
}
default:
break;
}
+
+ avctx->coded_frame= avcodec_alloc_frame();
+ avctx->coded_frame->key_frame= 1;
+
return 0;
}
static int pcm_encode_close(AVCodecContext *avctx)
{
+ av_freep(&avctx->coded_frame);
+
switch(avctx->codec->id) {
case CODEC_ID_PCM_ALAW:
if (--linear_to_alaw_ref == 0)
default:
return -1;
}
- avctx->key_frame = 1;
//avctx->frame_size = (dst - frame) / (sample_size * avctx->channels);
return dst - frame;
{
MpegEncContext *s = avctx->priv_data;
int i;
- AVVideoFrame *pict = data;
+ AVFrame *pict = data;
#ifdef DEBUG
printf("*****frame %d size=%d\n", avctx->frame_number, buf_size);
if(s->mb_y>=s->mb_height){
MPV_frame_end(s);
- *pict= *(AVVideoFrame*)&s->current_picture;
+ *pict= *(AVFrame*)&s->current_picture;
- *data_size = sizeof(AVVideoFrame);
+ *data_size = sizeof(AVFrame);
}else{
*data_size = 0;
}
MpegEncContext *s=avctx->priv_data;
uint8_t *current, *previous;
int result, i, x, y, width, height;
- AVVideoFrame *pict = data;
+ AVFrame *pict = data;
/* initialize bit buffer */
init_get_bits(&s->gb,buf,buf_size);
}
}
- *pict = *(AVVideoFrame*)&s->current_picture;
+ *pict = *(AVFrame*)&s->current_picture;
MPV_frame_end(s);
- *data_size=sizeof(AVVideoFrame);
+ *data_size=sizeof(AVFrame);
return buf_size;
}
uint8_t *data[4];
}DefaultPicOpaque;
-int avcodec_default_get_buffer(AVCodecContext *s, AVVideoFrame *pic){
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
int i;
const int width = s->width;
const int height= s->height;
return 0;
}
-void avcodec_default_release_buffer(AVCodecContext *s, AVVideoFrame *pic){
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
int i;
assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
}
/**
- * allocates a AVPicture and set it to defaults.
+ * allocates an AVFrame and sets it to defaults.
* this can be deallocated by simply calling free()
*/
-AVVideoFrame *avcodec_alloc_picture(void){
- AVVideoFrame *pic= av_mallocz(sizeof(AVVideoFrame));
+AVFrame *avcodec_alloc_frame(void){
+ AVFrame *pic= av_mallocz(sizeof(AVFrame));
return pic;
}
}
int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
- const AVVideoFrame *pict)
+ const AVFrame *pict)
{
int ret;
/* decode a frame. return -1 if error, otherwise return the number of
bytes used. If no frame could be decompressed, *got_picture_ptr is
zero. Otherwise, it is non zero */
-int avcodec_decode_video(AVCodecContext *avctx, AVVideoFrame *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
UINT8 *buf, int buf_size)
{
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
|| s->picture[i].type == FF_BUFFER_TYPE_USER))
- avctx->release_buffer(avctx, (AVVideoFrame*)&s->picture[i]);
+ avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
}
break;
default:
int val;
val = stream->num;
- if (s->streams[val - 1]->codec.coded_picture->key_frame /* && frag_offset == 0 */)
+ if (s->streams[val - 1]->codec.coded_frame->key_frame /* && frag_offset == 0 */)
val |= 0x80;
put_byte(pb, val);
put_byte(pb, stream->seq);
if (enc->codec_type == CODEC_TYPE_VIDEO) {
tag[2] = 'd';
tag[3] = 'c';
- flags = enc->coded_picture->key_frame ? 0x10 : 0x00;
+ flags = enc->coded_frame->key_frame ? 0x10 : 0x00;
} else {
tag[2] = 'w';
tag[3] = 'b';
/* packet size & key_frame */
header[0] = stream_index;
header[1] = 0;
- if (st->codec.coded_picture && st->codec.coded_picture->key_frame)
+    if (st->codec.coded_frame->key_frame) // a NULL st->codec.coded_frame here would indicate a bug elsewhere
header[1] |= FLAG_KEY_FRAME;
header[2] = (size >> 16) & 0xff;
header[3] = (size >> 8) & 0xff;
/* XXX: suppress this malloc */
buf1= (UINT8*) av_malloc( size * sizeof(UINT8) );
- write_packet_header(s, stream, size, stream->enc->key_frame);
+ write_packet_header(s, stream, size, stream->enc->coded_frame->key_frame);
/* for AC3, the words seems to be reversed */
for(i=0;i<size;i+=2) {
RMContext *rm = s->priv_data;
ByteIOContext *pb = &s->pb;
StreamInfo *stream = rm->video_stream;
- int key_frame = stream->enc->coded_picture->key_frame;
+ int key_frame = stream->enc->coded_frame->key_frame;
/* XXX: this is incorrect: should be a parameter */
AVCodec *codec;
AVStream *st;
AVPacket *pkt;
- AVVideoFrame picture;
+ AVFrame picture;
AVPacketList *pktl=NULL, **ppktl;
short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
UINT8 *ptr;