Tizen 2.1 base
[sdk/emulator/qemu.git] / tizen / distrib / ffmpeg / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterPicRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum SampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     int xpos;
174
175     SDL_Thread *subtitle_tid;
176     int subtitle_stream;
177     int subtitle_stream_changed;
178     AVStream *subtitle_st;
179     PacketQueue subtitleq;
180     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
181     int subpq_size, subpq_rindex, subpq_windex;
182     SDL_mutex *subpq_mutex;
183     SDL_cond *subpq_cond;
184
185     double frame_timer;
186     double frame_last_pts;
187     double frame_last_delay;
188     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
189     int video_stream;
190     AVStream *video_st;
191     PacketQueue videoq;
192     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
193     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
194     int64_t video_current_pos;                   ///<current displayed file pos
195     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
196     int pictq_size, pictq_rindex, pictq_windex;
197     SDL_mutex *pictq_mutex;
198     SDL_cond *pictq_cond;
199 #if !CONFIG_AVFILTER
200     struct SwsContext *img_convert_ctx;
201 #endif
202
203     //    QETimer *video_timer;
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207     int64_t faulty_pts;
208     int64_t faulty_dts;
209     int64_t last_dts_for_fault_detection;
210     int64_t last_pts_for_fault_detection;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB]={
238     [AVMEDIA_TYPE_AUDIO]=-1,
239     [AVMEDIA_TYPE_VIDEO]=-1,
240     [AVMEDIA_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int thread_count = 1;
252 static int workaround_bugs = 1;
253 static int fast = 0;
254 static int genpts = 0;
255 static int lowres = 0;
256 static int idct = FF_IDCT_AUTO;
257 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260 static int error_recognition = FF_ER_CAREFUL;
261 static int error_concealment = 3;
262 static int decoder_reorder_pts= -1;
263 static int autoexit;
264 static int loop=1;
265 static int framedrop=1;
266
267 static int rdftspeed=20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
277 static AVPacket flush_pkt;
278
279 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
282
283 static SDL_Surface *screen;
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
288 static void packet_queue_init(PacketQueue *q)
289 {
290     memset(q, 0, sizeof(PacketQueue));
291     q->mutex = SDL_CreateMutex();
292     q->cond = SDL_CreateCond();
293     packet_queue_put(q, &flush_pkt);
294 }
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298     AVPacketList *pkt, *pkt1;
299
300     SDL_LockMutex(q->mutex);
301     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302         pkt1 = pkt->next;
303         av_free_packet(&pkt->pkt);
304         av_freep(&pkt);
305     }
306     q->last_pkt = NULL;
307     q->first_pkt = NULL;
308     q->nb_packets = 0;
309     q->size = 0;
310     SDL_UnlockMutex(q->mutex);
311 }
312
313 static void packet_queue_end(PacketQueue *q)
314 {
315     packet_queue_flush(q);
316     SDL_DestroyMutex(q->mutex);
317     SDL_DestroyCond(q->cond);
318 }
319
320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
321 {
322     AVPacketList *pkt1;
323
324     /* duplicate the packet */
325     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
326         return -1;
327
328     pkt1 = av_malloc(sizeof(AVPacketList));
329     if (!pkt1)
330         return -1;
331     pkt1->pkt = *pkt;
332     pkt1->next = NULL;
333
334
335     SDL_LockMutex(q->mutex);
336
337     if (!q->last_pkt)
338
339         q->first_pkt = pkt1;
340     else
341         q->last_pkt->next = pkt1;
342     q->last_pkt = pkt1;
343     q->nb_packets++;
344     q->size += pkt1->pkt.size + sizeof(*pkt1);
345     /* XXX: should duplicate packet data in DV case */
346     SDL_CondSignal(q->cond);
347
348     SDL_UnlockMutex(q->mutex);
349     return 0;
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for(;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #if 0
411 /* draw only the border of a rectangle */
412 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
413 {
414     int w1, w2, h1, h2;
415
416     /* fill the background */
417     w1 = x;
418     if (w1 < 0)
419         w1 = 0;
420     w2 = s->width - (x + w);
421     if (w2 < 0)
422         w2 = 0;
423     h1 = y;
424     if (h1 < 0)
425         h1 = 0;
426     h2 = s->height - (y + h);
427     if (h2 < 0)
428         h2 = 0;
429     fill_rectangle(screen,
430                    s->xleft, s->ytop,
431                    w1, s->height,
432                    color);
433     fill_rectangle(screen,
434                    s->xleft + s->width - w2, s->ytop,
435                    w2, s->height,
436                    color);
437     fill_rectangle(screen,
438                    s->xleft + w1, s->ytop,
439                    s->width - w1 - w2, h1,
440                    color);
441     fill_rectangle(screen,
442                    s->xleft + w1, s->ytop + s->height - h2,
443                    s->width - w1 - w2, h2,
444                    color);
445 }
446 #endif
447
448 #define ALPHA_BLEND(a, oldp, newp, s)\
449 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
450
451 #define RGBA_IN(r, g, b, a, s)\
452 {\
453     unsigned int v = ((const uint32_t *)(s))[0];\
454     a = (v >> 24) & 0xff;\
455     r = (v >> 16) & 0xff;\
456     g = (v >> 8) & 0xff;\
457     b = v & 0xff;\
458 }
459
460 #define YUVA_IN(y, u, v, a, s, pal)\
461 {\
462     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
463     a = (val >> 24) & 0xff;\
464     y = (val >> 16) & 0xff;\
465     u = (val >> 8) & 0xff;\
466     v = val & 0xff;\
467 }
468
469 #define YUVA_OUT(d, y, u, v, a)\
470 {\
471     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
472 }
473
474
475 #define BPP 1
476
477 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
478 {
479     int wrap, wrap3, width2, skip2;
480     int y, u, v, a, u1, v1, a1, w, h;
481     uint8_t *lum, *cb, *cr;
482     const uint8_t *p;
483     const uint32_t *pal;
484     int dstx, dsty, dstw, dsth;
485
486     dstw = av_clip(rect->w, 0, imgw);
487     dsth = av_clip(rect->h, 0, imgh);
488     dstx = av_clip(rect->x, 0, imgw - dstw);
489     dsty = av_clip(rect->y, 0, imgh - dsth);
490     lum = dst->data[0] + dsty * dst->linesize[0];
491     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
492     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
493
494     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
495     skip2 = dstx >> 1;
496     wrap = dst->linesize[0];
497     wrap3 = rect->pict.linesize[0];
498     p = rect->pict.data[0];
499     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
500
501     if (dsty & 1) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
510             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
511             cb++;
512             cr++;
513             lum++;
514             p += BPP;
515         }
516         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 = u;
519             v1 = v;
520             a1 = a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
523             YUVA_IN(y, u, v, a, p + BPP, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += 2 * BPP;
533             lum += 2;
534         }
535         if (w) {
536             YUVA_IN(y, u, v, a, p, pal);
537             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
538             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
539             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
540             p++;
541             lum++;
542         }
543         p += wrap3 - dstw * BPP;
544         lum += wrap - dstw - dstx;
545         cb += dst->linesize[1] - width2 - skip2;
546         cr += dst->linesize[2] - width2 - skip2;
547     }
548     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
549         lum += dstx;
550         cb += skip2;
551         cr += skip2;
552
553         if (dstx & 1) {
554             YUVA_IN(y, u, v, a, p, pal);
555             u1 = u;
556             v1 = v;
557             a1 = a;
558             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
559             p += wrap3;
560             lum += wrap;
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 += u;
563             v1 += v;
564             a1 += a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
567             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
568             cb++;
569             cr++;
570             p += -wrap3 + BPP;
571             lum += -wrap + 1;
572         }
573         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579
580             YUVA_IN(y, u, v, a, p + BPP, pal);
581             u1 += u;
582             v1 += v;
583             a1 += a;
584             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
585             p += wrap3;
586             lum += wrap;
587
588             YUVA_IN(y, u, v, a, p, pal);
589             u1 += u;
590             v1 += v;
591             a1 += a;
592             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
594             YUVA_IN(y, u, v, a, p + BPP, pal);
595             u1 += u;
596             v1 += v;
597             a1 += a;
598             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
599
600             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
601             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
602
603             cb++;
604             cr++;
605             p += -wrap3 + 2 * BPP;
606             lum += -wrap + 2;
607         }
608         if (w) {
609             YUVA_IN(y, u, v, a, p, pal);
610             u1 = u;
611             v1 = v;
612             a1 = a;
613             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
614             p += wrap3;
615             lum += wrap;
616             YUVA_IN(y, u, v, a, p, pal);
617             u1 += u;
618             v1 += v;
619             a1 += a;
620             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
622             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
623             cb++;
624             cr++;
625             p += -wrap3 + BPP;
626             lum += -wrap + 1;
627         }
628         p += wrap3 + (wrap3 - dstw * BPP);
629         lum += wrap + (wrap - dstw - dstx);
630         cb += dst->linesize[1] - width2 - skip2;
631         cr += dst->linesize[2] - width2 - skip2;
632     }
633     /* handle odd height */
634     if (h) {
635         lum += dstx;
636         cb += skip2;
637         cr += skip2;
638
639         if (dstx & 1) {
640             YUVA_IN(y, u, v, a, p, pal);
641             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
643             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
644             cb++;
645             cr++;
646             lum++;
647             p += BPP;
648         }
649         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
650             YUVA_IN(y, u, v, a, p, pal);
651             u1 = u;
652             v1 = v;
653             a1 = a;
654             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
655
656             YUVA_IN(y, u, v, a, p + BPP, pal);
657             u1 += u;
658             v1 += v;
659             a1 += a;
660             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
661             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
662             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
663             cb++;
664             cr++;
665             p += 2 * BPP;
666             lum += 2;
667         }
668         if (w) {
669             YUVA_IN(y, u, v, a, p, pal);
670             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
671             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
672             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
673         }
674     }
675 }
676
677 static void free_subpicture(SubPicture *sp)
678 {
679     int i;
680
681     for (i = 0; i < sp->sub.num_rects; i++)
682     {
683         av_freep(&sp->sub.rects[i]->pict.data[0]);
684         av_freep(&sp->sub.rects[i]->pict.data[1]);
685         av_freep(&sp->sub.rects[i]);
686     }
687
688     av_free(sp->sub.rects);
689
690     memset(&sp->sub, 0, sizeof(AVSubtitle));
691 }
692
693 static void video_image_display(VideoState *is)
694 {
695     VideoPicture *vp;
696     SubPicture *sp;
697     AVPicture pict;
698     float aspect_ratio;
699     int width, height, x, y;
700     SDL_Rect rect;
701     int i;
702
703     vp = &is->pictq[is->pictq_rindex];
704     if (vp->bmp) {
705 #if CONFIG_AVFILTER
706          if (vp->picref->pixel_aspect.num == 0)
707              aspect_ratio = 0;
708          else
709              aspect_ratio = av_q2d(vp->picref->pixel_aspect);
710 #else
711
712         /* XXX: use variable in the frame */
713         if (is->video_st->sample_aspect_ratio.num)
714             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
715         else if (is->video_st->codec->sample_aspect_ratio.num)
716             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
717         else
718             aspect_ratio = 0;
719 #endif
720         if (aspect_ratio <= 0.0)
721             aspect_ratio = 1.0;
722         aspect_ratio *= (float)vp->width / (float)vp->height;
723         /* if an active format is indicated, then it overrides the
724            mpeg format */
725 #if 0
726         if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
727             is->dtg_active_format = is->video_st->codec->dtg_active_format;
728             printf("dtg_active_format=%d\n", is->dtg_active_format);
729         }
730 #endif
731 #if 0
732         switch(is->video_st->codec->dtg_active_format) {
733         case FF_DTG_AFD_SAME:
734         default:
735             /* nothing to do */
736             break;
737         case FF_DTG_AFD_4_3:
738             aspect_ratio = 4.0 / 3.0;
739             break;
740         case FF_DTG_AFD_16_9:
741             aspect_ratio = 16.0 / 9.0;
742             break;
743         case FF_DTG_AFD_14_9:
744             aspect_ratio = 14.0 / 9.0;
745             break;
746         case FF_DTG_AFD_4_3_SP_14_9:
747             aspect_ratio = 14.0 / 9.0;
748             break;
749         case FF_DTG_AFD_16_9_SP_14_9:
750             aspect_ratio = 14.0 / 9.0;
751             break;
752         case FF_DTG_AFD_SP_4_3:
753             aspect_ratio = 4.0 / 3.0;
754             break;
755         }
756 #endif
757
758         if (is->subtitle_st)
759         {
760             if (is->subpq_size > 0)
761             {
762                 sp = &is->subpq[is->subpq_rindex];
763
764                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
765                 {
766                     SDL_LockYUVOverlay (vp->bmp);
767
768                     pict.data[0] = vp->bmp->pixels[0];
769                     pict.data[1] = vp->bmp->pixels[2];
770                     pict.data[2] = vp->bmp->pixels[1];
771
772                     pict.linesize[0] = vp->bmp->pitches[0];
773                     pict.linesize[1] = vp->bmp->pitches[2];
774                     pict.linesize[2] = vp->bmp->pitches[1];
775
776                     for (i = 0; i < sp->sub.num_rects; i++)
777                         blend_subrect(&pict, sp->sub.rects[i],
778                                       vp->bmp->w, vp->bmp->h);
779
780                     SDL_UnlockYUVOverlay (vp->bmp);
781                 }
782             }
783         }
784
785
786         /* XXX: we suppose the screen has a 1.0 pixel ratio */
787         height = is->height;
788         width = ((int)rint(height * aspect_ratio)) & ~1;
789         if (width > is->width) {
790             width = is->width;
791             height = ((int)rint(width / aspect_ratio)) & ~1;
792         }
793         x = (is->width - width) / 2;
794         y = (is->height - height) / 2;
795         if (!is->no_background) {
796             /* fill the background */
797             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
798         } else {
799             is->no_background = 0;
800         }
801         rect.x = is->xleft + x;
802         rect.y = is->ytop  + y;
803         rect.w = width;
804         rect.h = height;
805         SDL_DisplayYUVOverlay(vp->bmp, &rect);
806     } else {
807 #if 0
808         fill_rectangle(screen,
809                        is->xleft, is->ytop, is->width, is->height,
810                        QERGB(0x00, 0x00, 0x00));
811 #endif
812     }
813 }
814
815 static inline int compute_mod(int a, int b)
816 {
817     a = a % b;
818     if (a >= 0)
819         return a;
820     else
821         return a + b;
822 }
823
824 static void video_audio_display(VideoState *s)
825 {
826     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
827     int ch, channels, h, h2, bgcolor, fgcolor;
828     int16_t time_diff;
829     int rdft_bits, nb_freq;
830
831     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
832         ;
833     nb_freq= 1<<(rdft_bits-1);
834
835     /* compute display index : center on currently output samples */
836     channels = s->audio_st->codec->channels;
837     nb_display_channels = channels;
838     if (!s->paused) {
839         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
840         n = 2 * channels;
841         delay = audio_write_get_buf_size(s);
842         delay /= n;
843
844         /* to be more precise, we take into account the time spent since
845            the last buffer computation */
846         if (audio_callback_time) {
847             time_diff = av_gettime() - audio_callback_time;
848             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
849         }
850
851         delay += 2*data_used;
852         if (delay < data_used)
853             delay = data_used;
854
855         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
856         if(s->show_audio==1){
857             h= INT_MIN;
858             for(i=0; i<1000; i+=channels){
859                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
860                 int a= s->sample_array[idx];
861                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
862                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
863                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
864                 int score= a-d;
865                 if(h<score && (b^c)<0){
866                     h= score;
867                     i_start= idx;
868                 }
869             }
870         }
871
872         s->last_i_start = i_start;
873     } else {
874         i_start = s->last_i_start;
875     }
876
877     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
878     if(s->show_audio==1){
879         fill_rectangle(screen,
880                        s->xleft, s->ytop, s->width, s->height,
881                        bgcolor);
882
883         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
884
885         /* total height for one channel */
886         h = s->height / nb_display_channels;
887         /* graph height / 2 */
888         h2 = (h * 9) / 20;
889         for(ch = 0;ch < nb_display_channels; ch++) {
890             i = i_start + ch;
891             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
892             for(x = 0; x < s->width; x++) {
893                 y = (s->sample_array[i] * h2) >> 15;
894                 if (y < 0) {
895                     y = -y;
896                     ys = y1 - y;
897                 } else {
898                     ys = y1;
899                 }
900                 fill_rectangle(screen,
901                                s->xleft + x, ys, 1, y,
902                                fgcolor);
903                 i += channels;
904                 if (i >= SAMPLE_ARRAY_SIZE)
905                     i -= SAMPLE_ARRAY_SIZE;
906             }
907         }
908
909         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
910
911         for(ch = 1;ch < nb_display_channels; ch++) {
912             y = s->ytop + ch * h;
913             fill_rectangle(screen,
914                            s->xleft, y, s->width, 1,
915                            fgcolor);
916         }
917         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
918     }else{
919         nb_display_channels= FFMIN(nb_display_channels, 2);
920         if(rdft_bits != s->rdft_bits){
921             av_rdft_end(s->rdft);
922             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
923             s->rdft_bits= rdft_bits;
924         }
925         {
926             FFTSample data[2][2*nb_freq];
927             for(ch = 0;ch < nb_display_channels; ch++) {
928                 i = i_start + ch;
929                 for(x = 0; x < 2*nb_freq; x++) {
930                     double w= (x-nb_freq)*(1.0/nb_freq);
931                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
932                     i += channels;
933                     if (i >= SAMPLE_ARRAY_SIZE)
934                         i -= SAMPLE_ARRAY_SIZE;
935                 }
936                 av_rdft_calc(s->rdft, data[ch]);
937             }
938             //least efficient way to do this, we should of course directly access it but its more than fast enough
939             for(y=0; y<s->height; y++){
940                 double w= 1/sqrt(nb_freq);
941                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
942                 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
943                 a= FFMIN(a,255);
944                 b= FFMIN(b,255);
945                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
946
947                 fill_rectangle(screen,
948                             s->xpos, s->height-y, 1, 1,
949                             fgcolor);
950             }
951         }
952         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
953         s->xpos++;
954         if(s->xpos >= s->width)
955             s->xpos= s->xleft;
956     }
957 }
958
959 static int video_open(VideoState *is){
960     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
961     int w,h;
962
963     if(is_full_screen) flags |= SDL_FULLSCREEN;
964     else               flags |= SDL_RESIZABLE;
965
966     if (is_full_screen && fs_screen_width) {
967         w = fs_screen_width;
968         h = fs_screen_height;
969     } else if(!is_full_screen && screen_width){
970         w = screen_width;
971         h = screen_height;
972 #if CONFIG_AVFILTER
973     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
974         w = is->out_video_filter->inputs[0]->w;
975         h = is->out_video_filter->inputs[0]->h;
976 #else
977     }else if (is->video_st && is->video_st->codec->width){
978         w = is->video_st->codec->width;
979         h = is->video_st->codec->height;
980 #endif
981     } else {
982         w = 640;
983         h = 480;
984     }
985     if(screen && is->width == screen->w && screen->w == w
986        && is->height== screen->h && screen->h == h)
987         return 0;
988
989 #ifndef __APPLE__
990     screen = SDL_SetVideoMode(w, h, 0, flags);
991 #else
992     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
993     screen = SDL_SetVideoMode(w, h, 24, flags);
994 #endif
995     if (!screen) {
996         fprintf(stderr, "SDL: could not set video mode - exiting\n");
997         return -1;
998     }
999     if (!window_title)
1000         window_title = input_filename;
1001     SDL_WM_SetCaption(window_title, window_title);
1002
1003     is->width = screen->w;
1004     is->height = screen->h;
1005
1006     return 0;
1007 }
1008
1009 /* display the current picture, if any */
1010 static void video_display(VideoState *is)
1011 {
1012     if(!screen)
1013         video_open(cur_stream);
1014     if (is->audio_st && is->show_audio)
1015         video_audio_display(is);
1016     else if (is->video_st)
1017         video_image_display(is);
1018 }
1019
1020 static int refresh_thread(void *opaque)
1021 {
1022     VideoState *is= opaque;
1023     while(!is->abort_request){
1024     SDL_Event event;
1025     event.type = FF_REFRESH_EVENT;
1026     event.user.data1 = opaque;
1027         if(!is->refresh){
1028             is->refresh=1;
1029     SDL_PushEvent(&event);
1030         }
1031         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1032     }
1033     return 0;
1034 }
1035
1036 /* get the current audio clock value */
1037 static double get_audio_clock(VideoState *is)
1038 {
1039     double pts;
1040     int hw_buf_size, bytes_per_sec;
1041     pts = is->audio_clock;
1042     hw_buf_size = audio_write_get_buf_size(is);
1043     bytes_per_sec = 0;
1044     if (is->audio_st) {
1045         bytes_per_sec = is->audio_st->codec->sample_rate *
1046             2 * is->audio_st->codec->channels;
1047     }
1048     if (bytes_per_sec)
1049         pts -= (double)hw_buf_size / bytes_per_sec;
1050     return pts;
1051 }
1052
1053 /* get the current video clock value */
1054 static double get_video_clock(VideoState *is)
1055 {
1056     if (is->paused) {
1057         return is->video_current_pts;
1058     } else {
1059         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1060     }
1061 }
1062
1063 /* get the current external clock value */
1064 static double get_external_clock(VideoState *is)
1065 {
1066     int64_t ti;
1067     ti = av_gettime();
1068     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1069 }
1070
1071 /* get the current master clock value */
1072 static double get_master_clock(VideoState *is)
1073 {
1074     double val;
1075
1076     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1077         if (is->video_st)
1078             val = get_video_clock(is);
1079         else
1080             val = get_audio_clock(is);
1081     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1082         if (is->audio_st)
1083             val = get_audio_clock(is);
1084         else
1085             val = get_video_clock(is);
1086     } else {
1087         val = get_external_clock(is);
1088     }
1089     return val;
1090 }
1091
1092 /* seek in the stream */
1093 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1094 {
1095     if (!is->seek_req) {
1096         is->seek_pos = pos;
1097         is->seek_rel = rel;
1098         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1099         if (seek_by_bytes)
1100             is->seek_flags |= AVSEEK_FLAG_BYTE;
1101         is->seek_req = 1;
1102     }
1103 }
1104
1105 /* pause or resume the video */
1106 static void stream_pause(VideoState *is)
1107 {
1108     if (is->paused) {
1109         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1110         if(is->read_pause_return != AVERROR(ENOSYS)){
1111             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1112         }
1113         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1114     }
1115     is->paused = !is->paused;
1116 }
1117
1118 static double compute_target_time(double frame_current_pts, VideoState *is)
1119 {
1120     double delay, sync_threshold, diff;
1121
1122     /* compute nominal delay */
1123     delay = frame_current_pts - is->frame_last_pts;
1124     if (delay <= 0 || delay >= 10.0) {
1125         /* if incorrect delay, use previous one */
1126         delay = is->frame_last_delay;
1127     } else {
1128         is->frame_last_delay = delay;
1129     }
1130     is->frame_last_pts = frame_current_pts;
1131
1132     /* update delay to follow master synchronisation source */
1133     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1134          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1135         /* if video is slave, we try to correct big delays by
1136            duplicating or deleting a frame */
1137         diff = get_video_clock(is) - get_master_clock(is);
1138
1139         /* skip or repeat frame. We take into account the
1140            delay to compute the threshold. I still don't know
1141            if it is the best guess */
1142         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1143         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1144             if (diff <= -sync_threshold)
1145                 delay = 0;
1146             else if (diff >= sync_threshold)
1147                 delay = 2 * delay;
1148         }
1149     }
1150     is->frame_timer += delay;
1151 #if defined(DEBUG_SYNC)
1152     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1153             delay, actual_delay, frame_current_pts, -diff);
1154 #endif
1155
1156     return is->frame_timer;
1157 }
1158
1159 /* called to display each frame */
1160 static void video_refresh_timer(void *opaque)
1161 {
1162     VideoState *is = opaque;
1163     VideoPicture *vp;
1164
1165     SubPicture *sp, *sp2;
1166
1167     if (is->video_st) {
1168 retry:
1169         if (is->pictq_size == 0) {
1170             //nothing to do, no picture to display in the que
1171         } else {
1172             double time= av_gettime()/1000000.0;
1173             double next_target;
1174             /* dequeue the picture */
1175             vp = &is->pictq[is->pictq_rindex];
1176
1177             if(time < vp->target_clock)
1178                 return;
1179             /* update current video pts */
1180             is->video_current_pts = vp->pts;
1181             is->video_current_pts_drift = is->video_current_pts - time;
1182             is->video_current_pos = vp->pos;
1183             if(is->pictq_size > 1){
1184                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1185                 assert(nextvp->target_clock >= vp->target_clock);
1186                 next_target= nextvp->target_clock;
1187             }else{
1188                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1189             }
1190             if(framedrop && time > next_target){
1191                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1192                 if(is->pictq_size > 1 || time > next_target + 0.5){
1193                     /* update queue size and signal for next picture */
1194                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1195                         is->pictq_rindex = 0;
1196
1197                     SDL_LockMutex(is->pictq_mutex);
1198                     is->pictq_size--;
1199                     SDL_CondSignal(is->pictq_cond);
1200                     SDL_UnlockMutex(is->pictq_mutex);
1201                     goto retry;
1202                 }
1203             }
1204
1205             if(is->subtitle_st) {
1206                 if (is->subtitle_stream_changed) {
1207                     SDL_LockMutex(is->subpq_mutex);
1208
1209                     while (is->subpq_size) {
1210                         free_subpicture(&is->subpq[is->subpq_rindex]);
1211
1212                         /* update queue size and signal for next picture */
1213                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1214                             is->subpq_rindex = 0;
1215
1216                         is->subpq_size--;
1217                     }
1218                     is->subtitle_stream_changed = 0;
1219
1220                     SDL_CondSignal(is->subpq_cond);
1221                     SDL_UnlockMutex(is->subpq_mutex);
1222                 } else {
1223                     if (is->subpq_size > 0) {
1224                         sp = &is->subpq[is->subpq_rindex];
1225
1226                         if (is->subpq_size > 1)
1227                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1228                         else
1229                             sp2 = NULL;
1230
1231                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1232                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1233                         {
1234                             free_subpicture(sp);
1235
1236                             /* update queue size and signal for next picture */
1237                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1238                                 is->subpq_rindex = 0;
1239
1240                             SDL_LockMutex(is->subpq_mutex);
1241                             is->subpq_size--;
1242                             SDL_CondSignal(is->subpq_cond);
1243                             SDL_UnlockMutex(is->subpq_mutex);
1244                         }
1245                     }
1246                 }
1247             }
1248
1249             /* display picture */
1250             video_display(is);
1251
1252             /* update queue size and signal for next picture */
1253             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1254                 is->pictq_rindex = 0;
1255
1256             SDL_LockMutex(is->pictq_mutex);
1257             is->pictq_size--;
1258             SDL_CondSignal(is->pictq_cond);
1259             SDL_UnlockMutex(is->pictq_mutex);
1260         }
1261     } else if (is->audio_st) {
1262         /* draw the next audio frame */
1263
1264         /* if only audio stream, then display the audio bars (better
1265            than nothing, just to test the implementation */
1266
1267         /* display picture */
1268         video_display(is);
1269     }
1270     if (show_status) {
1271         static int64_t last_time;
1272         int64_t cur_time;
1273         int aqsize, vqsize, sqsize;
1274         double av_diff;
1275
1276         cur_time = av_gettime();
1277         if (!last_time || (cur_time - last_time) >= 30000) {
1278             aqsize = 0;
1279             vqsize = 0;
1280             sqsize = 0;
1281             if (is->audio_st)
1282                 aqsize = is->audioq.size;
1283             if (is->video_st)
1284                 vqsize = is->videoq.size;
1285             if (is->subtitle_st)
1286                 sqsize = is->subtitleq.size;
1287             av_diff = 0;
1288             if (is->audio_st && is->video_st)
1289                 av_diff = get_audio_clock(is) - get_video_clock(is);
1290             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1291                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1292             fflush(stdout);
1293             last_time = cur_time;
1294         }
1295     }
1296 }
1297
1298 /* allocate a picture (needs to do that in main thread to avoid
1299    potential locking problems */
1300 static void alloc_picture(void *opaque)
1301 {
1302     VideoState *is = opaque;
1303     VideoPicture *vp;
1304
1305     vp = &is->pictq[is->pictq_windex];
1306
1307     if (vp->bmp)
1308         SDL_FreeYUVOverlay(vp->bmp);
1309
1310 #if CONFIG_AVFILTER
1311     if (vp->picref)
1312         avfilter_unref_pic(vp->picref);
1313     vp->picref = NULL;
1314
1315     vp->width   = is->out_video_filter->inputs[0]->w;
1316     vp->height  = is->out_video_filter->inputs[0]->h;
1317     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1318 #else
1319     vp->width   = is->video_st->codec->width;
1320     vp->height  = is->video_st->codec->height;
1321     vp->pix_fmt = is->video_st->codec->pix_fmt;
1322 #endif
1323
1324     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1325                                    SDL_YV12_OVERLAY,
1326                                    screen);
1327
1328     SDL_LockMutex(is->pictq_mutex);
1329     vp->allocated = 1;
1330     SDL_CondSignal(is->pictq_cond);
1331     SDL_UnlockMutex(is->pictq_mutex);
1332 }
1333
1334 /**
1335  *
1336  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1337  */
1338 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1339 {
1340     VideoPicture *vp;
1341     int dst_pix_fmt;
1342 #if CONFIG_AVFILTER
1343     AVPicture pict_src;
1344 #endif
1345     /* wait until we have space to put a new picture */
1346     SDL_LockMutex(is->pictq_mutex);
1347
1348     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1349         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1350
1351     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1352            !is->videoq.abort_request) {
1353         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1354     }
1355     SDL_UnlockMutex(is->pictq_mutex);
1356
1357     if (is->videoq.abort_request)
1358         return -1;
1359
1360     vp = &is->pictq[is->pictq_windex];
1361
1362     /* alloc or resize hardware picture buffer */
1363     if (!vp->bmp ||
1364 #if CONFIG_AVFILTER
1365         vp->width  != is->out_video_filter->inputs[0]->w ||
1366         vp->height != is->out_video_filter->inputs[0]->h) {
1367 #else
1368         vp->width != is->video_st->codec->width ||
1369         vp->height != is->video_st->codec->height) {
1370 #endif
1371         SDL_Event event;
1372
1373         vp->allocated = 0;
1374
1375         /* the allocation must be done in the main thread to avoid
1376            locking problems */
1377         event.type = FF_ALLOC_EVENT;
1378         event.user.data1 = is;
1379         SDL_PushEvent(&event);
1380
1381         /* wait until the picture is allocated */
1382         SDL_LockMutex(is->pictq_mutex);
1383         while (!vp->allocated && !is->videoq.abort_request) {
1384             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1385         }
1386         SDL_UnlockMutex(is->pictq_mutex);
1387
1388         if (is->videoq.abort_request)
1389             return -1;
1390     }
1391
1392     /* if the frame is not skipped, then display it */
1393     if (vp->bmp) {
1394         AVPicture pict;
1395 #if CONFIG_AVFILTER
1396         if(vp->picref)
1397             avfilter_unref_pic(vp->picref);
1398         vp->picref = src_frame->opaque;
1399 #endif
1400
1401         /* get a pointer on the bitmap */
1402         SDL_LockYUVOverlay (vp->bmp);
1403
1404         dst_pix_fmt = PIX_FMT_YUV420P;
1405         memset(&pict,0,sizeof(AVPicture));
1406         pict.data[0] = vp->bmp->pixels[0];
1407         pict.data[1] = vp->bmp->pixels[2];
1408         pict.data[2] = vp->bmp->pixels[1];
1409
1410         pict.linesize[0] = vp->bmp->pitches[0];
1411         pict.linesize[1] = vp->bmp->pitches[2];
1412         pict.linesize[2] = vp->bmp->pitches[1];
1413
1414 #if CONFIG_AVFILTER
1415         pict_src.data[0] = src_frame->data[0];
1416         pict_src.data[1] = src_frame->data[1];
1417         pict_src.data[2] = src_frame->data[2];
1418
1419         pict_src.linesize[0] = src_frame->linesize[0];
1420         pict_src.linesize[1] = src_frame->linesize[1];
1421         pict_src.linesize[2] = src_frame->linesize[2];
1422
1423         //FIXME use direct rendering
1424         av_picture_copy(&pict, &pict_src,
1425                         vp->pix_fmt, vp->width, vp->height);
1426 #else
1427         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1428         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1429             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1430             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1431         if (is->img_convert_ctx == NULL) {
1432             fprintf(stderr, "Cannot initialize the conversion context\n");
1433             exit(1);
1434         }
1435         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1436                   0, vp->height, pict.data, pict.linesize);
1437 #endif
1438         /* update the bitmap content */
1439         SDL_UnlockYUVOverlay(vp->bmp);
1440
1441         vp->pts = pts;
1442         vp->pos = pos;
1443
1444         /* now we can update the picture count */
1445         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1446             is->pictq_windex = 0;
1447         SDL_LockMutex(is->pictq_mutex);
1448         vp->target_clock= compute_target_time(vp->pts, is);
1449
1450         is->pictq_size++;
1451         SDL_UnlockMutex(is->pictq_mutex);
1452     }
1453     return 0;
1454 }
1455
1456 /**
1457  * compute the exact PTS for the picture if it is omitted in the stream
1458  * @param pts1 the dts of the pkt / pts of the frame
1459  */
1460 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1461 {
1462     double frame_delay, pts;
1463
1464     pts = pts1;
1465
1466     if (pts != 0) {
1467         /* update video clock with pts, if present */
1468         is->video_clock = pts;
1469     } else {
1470         pts = is->video_clock;
1471     }
1472     /* update video clock for next frame */
1473     frame_delay = av_q2d(is->video_st->codec->time_base);
1474     /* for MPEG2, the frame can be repeated, so we update the
1475        clock accordingly */
1476     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1477     is->video_clock += frame_delay;
1478
1479 #if defined(DEBUG_SYNC) && 0
1480     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1481            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1482 #endif
1483     return queue_picture(is, src_frame, pts, pos);
1484 }
1485
1486 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1487 {
1488     int len1, got_picture, i;
1489
1490         if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1491             return -1;
1492
1493         if(pkt->data == flush_pkt.data){
1494             avcodec_flush_buffers(is->video_st->codec);
1495
1496             SDL_LockMutex(is->pictq_mutex);
1497             //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
1498             for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1499                 is->pictq[i].target_clock= 0;
1500             }
1501             while (is->pictq_size && !is->videoq.abort_request) {
1502                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1503             }
1504             is->video_current_pos= -1;
1505             SDL_UnlockMutex(is->pictq_mutex);
1506
1507             is->last_dts_for_fault_detection=
1508             is->last_pts_for_fault_detection= INT64_MIN;
1509             is->frame_last_pts= AV_NOPTS_VALUE;
1510             is->frame_last_delay = 0;
1511             is->frame_timer = (double)av_gettime() / 1000000.0;
1512             is->skip_frames= 1;
1513             is->skip_frames_index= 0;
1514             return 0;
1515         }
1516
1517         /* NOTE: ipts is the PTS of the _first_ picture beginning in
1518            this packet, if any */
1519         is->video_st->codec->reordered_opaque= pkt->pts;
1520         len1 = avcodec_decode_video2(is->video_st->codec,
1521                                     frame, &got_picture,
1522                                     pkt);
1523
1524         if (got_picture) {
1525             if(pkt->dts != AV_NOPTS_VALUE){
1526                 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1527                 is->last_dts_for_fault_detection= pkt->dts;
1528             }
1529             if(frame->reordered_opaque != AV_NOPTS_VALUE){
1530                 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1531                 is->last_pts_for_fault_detection= frame->reordered_opaque;
1532             }
1533         }
1534
1535         if(   (   decoder_reorder_pts==1
1536                || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1537                || pkt->dts == AV_NOPTS_VALUE)
1538            && frame->reordered_opaque != AV_NOPTS_VALUE)
1539             *pts= frame->reordered_opaque;
1540         else if(pkt->dts != AV_NOPTS_VALUE)
1541             *pts= pkt->dts;
1542         else
1543             *pts= 0;
1544
1545 //            if (len1 < 0)
1546 //                break;
1547     if (got_picture){
1548         is->skip_frames_index += 1;
1549         if(is->skip_frames_index >= is->skip_frames){
1550             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1551             return 1;
1552         }
1553
1554     }
1555     return 0;
1556 }
1557
1558 #if CONFIG_AVFILTER
1559 typedef struct {
1560     VideoState *is;
1561     AVFrame *frame;
1562     int use_dr1;
1563 } FilterPriv;
1564
1565 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1566 {
1567     AVFilterContext *ctx = codec->opaque;
1568     AVFilterPicRef  *ref;
1569     int perms = AV_PERM_WRITE;
1570     int w, h, stride[4];
1571     unsigned edge;
1572
1573     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1574         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1575         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1576         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1577     }
1578     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1579
1580     w = codec->width;
1581     h = codec->height;
1582     avcodec_align_dimensions2(codec, &w, &h, stride);
1583     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1584     w += edge << 1;
1585     h += edge << 1;
1586
1587     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1588         return -1;
1589
1590     ref->w = codec->width;
1591     ref->h = codec->height;
1592     for(int i = 0; i < 3; i ++) {
1593         unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
1594         unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;
1595
1596         if (ref->data[i]) {
1597             ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1598         }
1599         pic->data[i]     = ref->data[i];
1600         pic->linesize[i] = ref->linesize[i];
1601     }
1602     pic->opaque = ref;
1603     pic->age    = INT_MAX;
1604     pic->type   = FF_BUFFER_TYPE_USER;
1605     return 0;
1606 }
1607
1608 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1609 {
1610     memset(pic->data, 0, sizeof(pic->data));
1611     avfilter_unref_pic(pic->opaque);
1612 }
1613
1614 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1615 {
1616     FilterPriv *priv = ctx->priv;
1617     AVCodecContext *codec;
1618     if(!opaque) return -1;
1619
1620     priv->is = opaque;
1621     codec    = priv->is->video_st->codec;
1622     codec->opaque = ctx;
1623     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1624         priv->use_dr1 = 1;
1625         codec->get_buffer     = input_get_buffer;
1626         codec->release_buffer = input_release_buffer;
1627     }
1628
1629     priv->frame = avcodec_alloc_frame();
1630
1631     return 0;
1632 }
1633
1634 static void input_uninit(AVFilterContext *ctx)
1635 {
1636     FilterPriv *priv = ctx->priv;
1637     av_free(priv->frame);
1638 }
1639
1640 static int input_request_frame(AVFilterLink *link)
1641 {
1642     FilterPriv *priv = link->src->priv;
1643     AVFilterPicRef *picref;
1644     int64_t pts = 0;
1645     AVPacket pkt;
1646     int ret;
1647
1648     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1649         av_free_packet(&pkt);
1650     if (ret < 0)
1651         return -1;
1652
1653     if(priv->use_dr1) {
1654         picref = avfilter_ref_pic(priv->frame->opaque, ~0);
1655     } else {
1656         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1657         av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1658                         picref->pic->format, link->w, link->h);
1659     }
1660     av_free_packet(&pkt);
1661
1662     picref->pts = pts;
1663     picref->pos = pkt.pos;
1664     picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1665     avfilter_start_frame(link, picref);
1666     avfilter_draw_slice(link, 0, link->h, 1);
1667     avfilter_end_frame(link);
1668
1669     return 0;
1670 }
1671
1672 static int input_query_formats(AVFilterContext *ctx)
1673 {
1674     FilterPriv *priv = ctx->priv;
1675     enum PixelFormat pix_fmts[] = {
1676         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1677     };
1678
1679     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1680     return 0;
1681 }
1682
1683 static int input_config_props(AVFilterLink *link)
1684 {
1685     FilterPriv *priv  = link->src->priv;
1686     AVCodecContext *c = priv->is->video_st->codec;
1687
1688     link->w = c->width;
1689     link->h = c->height;
1690
1691     return 0;
1692 }
1693
1694 static AVFilter input_filter =
1695 {
1696     .name      = "ffplay_input",
1697
1698     .priv_size = sizeof(FilterPriv),
1699
1700     .init      = input_init,
1701     .uninit    = input_uninit,
1702
1703     .query_formats = input_query_formats,
1704
1705     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1706     .outputs   = (AVFilterPad[]) {{ .name = "default",
1707                                     .type = AVMEDIA_TYPE_VIDEO,
1708                                     .request_frame = input_request_frame,
1709                                     .config_props  = input_config_props, },
1710                                   { .name = NULL }},
1711 };
1712
1713 static void output_end_frame(AVFilterLink *link)
1714 {
1715 }
1716
1717 static int output_query_formats(AVFilterContext *ctx)
1718 {
1719     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1720
1721     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1722     return 0;
1723 }
1724
1725 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1726                                     int64_t *pts, int64_t *pos)
1727 {
1728     AVFilterPicRef *pic;
1729
1730     if(avfilter_request_frame(ctx->inputs[0]))
1731         return -1;
1732     if(!(pic = ctx->inputs[0]->cur_pic))
1733         return -1;
1734     ctx->inputs[0]->cur_pic = NULL;
1735
1736     frame->opaque = pic;
1737     *pts          = pic->pts;
1738     *pos          = pic->pos;
1739
1740     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1741     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1742
1743     return 1;
1744 }
1745
1746 static AVFilter output_filter =
1747 {
1748     .name      = "ffplay_output",
1749
1750     .query_formats = output_query_formats,
1751
1752     .inputs    = (AVFilterPad[]) {{ .name          = "default",
1753                                     .type          = AVMEDIA_TYPE_VIDEO,
1754                                     .end_frame     = output_end_frame,
1755                                     .min_perms     = AV_PERM_READ, },
1756                                   { .name = NULL }},
1757     .outputs   = (AVFilterPad[]) {{ .name = NULL }},
1758 };
1759 #endif  /* CONFIG_AVFILTER */
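
/* Summary of the filters defined above: "ffplay_input" is a source filter
   that pulls decoded pictures from the video decoder through
   get_video_frame() (optionally rendering directly into filter buffers via
   the DR1 path), and "ffplay_output" is a sink whose input pad is drained by
   get_filtered_video_frame().  video_thread() below links source to sink,
   inserting the user supplied -vfilters chain in between when one is given. */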
1760
1761 static int video_thread(void *arg)
1762 {
1763     VideoState *is = arg;
1764     AVFrame *frame= avcodec_alloc_frame();
1765     int64_t pts_int;
1766     double pts;
1767     int ret;
1768
1769 #if CONFIG_AVFILTER
1770     int64_t pos;
1771     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1772     AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
1773     graph->scale_sws_opts = av_strdup("sws_flags=bilinear");
1774
1775     if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
1776     if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;
1777
1778     if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
1779     if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;
1780
1781
1782     if(vfilters) {
1783         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1784         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1785
1786         outputs->name    = av_strdup("in");
1787         outputs->filter  = filt_src;
1788         outputs->pad_idx = 0;
1789         outputs->next    = NULL;
1790
1791         inputs->name    = av_strdup("out");
1792         inputs->filter  = filt_out;
1793         inputs->pad_idx = 0;
1794         inputs->next    = NULL;
1795
1796         if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1797             goto the_end;
1798         av_freep(&vfilters);
1799     } else {
1800         if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1801     }
1802     avfilter_graph_add_filter(graph, filt_src);
1803     avfilter_graph_add_filter(graph, filt_out);
1804
1805     if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
1806     if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
1807     if(avfilter_graph_config_links(graph, NULL))             goto the_end;
1808
1809     is->out_video_filter = filt_out;
1810 #endif
1811
1812     for(;;) {
1813 #if !CONFIG_AVFILTER
1814         AVPacket pkt;
1815 #endif
1816         while (is->paused && !is->videoq.abort_request)
1817             SDL_Delay(10);
1818 #if CONFIG_AVFILTER
1819         ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
1820 #else
1821         ret = get_video_frame(is, frame, &pts_int, &pkt);
1822 #endif
1823
1824         if (ret < 0) goto the_end;
1825
1826         if (!ret)
1827             continue;
1828
1829         pts = pts_int*av_q2d(is->video_st->time_base);
1830
1831 #if CONFIG_AVFILTER
1832         ret = output_picture2(is, frame, pts, pos);
1833 #else
1834         ret = output_picture2(is, frame, pts,  pkt.pos);
1835         av_free_packet(&pkt);
1836 #endif
1837         if (ret < 0)
1838             goto the_end;
1839
1840         if (step)
1841             if (cur_stream)
1842                 stream_pause(cur_stream);
1843     }
1844  the_end:
1845 #if CONFIG_AVFILTER
1846     avfilter_graph_destroy(graph);
1847     av_freep(&graph);
1848 #endif
1849     av_free(frame);
1850     return 0;
1851 }
1852
1853 static int subtitle_thread(void *arg)
1854 {
1855     VideoState *is = arg;
1856     SubPicture *sp;
1857     AVPacket pkt1, *pkt = &pkt1;
1858     int len1, got_subtitle;
1859     double pts;
1860     int i, j;
1861     int r, g, b, y, u, v, a;
1862
1863     for(;;) {
1864         while (is->paused && !is->subtitleq.abort_request) {
1865             SDL_Delay(10);
1866         }
1867         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1868             break;
1869
1870         if(pkt->data == flush_pkt.data){
1871             avcodec_flush_buffers(is->subtitle_st->codec);
1872             continue;
1873         }
1874         SDL_LockMutex(is->subpq_mutex);
1875         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1876                !is->subtitleq.abort_request) {
1877             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1878         }
1879         SDL_UnlockMutex(is->subpq_mutex);
1880
1881         if (is->subtitleq.abort_request)
1882             goto the_end;
1883
1884         sp = &is->subpq[is->subpq_windex];
1885
1886         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
1887            in this packet, if any */
1888         pts = 0;
1889         if (pkt->pts != AV_NOPTS_VALUE)
1890             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1891
1892         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1893                                     &sp->sub, &got_subtitle,
1894                                     pkt);
1895 //            if (len1 < 0)
1896 //                break;
1897         if (got_subtitle && sp->sub.format == 0) {
1898             sp->pts = pts;
1899
1900             for (i = 0; i < sp->sub.num_rects; i++)
1901             {
1902                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1903                 {
1904                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1905                     y = RGB_TO_Y_CCIR(r, g, b);
1906                     u = RGB_TO_U_CCIR(r, g, b, 0);
1907                     v = RGB_TO_V_CCIR(r, g, b, 0);
1908                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1909                 }
1910             }
1911
1912             /* now we can update the picture count */
1913             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1914                 is->subpq_windex = 0;
1915             SDL_LockMutex(is->subpq_mutex);
1916             is->subpq_size++;
1917             SDL_UnlockMutex(is->subpq_mutex);
1918         }
1919         av_free_packet(pkt);
1920 //        if (step)
1921 //            if (cur_stream)
1922 //                stream_pause(cur_stream);
1923     }
1924  the_end:
1925     return 0;
1926 }
1927
1928 /* copy samples for the audio waveform/spectrum display */
1929 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1930 {
1931     int size, len, channels;
1932
1933     channels = is->audio_st->codec->channels;
1934
1935     size = samples_size / sizeof(short);
1936     while (size > 0) {
1937         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1938         if (len > size)
1939             len = size;
1940         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1941         samples += len;
1942         is->sample_array_index += len;
1943         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1944             is->sample_array_index = 0;
1945         size -= len;
1946     }
1947 }
1948
1949 /* return the new audio buffer size (samples can be added or deleted to
1950    get better sync when video or the external clock is the master clock) */
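/* For illustration only (hypothetical numbers, not from a real stream): with
   16-bit stereo at 44100 Hz, n = 4 bytes per sample frame.  If the audio clock
   is 20 ms ahead of the master clock, diff = 0.02 and
   wanted_size = samples_size + 0.02 * 44100 * 4 ~= samples_size + 3528 bytes,
   which is then clamped to within SAMPLE_CORRECTION_PERCENT_MAX (10%) of the
   original size so a single callback never stretches or shrinks the audio too
   abruptly. */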
1951 static int synchronize_audio(VideoState *is, short *samples,
1952                              int samples_size1, double pts)
1953 {
1954     int n, samples_size;
1955     double ref_clock;
1956
1957     n = 2 * is->audio_st->codec->channels;
1958     samples_size = samples_size1;
1959
1960     /* if not master, then we try to remove or add samples to correct the clock */
1961     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1962          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1963         double diff, avg_diff;
1964         int wanted_size, min_size, max_size, nb_samples;
1965
1966         ref_clock = get_master_clock(is);
1967         diff = get_audio_clock(is) - ref_clock;
1968
1969         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1970             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1971             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1972                 /* not enough measurements yet for a reliable estimate */
1973                 is->audio_diff_avg_count++;
1974             } else {
1975                 /* estimate the A-V difference */
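                /* audio_diff_cum is an exponentially weighted sum of the
                   recent differences: cum = diff + coef*cum_prev, whose
                   weights add up to 1/(1 - coef), so multiplying by
                   (1 - coef) yields an approximate average A-V difference. */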
1976                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1977
1978                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1979                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1980                     nb_samples = samples_size / n;
1981
1982                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1983                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1984                     if (wanted_size < min_size)
1985                         wanted_size = min_size;
1986                     else if (wanted_size > max_size)
1987                         wanted_size = max_size;
1988
1989                     /* add or remove samples to correct the synchronization */
1990                     if (wanted_size < samples_size) {
1991                         /* remove samples */
1992                         samples_size = wanted_size;
1993                     } else if (wanted_size > samples_size) {
1994                         uint8_t *samples_end, *q;
1995                         int nb;
1996
1997                         /* add samples */
1998                         nb = (wanted_size - samples_size); /* number of bytes to append */
1999                         samples_end = (uint8_t *)samples + samples_size - n;
2000                         q = samples_end + n;
2001                         while (nb > 0) {
2002                             memcpy(q, samples_end, n);
2003                             q += n;
2004                             nb -= n;
2005                         }
2006                         samples_size = wanted_size;
2007                     }
2008                 }
2009 #if 0
2010                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2011                        diff, avg_diff, samples_size - samples_size1,
2012                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2013 #endif
2014             }
2015         } else {
2016             /* the difference is too large: it may be due to initial PTS
2017                errors, so reset the A-V filter */
2018             is->audio_diff_avg_count = 0;
2019             is->audio_diff_cum = 0;
2020         }
2021     }
2022
2023     return samples_size;
2024 }
2025
2026 /* decode one audio frame and return its uncompressed size */
2027 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2028 {
2029     AVPacket *pkt_temp = &is->audio_pkt_temp;
2030     AVPacket *pkt = &is->audio_pkt;
2031     AVCodecContext *dec= is->audio_st->codec;
2032     int n, len1, data_size;
2033     double pts;
2034
2035     for(;;) {
2036         /* NOTE: the audio packet can contain several frames */
2037         while (pkt_temp->size > 0) {
2038             data_size = sizeof(is->audio_buf1);
2039             len1 = avcodec_decode_audio3(dec,
2040                                         (int16_t *)is->audio_buf1, &data_size,
2041                                         pkt_temp);
2042             if (len1 < 0) {
2043                 /* if error, we skip the frame */
2044                 pkt_temp->size = 0;
2045                 break;
2046             }
2047
2048             pkt_temp->data += len1;
2049             pkt_temp->size -= len1;
2050             if (data_size <= 0)
2051                 continue;
2052
2053             if (dec->sample_fmt != is->audio_src_fmt) {
2054                 if (is->reformat_ctx)
2055                     av_audio_convert_free(is->reformat_ctx);
2056                 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
2057                                                          dec->sample_fmt, 1, NULL, 0);
2058                 if (!is->reformat_ctx) {
2059                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2060                         avcodec_get_sample_fmt_name(dec->sample_fmt),
2061                         avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
2062                     break;
2063                 }
2064                 is->audio_src_fmt= dec->sample_fmt;
2065             }
2066
2067             if (is->reformat_ctx) {
2068                 const void *ibuf[6]= {is->audio_buf1};
2069                 void *obuf[6]= {is->audio_buf2};
2070                 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
2071                 int ostride[6]= {2};
2072                 int len= data_size/istride[0];
2073                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2074                     printf("av_audio_convert() failed\n");
2075                     break;
2076                 }
2077                 is->audio_buf= is->audio_buf2;
2078                 /* FIXME: existing code assumes that data_size equals
2079                           framesize*channels*2; remove this legacy cruft */
2080                 data_size= len*2;
2081             }else{
2082                 is->audio_buf= is->audio_buf1;
2083             }
2084
2085             /* if no pts, then compute it */
2086             pts = is->audio_clock;
2087             *pts_ptr = pts;
2088             n = 2 * dec->channels;
2089             is->audio_clock += (double)data_size /
2090                 (double)(n * dec->sample_rate);
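            /* data_size bytes at n bytes per sample frame last
               data_size / (n * sample_rate) seconds; e.g. (illustrative
               values only) 4096 bytes of 16-bit stereo at 44100 Hz advance
               the audio clock by about 4096 / (4 * 44100) ~= 23 ms. */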
2091 #if defined(DEBUG_SYNC)
2092             {
2093                 static double last_clock;
2094                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2095                        is->audio_clock - last_clock,
2096                        is->audio_clock, pts);
2097                 last_clock = is->audio_clock;
2098             }
2099 #endif
2100             return data_size;
2101         }
2102
2103         /* free the current packet */
2104         if (pkt->data)
2105             av_free_packet(pkt);
2106
2107         if (is->paused || is->audioq.abort_request) {
2108             return -1;
2109         }
2110
2111         /* read next packet */
2112         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2113             return -1;
2114         if(pkt->data == flush_pkt.data){
2115             avcodec_flush_buffers(dec);
2116             continue;
2117         }
2118
2119         pkt_temp->data = pkt->data;
2120         pkt_temp->size = pkt->size;
2121
2122         /* update the audio clock with the packet pts, if available */
2123         if (pkt->pts != AV_NOPTS_VALUE) {
2124             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2125         }
2126     }
2127 }
2128
2129 /* get the amount of data remaining in the current audio output buffer, in
2130    bytes. With SDL, we cannot get precise buffer fullness information */
2131 static int audio_write_get_buf_size(VideoState *is)
2132 {
2133     return is->audio_buf_size - is->audio_buf_index;
2134 }
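
/* The value returned above is in bytes; it is used when estimating the audio
   clock to subtract the duration of data that has been decoded but not yet
   handed to SDL. */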
2135
2136
2137 /* prepare a new audio buffer */
2138 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2139 {
2140     VideoState *is = opaque;
2141     int audio_size, len1;
2142     double pts;
2143
2144     audio_callback_time = av_gettime();
2145
2146     while (len > 0) {
2147         if (is->audio_buf_index >= is->audio_buf_size) {
2148            audio_size = audio_decode_frame(is, &pts);
2149            if (audio_size < 0) {
2150                 /* if error, just output silence */
2151                is->audio_buf = is->audio_buf1;
2152                is->audio_buf_size = 1024;
2153                memset(is->audio_buf, 0, is->audio_buf_size);
2154            } else {
2155                if (is->show_audio)
2156                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2157                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2158                                               pts);
2159                is->audio_buf_size = audio_size;
2160            }
2161            is->audio_buf_index = 0;
2162         }
2163         len1 = is->audio_buf_size - is->audio_buf_index;
2164         if (len1 > len)
2165             len1 = len;
2166         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2167         len -= len1;
2168         stream += len1;
2169         is->audio_buf_index += len1;
2170     }
2171 }
2172
2173 /* open a given stream. Return 0 if OK */
2174 static int stream_component_open(VideoState *is, int stream_index)
2175 {
2176     AVFormatContext *ic = is->ic;
2177     AVCodecContext *avctx;
2178     AVCodec *codec;
2179     SDL_AudioSpec wanted_spec, spec;
2180
2181     if (stream_index < 0 || stream_index >= ic->nb_streams)
2182         return -1;
2183     avctx = ic->streams[stream_index]->codec;
2184
2185     /* prepare audio output */
2186     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2187         if (avctx->channels > 0) {
2188             avctx->request_channels = FFMIN(2, avctx->channels);
2189         } else {
2190             avctx->request_channels = 2;
2191         }
2192     }
2193
2194     codec = avcodec_find_decoder(avctx->codec_id);
2195     avctx->debug_mv = debug_mv;
2196     avctx->debug = debug;
2197     avctx->workaround_bugs = workaround_bugs;
2198     avctx->lowres = lowres;
2199     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2200     avctx->idct_algo= idct;
2201     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2202     avctx->skip_frame= skip_frame;
2203     avctx->skip_idct= skip_idct;
2204     avctx->skip_loop_filter= skip_loop_filter;
2205     avctx->error_recognition= error_recognition;
2206     avctx->error_concealment= error_concealment;
2207     avcodec_thread_init(avctx, thread_count);
2208
2209     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2210
2211     if (!codec ||
2212         avcodec_open(avctx, codec) < 0)
2213         return -1;
2214
2215     /* prepare audio output */
2216     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2217         wanted_spec.freq = avctx->sample_rate;
2218         wanted_spec.format = AUDIO_S16SYS;
2219         wanted_spec.channels = avctx->channels;
2220         wanted_spec.silence = 0;
2221         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2222         wanted_spec.callback = sdl_audio_callback;
2223         wanted_spec.userdata = is;
2224         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2225             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2226             return -1;
2227         }
2228         is->audio_hw_buf_size = spec.size;
2229         is->audio_src_fmt= SAMPLE_FMT_S16;
2230     }
2231
2232     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2233     switch(avctx->codec_type) {
2234     case AVMEDIA_TYPE_AUDIO:
2235         is->audio_stream = stream_index;
2236         is->audio_st = ic->streams[stream_index];
2237         is->audio_buf_size = 0;
2238         is->audio_buf_index = 0;
2239
2240         /* init averaging filter */
2241         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2242         is->audio_diff_avg_count = 0;
2243         /* since we do not have precise enough audio FIFO fullness info,
2244            we correct audio sync only if the error exceeds this threshold */
2245         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
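        /* rough orders of magnitude: audio_diff_avg_coef = 0.01^(1/20) ~= 0.79,
           so a measurement's weight decays to 1% after AUDIO_DIFF_AVG_NB (20)
           callbacks; with the 1024-sample SDL buffer and e.g. a 44100 Hz
           stream, audio_diff_threshold is about 2*1024/44100 ~= 46 ms. */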
2246
2247         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2248         packet_queue_init(&is->audioq);
2249         SDL_PauseAudio(0);
2250         break;
2251     case AVMEDIA_TYPE_VIDEO:
2252         is->video_stream = stream_index;
2253         is->video_st = ic->streams[stream_index];
2254
2255 //        is->video_current_pts_time = av_gettime();
2256
2257         packet_queue_init(&is->videoq);
2258         is->video_tid = SDL_CreateThread(video_thread, is);
2259         break;
2260     case AVMEDIA_TYPE_SUBTITLE:
2261         is->subtitle_stream = stream_index;
2262         is->subtitle_st = ic->streams[stream_index];
2263         packet_queue_init(&is->subtitleq);
2264
2265         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2266         break;
2267     default:
2268         break;
2269     }
2270     return 0;
2271 }
2272
2273 static void stream_component_close(VideoState *is, int stream_index)
2274 {
2275     AVFormatContext *ic = is->ic;
2276     AVCodecContext *avctx;
2277
2278     if (stream_index < 0 || stream_index >= ic->nb_streams)
2279         return;
2280     avctx = ic->streams[stream_index]->codec;
2281
2282     switch(avctx->codec_type) {
2283     case AVMEDIA_TYPE_AUDIO:
2284         packet_queue_abort(&is->audioq);
2285
2286         SDL_CloseAudio();
2287
2288         packet_queue_end(&is->audioq);
2289         if (is->reformat_ctx)
2290             av_audio_convert_free(is->reformat_ctx);
2291         is->reformat_ctx = NULL;
2292         break;
2293     case AVMEDIA_TYPE_VIDEO:
2294         packet_queue_abort(&is->videoq);
2295
2296         /* note: we also signal this mutex to make sure we unblock the
2297            video thread in all cases */
2298         SDL_LockMutex(is->pictq_mutex);
2299         SDL_CondSignal(is->pictq_cond);
2300         SDL_UnlockMutex(is->pictq_mutex);
2301
2302         SDL_WaitThread(is->video_tid, NULL);
2303
2304         packet_queue_end(&is->videoq);
2305         break;
2306     case AVMEDIA_TYPE_SUBTITLE:
2307         packet_queue_abort(&is->subtitleq);
2308
2309         /* note: we also signal this mutex to make sure we unblock the
2310            subtitle thread in all cases */
2311         SDL_LockMutex(is->subpq_mutex);
2312         is->subtitle_stream_changed = 1;
2313
2314         SDL_CondSignal(is->subpq_cond);
2315         SDL_UnlockMutex(is->subpq_mutex);
2316
2317         SDL_WaitThread(is->subtitle_tid, NULL);
2318
2319         packet_queue_end(&is->subtitleq);
2320         break;
2321     default:
2322         break;
2323     }
2324
2325     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2326     avcodec_close(avctx);
2327     switch(avctx->codec_type) {
2328     case AVMEDIA_TYPE_AUDIO:
2329         is->audio_st = NULL;
2330         is->audio_stream = -1;
2331         break;
2332     case AVMEDIA_TYPE_VIDEO:
2333         is->video_st = NULL;
2334         is->video_stream = -1;
2335         break;
2336     case AVMEDIA_TYPE_SUBTITLE:
2337         is->subtitle_st = NULL;
2338         is->subtitle_stream = -1;
2339         break;
2340     default:
2341         break;
2342     }
2343 }
2344
2345 /* since we have only one decoding thread, we can use a global
2346    variable instead of a thread local variable */
2347 static VideoState *global_video_state;
2348
2349 static int decode_interrupt_cb(void)
2350 {
2351     return (global_video_state && global_video_state->abort_request);
2352 }
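
/* The callback above is polled by blocking file/network I/O inside libavformat
   (registered with url_set_interrupt_cb() below), so setting abort_request
   also interrupts reads that could otherwise hang, e.g. on a stalled network
   stream. */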
2353
2354 /* this thread gets the stream from the disk or the network */
2355 static int decode_thread(void *arg)
2356 {
2357     VideoState *is = arg;
2358     AVFormatContext *ic;
2359     int err, i, ret;
2360     int st_index[AVMEDIA_TYPE_NB];
2361     int st_count[AVMEDIA_TYPE_NB]={0};
2362     int st_best_packet_count[AVMEDIA_TYPE_NB];
2363     AVPacket pkt1, *pkt = &pkt1;
2364     AVFormatParameters params, *ap = &params;
2365     int eof=0;
2366     int pkt_in_play_range = 0;
2367
2368     ic = avformat_alloc_context();
2369
2370     memset(st_index, -1, sizeof(st_index));
2371     memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2372     is->video_stream = -1;
2373     is->audio_stream = -1;
2374     is->subtitle_stream = -1;
2375
2376     global_video_state = is;
2377     url_set_interrupt_cb(decode_interrupt_cb);
2378
2379     memset(ap, 0, sizeof(*ap));
2380
2381     ap->prealloced_context = 1;
2382     ap->width = frame_width;
2383     ap->height= frame_height;
2384     ap->time_base= (AVRational){1, 25};
2385     ap->pix_fmt = frame_pix_fmt;
2386
2387     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2388
2389     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2390     if (err < 0) {
2391         print_error(is->filename, err);
2392         ret = -1;
2393         goto fail;
2394     }
2395     is->ic = ic;
2396
2397     if(genpts)
2398         ic->flags |= AVFMT_FLAG_GENPTS;
2399
2400     err = av_find_stream_info(ic);
2401     if (err < 0) {
2402         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2403         ret = -1;
2404         goto fail;
2405     }
2406     if(ic->pb)
2407         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2408
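    /* -bytes defaults to auto (-1): byte seeking is then enabled only for
       formats flagged AVFMT_TS_DISCONT (formats such as MPEG-TS, where
       timestamps may jump), since timestamp based seeking is unreliable
       there */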
2409     if(seek_by_bytes<0)
2410         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2411
2412     /* if seeking was requested, execute it now */
2413     if (start_time != AV_NOPTS_VALUE) {
2414         int64_t timestamp;
2415
2416         timestamp = start_time;
2417         /* add the stream start time */
2418         if (ic->start_time != AV_NOPTS_VALUE)
2419             timestamp += ic->start_time;
2420         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2421         if (ret < 0) {
2422             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2423                     is->filename, (double)timestamp / AV_TIME_BASE);
2424         }
2425     }
2426
2427     for(i = 0; i < ic->nb_streams; i++) {
2428         AVStream *st= ic->streams[i];
2429         AVCodecContext *avctx = st->codec;
2430         ic->streams[i]->discard = AVDISCARD_ALL;
2431         if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
2432             continue;
2433         if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2434             continue;
2435
2436         if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2437             continue;
2438         st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2439
2440         switch(avctx->codec_type) {
2441         case AVMEDIA_TYPE_AUDIO:
2442             if (!audio_disable)
2443                 st_index[AVMEDIA_TYPE_AUDIO] = i;
2444             break;
2445         case AVMEDIA_TYPE_VIDEO:
2446         case AVMEDIA_TYPE_SUBTITLE:
2447             if (!video_disable)
2448                 st_index[avctx->codec_type] = i;
2449             break;
2450         default:
2451             break;
2452         }
2453     }
2454     if (show_status) {
2455         dump_format(ic, 0, is->filename, 0);
2456     }
2457
2458     /* open the streams */
2459     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2460         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2461     }
2462
2463     ret=-1;
2464     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2465         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2466     }
2467     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2468     if(ret<0) {
2469         if (!display_disable)
2470             is->show_audio = 2;
2471     }
2472
2473     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2474         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2475     }
2476
2477     if (is->video_stream < 0 && is->audio_stream < 0) {
2478         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2479         ret = -1;
2480         goto fail;
2481     }
2482
2483     for(;;) {
2484         if (is->abort_request)
2485             break;
2486         if (is->paused != is->last_paused) {
2487             is->last_paused = is->paused;
2488             if (is->paused)
2489                 is->read_pause_return= av_read_pause(ic);
2490             else
2491                 av_read_play(ic);
2492         }
2493 #if CONFIG_RTSP_DEMUXER
2494         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2495             /* wait 10 ms to avoid trying to get another packet */
2496             /* XXX: horrible */
2497             SDL_Delay(10);
2498             continue;
2499         }
2500 #endif
2501         if (is->seek_req) {
2502             int64_t seek_target= is->seek_pos;
2503             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2504             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2505 //FIXME the +-2 is due to rounding not being done in the correct direction
2506 //      when the seek_pos/seek_rel variables are generated
2507
2508             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2509             if (ret < 0) {
2510                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2511             }else{
2512                 if (is->audio_stream >= 0) {
2513                     packet_queue_flush(&is->audioq);
2514                     packet_queue_put(&is->audioq, &flush_pkt);
2515                 }
2516                 if (is->subtitle_stream >= 0) {
2517                     packet_queue_flush(&is->subtitleq);
2518                     packet_queue_put(&is->subtitleq, &flush_pkt);
2519                 }
2520                 if (is->video_stream >= 0) {
2521                     packet_queue_flush(&is->videoq);
2522                     packet_queue_put(&is->videoq, &flush_pkt);
2523                 }
2524             }
2525             is->seek_req = 0;
2526             eof= 0;
2527         }
2528
2529         /* if the queues are full, no need to read more */
2530         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2531             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2532                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2533                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2534             /* wait 10 ms */
2535             SDL_Delay(10);
2536             continue;
2537         }
2538         if(url_feof(ic->pb) || eof) {
2539             if(is->video_stream >= 0){
2540                 av_init_packet(pkt);
2541                 pkt->data=NULL;
2542                 pkt->size=0;
2543                 pkt->stream_index= is->video_stream;
2544                 packet_queue_put(&is->videoq, pkt);
2545             }
2546             SDL_Delay(10);
2547             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2548                 if(loop!=1 && (!loop || --loop)){
2549                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2550                 }else if(autoexit){
2551                     ret=AVERROR_EOF;
2552                     goto fail;
2553                 }
2554             }
2555             continue;
2556         }
2557         ret = av_read_frame(ic, pkt);
2558         if (ret < 0) {
2559             if (ret == AVERROR_EOF)
2560                 eof=1;
2561             if (url_ferror(ic->pb))
2562                 break;
2563             SDL_Delay(100); /* wait for user event */
2564             continue;
2565         }
2566         /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
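        /* the test below converts the packet pts to seconds using the stream
           time_base, subtracts the requested start position and keeps the
           packet only while this offset is still within the -t duration; with
           no -t option (duration == AV_NOPTS_VALUE) every packet is kept */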
2567         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2568                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2569                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2570                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2571                 <= ((double)duration/1000000);
2572         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2573             packet_queue_put(&is->audioq, pkt);
2574         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2575             packet_queue_put(&is->videoq, pkt);
2576         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2577             packet_queue_put(&is->subtitleq, pkt);
2578         } else {
2579             av_free_packet(pkt);
2580         }
2581     }
2582     /* wait until the end */
2583     while (!is->abort_request) {
2584         SDL_Delay(100);
2585     }
2586
2587     ret = 0;
2588  fail:
2589     /* disable interrupting */
2590     global_video_state = NULL;
2591
2592     /* close each stream */
2593     if (is->audio_stream >= 0)
2594         stream_component_close(is, is->audio_stream);
2595     if (is->video_stream >= 0)
2596         stream_component_close(is, is->video_stream);
2597     if (is->subtitle_stream >= 0)
2598         stream_component_close(is, is->subtitle_stream);
2599     if (is->ic) {
2600         av_close_input_file(is->ic);
2601         is->ic = NULL; /* safety */
2602     }
2603     url_set_interrupt_cb(NULL);
2604
2605     if (ret != 0) {
2606         SDL_Event event;
2607
2608         event.type = FF_QUIT_EVENT;
2609         event.user.data1 = is;
2610         SDL_PushEvent(&event);
2611     }
2612     return 0;
2613 }
2614
2615 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2616 {
2617     VideoState *is;
2618
2619     is = av_mallocz(sizeof(VideoState));
2620     if (!is)
2621         return NULL;
2622     av_strlcpy(is->filename, filename, sizeof(is->filename));
2623     is->iformat = iformat;
2624     is->ytop = 0;
2625     is->xleft = 0;
2626
2627     /* start video display */
2628     is->pictq_mutex = SDL_CreateMutex();
2629     is->pictq_cond = SDL_CreateCond();
2630
2631     is->subpq_mutex = SDL_CreateMutex();
2632     is->subpq_cond = SDL_CreateCond();
2633
2634     is->av_sync_type = av_sync_type;
2635     is->parse_tid = SDL_CreateThread(decode_thread, is);
2636     if (!is->parse_tid) {
2637         av_free(is);
2638         return NULL;
2639     }
2640     return is;
2641 }
2642
2643 static void stream_close(VideoState *is)
2644 {
2645     VideoPicture *vp;
2646     int i;
2647     /* XXX: use a special url_shutdown call to abort parse cleanly */
2648     is->abort_request = 1;
2649     SDL_WaitThread(is->parse_tid, NULL);
2650     SDL_WaitThread(is->refresh_tid, NULL);
2651
2652     /* free all pictures */
2653     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2654         vp = &is->pictq[i];
2655 #if CONFIG_AVFILTER
2656         if (vp->picref) {
2657             avfilter_unref_pic(vp->picref);
2658             vp->picref = NULL;
2659         }
2660 #endif
2661         if (vp->bmp) {
2662             SDL_FreeYUVOverlay(vp->bmp);
2663             vp->bmp = NULL;
2664         }
2665     }
2666     SDL_DestroyMutex(is->pictq_mutex);
2667     SDL_DestroyCond(is->pictq_cond);
2668     SDL_DestroyMutex(is->subpq_mutex);
2669     SDL_DestroyCond(is->subpq_cond);
2670 #if !CONFIG_AVFILTER
2671     if (is->img_convert_ctx)
2672         sws_freeContext(is->img_convert_ctx);
2673 #endif
2674     av_free(is);
2675 }
2676
2677 static void stream_cycle_channel(VideoState *is, int codec_type)
2678 {
2679     AVFormatContext *ic = is->ic;
2680     int start_index, stream_index;
2681     AVStream *st;
2682
2683     if (codec_type == AVMEDIA_TYPE_VIDEO)
2684         start_index = is->video_stream;
2685     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2686         start_index = is->audio_stream;
2687     else
2688         start_index = is->subtitle_stream;
2689     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2690         return;
2691     stream_index = start_index;
2692     for(;;) {
2693         if (++stream_index >= is->ic->nb_streams)
2694         {
2695             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2696             {
2697                 stream_index = -1;
2698                 goto the_end;
2699             } else
2700                 stream_index = 0;
2701         }
2702         if (stream_index == start_index)
2703             return;
2704         st = ic->streams[stream_index];
2705         if (st->codec->codec_type == codec_type) {
2706             /* check that parameters are OK */
2707             switch(codec_type) {
2708             case AVMEDIA_TYPE_AUDIO:
2709                 if (st->codec->sample_rate != 0 &&
2710                     st->codec->channels != 0)
2711                     goto the_end;
2712                 break;
2713             case AVMEDIA_TYPE_VIDEO:
2714             case AVMEDIA_TYPE_SUBTITLE:
2715                 goto the_end;
2716             default:
2717                 break;
2718             }
2719         }
2720     }
2721  the_end:
2722     stream_component_close(is, start_index);
2723     stream_component_open(is, stream_index);
2724 }
2725
2726
2727 static void toggle_full_screen(void)
2728 {
2729     is_full_screen = !is_full_screen;
2730     if (!fs_screen_width) {
2731         /* use default SDL method */
2732 //        SDL_WM_ToggleFullScreen(screen);
2733     }
2734     video_open(cur_stream);
2735 }
2736
2737 static void toggle_pause(void)
2738 {
2739     if (cur_stream)
2740         stream_pause(cur_stream);
2741     step = 0;
2742 }
2743
2744 static void step_to_next_frame(void)
2745 {
2746     if (cur_stream) {
2747         /* if the stream is paused, unpause it, then step */
2748         if (cur_stream->paused)
2749             stream_pause(cur_stream);
2750     }
2751     step = 1;
2752 }
2753
2754 static void do_exit(void)
2755 {
2756     int i;
2757     if (cur_stream) {
2758         stream_close(cur_stream);
2759         cur_stream = NULL;
2760     }
2761     for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2762         av_free(avcodec_opts[i]);
2763     av_free(avformat_opts);
2764     av_free(sws_opts);
2765 #if CONFIG_AVFILTER
2766     avfilter_uninit();
2767 #endif
2768     if (show_status)
2769         printf("\n");
2770     SDL_Quit();
2771     exit(0);
2772 }
2773
2774 static void toggle_audio_display(void)
2775 {
2776     if (cur_stream) {
2777         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2778         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2779         fill_rectangle(screen,
2780                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2781                     bgcolor);
2782         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2783     }
2784 }
2785
2786 /* handle an event sent by the GUI */
2787 static void event_loop(void)
2788 {
2789     SDL_Event event;
2790     double incr, pos, frac;
2791
2792     for(;;) {
2793         double x;
2794         SDL_WaitEvent(&event);
2795         switch(event.type) {
2796         case SDL_KEYDOWN:
2797             switch(event.key.keysym.sym) {
2798             case SDLK_ESCAPE:
2799             case SDLK_q:
2800                 do_exit();
2801                 break;
2802             case SDLK_f:
2803                 toggle_full_screen();
2804                 break;
2805             case SDLK_p:
2806             case SDLK_SPACE:
2807                 toggle_pause();
2808                 break;
2809             case SDLK_s: //S: Step to next frame
2810                 step_to_next_frame();
2811                 break;
2812             case SDLK_a:
2813                 if (cur_stream)
2814                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2815                 break;
2816             case SDLK_v:
2817                 if (cur_stream)
2818                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2819                 break;
2820             case SDLK_t:
2821                 if (cur_stream)
2822                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2823                 break;
2824             case SDLK_w:
2825                 toggle_audio_display();
2826                 break;
2827             case SDLK_LEFT:
2828                 incr = -10.0;
2829                 goto do_seek;
2830             case SDLK_RIGHT:
2831                 incr = 10.0;
2832                 goto do_seek;
2833             case SDLK_UP:
2834                 incr = 60.0;
2835                 goto do_seek;
2836             case SDLK_DOWN:
2837                 incr = -60.0;
2838             do_seek:
2839                 if (cur_stream) {
2840                     if (seek_by_bytes) {
2841                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2842                             pos= cur_stream->video_current_pos;
2843                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2844                             pos= cur_stream->audio_pkt.pos;
2845                         }else
2846                             pos = url_ftell(cur_stream->ic->pb);
2847                         if (cur_stream->ic->bit_rate)
2848                             incr *= cur_stream->ic->bit_rate / 8.0;
2849                         else
2850                             incr *= 180000.0;
2851                         pos += incr;
2852                         stream_seek(cur_stream, pos, incr, 1);
2853                     } else {
2854                         pos = get_master_clock(cur_stream);
2855                         pos += incr;
2856                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2857                     }
2858                 }
2859                 break;
2860             default:
2861                 break;
2862             }
2863             break;
2864         case SDL_MOUSEBUTTONDOWN:
2865         case SDL_MOUSEMOTION:
2866             if(event.type ==SDL_MOUSEBUTTONDOWN){
2867                 x= event.button.x;
2868             }else{
2869                 if(event.motion.state != SDL_PRESSED)
2870                     break;
2871                 x= event.motion.x;
2872             }
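            /* the click/drag position is mapped linearly: position x seeks to
               the fraction x/width of either the file size (when seeking by
               bytes) or of the total duration, offset by the stream start
               time */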
2873             if (cur_stream) {
2874                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2875                     uint64_t size=  url_fsize(cur_stream->ic->pb);
2876                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2877                 }else{
2878                     int64_t ts;
2879                     int ns, hh, mm, ss;
2880                     int tns, thh, tmm, tss;
2881                     tns = cur_stream->ic->duration/1000000LL;
2882                     thh = tns/3600;
2883                     tmm = (tns%3600)/60;
2884                     tss = (tns%60);
2885                     frac = x/cur_stream->width;
2886                     ns = frac*tns;
2887                     hh = ns/3600;
2888                     mm = (ns%3600)/60;
2889                     ss = (ns%60);
2890                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2891                             hh, mm, ss, thh, tmm, tss);
2892                     ts = frac*cur_stream->ic->duration;
2893                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2894                         ts += cur_stream->ic->start_time;
2895                     stream_seek(cur_stream, ts, 0, 0);
2896                 }
2897             }
2898             break;
2899         case SDL_VIDEORESIZE:
2900             if (cur_stream) {
2901                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2902                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2903                 screen_width = cur_stream->width = event.resize.w;
2904                 screen_height= cur_stream->height= event.resize.h;
2905             }
2906             break;
2907         case SDL_QUIT:
2908         case FF_QUIT_EVENT:
2909             do_exit();
2910             break;
2911         case FF_ALLOC_EVENT:
2912             video_open(event.user.data1);
2913             alloc_picture(event.user.data1);
2914             break;
2915         case FF_REFRESH_EVENT:
2916             video_refresh_timer(event.user.data1);
2917             cur_stream->refresh=0;
2918             break;
2919         default:
2920             break;
2921         }
2922     }
2923 }
2924
2925 static void opt_frame_size(const char *arg)
2926 {
2927     if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2928         fprintf(stderr, "Incorrect frame size\n");
2929         exit(1);
2930     }
2931     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2932         fprintf(stderr, "Frame size must be a multiple of 2\n");
2933         exit(1);
2934     }
2935 }
2936
2937 static int opt_width(const char *opt, const char *arg)
2938 {
2939     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2940     return 0;
2941 }
2942
2943 static int opt_height(const char *opt, const char *arg)
2944 {
2945     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2946     return 0;
2947 }
2948
2949 static void opt_format(const char *arg)
2950 {
2951     file_iformat = av_find_input_format(arg);
2952     if (!file_iformat) {
2953         fprintf(stderr, "Unknown input format: %s\n", arg);
2954         exit(1);
2955     }
2956 }
2957
2958 static void opt_frame_pix_fmt(const char *arg)
2959 {
2960     frame_pix_fmt = av_get_pix_fmt(arg);
2961 }
2962
2963 static int opt_sync(const char *opt, const char *arg)
2964 {
2965     if (!strcmp(arg, "audio"))
2966         av_sync_type = AV_SYNC_AUDIO_MASTER;
2967     else if (!strcmp(arg, "video"))
2968         av_sync_type = AV_SYNC_VIDEO_MASTER;
2969     else if (!strcmp(arg, "ext"))
2970         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2971     else {
2972         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2973         exit(1);
2974     }
2975     return 0;
2976 }
2977
2978 static int opt_seek(const char *opt, const char *arg)
2979 {
2980     start_time = parse_time_or_die(opt, arg, 1);
2981     return 0;
2982 }
2983
2984 static int opt_duration(const char *opt, const char *arg)
2985 {
2986     duration = parse_time_or_die(opt, arg, 1);
2987     return 0;
2988 }
2989
2990 static int opt_debug(const char *opt, const char *arg)
2991 {
2992     av_log_set_level(99);
2993     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2994     return 0;
2995 }
2996
2997 static int opt_vismv(const char *opt, const char *arg)
2998 {
2999     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3000     return 0;
3001 }
3002
3003 static int opt_thread_count(const char *opt, const char *arg)
3004 {
3005     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3006 #if !HAVE_THREADS
3007     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3008 #endif
3009     return 0;
3010 }
3011
3012 static const OptionDef options[] = {
3013 #include "cmdutils_common_opts.h"
3014     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
3015     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
3016     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3017     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3018     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3019     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3020     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3021     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3022     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3023     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3024     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
3025     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3026     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3027     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3028     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3029     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3030     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3031     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3032     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3033     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3034     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3035     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3036     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3037     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3038     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3039     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3040     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3041     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3042     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3043     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3044     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3045     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3046     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3047     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3048     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3049 #if CONFIG_AVFILTER
3050     { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3051 #endif
3052     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3053     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3054     { NULL, },
3055 };
3056
3057 static void show_usage(void)
3058 {
3059     printf("Simple media player\n");
3060     printf("usage: ffplay [options] input_file\n");
3061     printf("\n");
3062 }
3063
3064 static void show_help(void)
3065 {
3066     show_usage();
3067     show_help_options(options, "Main options:\n",
3068                       OPT_EXPERT, 0);
3069     show_help_options(options, "\nAdvanced options:\n",
3070                       OPT_EXPERT, OPT_EXPERT);
3071     printf("\nWhile playing:\n"
3072            "q, ESC              quit\n"
3073            "f                   toggle full screen\n"
3074            "p, SPC              pause\n"
3075            "a                   cycle audio channel\n"
3076            "v                   cycle video channel\n"
3077            "t                   cycle subtitle channel\n"
3078            "w                   show audio waves\n"
3079            "s                   activate frame-step mode\n"
3080            "left/right          seek backward/forward 10 seconds\n"
3081            "down/up             seek backward/forward 1 minute\n"
3082            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3083            );
3084 }
3085
3086 static void opt_input_file(const char *filename)
3087 {
3088     if (input_filename) {
3089         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3090                 filename, input_filename);
3091         exit(1);
3092     }
3093     if (!strcmp(filename, "-"))
3094         filename = "pipe:";
3095     input_filename = filename;
3096 }
3097
3098 /* main program entry point */
3099 int main(int argc, char **argv)
3100 {
3101     int flags, i;
3102
3103     /* register all codecs, demuxers and protocols */
3104     avcodec_register_all();
3105 #if CONFIG_AVDEVICE
3106     avdevice_register_all();
3107 #endif
3108 #if CONFIG_AVFILTER
3109     avfilter_register_all();
3110 #endif
3111     av_register_all();
3112
3113     for(i=0; i<AVMEDIA_TYPE_NB; i++){
3114         avcodec_opts[i]= avcodec_alloc_context2(i);
3115     }
3116     avformat_opts = avformat_alloc_context();
3117 #if !CONFIG_AVFILTER
3118     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3119 #endif
3120
3121     show_banner();
3122
3123     parse_options(argc, argv, options, opt_input_file);
3124
3125     if (!input_filename) {
3126         show_usage();
3127         fprintf(stderr, "An input file must be specified\n");
3128         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3129         exit(1);
3130     }
3131
3132     if (display_disable) {
3133         video_disable = 1;
3134     }
3135     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3136 #if !defined(__MINGW32__) && !defined(__APPLE__)
3137     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3138 #endif
3139     if (SDL_Init (flags)) {
3140         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3141         exit(1);
3142     }
3143
3144     if (!display_disable) {
3145 #if HAVE_SDL_VIDEO_SIZE
3146         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3147         fs_screen_width = vi->current_w;
3148         fs_screen_height = vi->current_h;
3149 #endif
3150     }
3151
3152     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3153     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3154     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3155
3156     av_init_packet(&flush_pkt);
3157     flush_pkt.data= "FLUSH";
3158
3159     cur_stream = stream_open(input_filename, file_iformat);
3160
3161     event_loop();
3162
3163     /* never returns */
3164
3165     return 0;
3166 }