avplay: use a separate buffer for playing silence
[platform/upstream/libav.git] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "avplay";
59 const int program_birth_year = 2003;
60
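/* Queue-filling limits used by the read thread (the read loop itself is outside
   this hunk): reading pauses once about MAX_QUEUE_SIZE bytes are buffered, and a
   stream is considered sufficiently buffered once its queue holds at least
   MIN_AUDIOQ_SIZE bytes or MIN_FRAMES packets. */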
61 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
62 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
63 #define MIN_FRAMES 5
64
65 /* SDL audio buffer size, in samples. Should be small to have precise
66    A/V sync as SDL does not have hardware buffer fullness info. */
67 #define SDL_AUDIO_BUFFER_SIZE 1024
68
69 /* no AV sync correction is done if below the AV sync threshold */
70 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if too big error */
72 #define AV_NOSYNC_THRESHOLD 10.0
73
74 #define FRAME_SKIP_FACTOR 0.05
75
76 /* maximum audio speed change to get correct sync */
77 #define SAMPLE_CORRECTION_PERCENT_MAX 10
78
79 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
80 #define AUDIO_DIFF_AVG_NB   20
81
82 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
83 #define SAMPLE_ARRAY_SIZE (2*65536)
84
85 static int sws_flags = SWS_BICUBIC;
86
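/* Thread-safe FIFO of demuxed packets: the read thread appends with
   packet_queue_put() and the decoder threads drain it with packet_queue_get(). */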
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     int reallocate;
107     enum PixelFormat pix_fmt;
108
109 #if CONFIG_AVFILTER
110     AVFilterBufferRef *picref;
111 #endif
112 } VideoPicture;
113
114 typedef struct SubPicture {
115     double pts; /* presentation time stamp for this picture */
116     AVSubtitle sub;
117 } SubPicture;
118
119 enum {
120     AV_SYNC_AUDIO_MASTER, /* default choice */
121     AV_SYNC_VIDEO_MASTER,
122     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
123 };
124
125 typedef struct VideoState {
126     SDL_Thread *parse_tid;
127     SDL_Thread *video_tid;
128     SDL_Thread *refresh_tid;
129     AVInputFormat *iformat;
130     int no_background;
131     int abort_request;
132     int paused;
133     int last_paused;
134     int seek_req;
135     int seek_flags;
136     int64_t seek_pos;
137     int64_t seek_rel;
138     int read_pause_return;
139     AVFormatContext *ic;
140     int dtg_active_format;
141
142     int audio_stream;
143
144     int av_sync_type;
145     double external_clock; /* external clock base */
146     int64_t external_clock_time;
147
148     double audio_clock;
149     double audio_diff_cum; /* used for AV difference average computation */
150     double audio_diff_avg_coef;
151     double audio_diff_threshold;
152     int audio_diff_avg_count;
153     AVStream *audio_st;
154     PacketQueue audioq;
155     int audio_hw_buf_size;
156     /* samples output by the codec. we reserve more space for avsync
157        compensation */
158     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
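    /* Dedicated all-zero buffer played when no decoded audio is available (the
       change described in the commit summary above): the audio callback, which
       is outside this hunk, can point audio_buf at silence_buf instead of
       clearing the decode buffers. */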
160     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
161     uint8_t *audio_buf;
162     unsigned int audio_buf_size; /* in bytes */
163     int audio_buf_index; /* in bytes */
164     AVPacket audio_pkt_temp;
165     AVPacket audio_pkt;
166     enum AVSampleFormat audio_src_fmt;
167     AVAudioConvert *reformat_ctx;
168
169     int show_audio; /* if true, display audio samples */
170     int16_t sample_array[SAMPLE_ARRAY_SIZE];
171     int sample_array_index;
172     int last_i_start;
173     RDFTContext *rdft;
174     int rdft_bits;
175     FFTSample *rdft_data;
176     int xpos;
177
178     SDL_Thread *subtitle_tid;
179     int subtitle_stream;
180     int subtitle_stream_changed;
181     AVStream *subtitle_st;
182     PacketQueue subtitleq;
183     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
184     int subpq_size, subpq_rindex, subpq_windex;
185     SDL_mutex *subpq_mutex;
186     SDL_cond *subpq_cond;
187
188     double frame_timer;
189     double frame_last_pts;
190     double frame_last_delay;
191     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
192     int video_stream;
193     AVStream *video_st;
194     PacketQueue videoq;
195     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
196     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
197     int64_t video_current_pos;                   ///<current displayed file pos
198     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
199     int pictq_size, pictq_rindex, pictq_windex;
200     SDL_mutex *pictq_mutex;
201     SDL_cond *pictq_cond;
202 #if !CONFIG_AVFILTER
203     struct SwsContext *img_convert_ctx;
204 #endif
205
206     //    QETimer *video_timer;
207     char filename[1024];
208     int width, height, xleft, ytop;
209
210     PtsCorrectionContext pts_ctx;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222
223 /* options specified by the user */
224 static AVInputFormat *file_iformat;
225 static const char *input_filename;
226 static const char *window_title;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int audio_disable;
232 static int video_disable;
233 static int wanted_stream[AVMEDIA_TYPE_NB]={
234     [AVMEDIA_TYPE_AUDIO]=-1,
235     [AVMEDIA_TYPE_VIDEO]=-1,
236     [AVMEDIA_TYPE_SUBTITLE]=-1,
237 };
238 static int seek_by_bytes=-1;
239 static int display_disable;
240 static int show_status = 1;
241 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
242 static int64_t start_time = AV_NOPTS_VALUE;
243 static int64_t duration = AV_NOPTS_VALUE;
244 static int debug = 0;
245 static int debug_mv = 0;
246 static int step = 0;
247 static int thread_count = 1;
248 static int workaround_bugs = 1;
249 static int fast = 0;
250 static int genpts = 0;
251 static int lowres = 0;
252 static int idct = FF_IDCT_AUTO;
253 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
254 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
255 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
256 static int error_recognition = FF_ER_CAREFUL;
257 static int error_concealment = 3;
258 static int decoder_reorder_pts= -1;
259 static int autoexit;
260 static int exit_on_keydown;
261 static int exit_on_mousedown;
262 static int loop=1;
263 static int framedrop=1;
264
265 static int rdftspeed=20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269
270 /* current context */
271 static int is_full_screen;
272 static VideoState *cur_stream;
273 static int64_t audio_callback_time;
274
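/* Sentinel packet pushed into each queue on seek; when a decoder thread pops it,
   it flushes the codec buffers (see the flush_pkt.data check in get_video_frame()
   below). */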
275 static AVPacket flush_pkt;
276
277 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
278 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
279 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
280
281 static SDL_Surface *screen;
282
283 void exit_program(int ret)
284 {
285     exit(ret);
286 }
287
288 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
289
290 /* packet queue handling */
291 static void packet_queue_init(PacketQueue *q)
292 {
293     memset(q, 0, sizeof(PacketQueue));
294     q->mutex = SDL_CreateMutex();
295     q->cond = SDL_CreateCond();
296     packet_queue_put(q, &flush_pkt);
297 }
298
299 static void packet_queue_flush(PacketQueue *q)
300 {
301     AVPacketList *pkt, *pkt1;
302
303     SDL_LockMutex(q->mutex);
304     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
305         pkt1 = pkt->next;
306         av_free_packet(&pkt->pkt);
307         av_freep(&pkt);
308     }
309     q->last_pkt = NULL;
310     q->first_pkt = NULL;
311     q->nb_packets = 0;
312     q->size = 0;
313     SDL_UnlockMutex(q->mutex);
314 }
315
316 static void packet_queue_end(PacketQueue *q)
317 {
318     packet_queue_flush(q);
319     SDL_DestroyMutex(q->mutex);
320     SDL_DestroyCond(q->cond);
321 }
322
323 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
324 {
325     AVPacketList *pkt1;
326
327     /* duplicate the packet */
328     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
329         return -1;
330
331     pkt1 = av_malloc(sizeof(AVPacketList));
332     if (!pkt1)
333         return -1;
334     pkt1->pkt = *pkt;
335     pkt1->next = NULL;
336
338     SDL_LockMutex(q->mutex);
339
340     if (!q->last_pkt)
342         q->first_pkt = pkt1;
343     else
344         q->last_pkt->next = pkt1;
345     q->last_pkt = pkt1;
346     q->nb_packets++;
347     q->size += pkt1->pkt.size + sizeof(*pkt1);
348     /* XXX: should duplicate packet data in DV case */
349     SDL_CondSignal(q->cond);
350
351     SDL_UnlockMutex(q->mutex);
352     return 0;
353 }
354
355 static void packet_queue_abort(PacketQueue *q)
356 {
357     SDL_LockMutex(q->mutex);
358
359     q->abort_request = 1;
360
361     SDL_CondSignal(q->cond);
362
363     SDL_UnlockMutex(q->mutex);
364 }
365
366 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
367 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
368 {
369     AVPacketList *pkt1;
370     int ret;
371
372     SDL_LockMutex(q->mutex);
373
374     for(;;) {
375         if (q->abort_request) {
376             ret = -1;
377             break;
378         }
379
380         pkt1 = q->first_pkt;
381         if (pkt1) {
382             q->first_pkt = pkt1->next;
383             if (!q->first_pkt)
384                 q->last_pkt = NULL;
385             q->nb_packets--;
386             q->size -= pkt1->pkt.size + sizeof(*pkt1);
387             *pkt = pkt1->pkt;
388             av_free(pkt1);
389             ret = 1;
390             break;
391         } else if (!block) {
392             ret = 0;
393             break;
394         } else {
395             SDL_CondWait(q->cond, q->mutex);
396         }
397     }
398     SDL_UnlockMutex(q->mutex);
399     return ret;
400 }
401
402 static inline void fill_rectangle(SDL_Surface *screen,
403                                   int x, int y, int w, int h, int color)
404 {
405     SDL_Rect rect;
406     rect.x = x;
407     rect.y = y;
408     rect.w = w;
409     rect.h = h;
410     SDL_FillRect(screen, &rect, color);
411 }
412
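/* Blend newp over oldp with alpha a. The shift s accounts for newp being a sum of
   2^s source pixels when chroma is subsampled: oldp is scaled up to match and the
   result is scaled back down. */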
413 #define ALPHA_BLEND(a, oldp, newp, s)\
414 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
415
416 #define RGBA_IN(r, g, b, a, s)\
417 {\
418     unsigned int v = ((const uint32_t *)(s))[0];\
419     a = (v >> 24) & 0xff;\
420     r = (v >> 16) & 0xff;\
421     g = (v >> 8) & 0xff;\
422     b = v & 0xff;\
423 }
424
425 #define YUVA_IN(y, u, v, a, s, pal)\
426 {\
427     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
428     a = (val >> 24) & 0xff;\
429     y = (val >> 16) & 0xff;\
430     u = (val >> 8) & 0xff;\
431     v = val & 0xff;\
432 }
433
434 #define YUVA_OUT(d, y, u, v, a)\
435 {\
436     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
437 }
438
439
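/* Subtitle bitmaps are 8-bit paletted, i.e. one byte per pixel. */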
440 #define BPP 1
441
442 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
443 {
444     int wrap, wrap3, width2, skip2;
445     int y, u, v, a, u1, v1, a1, w, h;
446     uint8_t *lum, *cb, *cr;
447     const uint8_t *p;
448     const uint32_t *pal;
449     int dstx, dsty, dstw, dsth;
450
451     dstw = av_clip(rect->w, 0, imgw);
452     dsth = av_clip(rect->h, 0, imgh);
453     dstx = av_clip(rect->x, 0, imgw - dstw);
454     dsty = av_clip(rect->y, 0, imgh - dsth);
455     lum = dst->data[0] + dsty * dst->linesize[0];
456     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
457     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
458
459     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
460     skip2 = dstx >> 1;
461     wrap = dst->linesize[0];
462     wrap3 = rect->pict.linesize[0];
463     p = rect->pict.data[0];
464     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
465
466     if (dsty & 1) {
467         lum += dstx;
468         cb += skip2;
469         cr += skip2;
470
471         if (dstx & 1) {
472             YUVA_IN(y, u, v, a, p, pal);
473             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
474             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
475             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
476             cb++;
477             cr++;
478             lum++;
479             p += BPP;
480         }
481         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
482             YUVA_IN(y, u, v, a, p, pal);
483             u1 = u;
484             v1 = v;
485             a1 = a;
486             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
487
488             YUVA_IN(y, u, v, a, p + BPP, pal);
489             u1 += u;
490             v1 += v;
491             a1 += a;
492             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
493             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
494             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
495             cb++;
496             cr++;
497             p += 2 * BPP;
498             lum += 2;
499         }
500         if (w) {
501             YUVA_IN(y, u, v, a, p, pal);
502             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
503             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
504             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
505             p++;
506             lum++;
507         }
508         p += wrap3 - dstw * BPP;
509         lum += wrap - dstw - dstx;
510         cb += dst->linesize[1] - width2 - skip2;
511         cr += dst->linesize[2] - width2 - skip2;
512     }
513     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
514         lum += dstx;
515         cb += skip2;
516         cr += skip2;
517
518         if (dstx & 1) {
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 = u;
521             v1 = v;
522             a1 = a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524             p += wrap3;
525             lum += wrap;
526             YUVA_IN(y, u, v, a, p, pal);
527             u1 += u;
528             v1 += v;
529             a1 += a;
530             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
531             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
532             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
533             cb++;
534             cr++;
535             p += -wrap3 + BPP;
536             lum += -wrap + 1;
537         }
538         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
539             YUVA_IN(y, u, v, a, p, pal);
540             u1 = u;
541             v1 = v;
542             a1 = a;
543             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544
545             YUVA_IN(y, u, v, a, p + BPP, pal);
546             u1 += u;
547             v1 += v;
548             a1 += a;
549             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
550             p += wrap3;
551             lum += wrap;
552
553             YUVA_IN(y, u, v, a, p, pal);
554             u1 += u;
555             v1 += v;
556             a1 += a;
557             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558
559             YUVA_IN(y, u, v, a, p + BPP, pal);
560             u1 += u;
561             v1 += v;
562             a1 += a;
563             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
564
565             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
566             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
567
568             cb++;
569             cr++;
570             p += -wrap3 + 2 * BPP;
571             lum += -wrap + 2;
572         }
573         if (w) {
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 = u;
576             v1 = v;
577             a1 = a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579             p += wrap3;
580             lum += wrap;
581             YUVA_IN(y, u, v, a, p, pal);
582             u1 += u;
583             v1 += v;
584             a1 += a;
585             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
586             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
587             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
588             cb++;
589             cr++;
590             p += -wrap3 + BPP;
591             lum += -wrap + 1;
592         }
593         p += wrap3 + (wrap3 - dstw * BPP);
594         lum += wrap + (wrap - dstw - dstx);
595         cb += dst->linesize[1] - width2 - skip2;
596         cr += dst->linesize[2] - width2 - skip2;
597     }
598     /* handle odd height */
599     if (h) {
600         lum += dstx;
601         cb += skip2;
602         cr += skip2;
603
604         if (dstx & 1) {
605             YUVA_IN(y, u, v, a, p, pal);
606             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
608             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
609             cb++;
610             cr++;
611             lum++;
612             p += BPP;
613         }
614         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
615             YUVA_IN(y, u, v, a, p, pal);
616             u1 = u;
617             v1 = v;
618             a1 = a;
619             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620
621             YUVA_IN(y, u, v, a, p + BPP, pal);
622             u1 += u;
623             v1 += v;
624             a1 += a;
625             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
626             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
627             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
628             cb++;
629             cr++;
630             p += 2 * BPP;
631             lum += 2;
632         }
633         if (w) {
634             YUVA_IN(y, u, v, a, p, pal);
635             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
636             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
637             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
638         }
639     }
640 }
641
642 static void free_subpicture(SubPicture *sp)
643 {
644     avsubtitle_free(&sp->sub);
645 }
646
647 static void video_image_display(VideoState *is)
648 {
649     VideoPicture *vp;
650     SubPicture *sp;
651     AVPicture pict;
652     float aspect_ratio;
653     int width, height, x, y;
654     SDL_Rect rect;
655     int i;
656
657     vp = &is->pictq[is->pictq_rindex];
658     if (vp->bmp) {
659 #if CONFIG_AVFILTER
660          if (vp->picref->video->pixel_aspect.num == 0)
661              aspect_ratio = 0;
662          else
663              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
664 #else
665
666         /* XXX: use variable in the frame */
667         if (is->video_st->sample_aspect_ratio.num)
668             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
669         else if (is->video_st->codec->sample_aspect_ratio.num)
670             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
671         else
672             aspect_ratio = 0;
673 #endif
674         if (aspect_ratio <= 0.0)
675             aspect_ratio = 1.0;
676         aspect_ratio *= (float)vp->width / (float)vp->height;
677
678         if (is->subtitle_st)
679         {
680             if (is->subpq_size > 0)
681             {
682                 sp = &is->subpq[is->subpq_rindex];
683
684                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
685                 {
686                     SDL_LockYUVOverlay (vp->bmp);
687
688                     pict.data[0] = vp->bmp->pixels[0];
689                     pict.data[1] = vp->bmp->pixels[2];
690                     pict.data[2] = vp->bmp->pixels[1];
691
692                     pict.linesize[0] = vp->bmp->pitches[0];
693                     pict.linesize[1] = vp->bmp->pitches[2];
694                     pict.linesize[2] = vp->bmp->pitches[1];
695
696                     for (i = 0; i < sp->sub.num_rects; i++)
697                         blend_subrect(&pict, sp->sub.rects[i],
698                                       vp->bmp->w, vp->bmp->h);
699
700                     SDL_UnlockYUVOverlay (vp->bmp);
701                 }
702             }
703         }
704
705
706         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
707         height = is->height;
708         width = ((int)rint(height * aspect_ratio)) & ~1;
709         if (width > is->width) {
710             width = is->width;
711             height = ((int)rint(width / aspect_ratio)) & ~1;
712         }
713         x = (is->width - width) / 2;
714         y = (is->height - height) / 2;
715         is->no_background = 0;
716         rect.x = is->xleft + x;
717         rect.y = is->ytop  + y;
718         rect.w = width;
719         rect.h = height;
720         SDL_DisplayYUVOverlay(vp->bmp, &rect);
721     }
722 }
723
724 /* get the current audio output buffer size, in bytes. With SDL, we
725    cannot have precise information */
726 static int audio_write_get_buf_size(VideoState *is)
727 {
728     return is->audio_buf_size - is->audio_buf_index;
729 }
730
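/* Positive modulo: wrap an index into [0, b) even when a is negative. */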
731 static inline int compute_mod(int a, int b)
732 {
733     a = a % b;
734     if (a >= 0)
735         return a;
736     else
737         return a + b;
738 }
739
740 static void video_audio_display(VideoState *s)
741 {
742     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
743     int ch, channels, h, h2, bgcolor, fgcolor;
744     int16_t time_diff;
745     int rdft_bits, nb_freq;
746
747     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
748         ;
749     nb_freq= 1<<(rdft_bits-1);
750
751     /* compute display index: center on currently output samples */
752     channels = s->audio_st->codec->channels;
753     nb_display_channels = channels;
754     if (!s->paused) {
755         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
756         n = 2 * channels;
757         delay = audio_write_get_buf_size(s);
758         delay /= n;
759
760         /* to be more precise, we take into account the time spent since
761            the last buffer computation */
762         if (audio_callback_time) {
763             time_diff = av_gettime() - audio_callback_time;
764             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
765         }
766
767         delay += 2*data_used;
768         if (delay < data_used)
769             delay = data_used;
770
771         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
772         if(s->show_audio==1){
773             h= INT_MIN;
774             for(i=0; i<1000; i+=channels){
775                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
776                 int a= s->sample_array[idx];
777                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
778                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
779                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
780                 int score= a-d;
781                 if(h<score && (b^c)<0){
782                     h= score;
783                     i_start= idx;
784                 }
785             }
786         }
787
788         s->last_i_start = i_start;
789     } else {
790         i_start = s->last_i_start;
791     }
792
793     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
794     if(s->show_audio==1){
795         fill_rectangle(screen,
796                        s->xleft, s->ytop, s->width, s->height,
797                        bgcolor);
798
799         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
800
801         /* total height for one channel */
802         h = s->height / nb_display_channels;
803         /* graph height / 2 */
804         h2 = (h * 9) / 20;
805         for(ch = 0;ch < nb_display_channels; ch++) {
806             i = i_start + ch;
807             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
808             for(x = 0; x < s->width; x++) {
809                 y = (s->sample_array[i] * h2) >> 15;
810                 if (y < 0) {
811                     y = -y;
812                     ys = y1 - y;
813                 } else {
814                     ys = y1;
815                 }
816                 fill_rectangle(screen,
817                                s->xleft + x, ys, 1, y,
818                                fgcolor);
819                 i += channels;
820                 if (i >= SAMPLE_ARRAY_SIZE)
821                     i -= SAMPLE_ARRAY_SIZE;
822             }
823         }
824
825         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
826
827         for(ch = 1;ch < nb_display_channels; ch++) {
828             y = s->ytop + ch * h;
829             fill_rectangle(screen,
830                            s->xleft, y, s->width, 1,
831                            fgcolor);
832         }
833         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
834     }else{
835         nb_display_channels= FFMIN(nb_display_channels, 2);
836         if(rdft_bits != s->rdft_bits){
837             av_rdft_end(s->rdft);
838             av_free(s->rdft_data);
839             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
840             s->rdft_bits= rdft_bits;
841             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
842         }
843         {
844             FFTSample *data[2];
845             for(ch = 0;ch < nb_display_channels; ch++) {
846                 data[ch] = s->rdft_data + 2*nb_freq*ch;
847                 i = i_start + ch;
848                 for(x = 0; x < 2*nb_freq; x++) {
849                     double w= (x-nb_freq)*(1.0/nb_freq);
850                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
851                     i += channels;
852                     if (i >= SAMPLE_ARRAY_SIZE)
853                         i -= SAMPLE_ARRAY_SIZE;
854                 }
855                 av_rdft_calc(s->rdft, data[ch]);
856             }
857             // Least efficient way to do this; we could access the data directly, but it's more than fast enough
858             for(y=0; y<s->height; y++){
859                 double w= 1/sqrt(nb_freq);
860                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
861                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
862                        + data[1][2*y+1]*data[1][2*y+1])) : a;
863                 a= FFMIN(a,255);
864                 b= FFMIN(b,255);
865                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
866
867                 fill_rectangle(screen,
868                             s->xpos, s->height-y, 1, 1,
869                             fgcolor);
870             }
871         }
872         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
873         s->xpos++;
874         if(s->xpos >= s->width)
875             s->xpos= s->xleft;
876     }
877 }
878
879 static int video_open(VideoState *is){
880     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
881     int w,h;
882
883     if(is_full_screen) flags |= SDL_FULLSCREEN;
884     else               flags |= SDL_RESIZABLE;
885
886     if (is_full_screen && fs_screen_width) {
887         w = fs_screen_width;
888         h = fs_screen_height;
889     } else if(!is_full_screen && screen_width){
890         w = screen_width;
891         h = screen_height;
892 #if CONFIG_AVFILTER
893     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
894         w = is->out_video_filter->inputs[0]->w;
895         h = is->out_video_filter->inputs[0]->h;
896 #else
897     }else if (is->video_st && is->video_st->codec->width){
898         w = is->video_st->codec->width;
899         h = is->video_st->codec->height;
900 #endif
901     } else {
902         w = 640;
903         h = 480;
904     }
905     if(screen && is->width == screen->w && screen->w == w
906        && is->height== screen->h && screen->h == h)
907         return 0;
908
909 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
910     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
911     screen = SDL_SetVideoMode(w, h, 24, flags);
912 #else
913     screen = SDL_SetVideoMode(w, h, 0, flags);
914 #endif
915     if (!screen) {
916         fprintf(stderr, "SDL: could not set video mode - exiting\n");
917         return -1;
918     }
919     if (!window_title)
920         window_title = input_filename;
921     SDL_WM_SetCaption(window_title, window_title);
922
923     is->width = screen->w;
924     is->height = screen->h;
925
926     return 0;
927 }
928
929 /* display the current picture, if any */
930 static void video_display(VideoState *is)
931 {
932     if(!screen)
933         video_open(cur_stream);
934     if (is->audio_st && is->show_audio)
935         video_audio_display(is);
936     else if (is->video_st)
937         video_image_display(is);
938 }
939
940 static int refresh_thread(void *opaque)
941 {
942     VideoState *is= opaque;
943     while(!is->abort_request){
944         SDL_Event event;
945         event.type = FF_REFRESH_EVENT;
946         event.user.data1 = opaque;
947         if(!is->refresh){
948             is->refresh=1;
949             SDL_PushEvent(&event);
950         }
951         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
952     }
953     return 0;
954 }
955
956 /* get the current audio clock value */
957 static double get_audio_clock(VideoState *is)
958 {
959     double pts;
960     int hw_buf_size, bytes_per_sec;
961     pts = is->audio_clock;
962     hw_buf_size = audio_write_get_buf_size(is);
963     bytes_per_sec = 0;
964     if (is->audio_st) {
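        /* Output samples are interleaved signed 16-bit, hence 2 bytes per sample
           and channel (the SDL audio setup is outside this hunk). */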
965         bytes_per_sec = is->audio_st->codec->sample_rate *
966             2 * is->audio_st->codec->channels;
967     }
968     if (bytes_per_sec)
969         pts -= (double)hw_buf_size / bytes_per_sec;
970     return pts;
971 }
972
973 /* get the current video clock value */
974 static double get_video_clock(VideoState *is)
975 {
976     if (is->paused) {
977         return is->video_current_pts;
978     } else {
979         return is->video_current_pts_drift + av_gettime() / 1000000.0;
980     }
981 }
982
983 /* get the current external clock value */
984 static double get_external_clock(VideoState *is)
985 {
986     int64_t ti;
987     ti = av_gettime();
988     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
989 }
990
991 /* get the current master clock value */
992 static double get_master_clock(VideoState *is)
993 {
994     double val;
995
996     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
997         if (is->video_st)
998             val = get_video_clock(is);
999         else
1000             val = get_audio_clock(is);
1001     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1002         if (is->audio_st)
1003             val = get_audio_clock(is);
1004         else
1005             val = get_video_clock(is);
1006     } else {
1007         val = get_external_clock(is);
1008     }
1009     return val;
1010 }
1011
1012 /* seek in the stream */
1013 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1014 {
1015     if (!is->seek_req) {
1016         is->seek_pos = pos;
1017         is->seek_rel = rel;
1018         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1019         if (seek_by_bytes)
1020             is->seek_flags |= AVSEEK_FLAG_BYTE;
1021         is->seek_req = 1;
1022     }
1023 }
1024
1025 /* pause or resume the video */
1026 static void stream_pause(VideoState *is)
1027 {
1028     if (is->paused) {
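        /* On resume: video_current_pts_drift was computed as pts minus the time of
           the last update, so this adds exactly the wall-clock time spent paused to
           frame_timer, keeping the display schedule continuous. */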
1029         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1030         if(is->read_pause_return != AVERROR(ENOSYS)){
1031             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1032         }
1033         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1034     }
1035     is->paused = !is->paused;
1036 }
1037
1038 static double compute_target_time(double frame_current_pts, VideoState *is)
1039 {
1040     double delay, sync_threshold, diff;
1041
1042     /* compute nominal delay */
1043     delay = frame_current_pts - is->frame_last_pts;
1044     if (delay <= 0 || delay >= 10.0) {
1045         /* if incorrect delay, use previous one */
1046         delay = is->frame_last_delay;
1047     } else {
1048         is->frame_last_delay = delay;
1049     }
1050     is->frame_last_pts = frame_current_pts;
1051
1052     /* update delay to follow master synchronisation source */
1053     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1054          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1055         /* if video is slave, we try to correct big delays by
1056            duplicating or deleting a frame */
1057         diff = get_video_clock(is) - get_master_clock(is);
1058
1059         /* skip or repeat frame. We take into account the
1060            delay to compute the threshold. I still don't know
1061            if it is the best guess */
1062         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1063         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1064             if (diff <= -sync_threshold)
1065                 delay = 0;
1066             else if (diff >= sync_threshold)
1067                 delay = 2 * delay;
1068         }
1069     }
1070     is->frame_timer += delay;
1071
1072     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1073             delay, frame_current_pts, -diff);
1074
1075     return is->frame_timer;
1076 }
1077
1078 /* called to display each frame */
1079 static void video_refresh_timer(void *opaque)
1080 {
1081     VideoState *is = opaque;
1082     VideoPicture *vp;
1083
1084     SubPicture *sp, *sp2;
1085
1086     if (is->video_st) {
1087 retry:
1088         if (is->pictq_size == 0) {
1089             // nothing to do, no picture to display in the queue
1090         } else {
1091             double time= av_gettime()/1000000.0;
1092             double next_target;
1093             /* dequeue the picture */
1094             vp = &is->pictq[is->pictq_rindex];
1095
1096             if(time < vp->target_clock)
1097                 return;
1098             /* update current video pts */
1099             is->video_current_pts = vp->pts;
1100             is->video_current_pts_drift = is->video_current_pts - time;
1101             is->video_current_pos = vp->pos;
1102             if(is->pictq_size > 1){
1103                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1104                 assert(nextvp->target_clock >= vp->target_clock);
1105                 next_target= nextvp->target_clock;
1106             }else{
1107                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1108             }
1109             if(framedrop && time > next_target){
1110                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1111                 if(is->pictq_size > 1 || time > next_target + 0.5){
1112                     /* update queue size and signal for next picture */
1113                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114                         is->pictq_rindex = 0;
1115
1116                     SDL_LockMutex(is->pictq_mutex);
1117                     is->pictq_size--;
1118                     SDL_CondSignal(is->pictq_cond);
1119                     SDL_UnlockMutex(is->pictq_mutex);
1120                     goto retry;
1121                 }
1122             }
1123
1124             if(is->subtitle_st) {
1125                 if (is->subtitle_stream_changed) {
1126                     SDL_LockMutex(is->subpq_mutex);
1127
1128                     while (is->subpq_size) {
1129                         free_subpicture(&is->subpq[is->subpq_rindex]);
1130
1131                         /* update queue size and signal for next picture */
1132                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1133                             is->subpq_rindex = 0;
1134
1135                         is->subpq_size--;
1136                     }
1137                     is->subtitle_stream_changed = 0;
1138
1139                     SDL_CondSignal(is->subpq_cond);
1140                     SDL_UnlockMutex(is->subpq_mutex);
1141                 } else {
1142                     if (is->subpq_size > 0) {
1143                         sp = &is->subpq[is->subpq_rindex];
1144
1145                         if (is->subpq_size > 1)
1146                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1147                         else
1148                             sp2 = NULL;
1149
1150                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1151                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1152                         {
1153                             free_subpicture(sp);
1154
1155                             /* update queue size and signal for next picture */
1156                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1157                                 is->subpq_rindex = 0;
1158
1159                             SDL_LockMutex(is->subpq_mutex);
1160                             is->subpq_size--;
1161                             SDL_CondSignal(is->subpq_cond);
1162                             SDL_UnlockMutex(is->subpq_mutex);
1163                         }
1164                     }
1165                 }
1166             }
1167
1168             /* display picture */
1169             if (!display_disable)
1170                 video_display(is);
1171
1172             /* update queue size and signal for next picture */
1173             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1174                 is->pictq_rindex = 0;
1175
1176             SDL_LockMutex(is->pictq_mutex);
1177             is->pictq_size--;
1178             SDL_CondSignal(is->pictq_cond);
1179             SDL_UnlockMutex(is->pictq_mutex);
1180         }
1181     } else if (is->audio_st) {
1182         /* draw the next audio frame */
1183
1184         /* if there is only an audio stream, display the audio bars (better
1185            than nothing, just to test the implementation) */
1186
1187         /* display picture */
1188         if (!display_disable)
1189             video_display(is);
1190     }
1191     if (show_status) {
1192         static int64_t last_time;
1193         int64_t cur_time;
1194         int aqsize, vqsize, sqsize;
1195         double av_diff;
1196
1197         cur_time = av_gettime();
1198         if (!last_time || (cur_time - last_time) >= 30000) {
1199             aqsize = 0;
1200             vqsize = 0;
1201             sqsize = 0;
1202             if (is->audio_st)
1203                 aqsize = is->audioq.size;
1204             if (is->video_st)
1205                 vqsize = is->videoq.size;
1206             if (is->subtitle_st)
1207                 sqsize = is->subtitleq.size;
1208             av_diff = 0;
1209             if (is->audio_st && is->video_st)
1210                 av_diff = get_audio_clock(is) - get_video_clock(is);
1211             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1212                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1213             fflush(stdout);
1214             last_time = cur_time;
1215         }
1216     }
1217 }
1218
1219 static void stream_close(VideoState *is)
1220 {
1221     VideoPicture *vp;
1222     int i;
1223     /* XXX: use a special url_shutdown call to abort parse cleanly */
1224     is->abort_request = 1;
1225     SDL_WaitThread(is->parse_tid, NULL);
1226     SDL_WaitThread(is->refresh_tid, NULL);
1227
1228     /* free all pictures */
1229     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1230         vp = &is->pictq[i];
1231 #if CONFIG_AVFILTER
1232         if (vp->picref) {
1233             avfilter_unref_buffer(vp->picref);
1234             vp->picref = NULL;
1235         }
1236 #endif
1237         if (vp->bmp) {
1238             SDL_FreeYUVOverlay(vp->bmp);
1239             vp->bmp = NULL;
1240         }
1241     }
1242     SDL_DestroyMutex(is->pictq_mutex);
1243     SDL_DestroyCond(is->pictq_cond);
1244     SDL_DestroyMutex(is->subpq_mutex);
1245     SDL_DestroyCond(is->subpq_cond);
1246 #if !CONFIG_AVFILTER
1247     if (is->img_convert_ctx)
1248         sws_freeContext(is->img_convert_ctx);
1249 #endif
1250     av_free(is);
1251 }
1252
1253 static void do_exit(void)
1254 {
1255     if (cur_stream) {
1256         stream_close(cur_stream);
1257         cur_stream = NULL;
1258     }
1259     uninit_opts();
1260 #if CONFIG_AVFILTER
1261     avfilter_uninit();
1262 #endif
1263     avformat_network_deinit();
1264     if (show_status)
1265         printf("\n");
1266     SDL_Quit();
1267     av_log(NULL, AV_LOG_QUIET, "");
1268     exit(0);
1269 }
1270
1271 /* allocate a picture (this needs to be done in the main thread to avoid
1272    potential locking problems) */
1273 static void alloc_picture(void *opaque)
1274 {
1275     VideoState *is = opaque;
1276     VideoPicture *vp;
1277
1278     vp = &is->pictq[is->pictq_windex];
1279
1280     if (vp->bmp)
1281         SDL_FreeYUVOverlay(vp->bmp);
1282
1283 #if CONFIG_AVFILTER
1284     if (vp->picref)
1285         avfilter_unref_buffer(vp->picref);
1286     vp->picref = NULL;
1287
1288     vp->width   = is->out_video_filter->inputs[0]->w;
1289     vp->height  = is->out_video_filter->inputs[0]->h;
1290     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1291 #else
1292     vp->width   = is->video_st->codec->width;
1293     vp->height  = is->video_st->codec->height;
1294     vp->pix_fmt = is->video_st->codec->pix_fmt;
1295 #endif
1296
1297     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1298                                    SDL_YV12_OVERLAY,
1299                                    screen);
1300     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1301         /* SDL allocates a buffer smaller than requested if the video
1302          * overlay hardware is unable to support the requested size. */
1303         fprintf(stderr, "Error: the video system does not support an image\n"
1304                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1305                         "to reduce the image size.\n", vp->width, vp->height );
1306         do_exit();
1307     }
1308
1309     SDL_LockMutex(is->pictq_mutex);
1310     vp->allocated = 1;
1311     SDL_CondSignal(is->pictq_cond);
1312     SDL_UnlockMutex(is->pictq_mutex);
1313 }
1314
1315 /**
1316  * Queue a decoded picture for display.
1317  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1318  */
1319 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1320 {
1321     VideoPicture *vp;
1322 #if CONFIG_AVFILTER
1323     AVPicture pict_src;
1324 #else
1325     int dst_pix_fmt = PIX_FMT_YUV420P;
1326 #endif
1327     /* wait until we have space to put a new picture */
1328     SDL_LockMutex(is->pictq_mutex);
1329
1330     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1331         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1332
1333     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1334            !is->videoq.abort_request) {
1335         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1336     }
1337     SDL_UnlockMutex(is->pictq_mutex);
1338
1339     if (is->videoq.abort_request)
1340         return -1;
1341
1342     vp = &is->pictq[is->pictq_windex];
1343
1344     /* alloc or resize hardware picture buffer */
1345     if (!vp->bmp || vp->reallocate ||
1346 #if CONFIG_AVFILTER
1347         vp->width  != is->out_video_filter->inputs[0]->w ||
1348         vp->height != is->out_video_filter->inputs[0]->h) {
1349 #else
1350         vp->width != is->video_st->codec->width ||
1351         vp->height != is->video_st->codec->height) {
1352 #endif
1353         SDL_Event event;
1354
1355         vp->allocated  = 0;
1356         vp->reallocate = 0;
1357
1358         /* the allocation must be done in the main thread to avoid
1359            locking problems */
1360         event.type = FF_ALLOC_EVENT;
1361         event.user.data1 = is;
1362         SDL_PushEvent(&event);
1363
1364         /* wait until the picture is allocated */
1365         SDL_LockMutex(is->pictq_mutex);
1366         while (!vp->allocated && !is->videoq.abort_request) {
1367             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368         }
1369         SDL_UnlockMutex(is->pictq_mutex);
1370
1371         if (is->videoq.abort_request)
1372             return -1;
1373     }
1374
1375     /* if the frame is not skipped, then display it */
1376     if (vp->bmp) {
1377         AVPicture pict;
1378 #if CONFIG_AVFILTER
1379         if(vp->picref)
1380             avfilter_unref_buffer(vp->picref);
1381         vp->picref = src_frame->opaque;
1382 #endif
1383
1384         /* get a pointer on the bitmap */
1385         SDL_LockYUVOverlay (vp->bmp);
1386
1387         memset(&pict,0,sizeof(AVPicture));
1388         pict.data[0] = vp->bmp->pixels[0];
1389         pict.data[1] = vp->bmp->pixels[2];
1390         pict.data[2] = vp->bmp->pixels[1];
1391
1392         pict.linesize[0] = vp->bmp->pitches[0];
1393         pict.linesize[1] = vp->bmp->pitches[2];
1394         pict.linesize[2] = vp->bmp->pitches[1];
1395
1396 #if CONFIG_AVFILTER
1397         pict_src.data[0] = src_frame->data[0];
1398         pict_src.data[1] = src_frame->data[1];
1399         pict_src.data[2] = src_frame->data[2];
1400
1401         pict_src.linesize[0] = src_frame->linesize[0];
1402         pict_src.linesize[1] = src_frame->linesize[1];
1403         pict_src.linesize[2] = src_frame->linesize[2];
1404
1405         //FIXME use direct rendering
1406         av_picture_copy(&pict, &pict_src,
1407                         vp->pix_fmt, vp->width, vp->height);
1408 #else
1409         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1410         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1411             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1412             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1413         if (is->img_convert_ctx == NULL) {
1414             fprintf(stderr, "Cannot initialize the conversion context\n");
1415             exit(1);
1416         }
1417         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1418                   0, vp->height, pict.data, pict.linesize);
1419 #endif
1420         /* update the bitmap content */
1421         SDL_UnlockYUVOverlay(vp->bmp);
1422
1423         vp->pts = pts;
1424         vp->pos = pos;
1425
1426         /* now we can update the picture count */
1427         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1428             is->pictq_windex = 0;
1429         SDL_LockMutex(is->pictq_mutex);
1430         vp->target_clock= compute_target_time(vp->pts, is);
1431
1432         is->pictq_size++;
1433         SDL_UnlockMutex(is->pictq_mutex);
1434     }
1435     return 0;
1436 }
1437
1438 /**
1439  * compute the exact PTS for the picture if it is omitted in the stream
1440  * @param pts1 the dts of the pkt / pts of the frame
1441  */
1442 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1443 {
1444     double frame_delay, pts;
1445
1446     pts = pts1;
1447
1448     if (pts != 0) {
1449         /* update video clock with pts, if present */
1450         is->video_clock = pts;
1451     } else {
1452         pts = is->video_clock;
1453     }
1454     /* update video clock for next frame */
1455     frame_delay = av_q2d(is->video_st->codec->time_base);
1456     /* for MPEG2, the frame can be repeated, so we update the
1457        clock accordingly */
1458     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1459     is->video_clock += frame_delay;
1460
1461     return queue_picture(is, src_frame, pts, pos);
1462 }
1463
1464 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1465 {
1466     int got_picture, i;
1467
1468     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1469         return -1;
1470
1471     if (pkt->data == flush_pkt.data) {
1472         avcodec_flush_buffers(is->video_st->codec);
1473
1474         SDL_LockMutex(is->pictq_mutex);
1475         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1476         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1477             is->pictq[i].target_clock= 0;
1478         }
1479         while (is->pictq_size && !is->videoq.abort_request) {
1480             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1481         }
1482         is->video_current_pos = -1;
1483         SDL_UnlockMutex(is->pictq_mutex);
1484
1485         init_pts_correction(&is->pts_ctx);
1486         is->frame_last_pts = AV_NOPTS_VALUE;
1487         is->frame_last_delay = 0;
1488         is->frame_timer = (double)av_gettime() / 1000000.0;
1489         is->skip_frames = 1;
1490         is->skip_frames_index = 0;
1491         return 0;
1492     }
1493
1494     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1495
1496     if (got_picture) {
1497         if (decoder_reorder_pts == -1) {
1498             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1499         } else if (decoder_reorder_pts) {
1500             *pts = frame->pkt_pts;
1501         } else {
1502             *pts = frame->pkt_dts;
1503         }
1504
1505         if (*pts == AV_NOPTS_VALUE) {
1506             *pts = 0;
1507         }
1508
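        /* Frame-dropping accumulator: when skip_frames > 1, only roughly one in
           every skip_frames decoded frames is returned for display. */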
1509         is->skip_frames_index += 1;
1510         if(is->skip_frames_index >= is->skip_frames){
1511             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1512             return 1;
1513         }
1514
1515     }
1516     return 0;
1517 }
1518
1519 #if CONFIG_AVFILTER
1520 typedef struct {
1521     VideoState *is;
1522     AVFrame *frame;
1523     int use_dr1;
1524 } FilterPriv;
1525
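/* AVCodecContext.get_buffer() override: the decoder writes directly into a buffer
   obtained from the filter graph (direct rendering), padded by the codec's
   required edge width. */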
1526 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1527 {
1528     AVFilterContext *ctx = codec->opaque;
1529     AVFilterBufferRef  *ref;
1530     int perms = AV_PERM_WRITE;
1531     int i, w, h, stride[4];
1532     unsigned edge;
1533     int pixel_size;
1534
1535     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1536         perms |= AV_PERM_NEG_LINESIZES;
1537
1538     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1539         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1540         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1541         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1542     }
1543     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1544
1545     w = codec->width;
1546     h = codec->height;
1547     avcodec_align_dimensions2(codec, &w, &h, stride);
1548     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1549     w += edge << 1;
1550     h += edge << 1;
1551
1552     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1553         return -1;
1554
1555     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1556     ref->video->w = codec->width;
1557     ref->video->h = codec->height;
1558     for(i = 0; i < 4; i ++) {
1559         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1560         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1561
1562         if (ref->data[i]) {
1563             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1564         }
1565         pic->data[i]     = ref->data[i];
1566         pic->linesize[i] = ref->linesize[i];
1567     }
1568     pic->opaque = ref;
1569     pic->age    = INT_MAX;
1570     pic->type   = FF_BUFFER_TYPE_USER;
1571     pic->reordered_opaque = codec->reordered_opaque;
1572     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1573     else           pic->pkt_pts = AV_NOPTS_VALUE;
1574     return 0;
1575 }
1576
1577 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1578 {
1579     memset(pic->data, 0, sizeof(pic->data));
1580     avfilter_unref_buffer(pic->opaque);
1581 }
1582
1583 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1584 {
1585     AVFilterBufferRef *ref = pic->opaque;
1586
1587     if (pic->data[0] == NULL) {
1588         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1589         return codec->get_buffer(codec, pic);
1590     }
1591
1592     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1593         (codec->pix_fmt != ref->format)) {
1594         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1595         return -1;
1596     }
1597
1598     pic->reordered_opaque = codec->reordered_opaque;
1599     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1600     else           pic->pkt_pts = AV_NOPTS_VALUE;
1601     return 0;
1602 }
1603
1604 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1605 {
1606     FilterPriv *priv = ctx->priv;
1607     AVCodecContext *codec;
1608     if(!opaque) return -1;
1609
1610     priv->is = opaque;
1611     codec    = priv->is->video_st->codec;
1612     codec->opaque = ctx;
1613     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1614         priv->use_dr1 = 1;
1615         codec->get_buffer     = input_get_buffer;
1616         codec->release_buffer = input_release_buffer;
1617         codec->reget_buffer   = input_reget_buffer;
1618         codec->thread_safe_callbacks = 1;
1619     }
1620
1621     priv->frame = avcodec_alloc_frame();
1622
1623     return 0;
1624 }
1625
1626 static void input_uninit(AVFilterContext *ctx)
1627 {
1628     FilterPriv *priv = ctx->priv;
1629     av_free(priv->frame);
1630 }
1631
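/* request_frame() callback of the source filter: pull packets from the video
   queue, decode them, and push the resulting frame into the graph as a buffer
   reference. */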
1632 static int input_request_frame(AVFilterLink *link)
1633 {
1634     FilterPriv *priv = link->src->priv;
1635     AVFilterBufferRef *picref;
1636     int64_t pts = 0;
1637     AVPacket pkt;
1638     int ret;
1639
1640     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1641         av_free_packet(&pkt);
1642     if (ret < 0)
1643         return -1;
1644
1645     if(priv->use_dr1) {
1646         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1647     } else {
1648         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1649         av_image_copy(picref->data, picref->linesize,
1650                       priv->frame->data, priv->frame->linesize,
1651                       picref->format, link->w, link->h);
1652     }
1653     av_free_packet(&pkt);
1654
1655     picref->pts = pts;
1656     picref->pos = pkt.pos;
1657     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1658     avfilter_start_frame(link, picref);
1659     avfilter_draw_slice(link, 0, link->h, 1);
1660     avfilter_end_frame(link);
1661
1662     return 0;
1663 }
1664
1665 static int input_query_formats(AVFilterContext *ctx)
1666 {
1667     FilterPriv *priv = ctx->priv;
1668     enum PixelFormat pix_fmts[] = {
1669         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1670     };
1671
1672     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1673     return 0;
1674 }
1675
1676 static int input_config_props(AVFilterLink *link)
1677 {
1678     FilterPriv *priv  = link->src->priv;
1679     AVCodecContext *c = priv->is->video_st->codec;
1680
1681     link->w = c->width;
1682     link->h = c->height;
1683     link->time_base = priv->is->video_st->time_base;
1684
1685     return 0;
1686 }
1687
1688 static AVFilter input_filter =
1689 {
1690     .name      = "avplay_input",
1691
1692     .priv_size = sizeof(FilterPriv),
1693
1694     .init      = input_init,
1695     .uninit    = input_uninit,
1696
1697     .query_formats = input_query_formats,
1698
1699     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1700     .outputs   = (AVFilterPad[]) {{ .name = "default",
1701                                     .type = AVMEDIA_TYPE_VIDEO,
1702                                     .request_frame = input_request_frame,
1703                                     .config_props  = input_config_props, },
1704                                   { .name = NULL }},
1705 };
1706
1707 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1708 {
1709     char sws_flags_str[128];
1710     int ret;
1711     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1712     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1713     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1714     graph->scale_sws_opts = av_strdup(sws_flags_str);
1715
1716     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1717                                             NULL, is, graph)) < 0)
1718         return ret;
1719     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1720                                             NULL, &ffsink_ctx, graph)) < 0)
1721         return ret;
1722
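         /* if the user supplied a filter description, parse it in between the
          * avplay source and the sink; otherwise connect them directly */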
1723     if(vfilters) {
1724         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1725         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1726
1727         outputs->name    = av_strdup("in");
1728         outputs->filter_ctx = filt_src;
1729         outputs->pad_idx = 0;
1730         outputs->next    = NULL;
1731
1732         inputs->name    = av_strdup("out");
1733         inputs->filter_ctx = filt_out;
1734         inputs->pad_idx = 0;
1735         inputs->next    = NULL;
1736
1737         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1738             return ret;
1739         av_freep(&vfilters);
1740     } else {
1741         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1742             return ret;
1743     }
1744
1745     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1746         return ret;
1747
1748     is->out_video_filter = filt_out;
1749
1750     return ret;
1751 }
1752
1753 #endif  /* CONFIG_AVFILTER */
1754
1755 static int video_thread(void *arg)
1756 {
1757     VideoState *is = arg;
1758     AVFrame *frame= avcodec_alloc_frame();
1759     int64_t pts_int;
1760     double pts;
1761     int ret;
1762
1763 #if CONFIG_AVFILTER
1764     AVFilterGraph *graph = avfilter_graph_alloc();
1765     AVFilterContext *filt_out = NULL;
1766     int64_t pos;
1767     int last_w = is->video_st->codec->width;
1768     int last_h = is->video_st->codec->height;
1769
1770     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1771         goto the_end;
1772     filt_out = is->out_video_filter;
1773 #endif
1774
1775     for(;;) {
1776 #if !CONFIG_AVFILTER
1777         AVPacket pkt;
1778 #else
1779         AVFilterBufferRef *picref;
1780         AVRational tb;
1781 #endif
1782         while (is->paused && !is->videoq.abort_request)
1783             SDL_Delay(10);
1784 #if CONFIG_AVFILTER
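             /* the filter graph is built for a fixed frame size, so it has to
              * be reconfigured whenever the decoded picture size changes */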
1785         if (   last_w != is->video_st->codec->width
1786             || last_h != is->video_st->codec->height) {
1787             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1788                     is->video_st->codec->width, is->video_st->codec->height);
1789             avfilter_graph_free(&graph);
1790             graph = avfilter_graph_alloc();
1791             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1792                 goto the_end;
1793             filt_out = is->out_video_filter;
1794             last_w = is->video_st->codec->width;
1795             last_h = is->video_st->codec->height;
1796         }
1797         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1798         if (picref) {
1799             pts_int = picref->pts;
1800             pos     = picref->pos;
1801             frame->opaque = picref;
1802         }
1803
1804         if (av_cmp_q(tb, is->video_st->time_base)) {
1805             av_unused int64_t pts1 = pts_int;
1806             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1807             av_dlog(NULL, "video_thread(): "
1808                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1809                     tb.num, tb.den, pts1,
1810                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1811         }
1812 #else
1813         ret = get_video_frame(is, frame, &pts_int, &pkt);
1814 #endif
1815
1816         if (ret < 0) goto the_end;
1817
1818         if (!ret)
1819             continue;
1820
1821         pts = pts_int*av_q2d(is->video_st->time_base);
1822
1823 #if CONFIG_AVFILTER
1824         ret = output_picture2(is, frame, pts, pos);
1825 #else
1826         ret = output_picture2(is, frame, pts,  pkt.pos);
1827         av_free_packet(&pkt);
1828 #endif
1829         if (ret < 0)
1830             goto the_end;
1831
1832         if (step)
1833             if (cur_stream)
1834                 stream_pause(cur_stream);
1835     }
1836  the_end:
1837 #if CONFIG_AVFILTER
1838     avfilter_graph_free(&graph);
1839 #endif
1840     av_free(frame);
1841     return 0;
1842 }
1843
1844 static int subtitle_thread(void *arg)
1845 {
1846     VideoState *is = arg;
1847     SubPicture *sp;
1848     AVPacket pkt1, *pkt = &pkt1;
1849     int got_subtitle;
1850     double pts;
1851     int i, j;
1852     int r, g, b, y, u, v, a;
1853
1854     for(;;) {
1855         while (is->paused && !is->subtitleq.abort_request) {
1856             SDL_Delay(10);
1857         }
1858         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1859             break;
1860
1861         if(pkt->data == flush_pkt.data){
1862             avcodec_flush_buffers(is->subtitle_st->codec);
1863             continue;
1864         }
1865         SDL_LockMutex(is->subpq_mutex);
1866         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1867                !is->subtitleq.abort_request) {
1868             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1869         }
1870         SDL_UnlockMutex(is->subpq_mutex);
1871
1872         if (is->subtitleq.abort_request)
1873             return 0;
1874
1875         sp = &is->subpq[is->subpq_windex];
1876
1877         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1878            this packet, if any */
1879         pts = 0;
1880         if (pkt->pts != AV_NOPTS_VALUE)
1881             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1882
1883         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1884                                  &got_subtitle, pkt);
1885
1886         if (got_subtitle && sp->sub.format == 0) {
1887             sp->pts = pts;
1888
1889             for (i = 0; i < sp->sub.num_rects; i++)
1890             {
1891                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1892                 {
1893                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1894                     y = RGB_TO_Y_CCIR(r, g, b);
1895                     u = RGB_TO_U_CCIR(r, g, b, 0);
1896                     v = RGB_TO_V_CCIR(r, g, b, 0);
1897                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1898                 }
1899             }
1900
1901             /* now we can update the picture count */
1902             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1903                 is->subpq_windex = 0;
1904             SDL_LockMutex(is->subpq_mutex);
1905             is->subpq_size++;
1906             SDL_UnlockMutex(is->subpq_mutex);
1907         }
1908         av_free_packet(pkt);
1909     }
1910     return 0;
1911 }
1912
1913 /* copy samples for the audio visualization (waveform/spectrum display) */
1914 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1915 {
1916     int size, len;
1917
1918     size = samples_size / sizeof(short);
1919     while (size > 0) {
1920         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1921         if (len > size)
1922             len = size;
1923         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1924         samples += len;
1925         is->sample_array_index += len;
1926         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1927             is->sample_array_index = 0;
1928         size -= len;
1929     }
1930 }
1931
1932 /* return the new audio buffer size (samples can be added or removed
1933    to get better sync when the video or external clock is the master) */
1934 static int synchronize_audio(VideoState *is, short *samples,
1935                              int samples_size1, double pts)
1936 {
1937     int n, samples_size;
1938     double ref_clock;
1939
1940     n = 2 * is->audio_st->codec->channels;
1941     samples_size = samples_size1;
1942
1943     /* if not master, then we try to remove or add samples to correct the clock */
1944     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1945          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1946         double diff, avg_diff;
1947         int wanted_size, min_size, max_size, nb_samples;
1948
1949         ref_clock = get_master_clock(is);
1950         diff = get_audio_clock(is) - ref_clock;
1951
1952         if (diff < AV_NOSYNC_THRESHOLD) {
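                 /* exponentially weighted running sum of the A-V difference;
                  * multiplying by (1 - coef) below yields an average over
                  * roughly the last AUDIO_DIFF_AVG_NB measurements */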
1953             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1954             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1955                 /* not enough measurements yet for a reliable estimate */
1956                 is->audio_diff_avg_count++;
1957             } else {
1958                 /* estimate the A-V difference */
1959                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1960
1961                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
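                         /* convert the time error into a byte count:
                          * diff seconds * sample_rate frames/s * n bytes/frame */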
1962                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1963                     nb_samples = samples_size / n;
1964
1965                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1966                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1967                     if (wanted_size < min_size)
1968                         wanted_size = min_size;
1969                     else if (wanted_size > max_size)
1970                         wanted_size = max_size;
1971
1972                     /* add or remove samples to correct the synchronization */
1973                     if (wanted_size < samples_size) {
1974                         /* remove samples */
1975                         samples_size = wanted_size;
1976                     } else if (wanted_size > samples_size) {
1977                         uint8_t *samples_end, *q;
1978                         int nb;
1979
1980                         /* add samples */
1981                         nb = wanted_size - samples_size; /* bytes to append */
1982                         samples_end = (uint8_t *)samples + samples_size - n;
1983                         q = samples_end + n;
1984                         while (nb > 0) {
1985                             memcpy(q, samples_end, n);
1986                             q += n;
1987                             nb -= n;
1988                         }
1989                         samples_size = wanted_size;
1990                     }
1991                 }
1992                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1993                         diff, avg_diff, samples_size - samples_size1,
1994                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1995             }
1996         } else {
1997             /* too big difference : may be initial PTS errors, so
1998                reset A-V filter */
1999             is->audio_diff_avg_count = 0;
2000             is->audio_diff_cum = 0;
2001         }
2002     }
2003
2004     return samples_size;
2005 }
2006
2007 /* decode one audio frame and return its uncompressed size in bytes */
2008 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2009 {
2010     AVPacket *pkt_temp = &is->audio_pkt_temp;
2011     AVPacket *pkt = &is->audio_pkt;
2012     AVCodecContext *dec= is->audio_st->codec;
2013     int n, len1, data_size;
2014     double pts;
2015     int new_packet = 0;
2016     int flush_complete = 0;
2017
2018     for(;;) {
2019         /* NOTE: the audio packet can contain several frames */
2020         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
2021             if (flush_complete)
2022                 break;
2023             new_packet = 0;
2024             data_size = sizeof(is->audio_buf1);
2025             len1 = avcodec_decode_audio3(dec,
2026                                         (int16_t *)is->audio_buf1, &data_size,
2027                                         pkt_temp);
2028             if (len1 < 0) {
2029                 /* if error, we skip the frame */
2030                 pkt_temp->size = 0;
2031                 break;
2032             }
2033
2034             pkt_temp->data += len1;
2035             pkt_temp->size -= len1;
2036
2037             if (data_size <= 0) {
2038                 /* stop sending empty packets if the decoder is finished */
2039                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
2040                     flush_complete = 1;
2041                 continue;
2042             }
2043
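                 /* SDL is fed signed 16-bit samples, so set up a converter
                  * whenever the decoder output format differs from S16 */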
2044             if (dec->sample_fmt != is->audio_src_fmt) {
2045                 if (is->reformat_ctx)
2046                     av_audio_convert_free(is->reformat_ctx);
2047                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2048                                                          dec->sample_fmt, 1, NULL, 0);
2049                 if (!is->reformat_ctx) {
2050                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2051                         av_get_sample_fmt_name(dec->sample_fmt),
2052                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2053                         break;
2054                 }
2055                 is->audio_src_fmt= dec->sample_fmt;
2056             }
2057
2058             if (is->reformat_ctx) {
2059                 const void *ibuf[6]= {is->audio_buf1};
2060                 void *obuf[6]= {is->audio_buf2};
2061                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2062                 int ostride[6]= {2};
2063                 int len= data_size/istride[0];
2064                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2065                     printf("av_audio_convert() failed\n");
2066                     break;
2067                 }
2068                 is->audio_buf= is->audio_buf2;
2069                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2070                           remove this legacy cruft */
2071                 data_size= len*2;
2072             }else{
2073                 is->audio_buf= is->audio_buf1;
2074             }
2075
2076             /* derive the pts from the running audio clock */
2077             pts = is->audio_clock;
2078             *pts_ptr = pts;
2079             n = 2 * dec->channels;
2080             is->audio_clock += (double)data_size /
2081                 (double)(n * dec->sample_rate);
2082 #ifdef DEBUG
2083             {
2084                 static double last_clock;
2085                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2086                        is->audio_clock - last_clock,
2087                        is->audio_clock, pts);
2088                 last_clock = is->audio_clock;
2089             }
2090 #endif
2091             return data_size;
2092         }
2093
2094         /* free the current packet */
2095         if (pkt->data)
2096             av_free_packet(pkt);
2097
2098         if (is->paused || is->audioq.abort_request) {
2099             return -1;
2100         }
2101
2102         /* read next packet */
2103         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2104             return -1;
2105
2106         if (pkt->data == flush_pkt.data)
2107             avcodec_flush_buffers(dec);
2108
2109         pkt_temp->data = pkt->data;
2110         pkt_temp->size = pkt->size;
2111
2112         /* update the audio clock with the packet pts, if present */
2113         if (pkt->pts != AV_NOPTS_VALUE) {
2114             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2115         }
2116     }
2117 }
2118
2119 /* prepare a new audio buffer */
2120 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2121 {
2122     VideoState *is = opaque;
2123     int audio_size, len1;
2124     double pts;
2125
2126     audio_callback_time = av_gettime();
2127
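         /* completely fill the SDL buffer, decoding new frames as needed;
          * when decoding fails, audio comes from the dedicated silence buffer */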
2128     while (len > 0) {
2129         if (is->audio_buf_index >= is->audio_buf_size) {
2130            audio_size = audio_decode_frame(is, &pts);
2131            if (audio_size < 0) {
2132                 /* if error, just output silence */
2133                is->audio_buf      = is->silence_buf;
2134                is->audio_buf_size = sizeof(is->silence_buf);
2135            } else {
2136                if (is->show_audio)
2137                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2138                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2139                                               pts);
2140                is->audio_buf_size = audio_size;
2141            }
2142            is->audio_buf_index = 0;
2143         }
2144         len1 = is->audio_buf_size - is->audio_buf_index;
2145         if (len1 > len)
2146             len1 = len;
2147         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2148         len -= len1;
2149         stream += len1;
2150         is->audio_buf_index += len1;
2151     }
2152 }
2153
2154 /* open a given stream. Return 0 if OK */
2155 static int stream_component_open(VideoState *is, int stream_index)
2156 {
2157     AVFormatContext *ic = is->ic;
2158     AVCodecContext *avctx;
2159     AVCodec *codec;
2160     SDL_AudioSpec wanted_spec, spec;
2161     AVDictionary *opts;
2162     AVDictionaryEntry *t = NULL;
2163
2164     if (stream_index < 0 || stream_index >= ic->nb_streams)
2165         return -1;
2166     avctx = ic->streams[stream_index]->codec;
2167
2168     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2169
2170     /* for audio, request at most 2 decoded channels, since that is all we output */
2171     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2172         if (avctx->channels > 0) {
2173             avctx->request_channels = FFMIN(2, avctx->channels);
2174         } else {
2175             avctx->request_channels = 2;
2176         }
2177     }
2178
2179     codec = avcodec_find_decoder(avctx->codec_id);
2180     avctx->debug_mv = debug_mv;
2181     avctx->debug = debug;
2182     avctx->workaround_bugs = workaround_bugs;
2183     avctx->lowres = lowres;
2184     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2185     avctx->idct_algo= idct;
2186     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2187     avctx->skip_frame= skip_frame;
2188     avctx->skip_idct= skip_idct;
2189     avctx->skip_loop_filter= skip_loop_filter;
2190     avctx->error_recognition= error_recognition;
2191     avctx->error_concealment= error_concealment;
2192     avctx->thread_count= thread_count;
2193
2194     if (!codec ||
2195         avcodec_open2(avctx, codec, &opts) < 0)
2196         return -1;
2197     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2198         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2199         return AVERROR_OPTION_NOT_FOUND;
2200     }
2201
2202     /* prepare audio output */
2203     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2204         wanted_spec.freq = avctx->sample_rate;
2205         wanted_spec.format = AUDIO_S16SYS;
2206         wanted_spec.channels = avctx->channels;
2207         wanted_spec.silence = 0;
2208         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2209         wanted_spec.callback = sdl_audio_callback;
2210         wanted_spec.userdata = is;
2211         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2212             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2213             return -1;
2214         }
2215         is->audio_hw_buf_size = spec.size;
2216         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2217     }
2218
2219     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2220     switch(avctx->codec_type) {
2221     case AVMEDIA_TYPE_AUDIO:
2222         is->audio_stream = stream_index;
2223         is->audio_st = ic->streams[stream_index];
2224         is->audio_buf_size = 0;
2225         is->audio_buf_index = 0;
2226
2227         /* init averaging filter */
2228         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2229         is->audio_diff_avg_count = 0;
2230         /* since we do not have a precise enough audio FIFO fullness measure,
2231            we correct audio sync only if the error is larger than this threshold */
2232         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2233
2234         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2235         packet_queue_init(&is->audioq);
2236         SDL_PauseAudio(0);
2237         break;
2238     case AVMEDIA_TYPE_VIDEO:
2239         is->video_stream = stream_index;
2240         is->video_st = ic->streams[stream_index];
2241
2242         packet_queue_init(&is->videoq);
2243         is->video_tid = SDL_CreateThread(video_thread, is);
2244         break;
2245     case AVMEDIA_TYPE_SUBTITLE:
2246         is->subtitle_stream = stream_index;
2247         is->subtitle_st = ic->streams[stream_index];
2248         packet_queue_init(&is->subtitleq);
2249
2250         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2251         break;
2252     default:
2253         break;
2254     }
2255     return 0;
2256 }
2257
2258 static void stream_component_close(VideoState *is, int stream_index)
2259 {
2260     AVFormatContext *ic = is->ic;
2261     AVCodecContext *avctx;
2262
2263     if (stream_index < 0 || stream_index >= ic->nb_streams)
2264         return;
2265     avctx = ic->streams[stream_index]->codec;
2266
2267     switch(avctx->codec_type) {
2268     case AVMEDIA_TYPE_AUDIO:
2269         packet_queue_abort(&is->audioq);
2270
2271         SDL_CloseAudio();
2272
2273         packet_queue_end(&is->audioq);
2274         av_free_packet(&is->audio_pkt);
2275         if (is->reformat_ctx)
2276             av_audio_convert_free(is->reformat_ctx);
2277         is->reformat_ctx = NULL;
2278
2279         if (is->rdft) {
2280             av_rdft_end(is->rdft);
2281             av_freep(&is->rdft_data);
2282             is->rdft = NULL;
2283             is->rdft_bits = 0;
2284         }
2285         break;
2286     case AVMEDIA_TYPE_VIDEO:
2287         packet_queue_abort(&is->videoq);
2288
2289         /* note: we also signal this mutex to make sure we deblock the
2290            video thread in all cases */
2291         SDL_LockMutex(is->pictq_mutex);
2292         SDL_CondSignal(is->pictq_cond);
2293         SDL_UnlockMutex(is->pictq_mutex);
2294
2295         SDL_WaitThread(is->video_tid, NULL);
2296
2297         packet_queue_end(&is->videoq);
2298         break;
2299     case AVMEDIA_TYPE_SUBTITLE:
2300         packet_queue_abort(&is->subtitleq);
2301
2302         /* note: we also signal this mutex to make sure we deblock the
2303            subtitle thread in all cases */
2304         SDL_LockMutex(is->subpq_mutex);
2305         is->subtitle_stream_changed = 1;
2306
2307         SDL_CondSignal(is->subpq_cond);
2308         SDL_UnlockMutex(is->subpq_mutex);
2309
2310         SDL_WaitThread(is->subtitle_tid, NULL);
2311
2312         packet_queue_end(&is->subtitleq);
2313         break;
2314     default:
2315         break;
2316     }
2317
2318     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2319     avcodec_close(avctx);
2320     switch(avctx->codec_type) {
2321     case AVMEDIA_TYPE_AUDIO:
2322         is->audio_st = NULL;
2323         is->audio_stream = -1;
2324         break;
2325     case AVMEDIA_TYPE_VIDEO:
2326         is->video_st = NULL;
2327         is->video_stream = -1;
2328         break;
2329     case AVMEDIA_TYPE_SUBTITLE:
2330         is->subtitle_st = NULL;
2331         is->subtitle_stream = -1;
2332         break;
2333     default:
2334         break;
2335     }
2336 }
2337
2338 /* since we have only one decoding thread, we can use a global
2339    variable instead of a thread local variable */
2340 static VideoState *global_video_state;
2341
2342 static int decode_interrupt_cb(void *ctx)
2343 {
2344     return (global_video_state && global_video_state->abort_request);
2345 }
2346
2347 /* this thread gets the stream from the disk or the network */
2348 static int decode_thread(void *arg)
2349 {
2350     VideoState *is = arg;
2351     AVFormatContext *ic = NULL;
2352     int err, i, ret;
2353     int st_index[AVMEDIA_TYPE_NB];
2354     AVPacket pkt1, *pkt = &pkt1;
2355     int eof=0;
2356     int pkt_in_play_range = 0;
2357     AVDictionaryEntry *t;
2358     AVDictionary **opts;
2359     int orig_nb_streams;
2360
2361     memset(st_index, -1, sizeof(st_index));
2362     is->video_stream = -1;
2363     is->audio_stream = -1;
2364     is->subtitle_stream = -1;
2365
2366     global_video_state = is;
2367
2368     ic = avformat_alloc_context();
2369     ic->interrupt_callback.callback = decode_interrupt_cb;
2370     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2371     if (err < 0) {
2372         print_error(is->filename, err);
2373         ret = -1;
2374         goto fail;
2375     }
2376     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2377         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2378         ret = AVERROR_OPTION_NOT_FOUND;
2379         goto fail;
2380     }
2381     is->ic = ic;
2382
2383     if(genpts)
2384         ic->flags |= AVFMT_FLAG_GENPTS;
2385
2386     opts = setup_find_stream_info_opts(ic, codec_opts);
2387     orig_nb_streams = ic->nb_streams;
2388
2389     err = avformat_find_stream_info(ic, opts);
2390     if (err < 0) {
2391         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2392         ret = -1;
2393         goto fail;
2394     }
2395     for (i = 0; i < orig_nb_streams; i++)
2396         av_dict_free(&opts[i]);
2397     av_freep(&opts);
2398
2399     if(ic->pb)
2400         ic->pb->eof_reached= 0; //FIXME hack, avplay maybe should not use url_feof() to test for the end
2401
2402     if(seek_by_bytes<0)
2403         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2404
2405     /* if seeking was requested, execute it */
2406     if (start_time != AV_NOPTS_VALUE) {
2407         int64_t timestamp;
2408
2409         timestamp = start_time;
2410         /* add the stream start time */
2411         if (ic->start_time != AV_NOPTS_VALUE)
2412             timestamp += ic->start_time;
2413         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2414         if (ret < 0) {
2415             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2416                     is->filename, (double)timestamp / AV_TIME_BASE);
2417         }
2418     }
2419
2420     for (i = 0; i < ic->nb_streams; i++)
2421         ic->streams[i]->discard = AVDISCARD_ALL;
2422     if (!video_disable)
2423         st_index[AVMEDIA_TYPE_VIDEO] =
2424             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2425                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2426     if (!audio_disable)
2427         st_index[AVMEDIA_TYPE_AUDIO] =
2428             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2429                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2430                                 st_index[AVMEDIA_TYPE_VIDEO],
2431                                 NULL, 0);
2432     if (!video_disable)
2433         st_index[AVMEDIA_TYPE_SUBTITLE] =
2434             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2435                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2436                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2437                                  st_index[AVMEDIA_TYPE_AUDIO] :
2438                                  st_index[AVMEDIA_TYPE_VIDEO]),
2439                                 NULL, 0);
2440     if (show_status) {
2441         av_dump_format(ic, 0, is->filename, 0);
2442     }
2443
2444     /* open the streams */
2445     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2446         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2447     }
2448
2449     ret=-1;
2450     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2451         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2452     }
2453     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2454     if(ret<0) {
2455         if (!display_disable)
2456             is->show_audio = 2;
2457     }
2458
2459     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2460         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2461     }
2462
2463     if (is->video_stream < 0 && is->audio_stream < 0) {
2464         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2465         ret = -1;
2466         goto fail;
2467     }
2468
2469     for(;;) {
2470         if (is->abort_request)
2471             break;
2472         if (is->paused != is->last_paused) {
2473             is->last_paused = is->paused;
2474             if (is->paused)
2475                 is->read_pause_return= av_read_pause(ic);
2476             else
2477                 av_read_play(ic);
2478         }
2479 #if CONFIG_RTSP_DEMUXER
2480         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2481             /* wait 10 ms to avoid trying to get another packet */
2482             /* XXX: horrible */
2483             SDL_Delay(10);
2484             continue;
2485         }
2486 #endif
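             /* perform any pending seek and flush the packet queues so the
              * decoders restart cleanly from the new position */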
2487         if (is->seek_req) {
2488             int64_t seek_target= is->seek_pos;
2489             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2490             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2491 //FIXME the +-2 is due to rounding not being done in the correct direction when
2492 //      the seek_pos/seek_rel variables are generated
2493
2494             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2495             if (ret < 0) {
2496                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2497             }else{
2498                 if (is->audio_stream >= 0) {
2499                     packet_queue_flush(&is->audioq);
2500                     packet_queue_put(&is->audioq, &flush_pkt);
2501                 }
2502                 if (is->subtitle_stream >= 0) {
2503                     packet_queue_flush(&is->subtitleq);
2504                     packet_queue_put(&is->subtitleq, &flush_pkt);
2505                 }
2506                 if (is->video_stream >= 0) {
2507                     packet_queue_flush(&is->videoq);
2508                     packet_queue_put(&is->videoq, &flush_pkt);
2509                 }
2510             }
2511             is->seek_req = 0;
2512             eof= 0;
2513         }
2514
2515         /* if the queues are full, no need to read more */
2516         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2517             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2518                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2519                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2520             /* wait 10 ms */
2521             SDL_Delay(10);
2522             continue;
2523         }
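             /* at EOF, queue empty (flush) packets so the decoders can drain
              * any frames they still have buffered */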
2524         if(eof) {
2525             if(is->video_stream >= 0){
2526                 av_init_packet(pkt);
2527                 pkt->data=NULL;
2528                 pkt->size=0;
2529                 pkt->stream_index= is->video_stream;
2530                 packet_queue_put(&is->videoq, pkt);
2531             }
2532             if (is->audio_stream >= 0 &&
2533                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2534                 av_init_packet(pkt);
2535                 pkt->data = NULL;
2536                 pkt->size = 0;
2537                 pkt->stream_index = is->audio_stream;
2538                 packet_queue_put(&is->audioq, pkt);
2539             }
2540             SDL_Delay(10);
2541             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2542                 if(loop!=1 && (!loop || --loop)){
2543                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2544                 }else if(autoexit){
2545                     ret=AVERROR_EOF;
2546                     goto fail;
2547                 }
2548             }
2549             continue;
2550         }
2551         ret = av_read_frame(ic, pkt);
2552         if (ret < 0) {
2553             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2554                 eof=1;
2555             if (ic->pb && ic->pb->error)
2556                 break;
2557             SDL_Delay(100); /* wait for user event */
2558             continue;
2559         }
2560         /* check if packet is in play range specified by user, then queue, otherwise discard */
2561         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2562                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2563                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2564                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2565                 <= ((double)duration/1000000);
2566         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2567             packet_queue_put(&is->audioq, pkt);
2568         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2569             packet_queue_put(&is->videoq, pkt);
2570         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2571             packet_queue_put(&is->subtitleq, pkt);
2572         } else {
2573             av_free_packet(pkt);
2574         }
2575     }
2576     /* wait until the end */
2577     while (!is->abort_request) {
2578         SDL_Delay(100);
2579     }
2580
2581     ret = 0;
2582  fail:
2583     /* disable interrupting */
2584     global_video_state = NULL;
2585
2586     /* close each stream */
2587     if (is->audio_stream >= 0)
2588         stream_component_close(is, is->audio_stream);
2589     if (is->video_stream >= 0)
2590         stream_component_close(is, is->video_stream);
2591     if (is->subtitle_stream >= 0)
2592         stream_component_close(is, is->subtitle_stream);
2593     if (is->ic) {
2594         av_close_input_file(is->ic);
2595         is->ic = NULL; /* safety */
2596     }
2597     avio_set_interrupt_cb(NULL);
2598
2599     if (ret != 0) {
2600         SDL_Event event;
2601
2602         event.type = FF_QUIT_EVENT;
2603         event.user.data1 = is;
2604         SDL_PushEvent(&event);
2605     }
2606     return 0;
2607 }
2608
2609 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2610 {
2611     VideoState *is;
2612
2613     is = av_mallocz(sizeof(VideoState));
2614     if (!is)
2615         return NULL;
2616     av_strlcpy(is->filename, filename, sizeof(is->filename));
2617     is->iformat = iformat;
2618     is->ytop = 0;
2619     is->xleft = 0;
2620
2621     /* start video display */
2622     is->pictq_mutex = SDL_CreateMutex();
2623     is->pictq_cond = SDL_CreateCond();
2624
2625     is->subpq_mutex = SDL_CreateMutex();
2626     is->subpq_cond = SDL_CreateCond();
2627
2628     is->av_sync_type = av_sync_type;
2629     is->parse_tid = SDL_CreateThread(decode_thread, is);
2630     if (!is->parse_tid) {
2631         av_free(is);
2632         return NULL;
2633     }
2634     return is;
2635 }
2636
2637 static void stream_cycle_channel(VideoState *is, int codec_type)
2638 {
2639     AVFormatContext *ic = is->ic;
2640     int start_index, stream_index;
2641     AVStream *st;
2642
2643     if (codec_type == AVMEDIA_TYPE_VIDEO)
2644         start_index = is->video_stream;
2645     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2646         start_index = is->audio_stream;
2647     else
2648         start_index = is->subtitle_stream;
2649     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2650         return;
2651     stream_index = start_index;
2652     for(;;) {
2653         if (++stream_index >= is->ic->nb_streams)
2654         {
2655             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2656             {
2657                 stream_index = -1;
2658                 goto the_end;
2659             } else
2660                 stream_index = 0;
2661         }
2662         if (stream_index == start_index)
2663             return;
2664         st = ic->streams[stream_index];
2665         if (st->codec->codec_type == codec_type) {
2666             /* check that parameters are OK */
2667             switch(codec_type) {
2668             case AVMEDIA_TYPE_AUDIO:
2669                 if (st->codec->sample_rate != 0 &&
2670                     st->codec->channels != 0)
2671                     goto the_end;
2672                 break;
2673             case AVMEDIA_TYPE_VIDEO:
2674             case AVMEDIA_TYPE_SUBTITLE:
2675                 goto the_end;
2676             default:
2677                 break;
2678             }
2679         }
2680     }
2681  the_end:
2682     stream_component_close(is, start_index);
2683     stream_component_open(is, stream_index);
2684 }
2685
2686
2687 static void toggle_full_screen(void)
2688 {
2689     is_full_screen = !is_full_screen;
2690 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2691     /* OSX needs to empty the picture_queue */
2692     for (int i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
2693         cur_stream->pictq[i].reallocate = 1;
2694     }
2695 #endif
2696     video_open(cur_stream);
2697 }
2698
2699 static void toggle_pause(void)
2700 {
2701     if (cur_stream)
2702         stream_pause(cur_stream);
2703     step = 0;
2704 }
2705
2706 static void step_to_next_frame(void)
2707 {
2708     if (cur_stream) {
2709         /* if the stream is paused, unpause it, then step */
2710         if (cur_stream->paused)
2711             stream_pause(cur_stream);
2712     }
2713     step = 1;
2714 }
2715
2716 static void toggle_audio_display(void)
2717 {
2718     if (cur_stream) {
2719         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2720         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2721         fill_rectangle(screen,
2722                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2723                     bgcolor);
2724         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2725     }
2726 }
2727
2728 /* handle an event sent by the GUI */
2729 static void event_loop(void)
2730 {
2731     SDL_Event event;
2732     double incr, pos, frac;
2733
2734     for(;;) {
2735         double x;
2736         SDL_WaitEvent(&event);
2737         switch(event.type) {
2738         case SDL_KEYDOWN:
2739             if (exit_on_keydown) {
2740                 do_exit();
2741                 break;
2742             }
2743             switch(event.key.keysym.sym) {
2744             case SDLK_ESCAPE:
2745             case SDLK_q:
2746                 do_exit();
2747                 break;
2748             case SDLK_f:
2749                 toggle_full_screen();
2750                 break;
2751             case SDLK_p:
2752             case SDLK_SPACE:
2753                 toggle_pause();
2754                 break;
2755             case SDLK_s: //S: Step to next frame
2756                 step_to_next_frame();
2757                 break;
2758             case SDLK_a:
2759                 if (cur_stream)
2760                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2761                 break;
2762             case SDLK_v:
2763                 if (cur_stream)
2764                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2765                 break;
2766             case SDLK_t:
2767                 if (cur_stream)
2768                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2769                 break;
2770             case SDLK_w:
2771                 toggle_audio_display();
2772                 break;
2773             case SDLK_LEFT:
2774                 incr = -10.0;
2775                 goto do_seek;
2776             case SDLK_RIGHT:
2777                 incr = 10.0;
2778                 goto do_seek;
2779             case SDLK_UP:
2780                 incr = 60.0;
2781                 goto do_seek;
2782             case SDLK_DOWN:
2783                 incr = -60.0;
2784             do_seek:
2785                 if (cur_stream) {
2786                     if (seek_by_bytes) {
2787                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2788                             pos= cur_stream->video_current_pos;
2789                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2790                             pos= cur_stream->audio_pkt.pos;
2791                         }else
2792                             pos = avio_tell(cur_stream->ic->pb);
2793                         if (cur_stream->ic->bit_rate)
2794                             incr *= cur_stream->ic->bit_rate / 8.0;
2795                         else
2796                             incr *= 180000.0;
2797                         pos += incr;
2798                         stream_seek(cur_stream, pos, incr, 1);
2799                     } else {
2800                         pos = get_master_clock(cur_stream);
2801                         pos += incr;
2802                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2803                     }
2804                 }
2805                 break;
2806             default:
2807                 break;
2808             }
2809             break;
2810         case SDL_MOUSEBUTTONDOWN:
2811             if (exit_on_mousedown) {
2812                 do_exit();
2813                 break;
2814             }
2815         case SDL_MOUSEMOTION:
2816             if(event.type ==SDL_MOUSEBUTTONDOWN){
2817                 x= event.button.x;
2818             }else{
2819                 if(event.motion.state != SDL_PRESSED)
2820                     break;
2821                 x= event.motion.x;
2822             }
2823             if (cur_stream) {
2824                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2825                     uint64_t size=  avio_size(cur_stream->ic->pb);
2826                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2827                 }else{
2828                     int64_t ts;
2829                     int ns, hh, mm, ss;
2830                     int tns, thh, tmm, tss;
2831                     tns = cur_stream->ic->duration/1000000LL;
2832                     thh = tns/3600;
2833                     tmm = (tns%3600)/60;
2834                     tss = (tns%60);
2835                     frac = x/cur_stream->width;
2836                     ns = frac*tns;
2837                     hh = ns/3600;
2838                     mm = (ns%3600)/60;
2839                     ss = (ns%60);
2840                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2841                             hh, mm, ss, thh, tmm, tss);
2842                     ts = frac*cur_stream->ic->duration;
2843                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2844                         ts += cur_stream->ic->start_time;
2845                     stream_seek(cur_stream, ts, 0, 0);
2846                 }
2847             }
2848             break;
2849         case SDL_VIDEORESIZE:
2850             if (cur_stream) {
2851                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2852                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2853                 screen_width = cur_stream->width = event.resize.w;
2854                 screen_height= cur_stream->height= event.resize.h;
2855             }
2856             break;
2857         case SDL_QUIT:
2858         case FF_QUIT_EVENT:
2859             do_exit();
2860             break;
2861         case FF_ALLOC_EVENT:
2862             video_open(event.user.data1);
2863             alloc_picture(event.user.data1);
2864             break;
2865         case FF_REFRESH_EVENT:
2866             video_refresh_timer(event.user.data1);
2867             cur_stream->refresh=0;
2868             break;
2869         default:
2870             break;
2871         }
2872     }
2873 }
2874
2875 static int opt_frame_size(const char *opt, const char *arg)
2876 {
2877     av_log(NULL, AV_LOG_ERROR,
2878            "Option '%s' has been removed, use private format options instead\n", opt);
2879     return AVERROR(EINVAL);
2880 }
2881
2882 static int opt_width(const char *opt, const char *arg)
2883 {
2884     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2885     return 0;
2886 }
2887
2888 static int opt_height(const char *opt, const char *arg)
2889 {
2890     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2891     return 0;
2892 }
2893
2894 static int opt_format(const char *opt, const char *arg)
2895 {
2896     file_iformat = av_find_input_format(arg);
2897     if (!file_iformat) {
2898         fprintf(stderr, "Unknown input format: %s\n", arg);
2899         return AVERROR(EINVAL);
2900     }
2901     return 0;
2902 }
2903
2904 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2905 {
2906     av_log(NULL, AV_LOG_ERROR,
2907            "Option '%s' has been removed, use private format options instead\n", opt);
2908     return AVERROR(EINVAL);
2909 }
2910
2911 static int opt_sync(const char *opt, const char *arg)
2912 {
2913     if (!strcmp(arg, "audio"))
2914         av_sync_type = AV_SYNC_AUDIO_MASTER;
2915     else if (!strcmp(arg, "video"))
2916         av_sync_type = AV_SYNC_VIDEO_MASTER;
2917     else if (!strcmp(arg, "ext"))
2918         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2919     else {
2920         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2921         exit(1);
2922     }
2923     return 0;
2924 }
2925
2926 static int opt_seek(const char *opt, const char *arg)
2927 {
2928     start_time = parse_time_or_die(opt, arg, 1);
2929     return 0;
2930 }
2931
2932 static int opt_duration(const char *opt, const char *arg)
2933 {
2934     duration = parse_time_or_die(opt, arg, 1);
2935     return 0;
2936 }
2937
2938 static int opt_debug(const char *opt, const char *arg)
2939 {
2940     av_log_set_level(99);
2941     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2942     return 0;
2943 }
2944
2945 static int opt_vismv(const char *opt, const char *arg)
2946 {
2947     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2948     return 0;
2949 }
2950
2951 static int opt_thread_count(const char *opt, const char *arg)
2952 {
2953     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2954 #if !HAVE_THREADS
2955     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2956 #endif
2957     return 0;
2958 }
2959
2960 static const OptionDef options[] = {
2961 #include "cmdutils_common_opts.h"
2962     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2963     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2964     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2965     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2966     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2967     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2968     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2969     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2970     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2971     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2972     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2973     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2974     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2975     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2976     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2977     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2978     { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2979     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2980     { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2981     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2982     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2983     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2984     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2985     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2986     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2987     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2988     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2989     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2990     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2991     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2992     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2993     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2994     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2995     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2996     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2997     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2998     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2999 #if CONFIG_AVFILTER
3000     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3001 #endif
3002     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3003     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3004     { "i", 0, {NULL}, "avconv compatibility dummy option", ""},
3005     { NULL, },
3006 };
3007
3008 static void show_usage(void)
3009 {
3010     printf("Simple media player\n");
3011     printf("usage: %s [options] input_file\n", program_name);
3012     printf("\n");
3013 }
3014
3015 static void show_help(void)
3016 {
3017     av_log_set_callback(log_callback_help);
3018     show_usage();
3019     show_help_options(options, "Main options:\n",
3020                       OPT_EXPERT, 0);
3021     show_help_options(options, "\nAdvanced options:\n",
3022                       OPT_EXPERT, OPT_EXPERT);
3023     printf("\n");
3024     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3025     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3026 #if !CONFIG_AVFILTER
3027     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3028 #endif
3029     printf("\nWhile playing:\n"
3030            "q, ESC              quit\n"
3031            "f                   toggle full screen\n"
3032            "p, SPC              pause\n"
3033            "a                   cycle audio channel\n"
3034            "v                   cycle video channel\n"
3035            "t                   cycle subtitle channel\n"
3036            "w                   show audio waves\n"
3037            "s                   activate frame-step mode\n"
3038            "left/right          seek backward/forward 10 seconds\n"
3039            "down/up             seek backward/forward 1 minute\n"
3040            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3041            );
3042 }
3043
3044 static void opt_input_file(void *optctx, const char *filename)
3045 {
3046     if (input_filename) {
3047         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3048                 filename, input_filename);
3049         exit(1);
3050     }
3051     if (!strcmp(filename, "-"))
3052         filename = "pipe:";
3053     input_filename = filename;
3054 }
3055
3056 /* program entry point */
3057 int main(int argc, char **argv)
3058 {
3059     int flags;
3060
3061     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3062     parse_loglevel(argc, argv, options);
3063
3064     /* register all codecs, demuxers and protocols */
3065     avcodec_register_all();
3066 #if CONFIG_AVDEVICE
3067     avdevice_register_all();
3068 #endif
3069 #if CONFIG_AVFILTER
3070     avfilter_register_all();
3071 #endif
3072     av_register_all();
3073     avformat_network_init();
3074
3075     init_opts();
3076
3077     show_banner();
3078
3079     parse_options(NULL, argc, argv, options, opt_input_file);
3080
3081     if (!input_filename) {
3082         show_usage();
3083         fprintf(stderr, "An input file must be specified\n");
3084         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3085         exit(1);
3086     }
3087
3088     if (display_disable) {
3089         video_disable = 1;
3090     }
3091     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3092 #if !defined(__MINGW32__) && !defined(__APPLE__)
3093     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3094 #endif
3095     if (SDL_Init (flags)) {
3096         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3097         exit(1);
3098     }
3099
3100     if (!display_disable) {
3101 #if HAVE_SDL_VIDEO_SIZE
3102         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3103         fs_screen_width = vi->current_w;
3104         fs_screen_height = vi->current_h;
3105 #endif
3106     }
3107
3108     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3109     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3110     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3111
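         /* flush_pkt is a marker packet: the queues compare a packet's data
          * pointer against it to detect a flush after seeking */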
3112     av_init_packet(&flush_pkt);
3113     flush_pkt.data= "FLUSH";
3114
3115     cur_stream = stream_open(input_filename, file_iformat);
3116
3117     event_loop();
3118
3119     /* never returns */
3120
3121     return 0;
3122 }