De-doxygenize some top-level files
[platform/upstream/libav.git] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/avfiltergraph.h"
45 # include "libavfilter/buffersink.h"
46 # include "libavfilter/buffersrc.h"
47 #endif
48
49 #include "cmdutils.h"
50
51 #include <SDL.h>
52 #include <SDL_thread.h>
53
54 #ifdef __MINGW32__
55 #undef main /* We don't want SDL to override our main() */
56 #endif
57
58 #include <assert.h>
59
60 const char program_name[] = "avplay";
61 const int program_birth_year = 2003;
62
63 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65 #define MIN_FRAMES 5
66
67 /* SDL audio buffer size, in samples. Should be small to have precise
68    A/V sync as SDL does not have hardware buffer fullness info. */
69 #define SDL_AUDIO_BUFFER_SIZE 1024
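/* For reference: at a 44100 Hz output rate (an illustrative figure; the real
 * rate comes from the decoded stream), 1024 samples correspond to roughly
 * 23 ms of audio per SDL callback, which is the granularity at which the
 * audio clock below gets updated. */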
70
71 /* no AV sync correction is done if below the AV sync threshold */
72 #define AV_SYNC_THRESHOLD 0.01
73 /* no AV correction is done if the error is too large */
74 #define AV_NOSYNC_THRESHOLD 10.0
75
76 #define FRAME_SKIP_FACTOR 0.05
77
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80
81 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
82 #define AUDIO_DIFF_AVG_NB   20
83
84 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85 #define SAMPLE_ARRAY_SIZE (2 * 65536)
86
87 static int sws_flags = SWS_BICUBIC;
88
89 typedef struct PacketQueue {
90     AVPacketList *first_pkt, *last_pkt;
91     int nb_packets;
92     int size;
93     int abort_request;
94     SDL_mutex *mutex;
95     SDL_cond *cond;
96 } PacketQueue;
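/* The packet queue is a simple thread-safe producer/consumer FIFO:
 * packet_queue_put() appends a packet and signals the condition variable,
 * packet_queue_get() blocks on it (when called with block=1) until a packet
 * arrives or abort_request is set, and packet_queue_abort() wakes any waiter
 * so the owning thread can shut down cleanly. */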
97
98 #define VIDEO_PICTURE_QUEUE_SIZE 2
99 #define SUBPICTURE_QUEUE_SIZE 4
100
101 typedef struct VideoPicture {
102     double pts;             // presentation timestamp for this picture
103     double target_clock;    // av_gettime() time at which this should be displayed ideally
104     int64_t pos;            // byte position in file
105     SDL_Overlay *bmp;
106     int width, height; /* source height & width */
107     int allocated;
108     int reallocate;
109     enum AVPixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
158     uint8_t *audio_buf;
159     uint8_t *audio_buf1;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat sdl_sample_fmt;
165     uint64_t sdl_channel_layout;
166     int sdl_channels;
167     enum AVSampleFormat resample_sample_fmt;
168     uint64_t resample_channel_layout;
169     AVAudioResampleContext *avr;
170     AVFrame *frame;
171
172     int show_audio; /* if true, display audio samples */
173     int16_t sample_array[SAMPLE_ARRAY_SIZE];
174     int sample_array_index;
175     int last_i_start;
176     RDFTContext *rdft;
177     int rdft_bits;
178     FFTSample *rdft_data;
179     int xpos;
180
181     SDL_Thread *subtitle_tid;
182     int subtitle_stream;
183     int subtitle_stream_changed;
184     AVStream *subtitle_st;
185     PacketQueue subtitleq;
186     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
187     int subpq_size, subpq_rindex, subpq_windex;
188     SDL_mutex *subpq_mutex;
189     SDL_cond *subpq_cond;
190
191     double frame_timer;
192     double frame_last_pts;
193     double frame_last_delay;
194     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
195     int video_stream;
196     AVStream *video_st;
197     PacketQueue videoq;
198     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
199     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
200     int64_t video_current_pos;      // current displayed file pos
201     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
202     int pictq_size, pictq_rindex, pictq_windex;
203     SDL_mutex *pictq_mutex;
204     SDL_cond *pictq_cond;
205 #if !CONFIG_AVFILTER
206     struct SwsContext *img_convert_ctx;
207 #endif
208
209     //    QETimer *video_timer;
210     char filename[1024];
211     int width, height, xleft, ytop;
212
213     PtsCorrectionContext pts_ctx;
214
215 #if CONFIG_AVFILTER
216     AVFilterContext *in_video_filter;   // the first filter in the video chain
217     AVFilterContext *out_video_filter;  // the last filter in the video chain
218     int use_dr1;
219     FrameBuffer *buffer_pool;
220 #endif
221
222     float skip_frames;
223     float skip_frames_index;
224     int refresh;
225 } VideoState;
226
227 /* options specified by the user */
228 static AVInputFormat *file_iformat;
229 static const char *input_filename;
230 static const char *window_title;
231 static int fs_screen_width;
232 static int fs_screen_height;
233 static int screen_width  = 0;
234 static int screen_height = 0;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[AVMEDIA_TYPE_NB] = {
238     [AVMEDIA_TYPE_AUDIO]    = -1,
239     [AVMEDIA_TYPE_VIDEO]    = -1,
240     [AVMEDIA_TYPE_SUBTITLE] = -1,
241 };
242 static int seek_by_bytes = -1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int64_t duration = AV_NOPTS_VALUE;
248 static int debug = 0;
249 static int debug_mv = 0;
250 static int step = 0;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
258 static int error_concealment = 3;
259 static int decoder_reorder_pts = -1;
260 static int autoexit;
261 static int exit_on_keydown;
262 static int exit_on_mousedown;
263 static int loop = 1;
264 static int framedrop = 1;
265 static int infinite_buffer = 0;
266
267 static int rdftspeed = 20;
268 #if CONFIG_AVFILTER
269 static char *vfilters = NULL;
270 #endif
271
272 /* current context */
273 static int is_full_screen;
274 static VideoState *cur_stream;
275 static int64_t audio_callback_time;
276
277 static AVPacket flush_pkt;
278
279 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
280 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
281 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
282
283 static SDL_Surface *screen;
284
285 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
286
287 /* packet queue handling */
288 static void packet_queue_init(PacketQueue *q)
289 {
290     memset(q, 0, sizeof(PacketQueue));
291     q->mutex = SDL_CreateMutex();
292     q->cond = SDL_CreateCond();
293     packet_queue_put(q, &flush_pkt);
294 }
295
296 static void packet_queue_flush(PacketQueue *q)
297 {
298     AVPacketList *pkt, *pkt1;
299
300     SDL_LockMutex(q->mutex);
301     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
302         pkt1 = pkt->next;
303         av_free_packet(&pkt->pkt);
304         av_freep(&pkt);
305     }
306     q->last_pkt = NULL;
307     q->first_pkt = NULL;
308     q->nb_packets = 0;
309     q->size = 0;
310     SDL_UnlockMutex(q->mutex);
311 }
312
313 static void packet_queue_end(PacketQueue *q)
314 {
315     packet_queue_flush(q);
316     SDL_DestroyMutex(q->mutex);
317     SDL_DestroyCond(q->cond);
318 }
319
320 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
321 {
322     AVPacketList *pkt1;
323
324     /* duplicate the packet */
325     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
326         return -1;
327
328     pkt1 = av_malloc(sizeof(AVPacketList));
329     if (!pkt1)
330         return -1;
331     pkt1->pkt = *pkt;
332     pkt1->next = NULL;
333
334
335     SDL_LockMutex(q->mutex);
336
337     if (!q->last_pkt)
338
339         q->first_pkt = pkt1;
340     else
341         q->last_pkt->next = pkt1;
342     q->last_pkt = pkt1;
343     q->nb_packets++;
344     q->size += pkt1->pkt.size + sizeof(*pkt1);
345     /* XXX: should duplicate packet data in DV case */
346     SDL_CondSignal(q->cond);
347
348     SDL_UnlockMutex(q->mutex);
349     return 0;
350 }
351
352 static void packet_queue_abort(PacketQueue *q)
353 {
354     SDL_LockMutex(q->mutex);
355
356     q->abort_request = 1;
357
358     SDL_CondSignal(q->cond);
359
360     SDL_UnlockMutex(q->mutex);
361 }
362
363 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
364 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
365 {
366     AVPacketList *pkt1;
367     int ret;
368
369     SDL_LockMutex(q->mutex);
370
371     for (;;) {
372         if (q->abort_request) {
373             ret = -1;
374             break;
375         }
376
377         pkt1 = q->first_pkt;
378         if (pkt1) {
379             q->first_pkt = pkt1->next;
380             if (!q->first_pkt)
381                 q->last_pkt = NULL;
382             q->nb_packets--;
383             q->size -= pkt1->pkt.size + sizeof(*pkt1);
384             *pkt = pkt1->pkt;
385             av_free(pkt1);
386             ret = 1;
387             break;
388         } else if (!block) {
389             ret = 0;
390             break;
391         } else {
392             SDL_CondWait(q->cond, q->mutex);
393         }
394     }
395     SDL_UnlockMutex(q->mutex);
396     return ret;
397 }
398
399 static inline void fill_rectangle(SDL_Surface *screen,
400                                   int x, int y, int w, int h, int color)
401 {
402     SDL_Rect rect;
403     rect.x = x;
404     rect.y = y;
405     rect.w = w;
406     rect.h = h;
407     SDL_FillRect(screen, &rect, color);
408 }
409
410 #define ALPHA_BLEND(a, oldp, newp, s)\
411 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
412
413 #define RGBA_IN(r, g, b, a, s)\
414 {\
415     unsigned int v = ((const uint32_t *)(s))[0];\
416     a = (v >> 24) & 0xff;\
417     r = (v >> 16) & 0xff;\
418     g = (v >> 8) & 0xff;\
419     b = v & 0xff;\
420 }
421
422 #define YUVA_IN(y, u, v, a, s, pal)\
423 {\
424     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
425     a = (val >> 24) & 0xff;\
426     y = (val >> 16) & 0xff;\
427     u = (val >> 8) & 0xff;\
428     v = val & 0xff;\
429 }
430
431 #define YUVA_OUT(d, y, u, v, a)\
432 {\
433     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434 }
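/* A quick sketch of what these macros do: ALPHA_BLEND() is the usual "over"
 * mix, dst = (old * (255 - a) + new * a) / 255; the shift s allows 'new' to
 * be a sum of 2^s chroma samples (s is 1 or 2 in blend_subrect() below)
 * without dividing it first. YUVA_IN() unpacks one 32-bit AYUV palette entry
 * selected by an 8-bit index: e.g. (illustrative value only) 0x80FF8080
 * gives a = 0x80, y = 0xFF, u = v = 0x80, a half-transparent white pixel. */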
435
436
437 #define BPP 1
438
439 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
440 {
441     int wrap, wrap3, width2, skip2;
442     int y, u, v, a, u1, v1, a1, w, h;
443     uint8_t *lum, *cb, *cr;
444     const uint8_t *p;
445     const uint32_t *pal;
446     int dstx, dsty, dstw, dsth;
447
448     dstw = av_clip(rect->w, 0, imgw);
449     dsth = av_clip(rect->h, 0, imgh);
450     dstx = av_clip(rect->x, 0, imgw - dstw);
451     dsty = av_clip(rect->y, 0, imgh - dsth);
452     lum = dst->data[0] + dsty * dst->linesize[0];
453     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
454     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
455
456     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
457     skip2 = dstx >> 1;
458     wrap = dst->linesize[0];
459     wrap3 = rect->pict.linesize[0];
460     p = rect->pict.data[0];
461     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
462
463     if (dsty & 1) {
464         lum += dstx;
465         cb += skip2;
466         cr += skip2;
467
468         if (dstx & 1) {
469             YUVA_IN(y, u, v, a, p, pal);
470             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
471             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
472             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
473             cb++;
474             cr++;
475             lum++;
476             p += BPP;
477         }
478         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
479             YUVA_IN(y, u, v, a, p, pal);
480             u1 = u;
481             v1 = v;
482             a1 = a;
483             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484
485             YUVA_IN(y, u, v, a, p + BPP, pal);
486             u1 += u;
487             v1 += v;
488             a1 += a;
489             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
490             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
491             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
492             cb++;
493             cr++;
494             p += 2 * BPP;
495             lum += 2;
496         }
497         if (w) {
498             YUVA_IN(y, u, v, a, p, pal);
499             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
500             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
501             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
502             p++;
503             lum++;
504         }
505         p += wrap3 - dstw * BPP;
506         lum += wrap - dstw - dstx;
507         cb += dst->linesize[1] - width2 - skip2;
508         cr += dst->linesize[2] - width2 - skip2;
509     }
510     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
511         lum += dstx;
512         cb += skip2;
513         cr += skip2;
514
515         if (dstx & 1) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             p += wrap3;
522             lum += wrap;
523             YUVA_IN(y, u, v, a, p, pal);
524             u1 += u;
525             v1 += v;
526             a1 += a;
527             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
528             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
529             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
530             cb++;
531             cr++;
532             p += -wrap3 + BPP;
533             lum += -wrap + 1;
534         }
535         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
536             YUVA_IN(y, u, v, a, p, pal);
537             u1 = u;
538             v1 = v;
539             a1 = a;
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
542             YUVA_IN(y, u, v, a, p + BPP, pal);
543             u1 += u;
544             v1 += v;
545             a1 += a;
546             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
547             p += wrap3;
548             lum += wrap;
549
550             YUVA_IN(y, u, v, a, p, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
555
556             YUVA_IN(y, u, v, a, p + BPP, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
561
562             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
563             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
564
565             cb++;
566             cr++;
567             p += -wrap3 + 2 * BPP;
568             lum += -wrap + 2;
569         }
570         if (w) {
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 = u;
573             v1 = v;
574             a1 = a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             p += wrap3;
577             lum += wrap;
578             YUVA_IN(y, u, v, a, p, pal);
579             u1 += u;
580             v1 += v;
581             a1 += a;
582             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
583             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
584             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
585             cb++;
586             cr++;
587             p += -wrap3 + BPP;
588             lum += -wrap + 1;
589         }
590         p += wrap3 + (wrap3 - dstw * BPP);
591         lum += wrap + (wrap - dstw - dstx);
592         cb += dst->linesize[1] - width2 - skip2;
593         cr += dst->linesize[2] - width2 - skip2;
594     }
595     /* handle odd height */
596     if (h) {
597         lum += dstx;
598         cb += skip2;
599         cr += skip2;
600
601         if (dstx & 1) {
602             YUVA_IN(y, u, v, a, p, pal);
603             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
604             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
605             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
606             cb++;
607             cr++;
608             lum++;
609             p += BPP;
610         }
611         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
618             YUVA_IN(y, u, v, a, p + BPP, pal);
619             u1 += u;
620             v1 += v;
621             a1 += a;
622             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
623             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
624             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
625             cb++;
626             cr++;
627             p += 2 * BPP;
628             lum += 2;
629         }
630         if (w) {
631             YUVA_IN(y, u, v, a, p, pal);
632             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
633             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
634             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
635         }
636     }
637 }
638
639 static void free_subpicture(SubPicture *sp)
640 {
641     avsubtitle_free(&sp->sub);
642 }
643
644 static void video_image_display(VideoState *is)
645 {
646     VideoPicture *vp;
647     SubPicture *sp;
648     AVPicture pict;
649     float aspect_ratio;
650     int width, height, x, y;
651     SDL_Rect rect;
652     int i;
653
654     vp = &is->pictq[is->pictq_rindex];
655     if (vp->bmp) {
656 #if CONFIG_AVFILTER
657          if (vp->picref->video->pixel_aspect.num == 0)
658              aspect_ratio = 0;
659          else
660              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
661 #else
662
663         /* XXX: use variable in the frame */
664         if (is->video_st->sample_aspect_ratio.num)
665             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
666         else if (is->video_st->codec->sample_aspect_ratio.num)
667             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
668         else
669             aspect_ratio = 0;
670 #endif
671         if (aspect_ratio <= 0.0)
672             aspect_ratio = 1.0;
673         aspect_ratio *= (float)vp->width / (float)vp->height;
674
675         if (is->subtitle_st)
676         {
677             if (is->subpq_size > 0)
678             {
679                 sp = &is->subpq[is->subpq_rindex];
680
681                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
682                 {
683                     SDL_LockYUVOverlay (vp->bmp);
684
685                     pict.data[0] = vp->bmp->pixels[0];
686                     pict.data[1] = vp->bmp->pixels[2];
687                     pict.data[2] = vp->bmp->pixels[1];
688
689                     pict.linesize[0] = vp->bmp->pitches[0];
690                     pict.linesize[1] = vp->bmp->pitches[2];
691                     pict.linesize[2] = vp->bmp->pitches[1];
692
693                     for (i = 0; i < sp->sub.num_rects; i++)
694                         blend_subrect(&pict, sp->sub.rects[i],
695                                       vp->bmp->w, vp->bmp->h);
696
697                     SDL_UnlockYUVOverlay (vp->bmp);
698                 }
699             }
700         }
701
702
703         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
704         height = is->height;
705         width = ((int)rint(height * aspect_ratio)) & ~1;
706         if (width > is->width) {
707             width = is->width;
708             height = ((int)rint(width / aspect_ratio)) & ~1;
709         }
710         x = (is->width - width) / 2;
711         y = (is->height - height) / 2;
712         is->no_background = 0;
713         rect.x = is->xleft + x;
714         rect.y = is->ytop  + y;
715         rect.w = width;
716         rect.h = height;
717         SDL_DisplayYUVOverlay(vp->bmp, &rect);
718     }
719 }
720
721 /* get the current audio output buffer size, in bytes. With SDL, we
722    cannot get precise buffer fullness information */
723 static int audio_write_get_buf_size(VideoState *is)
724 {
725     return is->audio_buf_size - is->audio_buf_index;
726 }
727
728 static inline int compute_mod(int a, int b)
729 {
730     a = a % b;
731     if (a >= 0)
732         return a;
733     else
734         return a + b;
735 }
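/* Unlike the C '%' operator, compute_mod() always returns a value in [0, b):
 * e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3,
 * which is what the circular sample-array indexing below relies on. */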
736
737 static void video_audio_display(VideoState *s)
738 {
739     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
740     int ch, channels, h, h2, bgcolor, fgcolor;
741     int16_t time_diff;
742     int rdft_bits, nb_freq;
743
744     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
745         ;
746     nb_freq = 1 << (rdft_bits - 1);
747
748     /* compute the display index: center it on the currently output samples */
749     channels = s->sdl_channels;
750     nb_display_channels = channels;
751     if (!s->paused) {
752         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
753         n = 2 * channels;
754         delay = audio_write_get_buf_size(s);
755         delay /= n;
756
757         /* to be more precise, we take into account the time spent since
758            the last buffer computation */
759         if (audio_callback_time) {
760             time_diff = av_gettime() - audio_callback_time;
761             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
762         }
763
764         delay += 2 * data_used;
765         if (delay < data_used)
766             delay = data_used;
767
768         i_start = x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
769         if (s->show_audio == 1) {
770             h = INT_MIN;
771             for (i = 0; i < 1000; i += channels) {
772                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
773                 int a = s->sample_array[idx];
774                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
775                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
776                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
777                 int score = a - d;
778                 if (h < score && (b ^ c) < 0) {
779                     h = score;
780                     i_start = idx;
781                 }
782             }
783         }
784
785         s->last_i_start = i_start;
786     } else {
787         i_start = s->last_i_start;
788     }
789
790     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
791     if (s->show_audio == 1) {
792         fill_rectangle(screen,
793                        s->xleft, s->ytop, s->width, s->height,
794                        bgcolor);
795
796         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
797
798         /* total height for one channel */
799         h = s->height / nb_display_channels;
800         /* graph height / 2 */
801         h2 = (h * 9) / 20;
802         for (ch = 0; ch < nb_display_channels; ch++) {
803             i = i_start + ch;
804             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
805             for (x = 0; x < s->width; x++) {
806                 y = (s->sample_array[i] * h2) >> 15;
807                 if (y < 0) {
808                     y = -y;
809                     ys = y1 - y;
810                 } else {
811                     ys = y1;
812                 }
813                 fill_rectangle(screen,
814                                s->xleft + x, ys, 1, y,
815                                fgcolor);
816                 i += channels;
817                 if (i >= SAMPLE_ARRAY_SIZE)
818                     i -= SAMPLE_ARRAY_SIZE;
819             }
820         }
821
822         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
823
824         for (ch = 1; ch < nb_display_channels; ch++) {
825             y = s->ytop + ch * h;
826             fill_rectangle(screen,
827                            s->xleft, y, s->width, 1,
828                            fgcolor);
829         }
830         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
831     } else {
832         nb_display_channels = FFMIN(nb_display_channels, 2);
833         if (rdft_bits != s->rdft_bits) {
834             av_rdft_end(s->rdft);
835             av_free(s->rdft_data);
836             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
837             s->rdft_bits = rdft_bits;
838             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
839         }
840         {
841             FFTSample *data[2];
842             for (ch = 0; ch < nb_display_channels; ch++) {
843                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
844                 i = i_start + ch;
845                 for (x = 0; x < 2 * nb_freq; x++) {
846                     double w = (x-nb_freq) * (1.0 / nb_freq);
847                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
848                     i += channels;
849                     if (i >= SAMPLE_ARRAY_SIZE)
850                         i -= SAMPLE_ARRAY_SIZE;
851                 }
852                 av_rdft_calc(s->rdft, data[ch]);
853             }
854             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
855             for (y = 0; y < s->height; y++) {
856                 double w = 1 / sqrt(nb_freq);
857                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
858                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
859                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
860                 a = FFMIN(a, 255);
861                 b = FFMIN(b, 255);
862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
863
864                 fill_rectangle(screen,
865                             s->xpos, s->height-y, 1, 1,
866                             fgcolor);
867             }
868         }
869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
870         s->xpos++;
871         if (s->xpos >= s->width)
872             s->xpos = s->xleft;
873     }
874 }
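/* Two display modes are implemented above: with show_audio == 1 the samples
 * are drawn directly as a waveform, one vertical bar per screen column and
 * one horizontal band per channel; otherwise the most recent samples are
 * multiplied by a parabolic (Welch) window, run through av_rdft_calc(), and
 * the magnitudes are plotted as a single column of colored pixels at xpos,
 * which advances on every refresh so the spectrum scrolls across the window. */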
875
876 static int video_open(VideoState *is)
877 {
878     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
879     int w,h;
880
881     if (is_full_screen) flags |= SDL_FULLSCREEN;
882     else                flags |= SDL_RESIZABLE;
883
884     if (is_full_screen && fs_screen_width) {
885         w = fs_screen_width;
886         h = fs_screen_height;
887     } else if (!is_full_screen && screen_width) {
888         w = screen_width;
889         h = screen_height;
890 #if CONFIG_AVFILTER
891     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
892         w = is->out_video_filter->inputs[0]->w;
893         h = is->out_video_filter->inputs[0]->h;
894 #else
895     } else if (is->video_st && is->video_st->codec->width) {
896         w = is->video_st->codec->width;
897         h = is->video_st->codec->height;
898 #endif
899     } else {
900         w = 640;
901         h = 480;
902     }
903     if (screen && is->width == screen->w && screen->w == w
904        && is->height== screen->h && screen->h == h)
905         return 0;
906
907 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
908     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
909     screen = SDL_SetVideoMode(w, h, 24, flags);
910 #else
911     screen = SDL_SetVideoMode(w, h, 0, flags);
912 #endif
913     if (!screen) {
914         fprintf(stderr, "SDL: could not set video mode - exiting\n");
915         return -1;
916     }
917     if (!window_title)
918         window_title = input_filename;
919     SDL_WM_SetCaption(window_title, window_title);
920
921     is->width  = screen->w;
922     is->height = screen->h;
923
924     return 0;
925 }
926
927 /* display the current picture, if any */
928 static void video_display(VideoState *is)
929 {
930     if (!screen)
931         video_open(cur_stream);
932     if (is->audio_st && is->show_audio)
933         video_audio_display(is);
934     else if (is->video_st)
935         video_image_display(is);
936 }
937
938 static int refresh_thread(void *opaque)
939 {
940     VideoState *is = opaque;
941     while (!is->abort_request) {
942         SDL_Event event;
943         event.type = FF_REFRESH_EVENT;
944         event.user.data1 = opaque;
945         if (!is->refresh) {
946             is->refresh = 1;
947             SDL_PushEvent(&event);
948         }
949         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
950     }
951     return 0;
952 }
953
954 /* get the current audio clock value */
955 static double get_audio_clock(VideoState *is)
956 {
957     double pts;
958     int hw_buf_size, bytes_per_sec;
959     pts = is->audio_clock;
960     hw_buf_size = audio_write_get_buf_size(is);
961     bytes_per_sec = 0;
962     if (is->audio_st) {
963         bytes_per_sec = is->audio_st->codec->sample_rate * is->sdl_channels *
964                         av_get_bytes_per_sample(is->sdl_sample_fmt);
965     }
966     if (bytes_per_sec)
967         pts -= (double)hw_buf_size / bytes_per_sec;
968     return pts;
969 }
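/* Example (illustrative numbers): with 44100 Hz stereo S16 output,
 * bytes_per_sec is 176400; if 8820 bytes are still sitting in our output
 * buffer, the value returned above is audio_clock - 0.05 s, i.e. the clock
 * of the sample currently being played rather than of the last decoded one. */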
970
971 /* get the current video clock value */
972 static double get_video_clock(VideoState *is)
973 {
974     if (is->paused) {
975         return is->video_current_pts;
976     } else {
977         return is->video_current_pts_drift + av_gettime() / 1000000.0;
978     }
979 }
980
981 /* get the current external clock value */
982 static double get_external_clock(VideoState *is)
983 {
984     int64_t ti;
985     ti = av_gettime();
986     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
987 }
988
989 /* get the current master clock value */
990 static double get_master_clock(VideoState *is)
991 {
992     double val;
993
994     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
995         if (is->video_st)
996             val = get_video_clock(is);
997         else
998             val = get_audio_clock(is);
999     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1000         if (is->audio_st)
1001             val = get_audio_clock(is);
1002         else
1003             val = get_video_clock(is);
1004     } else {
1005         val = get_external_clock(is);
1006     }
1007     return val;
1008 }
1009
1010 /* seek in the stream */
1011 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1012 {
1013     if (!is->seek_req) {
1014         is->seek_pos = pos;
1015         is->seek_rel = rel;
1016         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1017         if (seek_by_bytes)
1018             is->seek_flags |= AVSEEK_FLAG_BYTE;
1019         is->seek_req = 1;
1020     }
1021 }
1022
1023 /* pause or resume the video */
1024 static void stream_pause(VideoState *is)
1025 {
1026     if (is->paused) {
1027         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1028         if (is->read_pause_return != AVERROR(ENOSYS)) {
1029             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1030         }
1031         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1032     }
1033     is->paused = !is->paused;
1034 }
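/* On resume, the code above advances frame_timer by (roughly) the wall-clock
 * time spent paused, so the scheduler does not try to "catch up" on frames
 * that were never meant to be shown, and re-anchors video_current_pts_drift
 * to the current time. */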
1035
1036 static double compute_target_time(double frame_current_pts, VideoState *is)
1037 {
1038     double delay, sync_threshold, diff;
1039
1040     /* compute nominal delay */
1041     delay = frame_current_pts - is->frame_last_pts;
1042     if (delay <= 0 || delay >= 10.0) {
1043         /* if incorrect delay, use previous one */
1044         delay = is->frame_last_delay;
1045     } else {
1046         is->frame_last_delay = delay;
1047     }
1048     is->frame_last_pts = frame_current_pts;
1049
1050     /* update delay to follow master synchronisation source */
1051     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1052          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1053         /* if video is slave, we try to correct big delays by
1054            duplicating or deleting a frame */
1055         diff = get_video_clock(is) - get_master_clock(is);
1056
1057         /* skip or repeat frame. We take into account the
1058            delay to compute the threshold. I still don't know
1059            if it is the best guess */
1060         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1061         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1062             if (diff <= -sync_threshold)
1063                 delay = 0;
1064             else if (diff >= sync_threshold)
1065                 delay = 2 * delay;
1066         }
1067     }
1068     is->frame_timer += delay;
1069
1070     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1071             delay, frame_current_pts, -diff);
1072
1073     return is->frame_timer;
1074 }
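/* Worked example (made-up numbers): with a 25 fps stream the nominal delay
 * is 0.04 s, so sync_threshold is 0.04 s. If the video clock is 0.1 s behind
 * the master clock, diff <= -sync_threshold and the delay drops to 0, i.e.
 * the frame is scheduled immediately; if it is 0.1 s ahead, the delay doubles
 * to 0.08 s. Differences beyond AV_NOSYNC_THRESHOLD are left uncorrected. */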
1075
1076 /* called to display each frame */
1077 static void video_refresh_timer(void *opaque)
1078 {
1079     VideoState *is = opaque;
1080     VideoPicture *vp;
1081
1082     SubPicture *sp, *sp2;
1083
1084     if (is->video_st) {
1085 retry:
1086         if (is->pictq_size == 0) {
1087             // nothing to do, no picture to display in the queue
1088         } else {
1089             double time = av_gettime() / 1000000.0;
1090             double next_target;
1091             /* dequeue the picture */
1092             vp = &is->pictq[is->pictq_rindex];
1093
1094             if (time < vp->target_clock)
1095                 return;
1096             /* update current video pts */
1097             is->video_current_pts = vp->pts;
1098             is->video_current_pts_drift = is->video_current_pts - time;
1099             is->video_current_pos = vp->pos;
1100             if (is->pictq_size > 1) {
1101                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1102                 assert(nextvp->target_clock >= vp->target_clock);
1103                 next_target = nextvp->target_clock;
1104             } else {
1105                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1106             }
1107             if (framedrop && time > next_target) {
1108                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1109                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1110                     /* update queue size and signal for next picture */
1111                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1112                         is->pictq_rindex = 0;
1113
1114                     SDL_LockMutex(is->pictq_mutex);
1115                     is->pictq_size--;
1116                     SDL_CondSignal(is->pictq_cond);
1117                     SDL_UnlockMutex(is->pictq_mutex);
1118                     goto retry;
1119                 }
1120             }
1121
1122             if (is->subtitle_st) {
1123                 if (is->subtitle_stream_changed) {
1124                     SDL_LockMutex(is->subpq_mutex);
1125
1126                     while (is->subpq_size) {
1127                         free_subpicture(&is->subpq[is->subpq_rindex]);
1128
1129                         /* update queue size and signal for next picture */
1130                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1131                             is->subpq_rindex = 0;
1132
1133                         is->subpq_size--;
1134                     }
1135                     is->subtitle_stream_changed = 0;
1136
1137                     SDL_CondSignal(is->subpq_cond);
1138                     SDL_UnlockMutex(is->subpq_mutex);
1139                 } else {
1140                     if (is->subpq_size > 0) {
1141                         sp = &is->subpq[is->subpq_rindex];
1142
1143                         if (is->subpq_size > 1)
1144                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1145                         else
1146                             sp2 = NULL;
1147
1148                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1149                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1150                         {
1151                             free_subpicture(sp);
1152
1153                             /* update queue size and signal for next picture */
1154                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                                 is->subpq_rindex = 0;
1156
1157                             SDL_LockMutex(is->subpq_mutex);
1158                             is->subpq_size--;
1159                             SDL_CondSignal(is->subpq_cond);
1160                             SDL_UnlockMutex(is->subpq_mutex);
1161                         }
1162                     }
1163                 }
1164             }
1165
1166             /* display picture */
1167             if (!display_disable)
1168                 video_display(is);
1169
1170             /* update queue size and signal for next picture */
1171             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1172                 is->pictq_rindex = 0;
1173
1174             SDL_LockMutex(is->pictq_mutex);
1175             is->pictq_size--;
1176             SDL_CondSignal(is->pictq_cond);
1177             SDL_UnlockMutex(is->pictq_mutex);
1178         }
1179     } else if (is->audio_st) {
1180         /* draw the next audio frame */
1181
1182         /* if there is only an audio stream, then display the audio bars
1183            (better than nothing, just to test the implementation) */
1184
1185         /* display picture */
1186         if (!display_disable)
1187             video_display(is);
1188     }
1189     if (show_status) {
1190         static int64_t last_time;
1191         int64_t cur_time;
1192         int aqsize, vqsize, sqsize;
1193         double av_diff;
1194
1195         cur_time = av_gettime();
1196         if (!last_time || (cur_time - last_time) >= 30000) {
1197             aqsize = 0;
1198             vqsize = 0;
1199             sqsize = 0;
1200             if (is->audio_st)
1201                 aqsize = is->audioq.size;
1202             if (is->video_st)
1203                 vqsize = is->videoq.size;
1204             if (is->subtitle_st)
1205                 sqsize = is->subtitleq.size;
1206             av_diff = 0;
1207             if (is->audio_st && is->video_st)
1208                 av_diff = get_audio_clock(is) - get_video_clock(is);
1209             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1210                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1211                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1212             fflush(stdout);
1213             last_time = cur_time;
1214         }
1215     }
1216 }
1217
1218 static void stream_close(VideoState *is)
1219 {
1220     VideoPicture *vp;
1221     int i;
1222     /* XXX: use a special url_shutdown call to abort parse cleanly */
1223     is->abort_request = 1;
1224     SDL_WaitThread(is->parse_tid, NULL);
1225     SDL_WaitThread(is->refresh_tid, NULL);
1226
1227     /* free all pictures */
1228     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1229         vp = &is->pictq[i];
1230 #if CONFIG_AVFILTER
1231         avfilter_unref_bufferp(&vp->picref);
1232 #endif
1233         if (vp->bmp) {
1234             SDL_FreeYUVOverlay(vp->bmp);
1235             vp->bmp = NULL;
1236         }
1237     }
1238     SDL_DestroyMutex(is->pictq_mutex);
1239     SDL_DestroyCond(is->pictq_cond);
1240     SDL_DestroyMutex(is->subpq_mutex);
1241     SDL_DestroyCond(is->subpq_cond);
1242 #if !CONFIG_AVFILTER
1243     if (is->img_convert_ctx)
1244         sws_freeContext(is->img_convert_ctx);
1245 #endif
1246     av_free(is);
1247 }
1248
1249 static void do_exit(void)
1250 {
1251     if (cur_stream) {
1252         stream_close(cur_stream);
1253         cur_stream = NULL;
1254     }
1255     uninit_opts();
1256 #if CONFIG_AVFILTER
1257     avfilter_uninit();
1258 #endif
1259     avformat_network_deinit();
1260     if (show_status)
1261         printf("\n");
1262     SDL_Quit();
1263     av_log(NULL, AV_LOG_QUIET, "");
1264     exit(0);
1265 }
1266
1267 /* allocate a picture (this needs to be done in the main thread to avoid
1268    potential locking problems) */
1269 static void alloc_picture(void *opaque)
1270 {
1271     VideoState *is = opaque;
1272     VideoPicture *vp;
1273
1274     vp = &is->pictq[is->pictq_windex];
1275
1276     if (vp->bmp)
1277         SDL_FreeYUVOverlay(vp->bmp);
1278
1279 #if CONFIG_AVFILTER
1280     avfilter_unref_bufferp(&vp->picref);
1281
1282     vp->width   = is->out_video_filter->inputs[0]->w;
1283     vp->height  = is->out_video_filter->inputs[0]->h;
1284     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1285 #else
1286     vp->width   = is->video_st->codec->width;
1287     vp->height  = is->video_st->codec->height;
1288     vp->pix_fmt = is->video_st->codec->pix_fmt;
1289 #endif
1290
1291     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1292                                    SDL_YV12_OVERLAY,
1293                                    screen);
1294     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1295         /* SDL allocates a buffer smaller than requested if the video
1296          * overlay hardware is unable to support the requested size. */
1297         fprintf(stderr, "Error: the video system does not support an image\n"
1298                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1299                         "to reduce the image size.\n", vp->width, vp->height );
1300         do_exit();
1301     }
1302
1303     SDL_LockMutex(is->pictq_mutex);
1304     vp->allocated = 1;
1305     SDL_CondSignal(is->pictq_cond);
1306     SDL_UnlockMutex(is->pictq_mutex);
1307 }
1308
1309 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1310  * guessed if not known. */
1311 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1312 {
1313     VideoPicture *vp;
1314 #if CONFIG_AVFILTER
1315     AVPicture pict_src;
1316 #else
1317     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1318 #endif
1319     /* wait until we have space to put a new picture */
1320     SDL_LockMutex(is->pictq_mutex);
1321
1322     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1323         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1324
1325     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1326            !is->videoq.abort_request) {
1327         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1328     }
1329     SDL_UnlockMutex(is->pictq_mutex);
1330
1331     if (is->videoq.abort_request)
1332         return -1;
1333
1334     vp = &is->pictq[is->pictq_windex];
1335
1336     /* alloc or resize hardware picture buffer */
1337     if (!vp->bmp || vp->reallocate ||
1338 #if CONFIG_AVFILTER
1339         vp->width  != is->out_video_filter->inputs[0]->w ||
1340         vp->height != is->out_video_filter->inputs[0]->h) {
1341 #else
1342         vp->width != is->video_st->codec->width ||
1343         vp->height != is->video_st->codec->height) {
1344 #endif
1345         SDL_Event event;
1346
1347         vp->allocated  = 0;
1348         vp->reallocate = 0;
1349
1350         /* the allocation must be done in the main thread to avoid
1351            locking problems */
1352         event.type = FF_ALLOC_EVENT;
1353         event.user.data1 = is;
1354         SDL_PushEvent(&event);
1355
1356         /* wait until the picture is allocated */
1357         SDL_LockMutex(is->pictq_mutex);
1358         while (!vp->allocated && !is->videoq.abort_request) {
1359             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1360         }
1361         SDL_UnlockMutex(is->pictq_mutex);
1362
1363         if (is->videoq.abort_request)
1364             return -1;
1365     }
1366
1367     /* if the frame is not skipped, then display it */
1368     if (vp->bmp) {
1369         AVPicture pict = { { 0 } };
1370 #if CONFIG_AVFILTER
1371         avfilter_unref_bufferp(&vp->picref);
1372         vp->picref = src_frame->opaque;
1373 #endif
1374
1375         /* get a pointer on the bitmap */
1376         SDL_LockYUVOverlay (vp->bmp);
1377
1378         pict.data[0] = vp->bmp->pixels[0];
1379         pict.data[1] = vp->bmp->pixels[2];
1380         pict.data[2] = vp->bmp->pixels[1];
1381
1382         pict.linesize[0] = vp->bmp->pitches[0];
1383         pict.linesize[1] = vp->bmp->pitches[2];
1384         pict.linesize[2] = vp->bmp->pitches[1];
1385
1386 #if CONFIG_AVFILTER
1387         pict_src.data[0] = src_frame->data[0];
1388         pict_src.data[1] = src_frame->data[1];
1389         pict_src.data[2] = src_frame->data[2];
1390
1391         pict_src.linesize[0] = src_frame->linesize[0];
1392         pict_src.linesize[1] = src_frame->linesize[1];
1393         pict_src.linesize[2] = src_frame->linesize[2];
1394
1395         // FIXME use direct rendering
1396         av_picture_copy(&pict, &pict_src,
1397                         vp->pix_fmt, vp->width, vp->height);
1398 #else
1399         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1400         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1401             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1402             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1403         if (is->img_convert_ctx == NULL) {
1404             fprintf(stderr, "Cannot initialize the conversion context\n");
1405             exit(1);
1406         }
1407         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1408                   0, vp->height, pict.data, pict.linesize);
1409 #endif
1410         /* update the bitmap content */
1411         SDL_UnlockYUVOverlay(vp->bmp);
1412
1413         vp->pts = pts;
1414         vp->pos = pos;
1415
1416         /* now we can update the picture count */
1417         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1418             is->pictq_windex = 0;
1419         SDL_LockMutex(is->pictq_mutex);
1420         vp->target_clock = compute_target_time(vp->pts, is);
1421
1422         is->pictq_size++;
1423         SDL_UnlockMutex(is->pictq_mutex);
1424     }
1425     return 0;
1426 }
1427
1428 /* Compute the exact PTS for the picture if it is omitted in the stream.
1429  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
1430 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1431 {
1432     double frame_delay, pts;
1433
1434     pts = pts1;
1435
1436     if (pts != 0) {
1437         /* update video clock with pts, if present */
1438         is->video_clock = pts;
1439     } else {
1440         pts = is->video_clock;
1441     }
1442     /* update video clock for next frame */
1443     frame_delay = av_q2d(is->video_st->codec->time_base);
1444     /* for MPEG2, the frame can be repeated, so we update the
1445        clock accordingly */
1446     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1447     is->video_clock += frame_delay;
1448
1449     return queue_picture(is, src_frame, pts, pos);
1450 }
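/* The repeat_pict handling above means that, for example, a frame flagged
 * with repeat_pict == 1 (as produced by MPEG-2 3:2 pulldown) advances the
 * video clock by 1.5 frame periods instead of 1. When a frame carries no
 * usable pts (pts1 == 0), the previous video_clock value is reused instead. */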
1451
1452 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1453 {
1454     int got_picture, i;
1455
1456     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1457         return -1;
1458
1459     if (pkt->data == flush_pkt.data) {
1460         avcodec_flush_buffers(is->video_st->codec);
1461
1462         SDL_LockMutex(is->pictq_mutex);
1463         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1464         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1465             is->pictq[i].target_clock = 0;
1466         }
1467         while (is->pictq_size && !is->videoq.abort_request) {
1468             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1469         }
1470         is->video_current_pos = -1;
1471         SDL_UnlockMutex(is->pictq_mutex);
1472
1473         init_pts_correction(&is->pts_ctx);
1474         is->frame_last_pts = AV_NOPTS_VALUE;
1475         is->frame_last_delay = 0;
1476         is->frame_timer = (double)av_gettime() / 1000000.0;
1477         is->skip_frames = 1;
1478         is->skip_frames_index = 0;
1479         return 0;
1480     }
1481
1482     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1483
1484     if (got_picture) {
1485         if (decoder_reorder_pts == -1) {
1486             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1487         } else if (decoder_reorder_pts) {
1488             *pts = frame->pkt_pts;
1489         } else {
1490             *pts = frame->pkt_dts;
1491         }
1492
1493         if (*pts == AV_NOPTS_VALUE) {
1494             *pts = 0;
1495         }
1496
1497         is->skip_frames_index += 1;
1498         if (is->skip_frames_index >= is->skip_frames) {
1499             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1500             return 1;
1501         }
1502
1503     }
1504     return 0;
1505 }
1506
1507 #if CONFIG_AVFILTER
1508 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1509 {
1510     char sws_flags_str[128];
1511     char buffersrc_args[256];
1512     int ret;
1513     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1514     AVCodecContext *codec = is->video_st->codec;
1515
1516     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1517     graph->scale_sws_opts = av_strdup(sws_flags_str);
1518
1519     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1520              codec->width, codec->height, codec->pix_fmt,
1521              is->video_st->time_base.num, is->video_st->time_base.den,
1522              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1523
1524
1525     if ((ret = avfilter_graph_create_filter(&filt_src,
1526                                             avfilter_get_by_name("buffer"),
1527                                             "src", buffersrc_args, NULL,
1528                                             graph)) < 0)
1529         return ret;
1530     if ((ret = avfilter_graph_create_filter(&filt_out,
1531                                             avfilter_get_by_name("buffersink"),
1532                                             "out", NULL, NULL, graph)) < 0)
1533         return ret;
1534
1535     if ((ret = avfilter_graph_create_filter(&filt_format,
1536                                             avfilter_get_by_name("format"),
1537                                             "format", "yuv420p", NULL, graph)) < 0)
1538         return ret;
1539     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1540         return ret;
1541
1542
1543     if (vfilters) {
1544         AVFilterInOut *outputs = avfilter_inout_alloc();
1545         AVFilterInOut *inputs  = avfilter_inout_alloc();
1546
1547         outputs->name    = av_strdup("in");
1548         outputs->filter_ctx = filt_src;
1549         outputs->pad_idx = 0;
1550         outputs->next    = NULL;
1551
1552         inputs->name    = av_strdup("out");
1553         inputs->filter_ctx = filt_format;
1554         inputs->pad_idx = 0;
1555         inputs->next    = NULL;
1556
1557         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1558             return ret;
1559     } else {
1560         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1561             return ret;
1562     }
1563
1564     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1565         return ret;
1566
1567     is->in_video_filter  = filt_src;
1568     is->out_video_filter = filt_out;
1569
1570     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1571         is->use_dr1 = 1;
1572         codec->get_buffer     = codec_get_buffer;
1573         codec->release_buffer = codec_release_buffer;
1574         codec->opaque         = &is->buffer_pool;
1575     }
1576
1577     return ret;
1578 }
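/* The graph built above is, conceptually:
 *
 *   buffer ("src") -> [optional user chain from -vf] -> format=yuv420p -> buffersink ("out")
 *
 * e.g. with "-vf transpose=1" (an arbitrary example) the parsed chain is
 * inserted between the buffer source and the format filter, so the sink
 * always delivers yuv420p frames that match the SDL YV12 overlay. */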
1579
1580 #endif  /* CONFIG_AVFILTER */
1581
1582 static int video_thread(void *arg)
1583 {
1584     AVPacket pkt = { 0 };
1585     VideoState *is = arg;
1586     AVFrame *frame = avcodec_alloc_frame();
1587     int64_t pts_int;
1588     double pts;
1589     int ret;
1590
1591 #if CONFIG_AVFILTER
1592     AVFilterGraph *graph = avfilter_graph_alloc();
1593     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1594     int64_t pos;
1595     int last_w = is->video_st->codec->width;
1596     int last_h = is->video_st->codec->height;
1597
1598     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1599         goto the_end;
1600     filt_in  = is->in_video_filter;
1601     filt_out = is->out_video_filter;
1602 #endif
1603
1604     for (;;) {
1605 #if CONFIG_AVFILTER
1606         AVFilterBufferRef *picref;
1607         AVRational tb;
1608 #endif
1609         while (is->paused && !is->videoq.abort_request)
1610             SDL_Delay(10);
1611
1612         av_free_packet(&pkt);
1613
1614         ret = get_video_frame(is, frame, &pts_int, &pkt);
1615         if (ret < 0)
1616             goto the_end;
1617
1618         if (!ret)
1619             continue;
1620
1621 #if CONFIG_AVFILTER
1622         if (   last_w != is->video_st->codec->width
1623             || last_h != is->video_st->codec->height) {
1624             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1625                     is->video_st->codec->width, is->video_st->codec->height);
1626             avfilter_graph_free(&graph);
1627             graph = avfilter_graph_alloc();
1628             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1629                 goto the_end;
1630             filt_in  = is->in_video_filter;
1631             filt_out = is->out_video_filter;
1632             last_w = is->video_st->codec->width;
1633             last_h = is->video_st->codec->height;
1634         }
1635
1636         frame->pts = pts_int;
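             /* feed the decoded frame to the buffer source: with direct rendering
                the data already lives in one of our FrameBuffers, so it is wrapped
                in an AVFilterBufferRef and the extra reference keeps the buffer
                alive until filter_release_buffer() runs; otherwise
                av_buffersrc_write_frame() copies the frame into the graph */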
1637         if (is->use_dr1) {
1638             FrameBuffer      *buf = frame->opaque;
1639             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
1640                                         frame->data, frame->linesize,
1641                                         AV_PERM_READ | AV_PERM_PRESERVE,
1642                                         frame->width, frame->height,
1643                                         frame->format);
1644
1645             avfilter_copy_frame_props(fb, frame);
1646             fb->buf->priv           = buf;
1647             fb->buf->free           = filter_release_buffer;
1648
1649             buf->refcount++;
1650             av_buffersrc_buffer(filt_in, fb);
1651
1652         } else
1653             av_buffersrc_write_frame(filt_in, frame);
1654
1655         while (ret >= 0) {
1656             ret = av_buffersink_read(filt_out, &picref);
1657             if (ret < 0) {
1658                 ret = 0;
1659                 break;
1660             }
1661
1662             avfilter_copy_buf_props(frame, picref);
1663
1664             pts_int = picref->pts;
1665             tb      = filt_out->inputs[0]->time_base;
1666             pos     = picref->pos;
1667             frame->opaque = picref;
1668
1669             if (av_cmp_q(tb, is->video_st->time_base)) {
1670                 av_unused int64_t pts1 = pts_int;
1671                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1672                 av_dlog(NULL, "video_thread(): "
1673                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1674                         tb.num, tb.den, pts1,
1675                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1676             }
1677             pts = pts_int * av_q2d(is->video_st->time_base);
1678             ret = output_picture2(is, frame, pts, pos);
1679         }
1680 #else
1681         pts = pts_int * av_q2d(is->video_st->time_base);
1682         ret = output_picture2(is, frame, pts,  pkt.pos);
1683 #endif
1684
1685         if (ret < 0)
1686             goto the_end;
1687
1688         if (step)
1689             if (cur_stream)
1690                 stream_pause(cur_stream);
1691     }
1692  the_end:
1693 #if CONFIG_AVFILTER
1694     av_freep(&vfilters);
1695     avfilter_graph_free(&graph);
1696 #endif
1697     av_free_packet(&pkt);
1698     avcodec_free_frame(&frame);
1699     return 0;
1700 }
1701
1702 static int subtitle_thread(void *arg)
1703 {
1704     VideoState *is = arg;
1705     SubPicture *sp;
1706     AVPacket pkt1, *pkt = &pkt1;
1707     int got_subtitle;
1708     double pts;
1709     int i, j;
1710     int r, g, b, y, u, v, a;
1711
1712     for (;;) {
1713         while (is->paused && !is->subtitleq.abort_request) {
1714             SDL_Delay(10);
1715         }
1716         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1717             break;
1718
1719         if (pkt->data == flush_pkt.data) {
1720             avcodec_flush_buffers(is->subtitle_st->codec);
1721             continue;
1722         }
1723         SDL_LockMutex(is->subpq_mutex);
1724         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1725                !is->subtitleq.abort_request) {
1726             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1727         }
1728         SDL_UnlockMutex(is->subpq_mutex);
1729
1730         if (is->subtitleq.abort_request)
1731             return 0;
1732
1733         sp = &is->subpq[is->subpq_windex];
1734
1735         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
1736            in this packet, if any */
1737         pts = 0;
1738         if (pkt->pts != AV_NOPTS_VALUE)
1739             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1740
1741         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1742                                  &got_subtitle, pkt);
1743
1744         if (got_subtitle && sp->sub.format == 0) {
1745             sp->pts = pts;
1746
1747             for (i = 0; i < sp->sub.num_rects; i++)
1748             {
1749                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1750                 {
1751                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1752                     y = RGB_TO_Y_CCIR(r, g, b);
1753                     u = RGB_TO_U_CCIR(r, g, b, 0);
1754                     v = RGB_TO_V_CCIR(r, g, b, 0);
1755                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1756                 }
1757             }
1758
1759             /* now we can update the picture count */
1760             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1761                 is->subpq_windex = 0;
1762             SDL_LockMutex(is->subpq_mutex);
1763             is->subpq_size++;
1764             SDL_UnlockMutex(is->subpq_mutex);
1765         }
1766         av_free_packet(pkt);
1767     }
1768     return 0;
1769 }
1770
1771 /* copy samples into the ring buffer used for the audio waveform / spectrum display */
1772 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1773 {
1774     int size, len;
1775
1776     size = samples_size / sizeof(short);
1777     while (size > 0) {
1778         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1779         if (len > size)
1780             len = size;
1781         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1782         samples += len;
1783         is->sample_array_index += len;
1784         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1785             is->sample_array_index = 0;
1786         size -= len;
1787     }
1788 }
1789
1790 /* return the new audio buffer size (samples can be added or deleted
1791    to get better sync if the video or external clock is the master) */
1792 static int synchronize_audio(VideoState *is, short *samples,
1793                              int samples_size1, double pts)
1794 {
1795     int n, samples_size;
1796     double ref_clock;
1797
1798     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1799     samples_size = samples_size1;
1800
1801     /* if not master, then we try to remove or add samples to correct the clock */
1802     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1803          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1804         double diff, avg_diff;
1805         int wanted_size, min_size, max_size, nb_samples;
1806
1807         ref_clock = get_master_clock(is);
1808         diff = get_audio_clock(is) - ref_clock;
1809
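             /* audio_diff_cum is an exponentially weighted sum:
                cum = diff + coef * cum_prev = sum over k of coef^k * diff[n-k].
                The weights sum to 1 / (1 - coef), so multiplying by (1 - coef)
                below yields a weighted average; with
                coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) a measurement's weight
                decays to 1% after AUDIO_DIFF_AVG_NB frames. */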
1810         if (diff < AV_NOSYNC_THRESHOLD) {
1811             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1812             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1813                 /* not enough measures to have a correct estimate */
1814                 is->audio_diff_avg_count++;
1815             } else {
1816                 /* estimate the A-V difference */
1817                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1818
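                     /* diff is in seconds: diff * sample_rate gives the number of
                        sample frames to add or drop, and multiplying by n converts
                        that to bytes; the correction is clamped to
                        +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer so
                        a single adjustment stays inaudible */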
1819                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1820                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1821                     nb_samples = samples_size / n;
1822
1823                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1824                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1825                     if (wanted_size < min_size)
1826                         wanted_size = min_size;
1827                     else if (wanted_size > max_size)
1828                         wanted_size = max_size;
1829
1830                     /* add or remove samples to correct the sync */
1831                     if (wanted_size < samples_size) {
1832                         /* remove samples */
1833                         samples_size = wanted_size;
1834                     } else if (wanted_size > samples_size) {
1835                         uint8_t *samples_end, *q;
1836                         int nb;
1837
1838                         /* add samples */
1839                         nb = (samples_size - wanted_size);
1840                         samples_end = (uint8_t *)samples + samples_size - n;
1841                         q = samples_end + n;
1842                         while (nb > 0) {
1843                             memcpy(q, samples_end, n);
1844                             q += n;
1845                             nb -= n;
1846                         }
1847                         samples_size = wanted_size;
1848                     }
1849                 }
1850                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1851                         diff, avg_diff, samples_size - samples_size1,
1852                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1853             }
1854         } else {
1855             /* too big difference : may be initial PTS errors, so
1856                reset A-V filter */
1857             is->audio_diff_avg_count = 0;
1858             is->audio_diff_cum       = 0;
1859         }
1860     }
1861
1862     return samples_size;
1863 }
1864
1865 /* decode one audio frame and return its uncompressed size in bytes */
1866 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1867 {
1868     AVPacket *pkt_temp = &is->audio_pkt_temp;
1869     AVPacket *pkt = &is->audio_pkt;
1870     AVCodecContext *dec = is->audio_st->codec;
1871     int n, len1, data_size, got_frame;
1872     double pts;
1873     int new_packet = 0;
1874     int flush_complete = 0;
1875
1876     for (;;) {
1877         /* NOTE: the audio packet can contain several frames */
1878         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1879             int resample_changed, audio_resample;
1880
1881             if (!is->frame) {
1882                 if (!(is->frame = avcodec_alloc_frame()))
1883                     return AVERROR(ENOMEM);
1884             } else
1885                 avcodec_get_frame_defaults(is->frame);
1886
1887             if (flush_complete)
1888                 break;
1889             new_packet = 0;
1890             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1891             if (len1 < 0) {
1892                 /* if error, we skip the frame */
1893                 pkt_temp->size = 0;
1894                 break;
1895             }
1896
1897             pkt_temp->data += len1;
1898             pkt_temp->size -= len1;
1899
1900             if (!got_frame) {
1901                 /* stop sending empty packets if the decoder is finished */
1902                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1903                     flush_complete = 1;
1904                 continue;
1905             }
1906             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1907                                                    is->frame->nb_samples,
1908                                                    dec->sample_fmt, 1);
1909
1910             audio_resample = dec->sample_fmt     != is->sdl_sample_fmt ||
1911                              dec->channel_layout != is->sdl_channel_layout;
1912
1913             resample_changed = dec->sample_fmt     != is->resample_sample_fmt ||
1914                                dec->channel_layout != is->resample_channel_layout;
1915
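                 /* (re)create the resampler if conversion to the SDL output format
                    is needed but no context exists yet, or if the decoder output
                    format changed mid-stream and the current context no longer
                    matches */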
1916             if ((!is->avr && audio_resample) || resample_changed) {
1917                 int ret;
1918                 if (is->avr)
1919                     avresample_close(is->avr);
1920                 else if (audio_resample) {
1921                     is->avr = avresample_alloc_context();
1922                     if (!is->avr) {
1923                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1924                         break;
1925                     }
1926                 }
1927                 if (audio_resample) {
1928                     av_opt_set_int(is->avr, "in_channel_layout",  dec->channel_layout,    0);
1929                     av_opt_set_int(is->avr, "in_sample_fmt",      dec->sample_fmt,        0);
1930                     av_opt_set_int(is->avr, "in_sample_rate",     dec->sample_rate,       0);
1931                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout, 0);
1932                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,     0);
1933                     av_opt_set_int(is->avr, "out_sample_rate",    dec->sample_rate,       0);
1934
1935                     if ((ret = avresample_open(is->avr)) < 0) {
1936                         fprintf(stderr, "error initializing libavresample\n");
1937                         break;
1938                     }
1939                 }
1940                 is->resample_sample_fmt     = dec->sample_fmt;
1941                 is->resample_channel_layout = dec->channel_layout;
1942             }
1943
1944             if (audio_resample) {
1945                 void *tmp_out;
1946                 int out_samples, out_size, out_linesize;
1947                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1948                 int nb_samples = is->frame->nb_samples;
1949
1950                 out_size = av_samples_get_buffer_size(&out_linesize,
1951                                                       is->sdl_channels,
1952                                                       nb_samples,
1953                                                       is->sdl_sample_fmt, 0);
1954                 tmp_out = av_realloc(is->audio_buf1, out_size);
1955                 if (!tmp_out)
1956                     return AVERROR(ENOMEM);
1957                 is->audio_buf1 = tmp_out;
1958
1959                 out_samples = avresample_convert(is->avr,
1960                                                  &is->audio_buf1,
1961                                                  out_linesize, nb_samples,
1962                                                  is->frame->data,
1963                                                  is->frame->linesize[0],
1964                                                  is->frame->nb_samples);
1965                 if (out_samples < 0) {
1966                     fprintf(stderr, "avresample_convert() failed\n");
1967                     break;
1968                 }
1969                 is->audio_buf = is->audio_buf1;
1970                 data_size = out_samples * osize * is->sdl_channels;
1971             } else {
1972                 is->audio_buf = is->frame->data[0];
1973             }
1974
1975             /* if no pts, then compute it */
1976             pts = is->audio_clock;
1977             *pts_ptr = pts;
1978             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
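                 /* n is the size in bytes of one sample frame (all channels) in
                    the output format, so data_size / (n * sample_rate) is the
                    duration of the decoded data in seconds; the audio clock thus
                    points at the end of the data being returned */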
1979             is->audio_clock += (double)data_size /
1980                 (double)(n * dec->sample_rate);
1981 #ifdef DEBUG
1982             {
1983                 static double last_clock;
1984                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1985                        is->audio_clock - last_clock,
1986                        is->audio_clock, pts);
1987                 last_clock = is->audio_clock;
1988             }
1989 #endif
1990             return data_size;
1991         }
1992
1993         /* free the current packet */
1994         if (pkt->data)
1995             av_free_packet(pkt);
1996         memset(pkt_temp, 0, sizeof(*pkt_temp));
1997
1998         if (is->paused || is->audioq.abort_request) {
1999             return -1;
2000         }
2001
2002         /* read next packet */
2003         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
2004             return -1;
2005
2006         if (pkt->data == flush_pkt.data) {
2007             avcodec_flush_buffers(dec);
2008             flush_complete = 0;
2009         }
2010
2011         *pkt_temp = *pkt;
2012
2013         /* update the audio clock with the packet pts, if available */
2014         if (pkt->pts != AV_NOPTS_VALUE) {
2015             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2016         }
2017     }
2018 }
2019
2020 /* prepare a new audio buffer */
2021 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2022 {
2023     VideoState *is = opaque;
2024     int audio_size, len1;
2025     double pts;
2026
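         /* remember when SDL asked for data; the audio visualization uses this to
            estimate how much of the previously returned buffer has already been
            played */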
2027     audio_callback_time = av_gettime();
2028
2029     while (len > 0) {
2030         if (is->audio_buf_index >= is->audio_buf_size) {
2031             audio_size = audio_decode_frame(is, &pts);
2032             if (audio_size < 0) {
2033                 /* if error, just output silence */
2034                 is->audio_buf      = is->silence_buf;
2035                 is->audio_buf_size = sizeof(is->silence_buf);
2036             } else {
2037                 if (is->show_audio)
2038                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2039                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2040                                                pts);
2041                 is->audio_buf_size = audio_size;
2042             }
2043             is->audio_buf_index = 0;
2044         }
2045         len1 = is->audio_buf_size - is->audio_buf_index;
2046         if (len1 > len)
2047             len1 = len;
2048         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2049         len -= len1;
2050         stream += len1;
2051         is->audio_buf_index += len1;
2052     }
2053 }
2054
2055 /* open a given stream. Return 0 if OK */
2056 static int stream_component_open(VideoState *is, int stream_index)
2057 {
2058     AVFormatContext *ic = is->ic;
2059     AVCodecContext *avctx;
2060     AVCodec *codec;
2061     SDL_AudioSpec wanted_spec, spec;
2062     AVDictionary *opts;
2063     AVDictionaryEntry *t = NULL;
2064
2065     if (stream_index < 0 || stream_index >= ic->nb_streams)
2066         return -1;
2067     avctx = ic->streams[stream_index]->codec;
2068
2069     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2070
2071     codec = avcodec_find_decoder(avctx->codec_id);
2072     avctx->debug_mv          = debug_mv;
2073     avctx->debug             = debug;
2074     avctx->workaround_bugs   = workaround_bugs;
2075     avctx->idct_algo         = idct;
2076     avctx->skip_frame        = skip_frame;
2077     avctx->skip_idct         = skip_idct;
2078     avctx->skip_loop_filter  = skip_loop_filter;
2079     avctx->error_concealment = error_concealment;
2080
2081     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2082
2083     if (!av_dict_get(opts, "threads", NULL, 0))
2084         av_dict_set(&opts, "threads", "auto", 0);
2085     if (!codec ||
2086         avcodec_open2(avctx, codec, &opts) < 0)
2087         return -1;
2088     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2089         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2090         return AVERROR_OPTION_NOT_FOUND;
2091     }
2092
2093     /* prepare audio output */
2094     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2095         wanted_spec.freq = avctx->sample_rate;
2096         wanted_spec.format = AUDIO_S16SYS;
2097
2098         if (!avctx->channel_layout)
2099             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2100         if (!avctx->channel_layout) {
2101             fprintf(stderr, "unable to guess channel layout\n");
2102             return -1;
2103         }
2104         if (avctx->channels == 1)
2105             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2106         else
2107             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2108         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
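             /* SDL output is always signed 16-bit in native byte order, mono or
                stereo; whenever the decoder produces anything else,
                audio_decode_frame() converts it with libavresample */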
2109
2110         wanted_spec.channels = is->sdl_channels;
2111         wanted_spec.silence = 0;
2112         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2113         wanted_spec.callback = sdl_audio_callback;
2114         wanted_spec.userdata = is;
2115         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2116             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2117             return -1;
2118         }
2119         is->audio_hw_buf_size = spec.size;
2120         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2121         is->resample_sample_fmt     = is->sdl_sample_fmt;
2122         is->resample_channel_layout = is->sdl_channel_layout;
2123     }
2124
2125     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2126     switch (avctx->codec_type) {
2127     case AVMEDIA_TYPE_AUDIO:
2128         is->audio_stream = stream_index;
2129         is->audio_st = ic->streams[stream_index];
2130         is->audio_buf_size  = 0;
2131         is->audio_buf_index = 0;
2132
2133         /* init averaging filter */
2134         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2135         is->audio_diff_avg_count = 0;
2136         /* since we do not have a precise enough audio FIFO fullness measure,
2137            we only correct audio sync if the error is larger than this threshold */
2138         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2139
2140         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2141         packet_queue_init(&is->audioq);
2142         SDL_PauseAudio(0);
2143         break;
2144     case AVMEDIA_TYPE_VIDEO:
2145         is->video_stream = stream_index;
2146         is->video_st = ic->streams[stream_index];
2147
2148         packet_queue_init(&is->videoq);
2149         is->video_tid = SDL_CreateThread(video_thread, is);
2150         break;
2151     case AVMEDIA_TYPE_SUBTITLE:
2152         is->subtitle_stream = stream_index;
2153         is->subtitle_st = ic->streams[stream_index];
2154         packet_queue_init(&is->subtitleq);
2155
2156         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2157         break;
2158     default:
2159         break;
2160     }
2161     return 0;
2162 }
2163
2164 static void stream_component_close(VideoState *is, int stream_index)
2165 {
2166     AVFormatContext *ic = is->ic;
2167     AVCodecContext *avctx;
2168
2169     if (stream_index < 0 || stream_index >= ic->nb_streams)
2170         return;
2171     avctx = ic->streams[stream_index]->codec;
2172
2173     switch (avctx->codec_type) {
2174     case AVMEDIA_TYPE_AUDIO:
2175         packet_queue_abort(&is->audioq);
2176
2177         SDL_CloseAudio();
2178
2179         packet_queue_end(&is->audioq);
2180         av_free_packet(&is->audio_pkt);
2181         if (is->avr)
2182             avresample_free(&is->avr);
2183         av_freep(&is->audio_buf1);
2184         is->audio_buf = NULL;
2185         avcodec_free_frame(&is->frame);
2186
2187         if (is->rdft) {
2188             av_rdft_end(is->rdft);
2189             av_freep(&is->rdft_data);
2190             is->rdft = NULL;
2191             is->rdft_bits = 0;
2192         }
2193         break;
2194     case AVMEDIA_TYPE_VIDEO:
2195         packet_queue_abort(&is->videoq);
2196
2197         /* note: we also signal this condition to make sure we unblock the
2198            video thread in all cases */
2199         SDL_LockMutex(is->pictq_mutex);
2200         SDL_CondSignal(is->pictq_cond);
2201         SDL_UnlockMutex(is->pictq_mutex);
2202
2203         SDL_WaitThread(is->video_tid, NULL);
2204
2205         packet_queue_end(&is->videoq);
2206         break;
2207     case AVMEDIA_TYPE_SUBTITLE:
2208         packet_queue_abort(&is->subtitleq);
2209
2210         /* note: we also signal this condition to make sure we unblock the
2211            subtitle thread in all cases */
2212         SDL_LockMutex(is->subpq_mutex);
2213         is->subtitle_stream_changed = 1;
2214
2215         SDL_CondSignal(is->subpq_cond);
2216         SDL_UnlockMutex(is->subpq_mutex);
2217
2218         SDL_WaitThread(is->subtitle_tid, NULL);
2219
2220         packet_queue_end(&is->subtitleq);
2221         break;
2222     default:
2223         break;
2224     }
2225
2226     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2227     avcodec_close(avctx);
2228 #if CONFIG_AVFILTER
2229     free_buffer_pool(&is->buffer_pool);
2230 #endif
2231     switch (avctx->codec_type) {
2232     case AVMEDIA_TYPE_AUDIO:
2233         is->audio_st = NULL;
2234         is->audio_stream = -1;
2235         break;
2236     case AVMEDIA_TYPE_VIDEO:
2237         is->video_st = NULL;
2238         is->video_stream = -1;
2239         break;
2240     case AVMEDIA_TYPE_SUBTITLE:
2241         is->subtitle_st = NULL;
2242         is->subtitle_stream = -1;
2243         break;
2244     default:
2245         break;
2246     }
2247 }
2248
2249 /* since we have only one decoding thread, we can use a global
2250    variable instead of a thread local variable */
2251 static VideoState *global_video_state;
2252
2253 static int decode_interrupt_cb(void *ctx)
2254 {
2255     return global_video_state && global_video_state->abort_request;
2256 }
2257
2258 /* this thread gets the stream from the disk or the network */
2259 static int decode_thread(void *arg)
2260 {
2261     VideoState *is = arg;
2262     AVFormatContext *ic = NULL;
2263     int err, i, ret;
2264     int st_index[AVMEDIA_TYPE_NB];
2265     AVPacket pkt1, *pkt = &pkt1;
2266     int eof = 0;
2267     int pkt_in_play_range = 0;
2268     AVDictionaryEntry *t;
2269     AVDictionary **opts;
2270     int orig_nb_streams;
2271
2272     memset(st_index, -1, sizeof(st_index));
2273     is->video_stream = -1;
2274     is->audio_stream = -1;
2275     is->subtitle_stream = -1;
2276
2277     global_video_state = is;
2278
2279     ic = avformat_alloc_context();
2280     ic->interrupt_callback.callback = decode_interrupt_cb;
2281     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2282     if (err < 0) {
2283         print_error(is->filename, err);
2284         ret = -1;
2285         goto fail;
2286     }
2287     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2288         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2289         ret = AVERROR_OPTION_NOT_FOUND;
2290         goto fail;
2291     }
2292     is->ic = ic;
2293
2294     if (genpts)
2295         ic->flags |= AVFMT_FLAG_GENPTS;
2296
2297     opts = setup_find_stream_info_opts(ic, codec_opts);
2298     orig_nb_streams = ic->nb_streams;
2299
2300     err = avformat_find_stream_info(ic, opts);
2301     if (err < 0) {
2302         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2303         ret = -1;
2304         goto fail;
2305     }
2306     for (i = 0; i < orig_nb_streams; i++)
2307         av_dict_free(&opts[i]);
2308     av_freep(&opts);
2309
2310     if (ic->pb)
2311         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2312
2313     if (seek_by_bytes < 0)
2314         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2315
2316     /* if a seek was requested, execute it now */
2317     if (start_time != AV_NOPTS_VALUE) {
2318         int64_t timestamp;
2319
2320         timestamp = start_time;
2321         /* add the stream start time */
2322         if (ic->start_time != AV_NOPTS_VALUE)
2323             timestamp += ic->start_time;
2324         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2325         if (ret < 0) {
2326             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2327                     is->filename, (double)timestamp / AV_TIME_BASE);
2328         }
2329     }
2330
2331     for (i = 0; i < ic->nb_streams; i++)
2332         ic->streams[i]->discard = AVDISCARD_ALL;
2333     if (!video_disable)
2334         st_index[AVMEDIA_TYPE_VIDEO] =
2335             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2336                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2337     if (!audio_disable)
2338         st_index[AVMEDIA_TYPE_AUDIO] =
2339             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2340                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2341                                 st_index[AVMEDIA_TYPE_VIDEO],
2342                                 NULL, 0);
2343     if (!video_disable)
2344         st_index[AVMEDIA_TYPE_SUBTITLE] =
2345             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2346                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2347                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2348                                  st_index[AVMEDIA_TYPE_AUDIO] :
2349                                  st_index[AVMEDIA_TYPE_VIDEO]),
2350                                 NULL, 0);
2351     if (show_status) {
2352         av_dump_format(ic, 0, is->filename, 0);
2353     }
2354
2355     /* open the streams */
2356     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2357         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2358     }
2359
2360     ret = -1;
2361     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2362         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2363     }
2364     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2365     if (ret < 0) {
2366         if (!display_disable)
2367             is->show_audio = 2;
2368     }
2369
2370     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2371         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2372     }
2373
2374     if (is->video_stream < 0 && is->audio_stream < 0) {
2375         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2376         ret = -1;
2377         goto fail;
2378     }
2379
2380     for (;;) {
2381         if (is->abort_request)
2382             break;
2383         if (is->paused != is->last_paused) {
2384             is->last_paused = is->paused;
2385             if (is->paused)
2386                 is->read_pause_return = av_read_pause(ic);
2387             else
2388                 av_read_play(ic);
2389         }
2390 #if CONFIG_RTSP_DEMUXER
2391         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2392             /* wait 10 ms to avoid trying to get another packet */
2393             /* XXX: horrible */
2394             SDL_Delay(10);
2395             continue;
2396         }
2397 #endif
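             /* seek requests are serviced here in the demux thread:
                avformat_seek_file() repositions the demuxer, then every packet
                queue is flushed and a flush_pkt marker is queued so the decoders
                discard their internal buffers before post-seek data arrives */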
2398         if (is->seek_req) {
2399             int64_t seek_target = is->seek_pos;
2400             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2401             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2402 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2403 //      of the seek_pos/seek_rel variables
2404
2405             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2406             if (ret < 0) {
2407                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2408             } else {
2409                 if (is->audio_stream >= 0) {
2410                     packet_queue_flush(&is->audioq);
2411                     packet_queue_put(&is->audioq, &flush_pkt);
2412                 }
2413                 if (is->subtitle_stream >= 0) {
2414                     packet_queue_flush(&is->subtitleq);
2415                     packet_queue_put(&is->subtitleq, &flush_pkt);
2416                 }
2417                 if (is->video_stream >= 0) {
2418                     packet_queue_flush(&is->videoq);
2419                     packet_queue_put(&is->videoq, &flush_pkt);
2420                 }
2421             }
2422             is->seek_req = 0;
2423             eof = 0;
2424         }
2425
2426         /* if the queues are full, no need to read more */
2427         if (!infinite_buffer &&
2428               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2429             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2430                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2431                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2432             /* wait 10 ms */
2433             SDL_Delay(10);
2434             continue;
2435         }
2436         if (eof) {
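                 /* at end of file, queue empty packets so that decoders which
                    buffer frames internally (CODEC_CAP_DELAY) emit what they still
                    hold; once every queue has drained, either restart playback
                    (-loop) or quit (-autoexit) */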
2437             if (is->video_stream >= 0) {
2438                 av_init_packet(pkt);
2439                 pkt->data = NULL;
2440                 pkt->size = 0;
2441                 pkt->stream_index = is->video_stream;
2442                 packet_queue_put(&is->videoq, pkt);
2443             }
2444             if (is->audio_stream >= 0 &&
2445                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2446                 av_init_packet(pkt);
2447                 pkt->data = NULL;
2448                 pkt->size = 0;
2449                 pkt->stream_index = is->audio_stream;
2450                 packet_queue_put(&is->audioq, pkt);
2451             }
2452             SDL_Delay(10);
2453             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2454                 if (loop != 1 && (!loop || --loop)) {
2455                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2456                 } else if (autoexit) {
2457                     ret = AVERROR_EOF;
2458                     goto fail;
2459                 }
2460             }
2461             continue;
2462         }
2463         ret = av_read_frame(ic, pkt);
2464         if (ret < 0) {
2465             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2466                 eof = 1;
2467             if (ic->pb && ic->pb->error)
2468                 break;
2469             SDL_Delay(100); /* wait for user event */
2470             continue;
2471         }
2472         /* check if packet is in play range specified by user, then queue, otherwise discard */
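             /* pkt->pts is converted to seconds using the stream time base; the
                user-supplied start_time and duration are in AV_TIME_BASE
                (microsecond) units, hence the divisions by 1000000 */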
2473         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2474                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2475                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2476                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2477                 <= ((double)duration / 1000000);
2478         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2479             packet_queue_put(&is->audioq, pkt);
2480         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2481             packet_queue_put(&is->videoq, pkt);
2482         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2483             packet_queue_put(&is->subtitleq, pkt);
2484         } else {
2485             av_free_packet(pkt);
2486         }
2487     }
2488     /* wait until the end */
2489     while (!is->abort_request) {
2490         SDL_Delay(100);
2491     }
2492
2493     ret = 0;
2494  fail:
2495     /* disable interrupting */
2496     global_video_state = NULL;
2497
2498     /* close each stream */
2499     if (is->audio_stream >= 0)
2500         stream_component_close(is, is->audio_stream);
2501     if (is->video_stream >= 0)
2502         stream_component_close(is, is->video_stream);
2503     if (is->subtitle_stream >= 0)
2504         stream_component_close(is, is->subtitle_stream);
2505     if (is->ic) {
2506         avformat_close_input(&is->ic);
2507     }
2508
2509     if (ret != 0) {
2510         SDL_Event event;
2511
2512         event.type = FF_QUIT_EVENT;
2513         event.user.data1 = is;
2514         SDL_PushEvent(&event);
2515     }
2516     return 0;
2517 }
2518
2519 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2520 {
2521     VideoState *is;
2522
2523     is = av_mallocz(sizeof(VideoState));
2524     if (!is)
2525         return NULL;
2526     av_strlcpy(is->filename, filename, sizeof(is->filename));
2527     is->iformat = iformat;
2528     is->ytop    = 0;
2529     is->xleft   = 0;
2530
2531     /* start video display */
2532     is->pictq_mutex = SDL_CreateMutex();
2533     is->pictq_cond  = SDL_CreateCond();
2534
2535     is->subpq_mutex = SDL_CreateMutex();
2536     is->subpq_cond  = SDL_CreateCond();
2537
2538     is->av_sync_type = av_sync_type;
2539     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2540     if (!is->parse_tid) {
2541         av_free(is);
2542         return NULL;
2543     }
2544     return is;
2545 }
2546
2547 static void stream_cycle_channel(VideoState *is, int codec_type)
2548 {
2549     AVFormatContext *ic = is->ic;
2550     int start_index, stream_index;
2551     AVStream *st;
2552
2553     if (codec_type == AVMEDIA_TYPE_VIDEO)
2554         start_index = is->video_stream;
2555     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2556         start_index = is->audio_stream;
2557     else
2558         start_index = is->subtitle_stream;
2559     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2560         return;
2561     stream_index = start_index;
2562     for (;;) {
2563         if (++stream_index >= is->ic->nb_streams)
2564         {
2565             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2566             {
2567                 stream_index = -1;
2568                 goto the_end;
2569             } else
2570                 stream_index = 0;
2571         }
2572         if (stream_index == start_index)
2573             return;
2574         st = ic->streams[stream_index];
2575         if (st->codec->codec_type == codec_type) {
2576             /* check that parameters are OK */
2577             switch (codec_type) {
2578             case AVMEDIA_TYPE_AUDIO:
2579                 if (st->codec->sample_rate != 0 &&
2580                     st->codec->channels != 0)
2581                     goto the_end;
2582                 break;
2583             case AVMEDIA_TYPE_VIDEO:
2584             case AVMEDIA_TYPE_SUBTITLE:
2585                 goto the_end;
2586             default:
2587                 break;
2588             }
2589         }
2590     }
2591  the_end:
2592     stream_component_close(is, start_index);
2593     stream_component_open(is, stream_index);
2594 }
2595
2596
2597 static void toggle_full_screen(void)
2598 {
2599 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2600     /* OS X needs to empty the picture_queue */
2601     int i;
2602     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2603         cur_stream->pictq[i].reallocate = 1;
2604 #endif
2605     is_full_screen = !is_full_screen;
2606     video_open(cur_stream);
2607 }
2608
2609 static void toggle_pause(void)
2610 {
2611     if (cur_stream)
2612         stream_pause(cur_stream);
2613     step = 0;
2614 }
2615
2616 static void step_to_next_frame(void)
2617 {
2618     if (cur_stream) {
2619         /* if the stream is paused, unpause it, then step */
2620         if (cur_stream->paused)
2621             stream_pause(cur_stream);
2622     }
2623     step = 1;
2624 }
2625
2626 static void toggle_audio_display(void)
2627 {
2628     if (cur_stream) {
2629         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2630         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2631         fill_rectangle(screen,
2632                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2633                        bgcolor);
2634         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2635     }
2636 }
2637
2638 /* handle an event sent by the GUI */
2639 static void event_loop(void)
2640 {
2641     SDL_Event event;
2642     double incr, pos, frac;
2643
2644     for (;;) {
2645         double x;
2646         SDL_WaitEvent(&event);
2647         switch (event.type) {
2648         case SDL_KEYDOWN:
2649             if (exit_on_keydown) {
2650                 do_exit();
2651                 break;
2652             }
2653             switch (event.key.keysym.sym) {
2654             case SDLK_ESCAPE:
2655             case SDLK_q:
2656                 do_exit();
2657                 break;
2658             case SDLK_f:
2659                 toggle_full_screen();
2660                 break;
2661             case SDLK_p:
2662             case SDLK_SPACE:
2663                 toggle_pause();
2664                 break;
2665             case SDLK_s: // S: Step to next frame
2666                 step_to_next_frame();
2667                 break;
2668             case SDLK_a:
2669                 if (cur_stream)
2670                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2671                 break;
2672             case SDLK_v:
2673                 if (cur_stream)
2674                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2675                 break;
2676             case SDLK_t:
2677                 if (cur_stream)
2678                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2679                 break;
2680             case SDLK_w:
2681                 toggle_audio_display();
2682                 break;
2683             case SDLK_LEFT:
2684                 incr = -10.0;
2685                 goto do_seek;
2686             case SDLK_RIGHT:
2687                 incr = 10.0;
2688                 goto do_seek;
2689             case SDLK_UP:
2690                 incr = 60.0;
2691                 goto do_seek;
2692             case SDLK_DOWN:
2693                 incr = -60.0;
2694             do_seek:
2695                 if (cur_stream) {
2696                     if (seek_by_bytes) {
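                             /* convert the seek increment from seconds to a byte
                                offset: bytes = seconds * bit_rate / 8, with a
                                fallback guess of 180000 bytes/s (1.44 Mbit/s) when
                                the container does not report a bit rate */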
2697                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2698                             pos = cur_stream->video_current_pos;
2699                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2700                             pos = cur_stream->audio_pkt.pos;
2701                         } else
2702                             pos = avio_tell(cur_stream->ic->pb);
2703                         if (cur_stream->ic->bit_rate)
2704                             incr *= cur_stream->ic->bit_rate / 8.0;
2705                         else
2706                             incr *= 180000.0;
2707                         pos += incr;
2708                         stream_seek(cur_stream, pos, incr, 1);
2709                     } else {
2710                         pos = get_master_clock(cur_stream);
2711                         pos += incr;
2712                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2713                     }
2714                 }
2715                 break;
2716             default:
2717                 break;
2718             }
2719             break;
2720         case SDL_MOUSEBUTTONDOWN:
2721             if (exit_on_mousedown) {
2722                 do_exit();
2723                 break;
2724             }
2725         case SDL_MOUSEMOTION:
2726             if (event.type == SDL_MOUSEBUTTONDOWN) {
2727                 x = event.button.x;
2728             } else {
2729                 if (event.motion.state != SDL_PRESSED)
2730                     break;
2731                 x = event.motion.x;
2732             }
2733             if (cur_stream) {
2734                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2735                     uint64_t size =  avio_size(cur_stream->ic->pb);
2736                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2737                 } else {
2738                     int64_t ts;
2739                     int ns, hh, mm, ss;
2740                     int tns, thh, tmm, tss;
2741                     tns  = cur_stream->ic->duration / 1000000LL;
2742                     thh  = tns / 3600;
2743                     tmm  = (tns % 3600) / 60;
2744                     tss  = (tns % 60);
2745                     frac = x / cur_stream->width;
2746                     ns   = frac * tns;
2747                     hh   = ns / 3600;
2748                     mm   = (ns % 3600) / 60;
2749                     ss   = (ns % 60);
2750                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2751                             hh, mm, ss, thh, tmm, tss);
2752                     ts = frac * cur_stream->ic->duration;
2753                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2754                         ts += cur_stream->ic->start_time;
2755                     stream_seek(cur_stream, ts, 0, 0);
2756                 }
2757             }
2758             break;
2759         case SDL_VIDEORESIZE:
2760             if (cur_stream) {
2761                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2762                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2763                 screen_width  = cur_stream->width  = event.resize.w;
2764                 screen_height = cur_stream->height = event.resize.h;
2765             }
2766             break;
2767         case SDL_QUIT:
2768         case FF_QUIT_EVENT:
2769             do_exit();
2770             break;
2771         case FF_ALLOC_EVENT:
2772             video_open(event.user.data1);
2773             alloc_picture(event.user.data1);
2774             break;
2775         case FF_REFRESH_EVENT:
2776             video_refresh_timer(event.user.data1);
2777             cur_stream->refresh = 0;
2778             break;
2779         default:
2780             break;
2781         }
2782     }
2783 }
2784
2785 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2786 {
2787     av_log(NULL, AV_LOG_ERROR,
2788            "Option '%s' has been removed, use private format options instead\n", opt);
2789     return AVERROR(EINVAL);
2790 }
2791
2792 static int opt_width(void *optctx, const char *opt, const char *arg)
2793 {
2794     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2795     return 0;
2796 }
2797
2798 static int opt_height(void *optctx, const char *opt, const char *arg)
2799 {
2800     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2801     return 0;
2802 }
2803
2804 static int opt_format(void *optctx, const char *opt, const char *arg)
2805 {
2806     file_iformat = av_find_input_format(arg);
2807     if (!file_iformat) {
2808         fprintf(stderr, "Unknown input format: %s\n", arg);
2809         return AVERROR(EINVAL);
2810     }
2811     return 0;
2812 }
2813
2814 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2815 {
2816     av_log(NULL, AV_LOG_ERROR,
2817            "Option '%s' has been removed, use private format options instead\n", opt);
2818     return AVERROR(EINVAL);
2819 }
2820
2821 static int opt_sync(void *optctx, const char *opt, const char *arg)
2822 {
2823     if (!strcmp(arg, "audio"))
2824         av_sync_type = AV_SYNC_AUDIO_MASTER;
2825     else if (!strcmp(arg, "video"))
2826         av_sync_type = AV_SYNC_VIDEO_MASTER;
2827     else if (!strcmp(arg, "ext"))
2828         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2829     else {
2830         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2831         exit(1);
2832     }
2833     return 0;
2834 }
2835
2836 static int opt_seek(void *optctx, const char *opt, const char *arg)
2837 {
2838     start_time = parse_time_or_die(opt, arg, 1);
2839     return 0;
2840 }
2841
2842 static int opt_duration(void *optctx, const char *opt, const char *arg)
2843 {
2844     duration = parse_time_or_die(opt, arg, 1);
2845     return 0;
2846 }
2847
2848 static int opt_debug(void *optctx, const char *opt, const char *arg)
2849 {
2850     av_log_set_level(99);
2851     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2852     return 0;
2853 }
2854
2855 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2856 {
2857     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2858     return 0;
2859 }
2860
2861 static const OptionDef options[] = {
2862 #include "cmdutils_common_opts.h"
2863     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2864     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2865     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2866     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2867     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2868     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2869     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2870     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2871     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2872     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2873     { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
2874     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2875     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2876     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2877     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2878     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2879     { "debug", HAS_ARG | OPT_EXPERT, { .func_arg = opt_debug }, "print specific debug info", "" },
2880     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2881     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2882     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2883     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2884     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2885     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2886     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2887     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2888     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2889     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2890     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2891     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2892     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2893     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2894     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2895     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2896     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2897     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2898 #if CONFIG_AVFILTER
2899     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2900 #endif
2901     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2902     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2903     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2904     { NULL, },
2905 };
2906
2907 static void show_usage(void)
2908 {
2909     printf("Simple media player\n");
2910     printf("usage: %s [options] input_file\n", program_name);
2911     printf("\n");
2912 }
2913
2914 void show_help_default(const char *opt, const char *arg)
2915 {
2916     av_log_set_callback(log_callback_help);
2917     show_usage();
2918     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2919     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2920     printf("\n");
2921     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2922     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2923 #if !CONFIG_AVFILTER
2924     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2925 #endif
2926     printf("\nWhile playing:\n"
2927            "q, ESC              quit\n"
2928            "f                   toggle full screen\n"
2929            "p, SPC              pause\n"
2930            "a                   cycle audio channel\n"
2931            "v                   cycle video channel\n"
2932            "t                   cycle subtitle channel\n"
2933            "w                   show audio waves\n"
2934            "s                   activate frame-step mode\n"
2935            "left/right          seek backward/forward 10 seconds\n"
2936            "down/up             seek backward/forward 1 minute\n"
2937            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2938            );
2939 }
2940
2941 static void opt_input_file(void *optctx, const char *filename)
2942 {
2943     if (input_filename) {
2944         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2945                 filename, input_filename);
2946         exit(1);
2947     }
2948     if (!strcmp(filename, "-"))
2949         filename = "pipe:";
2950     input_filename = filename;
2951 }
2952
2953 /* program entry point */
2954 int main(int argc, char **argv)
2955 {
2956     int flags;
2957
2958     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2959     parse_loglevel(argc, argv, options);
2960
2961     /* register all codecs, demuxers and protocols */
2962     avcodec_register_all();
2963 #if CONFIG_AVDEVICE
2964     avdevice_register_all();
2965 #endif
2966 #if CONFIG_AVFILTER
2967     avfilter_register_all();
2968 #endif
2969     av_register_all();
2970     avformat_network_init();
2971
2972     init_opts();
2973
2974     show_banner();
2975
2976     parse_options(NULL, argc, argv, options, opt_input_file);
2977
2978     if (!input_filename) {
2979         show_usage();
2980         fprintf(stderr, "An input file must be specified\n");
2981         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2982         exit(1);
2983     }
2984
2985     if (display_disable) {
2986         video_disable = 1;
2987     }
2988     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2989 #if !defined(__MINGW32__) && !defined(__APPLE__)
2990     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2991 #endif
2992     if (SDL_Init (flags)) {
2993         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2994         exit(1);
2995     }
2996
2997     if (!display_disable) {
2998 #if HAVE_SDL_VIDEO_SIZE
2999         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3000         fs_screen_width = vi->current_w;
3001         fs_screen_height = vi->current_h;
3002 #endif
3003     }
3004
3005     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3006     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3007     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3008
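         /* flush_pkt is a sentinel: after a seek it is pushed into every packet
            queue, and the decoding threads recognize it by comparing pkt->data
            with flush_pkt.data, then call avcodec_flush_buffers() */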
3009     av_init_packet(&flush_pkt);
3010     flush_pkt.data = "FLUSH";
3011
3012     cur_stream = stream_open(input_filename, file_iformat);
3013
3014     event_loop();
3015
3016     /* never returns */
3017
3018     return 0;
3019 }