#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavutil/opt.h"
-#include "libavcodec/avfft.h"
+#include "libavutil/tx.h"
#include "libswresample/swresample.h"
-#if CONFIG_AVFILTER
-# include "libavfilter/avfilter.h"
-# include "libavfilter/buffersink.h"
-# include "libavfilter/buffersrc.h"
-#endif
+#include "libavfilter/avfilter.h"
+#include "libavfilter/buffersink.h"
+#include "libavfilter/buffersrc.h"
#include <SDL.h>
#include <SDL_thread.h>
#define USE_ONEPASS_SUBTITLE_RENDER 1
-static unsigned sws_flags = SWS_BICUBIC;
-
typedef struct MyAVPacketList {
AVPacket *pkt;
int serial;
int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
} Clock;
+typedef struct FrameData {
+ int64_t pkt_pos;
+} FrameData;
+
/* Common struct for handling all types of decoded data and allocated render buffers. */
typedef struct Frame {
AVFrame *frame;
int audio_volume;
int muted;
struct AudioParams audio_src;
-#if CONFIG_AVFILTER
struct AudioParams audio_filter_src;
-#endif
struct AudioParams audio_tgt;
struct SwrContext *swr_ctx;
int frame_drops_early;
int16_t sample_array[SAMPLE_ARRAY_SIZE];
int sample_array_index;
int last_i_start;
- RDFTContext *rdft;
+ AVTXContext *rdft;
+ av_tx_fn rdft_fn;
int rdft_bits;
- FFTSample *rdft_data;
+ float *real_data;
+ AVComplexFloat *rdft_data;
int xpos;
double last_vis_time;
SDL_Texture *vis_texture;
AVStream *video_st;
PacketQueue videoq;
double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
- struct SwsContext *img_convert_ctx;
struct SwsContext *sub_convert_ctx;
int eof;
int width, height, xleft, ytop;
int step;
-#if CONFIG_AVFILTER
int vfilter_idx;
AVFilterContext *in_video_filter; // the first filter in the video chain
AVFilterContext *out_video_filter; // the last filter in the video chain
AVFilterContext *in_audio_filter; // the first filter in the audio chain
AVFilterContext *out_audio_filter; // the last filter in the audio chain
AVFilterGraph *agraph; // audio filter graph
-#endif
int last_video_stream, last_audio_stream, last_subtitle_stream;
double rdftspeed = 0.02;
static int64_t cursor_last_shown;
static int cursor_hidden = 0;
-#if CONFIG_AVFILTER
static const char **vfilters_list = NULL;
static int nb_vfilters = 0;
static char *afilters = NULL;
-#endif
static int autorotate = 1;
static int find_stream_info = 1;
static int filter_nbthreads = 0;
{ AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
};
-#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
{
- GROW_ARRAY(vfilters_list, nb_vfilters);
+ int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
+ if (ret < 0)
+ return ret;
+
vfilters_list[nb_vfilters - 1] = arg;
return 0;
}
-#endif
static inline
int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
}
av_packet_unref(d->pkt);
} else {
+ if (d->pkt->buf && !d->pkt->opaque_ref) {
+ FrameData *fd;
+
+ d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
+ if (!d->pkt->opaque_ref)
+ return AVERROR(ENOMEM);
+ fd = (FrameData*)d->pkt->opaque_ref->data;
+ fd->pkt_pos = d->pkt->pos;
+ }
+
if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
d->packet_pending = 1;
return 0;
}
-static void frame_queue_destory(FrameQueue *f)
+static void frame_queue_destroy(FrameQueue *f)
{
int i;
for (i = 0; i < f->max_size; i++) {
}
}
-static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
+static int upload_texture(SDL_Texture **tex, AVFrame *frame)
+{
int ret = 0;
Uint32 sdl_pix_fmt;
SDL_BlendMode sdl_blendmode;
if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
return -1;
switch (sdl_pix_fmt) {
- case SDL_PIXELFORMAT_UNKNOWN:
- /* This should only happen if we are not using avfilter... */
- *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
- frame->width, frame->height, frame->format, frame->width, frame->height,
- AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
- if (*img_convert_ctx != NULL) {
- uint8_t *pixels[4];
- int pitch[4];
- if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
- sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
- 0, frame->height, pixels, pitch);
- SDL_UnlockTexture(*tex);
- }
- } else {
- av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
- ret = -1;
- }
- break;
case SDL_PIXELFORMAT_IYUV:
if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
set_sdl_yuv_conversion_mode(vp->frame);
if (!vp->uploaded) {
- if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0) {
+ if (upload_texture(&is->vid_texture, vp->frame) < 0) {
set_sdl_yuv_conversion_mode(NULL);
return;
}
fill_rectangle(s->xleft, y, s->width, 1);
}
} else {
+ int err = 0;
if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
return;
s->xpos = 0;
nb_display_channels= FFMIN(nb_display_channels, 2);
if (rdft_bits != s->rdft_bits) {
- av_rdft_end(s->rdft);
- av_free(s->rdft_data);
- s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
+ const float rdft_scale = 1.0;
+ av_tx_uninit(&s->rdft);
+ av_freep(&s->real_data);
+ av_freep(&s->rdft_data);
s->rdft_bits = rdft_bits;
- s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
+ s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
+ s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
+ err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
+ 0, 1 << rdft_bits, &rdft_scale, 0);
}
- if (!s->rdft || !s->rdft_data){
+ if (err < 0 || !s->rdft_data) {
av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
s->show_mode = SHOW_MODE_WAVES;
} else {
- FFTSample *data[2];
+ float *data_in[2];
+ AVComplexFloat *data[2];
SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
uint32_t *pixels;
int pitch;
for (ch = 0; ch < nb_display_channels; ch++) {
- data[ch] = s->rdft_data + 2 * nb_freq * ch;
+ data_in[ch] = s->real_data + 2 * nb_freq * ch;
+ data[ch] = s->rdft_data + nb_freq * ch;
i = i_start + ch;
for (x = 0; x < 2 * nb_freq; x++) {
double w = (x-nb_freq) * (1.0 / nb_freq);
- data[ch][x] = s->sample_array[i] * (1.0 - w * w);
+ data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
i += channels;
if (i >= SAMPLE_ARRAY_SIZE)
i -= SAMPLE_ARRAY_SIZE;
}
- av_rdft_calc(s->rdft, data[ch]);
+ s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
+ data[ch][0].im = data[ch][nb_freq].re;
+ data[ch][nb_freq].re = 0;
}
/* Least efficient way to do this, we should of course
* directly access it but it is more than fast enough. */
pixels += pitch * s->height;
for (y = 0; y < s->height; y++) {
double w = 1 / sqrt(nb_freq);
- int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
- int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
+ int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
+ int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
: a;
a = FFMIN(a, 255);
b = FFMIN(b, 255);
is->audio_buf = NULL;
if (is->rdft) {
- av_rdft_end(is->rdft);
+ av_tx_uninit(&is->rdft);
+ av_freep(&is->real_data);
av_freep(&is->rdft_data);
is->rdft = NULL;
is->rdft_bits = 0;
packet_queue_destroy(&is->subtitleq);
/* free all pictures */
- frame_queue_destory(&is->pictq);
- frame_queue_destory(&is->sampq);
- frame_queue_destory(&is->subpq);
+ frame_queue_destroy(&is->pictq);
+ frame_queue_destroy(&is->sampq);
+ frame_queue_destroy(&is->subpq);
SDL_DestroyCond(is->continue_read_thread);
- sws_freeContext(is->img_convert_ctx);
sws_freeContext(is->sub_convert_ctx);
av_free(is->filename);
if (is->vis_texture)
if (window)
SDL_DestroyWindow(window);
uninit_opts();
-#if CONFIG_AVFILTER
av_freep(&vfilters_list);
-#endif
avformat_network_deinit();
if (show_status)
printf("\n");
}
}
-static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
+static void update_video_pts(VideoState *is, double pts, int serial)
+{
/* update current video pts */
set_clock(&is->vidclk, pts, serial);
sync_clock_to_slave(&is->extclk, &is->vidclk);
SDL_LockMutex(is->pictq.mutex);
if (!isnan(vp->pts))
- update_video_pts(is, vp->pts, vp->pos, vp->serial);
+ update_video_pts(is, vp->pts, vp->serial);
SDL_UnlockMutex(is->pictq.mutex);
if (frame_queue_nb_remaining(&is->pictq) > 1) {
return got_picture;
}
-#if CONFIG_AVFILTER
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
}
pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
- while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ while ((e = av_dict_iterate(sws_dict, e))) {
if (!strcmp(e->key, "sws_flags")) {
av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
} else
} while (0)
if (autorotate) {
- int32_t *displaymatrix = (int32_t *)av_stream_get_side_data(is->video_st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
- double theta = get_rotation(displaymatrix);
+ double theta = 0.0;
+ int32_t *displaymatrix = NULL;
+ AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
+ if (sd)
+ displaymatrix = (int32_t *)sd->data;
+ if (!displaymatrix) {
+ const AVPacketSideData *sd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
+ is->video_st->codecpar->nb_coded_side_data,
+ AV_PKT_DATA_DISPLAYMATRIX);
+ if (sd)
+ displaymatrix = (int32_t *)sd->data;
+ }
+ theta = get_rotation(displaymatrix);
if (fabs(theta - 90) < 1.0) {
INSERT_FILT("transpose", "clock");
av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
- while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
+ while ((e = av_dict_iterate(swr_opts, e)))
av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
if (strlen(aresample_swr_opts))
aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
return ret;
}
-#endif /* CONFIG_AVFILTER */
static int audio_thread(void *arg)
{
VideoState *is = arg;
AVFrame *frame = av_frame_alloc();
Frame *af;
-#if CONFIG_AVFILTER
int last_serial = -1;
int reconfigure;
-#endif
int got_frame = 0;
AVRational tb;
int ret = 0;
if (got_frame) {
tb = (AVRational){1, frame->sample_rate};
-#if CONFIG_AVFILTER
reconfigure =
cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
frame->format, frame->ch_layout.nb_channels) ||
goto the_end;
while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
+ FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
tb = av_buffersink_get_time_base(is->out_audio_filter);
-#endif
if (!(af = frame_queue_peek_writable(&is->sampq)))
goto the_end;
af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
- af->pos = frame->pkt_pos;
+ af->pos = fd ? fd->pkt_pos : -1;
af->serial = is->auddec.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
av_frame_move_ref(af->frame, frame);
frame_queue_push(&is->sampq);
-#if CONFIG_AVFILTER
if (is->audioq.serial != is->auddec.pkt_serial)
break;
}
if (ret == AVERROR_EOF)
is->auddec.finished = is->auddec.pkt_serial;
-#endif
}
} while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
the_end:
-#if CONFIG_AVFILTER
avfilter_graph_free(&is->agraph);
-#endif
av_frame_free(&frame);
return ret;
}
AVRational tb = is->video_st->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
-#if CONFIG_AVFILTER
AVFilterGraph *graph = NULL;
AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = 0;
enum AVPixelFormat last_format = -2;
int last_serial = -1;
int last_vfilter_idx = 0;
-#endif
if (!frame)
return AVERROR(ENOMEM);
if (!ret)
continue;
-#if CONFIG_AVFILTER
if ( last_w != frame->width
|| last_h != frame->height
|| last_format != frame->format
goto the_end;
while (ret >= 0) {
+ FrameData *fd;
+
is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
break;
}
+ fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
+
is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
tb = av_buffersink_get_time_base(filt_out);
-#endif
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
- ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
+ ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
av_frame_unref(frame);
-#if CONFIG_AVFILTER
if (is->videoq.serial != is->viddec.pkt_serial)
break;
}
-#endif
if (ret < 0)
goto the_end;
}
the_end:
-#if CONFIG_AVFILTER
avfilter_graph_free(&graph);
-#endif
av_frame_free(&frame);
return 0;
}
if (fast)
avctx->flags2 |= AV_CODEC_FLAG2_FAST;
- opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
+ ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
+ ic->streams[stream_index], codec, &opts);
+ if (ret < 0)
+ goto fail;
+
if (!av_dict_get(opts, "threads", NULL, 0))
av_dict_set(&opts, "threads", "auto", 0);
if (stream_lowres)
av_dict_set_int(&opts, "lowres", stream_lowres, 0);
+
+ av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
+
if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
goto fail;
}
ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
-#if CONFIG_AVFILTER
{
AVFilterContext *sink;
if (ret < 0)
goto fail;
}
-#else
- sample_rate = avctx->sample_rate;
- ret = av_channel_layout_copy(&ch_layout, &avctx->ch_layout);
- if (ret < 0)
- goto fail;
-#endif
/* prepare audio output */
if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
goto fail;
- if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
+ if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
is->auddec.start_pts = is->audio_st->start_time;
is->auddec.start_pts_tb = is->audio_st->time_base;
}
if (genpts)
ic->flags |= AVFMT_FLAG_GENPTS;
- av_format_inject_global_side_data(ic);
-
if (find_stream_info) {
- AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
+ AVDictionary **opts;
int orig_nb_streams = ic->nb_streams;
+ err = setup_find_stream_info_opts(ic, codec_opts, &opts);
+ if (err < 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Error setting up avformat_find_stream_info() options\n");
+ ret = err;
+ goto fail;
+ }
+
err = avformat_find_stream_info(ic, opts);
for (i = 0; i < orig_nb_streams; i++)
stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
break;
case SDLK_w:
-#if CONFIG_AVFILTER
if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
if (++cur_stream->vfilter_idx >= nb_vfilters)
cur_stream->vfilter_idx = 0;
cur_stream->vfilter_idx = 0;
toggle_audio_display(cur_stream);
}
-#else
- toggle_audio_display(cur_stream);
-#endif
break;
case SDLK_PAGEUP:
if (cur_stream->ic->nb_chapters <= 1) {
/* Option callback for -x: parse the forced display width.
 * Uses parse_number() (range [1, INT_MAX]) so a malformed value is reported
 * as a negative AVERROR instead of terminating the program.
 * Returns 0 on success or a negative AVERROR on a bad argument. */
static int opt_width(void *optctx, const char *opt, const char *arg)
{
    double num;
    int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
    if (ret < 0)
        return ret;

    screen_width = num;
    return 0;
}
/* Option callback for -y: parse the forced display height.
 * Mirrors opt_width(): validates via parse_number() in [1, INT_MAX] and
 * propagates a negative AVERROR on malformed input instead of exiting. */
static int opt_height(void *optctx, const char *opt, const char *arg)
{
    double num;
    int ret = parse_number(opt, arg, OPT_INT64, 1, INT_MAX, &num);
    if (ret < 0)
        return ret;

    screen_height = num;
    return 0;
}
return 0;
}
-static int opt_seek(void *optctx, const char *opt, const char *arg)
-{
- start_time = parse_time_or_die(opt, arg, 1);
- return 0;
-}
-
-static int opt_duration(void *optctx, const char *opt, const char *arg)
-{
- duration = parse_time_or_die(opt, arg, 1);
- return 0;
-}
-
/* Option callback for -showmode: select the display mode.
 * Accepts the symbolic names "video", "waves", "rdft"; any other string is
 * parsed as a numeric mode index in [0, SHOW_MODE_NB-1].
 * Returns 0 on success or a negative AVERROR for an invalid numeric value. */
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
{
    show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
                !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
                !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;

    /* SHOW_MODE_NONE here means "not a symbolic name": fall back to a
     * numeric mode, propagating parse errors instead of dying. */
    if (show_mode == SHOW_MODE_NONE) {
        double num;
        int ret = parse_number(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1, &num);
        if (ret < 0)
            return ret;
        show_mode = num;
    }
    return 0;
}
-static void opt_input_file(void *optctx, const char *filename)
+static int opt_input_file(void *optctx, const char *filename)
{
if (input_filename) {
av_log(NULL, AV_LOG_FATAL,
"Argument '%s' provided as input filename, but '%s' was already specified.\n",
filename, input_filename);
- exit(1);
+ return AVERROR(EINVAL);
}
if (!strcmp(filename, "-"))
- filename = "pipe:";
+ filename = "fd:";
input_filename = filename;
+
+ return 0;
}
static int opt_codec(void *optctx, const char *opt, const char *arg)
{ "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
{ "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
{ "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
- { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
- { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
+ { "ss", HAS_ARG | OPT_TIME, { &start_time }, "seek to a given position in seconds", "pos" },
+ { "t", HAS_ARG | OPT_TIME, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
{ "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
{ "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
{ "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
{ "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
{ "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
{ "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
-#if CONFIG_AVFILTER
{ "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
{ "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
-#endif
{ "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
{ "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
{ "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
printf("\n");
show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
-#if !CONFIG_AVFILTER
- show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
-#else
show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
-#endif
printf("\nWhile playing:\n"
"q, ESC quit\n"
"f toggle full screen\n"
/* Called from the main */
int main(int argc, char **argv)
{
- int flags;
+ int flags, ret;
VideoState *is;
init_dynload();
show_banner(argc, argv, options);
- parse_options(NULL, argc, argv, options, opt_input_file);
+ ret = parse_options(NULL, argc, argv, options, opt_input_file);
+ if (ret < 0)
+ exit(ret == AVERROR_EXIT ? 0 : 1);
if (!input_filename) {
show_usage();