3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
/* Program identity, stream-mapping helper types, and global option state
 * for the avconv CLI.
 * NOTE(review): this chunk is a sampled extract — stray original line
 * numbers prefix each line and some lines are missing. */
79 #define VSYNC_PASSTHROUGH 0
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
86 /* select an input stream for an output stream */
87 typedef struct StreamMap {
88 int disabled; /** 1 if this mapping is disabled by a negative map */
92 int sync_stream_index;
93 char *linklabel; /** name of an output link, for mapping lavfi outputs */
97 * select an input file for an output file
99 typedef struct MetadataMap {
100 int file; ///< file index
101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
102 int index; ///< stream/chapter/program number
105 static const OptionDef options[];
/* Global flags set from the command line; defaults initialized here. */
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
/* Byte counters and frame statistics updated during transcoding. */
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
143 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* Filtering and stream bookkeeping types.  Several struct bodies are
 * incomplete in this extract (closing braces and some members missing). */
145 typedef struct InputFilter {
146 AVFilterContext *filter;
147 struct InputStream *ist;
148 struct FilterGraph *graph;
152 typedef struct OutputFilter {
153 AVFilterContext *filter;
154 struct OutputStream *ost;
155 struct FilterGraph *graph;
158 /* temporary storage until stream maps are processed */
159 AVFilterInOut *out_tmp;
162 typedef struct FilterGraph {
164 const char *graph_desc;
166 AVFilterGraph *graph;
168 InputFilter **inputs;
170 OutputFilter **outputs;
/* Pool entry for decoder-owned picture buffers; see codec_get_buffer(). */
174 typedef struct FrameBuffer {
180 enum PixelFormat pix_fmt;
183 struct InputStream *ist;
184 struct FrameBuffer *next;
187 typedef struct InputStream {
190 int discard; /* true if stream data should be discarded */
191 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
193 AVFrame *decoded_frame;
195 int64_t start; /* time when read started */
196 /* predicted dts of the next packet read for this stream or (when there are
197 * several frames in a packet) of the next frame in current packet */
199 /* dts of the last packet read for this stream */
201 PtsCorrectionContext pts_ctx;
203 int is_start; /* is 1 at the start and after a discontinuity */
204 int showed_multi_packet_warning;
206 AVRational framerate; /* framerate forced with -r */
210 int resample_pix_fmt;
212 int resample_sample_fmt;
213 int resample_sample_rate;
214 int resample_channels;
215 uint64_t resample_channel_layout;
217 /* a pool of free buffers for decoded data */
218 FrameBuffer *buffer_pool;
220 /* decoded data from this stream goes into all those filters
221 * currently video and audio only */
222 InputFilter **filters;
226 typedef struct InputFile {
227 AVFormatContext *ctx;
228 int eof_reached; /* true if eof reached */
229 int ist_index; /* index of first stream in ist_table */
230 int buffer_size; /* current total buffer size */
232 int nb_streams; /* number of stream that avconv is aware of; may be different
233 from ctx.nb_streams if new streams appear during av_read_frame() */
237 typedef struct OutputStream {
238 int file_index; /* file index */
239 int index; /* stream index in the output file */
240 int source_index; /* InputStream index */
241 AVStream *st; /* stream in the output file */
242 int encoding_needed; /* true if encoding needed for this stream */
244 /* input pts and corresponding output pts
246 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
247 struct InputStream *sync_ist; /* input stream to sync against */
248 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
249 /* pts of the first frame encoded for this stream, used for limiting
252 AVBitStreamFilterContext *bitstream_filters;
255 AVFrame *filtered_frame;
258 AVRational frame_rate;
262 float frame_aspect_ratio;
265 /* forced key frames */
266 int64_t *forced_kf_pts;
272 OutputFilter *filter;
277 int is_past_recording_time;
279 const char *attachment_filename;
280 int copy_initial_nonkeyframes;
282 enum PixelFormat pix_fmts[2];
286 typedef struct OutputFile {
287 AVFormatContext *ctx;
289 int ost_index; /* index of the first stream in output_streams */
290 int64_t recording_time; /* desired length of the resulting file in microseconds */
291 int64_t start_time; /* start time in microseconds */
292 uint64_t limit_filesize;
/* Global tables of demuxer/muxer objects, grown as files are opened. */
295 static InputStream **input_streams = NULL;
296 static int nb_input_streams = 0;
297 static InputFile **input_files = NULL;
298 static int nb_input_files = 0;
300 static OutputStream **output_streams = NULL;
301 static int nb_output_streams = 0;
302 static OutputFile **output_files = NULL;
303 static int nb_output_files = 0;
305 static FilterGraph **filtergraphs;
/* Per-invocation option storage filled by the option parser; each
 * SpecifierOpt array is paired with an nb_* element counter. */
308 typedef struct OptionsContext {
309 /* input/output options */
313 SpecifierOpt *codec_names;
315 SpecifierOpt *audio_channels;
316 int nb_audio_channels;
317 SpecifierOpt *audio_sample_rate;
318 int nb_audio_sample_rate;
319 SpecifierOpt *frame_rates;
321 SpecifierOpt *frame_sizes;
323 SpecifierOpt *frame_pix_fmts;
324 int nb_frame_pix_fmts;
327 int64_t input_ts_offset;
330 SpecifierOpt *ts_scale;
332 SpecifierOpt *dump_attachment;
333 int nb_dump_attachment;
336 StreamMap *stream_maps;
338 /* first item specifies output metadata, second is input */
339 MetadataMap (*meta_data_maps)[2];
340 int nb_meta_data_maps;
341 int metadata_global_manual;
342 int metadata_streams_manual;
343 int metadata_chapters_manual;
344 const char **attachments;
347 int chapters_input_file;
349 int64_t recording_time;
350 uint64_t limit_filesize;
356 int subtitle_disable;
359 /* indexed by output file stream index */
363 SpecifierOpt *metadata;
365 SpecifierOpt *max_frames;
367 SpecifierOpt *bitstream_filters;
368 int nb_bitstream_filters;
369 SpecifierOpt *codec_tags;
371 SpecifierOpt *sample_fmts;
373 SpecifierOpt *qscale;
375 SpecifierOpt *forced_key_frames;
376 int nb_forced_key_frames;
377 SpecifierOpt *force_fps;
379 SpecifierOpt *frame_aspect_ratios;
380 int nb_frame_aspect_ratios;
381 SpecifierOpt *rc_overrides;
383 SpecifierOpt *intra_matrices;
384 int nb_intra_matrices;
385 SpecifierOpt *inter_matrices;
386 int nb_inter_matrices;
387 SpecifierOpt *top_field_first;
388 int nb_top_field_first;
389 SpecifierOpt *metadata_map;
391 SpecifierOpt *presets;
393 SpecifierOpt *copy_initial_nonkeyframes;
394 int nb_copy_initial_nonkeyframes;
395 SpecifierOpt *filters;
/* Scan a SpecifierOpt array and copy the value of the last entry whose
 * stream specifier matches st into outvar. */
399 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
402 for (i = 0; i < o->nb_ ## name; i++) {\
403 char *spec = o->name[i].specifier;\
404 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
405 outvar = o->name[i].u.type;\
/* Free all heap-allocated option values in *o (driven generically by the
 * OPT_SPEC/OPT_STRING flags in the options[] table), zero the struct and
 * re-install the non-zero defaults.  Called between output files. */
411 static void reset_options(OptionsContext *o)
413 const OptionDef *po = options;
416 /* all OPT_SPEC and OPT_STRING can be freed in generic way */
418 void *dst = (uint8_t*)o + po->u.off;
420 if (po->flags & OPT_SPEC) {
/* OPT_SPEC fields are laid out as { SpecifierOpt *array; int count; },
 * so the counter lives immediately after the array pointer */
421 SpecifierOpt **so = dst;
422 int i, *count = (int*)(so + 1);
423 for (i = 0; i < *count; i++) {
424 av_freep(&(*so)[i].specifier);
425 if (po->flags & OPT_STRING)
426 av_freep(&(*so)[i].u.str);
430 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
435 for (i = 0; i < o->nb_stream_maps; i++)
436 av_freep(&o->stream_maps[i].linklabel);
437 av_freep(&o->stream_maps);
438 av_freep(&o->meta_data_maps);
439 av_freep(&o->streamid_map);
/* wipe everything, then restore defaults that are not all-zero */
441 memset(o, 0, sizeof(*o));
443 o->mux_max_delay = 0.7;
444 o->recording_time = INT64_MAX;
445 o->limit_filesize = UINT64_MAX;
446 o->chapters_input_file = INT_MAX;
/* Allocate one FrameBuffer matching s (width/height/pix_fmt), with edge
 * padding unless the codec has CODEC_FLAG_EMU_EDGE set; *pbuf receives
 * the new buffer.  Returns 0 on success or a negative AVERROR code. */
452 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
454 FrameBuffer *buf = av_mallocz(sizeof(*buf));
456 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
457 int h_chroma_shift, v_chroma_shift;
458 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
459 int w = s->width, h = s->height;
462 return AVERROR(ENOMEM);
464 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
469 avcodec_align_dimensions(s, &w, &h);
470 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
471 s->pix_fmt, 32)) < 0) {
475 /* XXX this shouldn't be needed, but some tests break without this line
476 * those decoders are buggy and need to be fixed.
477 * the following tests fail:
478 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
480 memset(buf->base[0], 128, ret);
482 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
483 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
484 const int h_shift = i==0 ? 0 : h_chroma_shift;
485 const int v_shift = i==0 ? 0 : v_chroma_shift;
/* data[] points past the edge region unless the codec draws its own edges */
486 if (s->flags & CODEC_FLAG_EMU_EDGE)
487 buf->data[i] = buf->base[i];
489 buf->data[i] = buf->base[i] +
490 FFALIGN((buf->linesize[i]*edge >> v_shift) +
491 (pixel_size*edge >> h_shift), 32);
495 buf->pix_fmt = s->pix_fmt;
/* Free every buffer in ist's free pool (walks the singly linked list). */
502 static void free_buffer_pool(InputStream *ist)
504 FrameBuffer *buf = ist->buffer_pool;
506 ist->buffer_pool = buf->next;
507 av_freep(&buf->base[0]);
509 buf = ist->buffer_pool;
/* Drop one reference; when the count reaches zero, push buf back onto
 * ist's free pool for reuse instead of freeing it. */
513 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
515 av_assert0(buf->refcount);
517 if (!buf->refcount) {
518 buf->next = ist->buffer_pool;
519 ist->buffer_pool = buf;
/* AVCodecContext.get_buffer callback: hand the decoder a picture from the
 * per-stream pool, (re)allocating when the pool is empty or the cached
 * buffer no longer matches the stream's width/height/pix_fmt. */
523 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
525 InputStream *ist = s->opaque;
529 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
532 buf = ist->buffer_pool;
533 ist->buffer_pool = buf->next;
/* pooled buffer no longer matches the stream parameters: replace it */
535 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
536 av_freep(&buf->base[0]);
538 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
544 frame->type = FF_BUFFER_TYPE_USER;
545 frame->extended_data = frame->data;
546 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
547 frame->width = buf->w;
548 frame->height = buf->h;
549 frame->format = buf->pix_fmt;
550 frame->sample_aspect_ratio = s->sample_aspect_ratio;
552 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
553 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
554 frame->data[i] = buf->data[i];
555 frame->linesize[i] = buf->linesize[i];
/* AVCodecContext.release_buffer callback: detach the plane pointers from
 * the frame, then drop the pool reference. */
561 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
563 InputStream *ist = s->opaque;
564 FrameBuffer *buf = frame->opaque;
567 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
568 frame->data[i] = NULL;
570 unref_buffer(ist, buf);
/* Release callback used when a pooled buffer leaves a filter graph. */
573 static void filter_release_buffer(AVFilterBuffer *fb)
575 FrameBuffer *buf = fb->priv;
577 unref_buffer(buf->ist, buf);
581 * Define a function for building a string containing a list of
/* The generated choose_<var>s(ost) returns a malloc'ed string: either the
 * single name already forced on the codec context, or every entry of the
 * encoder's supported_list joined by `separator` via a dynamic buffer.
 * Caller owns (frees) the returned string. */
584 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
585 static char *choose_ ## var ## s(OutputStream *ost) \
587 if (ost->st->codec->var != none) { \
588 get_name(ost->st->codec->var); \
589 return av_strdup(name); \
590 } else if (ost->enc->supported_list) { \
592 AVIOContext *s = NULL; \
596 if (avio_open_dyn_buf(&s) < 0) \
599 for (p = ost->enc->supported_list; *p != none; p++) { \
601 avio_printf(s, "%s" separator, name); \
603 len = avio_close_dyn_buf(s, &ret); \
/* Instantiations: choose_pix_fmts(), choose_sample_fmts(),
 * choose_sample_rates(), choose_channel_layouts(). */
610 #define GET_PIX_FMT_NAME(pix_fmt)\
611 const char *name = av_get_pix_fmt_name(pix_fmt);
613 DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
614 GET_PIX_FMT_NAME, ":")
616 #define GET_SAMPLE_FMT_NAME(sample_fmt)\
617 const char *name = av_get_sample_fmt_name(sample_fmt)
619 DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
620 AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
622 #define GET_SAMPLE_RATE_NAME(rate)\
624 snprintf(name, sizeof(name), "%d", rate);
626 DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
627 GET_SAMPLE_RATE_NAME, ",")
629 #define GET_CH_LAYOUT_NAME(ch_layout)\
631 snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
633 DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
634 GET_CH_LAYOUT_NAME, ",")
/* Create a trivial one-input/one-output FilterGraph linking ist to ost,
 * wire it into both streams, and register it in the global filtergraphs
 * table.  Returns the new graph (error paths not visible in this extract). */
636 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
638 FilterGraph *fg = av_mallocz(sizeof(*fg));
642 fg->index = nb_filtergraphs;
644 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
646 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
648 fg->outputs[0]->ost = ost;
649 fg->outputs[0]->graph = fg;
651 ost->filter = fg->outputs[0];
653 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
655 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
657 fg->inputs[0]->ist = ist;
658 fg->inputs[0]->graph = fg;
/* register the input filter with the input stream ... */
660 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
661 &ist->nb_filters, ist->nb_filters + 1);
662 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
/* ... and the graph with the global list */
664 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
665 &nb_filtergraphs, nb_filtergraphs + 1);
666 filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected input pad of a complex filtergraph to an
 * InputStream: if the pad is labeled "file:spec", resolve it through the
 * stream specifier; otherwise pick the first unused stream of the same
 * media type.  Fatal error if no stream matches. */
671 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
674 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
677 // TODO: support other filter types
678 if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
679 av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
/* labeled pad: "<file index>[:<stream specifier>]" */
688 int file_idx = strtol(in->name, &p, 0);
690 if (file_idx < 0 || file_idx >= nb_input_files) {
691 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
692 file_idx, fg->graph_desc);
695 s = input_files[file_idx]->ctx;
697 for (i = 0; i < s->nb_streams; i++) {
698 if (s->streams[i]->codec->codec_type != type)
700 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
706 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
707 "matches no streams.\n", p, fg->graph_desc);
710 ist = input_streams[input_files[file_idx]->ist_index + st->index];
712 /* find the first unused stream of corresponding type */
713 for (i = 0; i < nb_input_streams; i++) {
714 ist = input_streams[i];
715 if (ist->st->codec->codec_type == type && ist->discard)
718 if (i == nb_input_streams) {
719 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
720 "unlabeled input pad %d on filter %s", in->pad_idx,
721 in->filter_ctx->name);
/* the chosen stream must now be decoded, not discarded */
726 ist->decoding_needed = 1;
727 ist->st->discard = AVDISCARD_NONE;
729 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
730 &fg->nb_inputs, fg->nb_inputs + 1);
731 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
733 fg->inputs[fg->nb_inputs - 1]->ist = ist;
734 fg->inputs[fg->nb_inputs - 1]->graph = fg;
736 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
737 &ist->nb_filters, ist->nb_filters + 1);
738 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Attach the video output chain for one filtergraph output: buffersink,
 * plus optional "scale" (forced size), "format" (encoder pixel formats)
 * and "fps" (forced frame rate) filters between the graph's last filter
 * and the sink.  Returns 0 or a negative AVERROR code. */
741 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
744 OutputStream *ost = ofilter->ost;
745 AVCodecContext *codec = ost->st->codec;
746 AVFilterContext *last_filter = out->filter_ctx;
747 int pad_idx = out->pad_idx;
751 ret = avfilter_graph_create_filter(&ofilter->filter,
752 avfilter_get_by_name("buffersink"),
753 "out", NULL, pix_fmts, fg->graph);
/* force the output size with a scale filter when the user requested one */
757 if (codec->width || codec->height) {
759 AVFilterContext *filter;
761 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
764 (unsigned)ost->sws_flags);
765 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
766 NULL, args, NULL, fg->graph)) < 0)
768 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
771 last_filter = filter;
/* constrain the pixel format to what the encoder supports */
775 if ((pix_fmts = choose_pix_fmts(ost))) {
776 AVFilterContext *filter;
777 if ((ret = avfilter_graph_create_filter(&filter,
778 avfilter_get_by_name("format"),
779 "format", pix_fmts, NULL,
782 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
785 last_filter = filter;
/* enforce a fixed output frame rate when one was requested */
790 if (ost->frame_rate.num) {
791 AVFilterContext *fps;
794 snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
795 ost->frame_rate.den);
796 ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
797 "fps", args, NULL, fg->graph);
801 ret = avfilter_link(last_filter, pad_idx, fps, 0);
808 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Attach the audio output chain for one filtergraph output: abuffersink,
 * plus optional "aformat" (encoder sample formats/rates/layouts) and
 * "asyncts" (deprecated -async handling) filters.  Returns 0 or a
 * negative AVERROR code. */
814 static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
816 OutputStream *ost = ofilter->ost;
817 AVCodecContext *codec = ost->st->codec;
818 AVFilterContext *last_filter = out->filter_ctx;
819 int pad_idx = out->pad_idx;
820 char *sample_fmts, *sample_rates, *channel_layouts;
823 ret = avfilter_graph_create_filter(&ofilter->filter,
824 avfilter_get_by_name("abuffersink"),
825 "out", NULL, NULL, fg->graph);
/* derive a default channel layout when only a channel count is known */
829 if (codec->channels && !codec->channel_layout)
830 codec->channel_layout = av_get_default_channel_layout(codec->channels);
832 sample_fmts = choose_sample_fmts(ost);
833 sample_rates = choose_sample_rates(ost);
834 channel_layouts = choose_channel_layouts(ost);
835 if (sample_fmts || sample_rates || channel_layouts) {
836 AVFilterContext *format;
/* build the aformat argument string from whichever lists are present */
841 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
844 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
847 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
851 av_freep(&sample_fmts);
852 av_freep(&sample_rates);
853 av_freep(&channel_layouts);
855 ret = avfilter_graph_create_filter(&format,
856 avfilter_get_by_name("aformat"),
857 "aformat", args, NULL, fg->graph);
861 ret = avfilter_link(last_filter, pad_idx, format, 0);
865 last_filter = format;
/* legacy -async option: emulate it with the asyncts filter */
869 if (audio_sync_method > 0) {
870 AVFilterContext *async;
874 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
875 "asyncts audio filter instead.\n");
877 if (audio_sync_method > 1)
878 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
879 "max_comp=%d:", audio_sync_method);
880 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
881 audio_drift_threshold);
883 ret = avfilter_graph_create_filter(&async,
884 avfilter_get_by_name("asyncts"),
885 "async", args, NULL, fg->graph);
889 ret = avfilter_link(last_filter, pad_idx, async, 0);
897 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Build a human-readable name ("filter[:pad]") for an InputFilter or
 * OutputFilter, written into f->name via a dynamic buffer. */
903 #define DESCRIBE_FILTER_LINK(f, inout, in) \
905 AVFilterContext *ctx = inout->filter_ctx; \
906 AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads; \
907 int nb_pads = in ? ctx->input_count : ctx->output_count; \
910 if (avio_open_dyn_buf(&pb) < 0) \
913 avio_printf(pb, "%s", ctx->filter->name); \
915 avio_printf(pb, ":%s", pads[inout->pad_idx].name); \
917 avio_close_dyn_buf(pb, &f->name); \
/* Dispatch an output pad to the media-type-specific configure routine. */
920 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
922 av_freep(&ofilter->name);
923 DESCRIBE_FILTER_LINK(ofilter, out, 0);
925 switch (out->filter_ctx->output_pads[out->pad_idx].type) {
926 case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
927 case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
928 default: av_assert0(0);
/* Create the "buffer" source feeding ifilter from its InputStream and link
 * it (optionally through a "setpts" filter when -r forces a frame rate)
 * to the graph's input pad.  Returns 0 or a negative AVERROR code. */
932 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
935 AVFilterContext *first_filter = in->filter_ctx;
936 AVFilter *filter = avfilter_get_by_name("buffer");
937 InputStream *ist = ifilter->ist;
/* a forced frame rate overrides the stream time base (inverted: den/num) */
938 AVRational tb = ist->framerate.num ? (AVRational){ist->framerate.den,
939 ist->framerate.num} :
943 int pad_idx = in->pad_idx;
/* prefer the container's aspect ratio over the codec's */
946 sar = ist->st->sample_aspect_ratio.num ?
947 ist->st->sample_aspect_ratio :
948 ist->st->codec->sample_aspect_ratio;
949 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
950 ist->st->codec->height, ist->st->codec->pix_fmt,
951 tb.num, tb.den, sar.num, sar.den);
953 if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, in->name,
954 args, NULL, fg->graph)) < 0)
957 if (ist->framerate.num) {
958 AVFilterContext *setpts;
960 if ((ret = avfilter_graph_create_filter(&setpts,
961 avfilter_get_by_name("setpts"),
966 if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
969 first_filter = setpts;
973 if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
/* Create the "abuffer" source feeding ifilter from its InputStream and
 * link it (optionally through "asyncts" for the deprecated -async option)
 * to the graph's input pad.  Returns 0 or a negative AVERROR code. */
978 static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
981 AVFilterContext *first_filter = in->filter_ctx;
982 AVFilter *filter = avfilter_get_by_name("abuffer");
983 InputStream *ist = ifilter->ist;
984 int pad_idx = in->pad_idx;
988 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
989 ":channel_layout=0x%"PRIx64,
990 ist->st->time_base.num, ist->st->time_base.den,
991 ist->st->codec->sample_rate,
992 av_get_sample_fmt_name(ist->st->codec->sample_fmt),
993 ist->st->codec->channel_layout);
995 if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
996 in->name, args, NULL,
/* legacy -async option: emulate it with the asyncts filter */
1000 if (audio_sync_method > 0) {
1001 AVFilterContext *async;
1005 av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
1006 "asyncts audio filter instead.\n");
1008 if (audio_sync_method > 1)
1009 len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
1010 "max_comp=%d:", audio_sync_method);
1011 snprintf(args + len, sizeof(args) - len, "min_delta=%f",
1012 audio_drift_threshold);
1014 ret = avfilter_graph_create_filter(&async,
1015 avfilter_get_by_name("asyncts"),
1016 "async", args, NULL, fg->graph);
1020 ret = avfilter_link(async, 0, first_filter, pad_idx);
1024 first_filter = async;
1027 if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
/* Dispatch an input pad to the media-type-specific configure routine. */
1033 static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
1036 av_freep(&ifilter->name);
1037 DESCRIBE_FILTER_LINK(ifilter, in, 1);
1039 switch (in->filter_ctx->input_pads[in->pad_idx].type) {
1040 case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
1041 case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
1042 default: av_assert0(0);
/* (Re)build fg: parse the graph description (for a simple graph this is
 * the stream's -filter string), configure every input, and either finish
 * output configuration immediately or stash unmapped outputs in out_tmp
 * until the stream maps are processed.  Returns 0 or a negative AVERROR. */
1046 static int configure_filtergraph(FilterGraph *fg)
1048 AVFilterInOut *inputs, *outputs, *cur;
1049 int ret, i, init = !fg->graph, simple = !fg->graph_desc;
1050 const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
1053 avfilter_graph_free(&fg->graph);
1054 if (!(fg->graph = avfilter_graph_alloc()))
1055 return AVERROR(ENOMEM);
/* propagate the output stream's sws flags to auto-inserted scalers */
1058 OutputStream *ost = fg->outputs[0]->ost;
1060 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
1061 fg->graph->scale_sws_opts = av_strdup(args);
1064 if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
1067 if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
1068 av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' does not have "
1069 "exactly one input and output.\n", graph_desc);
1070 return AVERROR(EINVAL);
/* on first configuration of a complex graph, bind pads to input streams */
1073 for (cur = inputs; !simple && init && cur; cur = cur->next)
1074 init_input_filter(fg, cur);
1076 for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1077 if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0)
1079 avfilter_inout_free(&inputs);
1081 if (!init || simple) {
1082 /* we already know the mappings between lavfi outputs and output streams,
1083 * so we can finish the setup */
1084 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1085 configure_output_filter(fg, fg->outputs[i], cur);
1086 avfilter_inout_free(&outputs);
1088 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1091 /* wait until output mappings are processed */
1092 for (cur = outputs; cur;) {
1093 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
1094 &fg->nb_outputs, fg->nb_outputs + 1);
1095 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
1097 fg->outputs[fg->nb_outputs - 1]->graph = fg;
1098 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
1100 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
/* Configure every complex (explicitly described) graph not yet built. */
1107 static int configure_complex_filters(void)
1111 for (i = 0; i < nb_filtergraphs; i++)
1112 if (!filtergraphs[i]->graph &&
1113 (ret = configure_filtergraph(filtergraphs[i])) < 0)
/* Return whether ist feeds any input of fg. */
1118 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1121 for (i = 0; i < fg->nb_inputs; i++)
1122 if (fg->inputs[i]->ist == ist)
/* Flush the status line on exit; the empty QUIET log forces a flush. */
1127 static void term_exit(void)
1129 av_log(NULL, AV_LOG_QUIET, "");
1132 static volatile int received_sigterm = 0;
1133 static volatile int received_nb_signals = 0;
/* Async-signal-safe handler: only records which signal arrived and how
 * many signals were seen; the main loop reacts to these flags. */
1136 sigterm_handler(int sig)
1138 received_sigterm = sig;
1139 received_nb_signals++;
/* Install the termination signal handlers. */
1143 static void term_init(void)
1145 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
1146 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
1148 signal(SIGXCPU, sigterm_handler);
/* AVIO interrupt callback: abort blocking I/O after a second signal. */
1152 static int decode_interrupt_cb(void *ctx)
1154 return received_nb_signals > 1;
1157 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Free all global state (filtergraphs, output files/streams, input
 * files/streams, vstats), deinit networking, log a pending signal, and
 * terminate the process with ret. */
1159 void exit_program(int ret)
1163 for (i = 0; i < nb_filtergraphs; i++) {
1164 avfilter_graph_free(&filtergraphs[i]->graph);
1165 for (j = 0; j < filtergraphs[i]->nb_inputs; j++) {
1166 av_freep(&filtergraphs[i]->inputs[j]->name);
1167 av_freep(&filtergraphs[i]->inputs[j]);
1169 av_freep(&filtergraphs[i]->inputs);
1170 for (j = 0; j < filtergraphs[i]->nb_outputs; j++) {
1171 av_freep(&filtergraphs[i]->outputs[j]->name);
1172 av_freep(&filtergraphs[i]->outputs[j]);
1174 av_freep(&filtergraphs[i]->outputs);
1175 av_freep(&filtergraphs[i]);
1177 av_freep(&filtergraphs);
/* close files in reverse order: output muxers first */
1180 for (i = 0; i < nb_output_files; i++) {
1181 AVFormatContext *s = output_files[i]->ctx;
1182 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1184 avformat_free_context(s);
1185 av_dict_free(&output_files[i]->opts);
1186 av_freep(&output_files[i]);
1188 for (i = 0; i < nb_output_streams; i++) {
1189 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1191 AVBitStreamFilterContext *next = bsfc->next;
1192 av_bitstream_filter_close(bsfc);
1195 output_streams[i]->bitstream_filters = NULL;
1197 av_freep(&output_streams[i]->avfilter);
1198 av_freep(&output_streams[i]->filtered_frame);
1199 av_freep(&output_streams[i]);
1201 for (i = 0; i < nb_input_files; i++) {
1202 avformat_close_input(&input_files[i]->ctx);
1203 av_freep(&input_files[i]);
1205 for (i = 0; i < nb_input_streams; i++) {
1206 av_freep(&input_streams[i]->decoded_frame);
1207 av_dict_free(&input_streams[i]->opts);
1208 free_buffer_pool(input_streams[i]);
1209 av_freep(&input_streams[i]->filters);
1210 av_freep(&input_streams[i]);
1214 fclose(vstats_file);
1215 av_free(vstats_filename);
1217 av_freep(&input_streams);
1218 av_freep(&input_files);
1219 av_freep(&output_streams);
1220 av_freep(&output_files);
1225 avformat_network_deinit();
1227 if (received_sigterm) {
1228 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1229 (int) received_sigterm);
/* Abort (fatal log) if any option in m was not consumed by the library —
 * i.e. the user supplied an option no component recognized. */
1236 static void assert_avoptions(AVDictionary *m)
1238 AVDictionaryEntry *t;
1239 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1240 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Refuse to use an experimental codec unless -strict experimental was
 * given; suggest a non-experimental alternative when one exists. */
1245 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1247 const char *codec_string = encoder ? "encoder" : "decoder";
1249 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1250 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1251 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1252 "results.\nAdd '-strict experimental' if you want to use it.\n",
1253 codec_string, c->codec->name);
1254 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1255 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1256 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1257 codec_string, codec->name);
1263 * Update the requested input sample format based on the output sample format.
1264 * This is currently only used to request float output from decoders which
1265 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1266 * Ideally this will be removed in the future when decoders do not do format
1267 * conversion and only output in their native format.
1269 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1270 AVCodecContext *enc)
1272 /* if sample formats match or a decoder sample format has already been
1273 requested, just return */
1274 if (enc->sample_fmt == dec->sample_fmt ||
1275 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1278 /* if decoder supports more than one output format */
1279 if (dec_codec && dec_codec->sample_fmts &&
1280 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1281 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1282 const enum AVSampleFormat *p;
1283 int min_dec = -1, min_inc = -1;
1285 /* find a matching sample format in the encoder */
1286 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1287 if (*p == enc->sample_fmt) {
1288 dec->request_sample_fmt = *p;
1290 } else if (*p > enc->sample_fmt) {
1291 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1293 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1296 /* if none match, provide the one that matches quality closest */
1297 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1298 enc->sample_fmt - min_dec;
/*
 * Write one encoded packet to the muxer for output stream 'ost'.
 * Runs the packet through the stream's bitstream-filter chain (if any),
 * enforces the -frames limit for non-video streams, then submits the packet
 * via av_interleaved_write_frame().
 * NOTE(review): this excerpt is garbled — lines are elided and original line
 * numbers are fused into the text; braces/returns are missing below.
 */
1302 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1304     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1305     AVCodecContext *avctx = ost->st->codec;
1309      * Audio encoders may split the packets -- #frames in != #packets out.
1310      * But there is no reordering, so we can limit the number of output packets
1311      * by simply dropping them here.
1312      * Counting encoded video frames needs to be done separately because of
1313      * reordering, see do_video_out()
/* drop packets beyond -frames for everything except encoded video
 * (video frame counting is handled in do_video_out() because of reordering) */
1315     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1316         if (ost->frame_number >= ost->max_frames) {
1317             av_free_packet(pkt);
1320         ost->frame_number++;
/* bitstream-filter loop: each filter may allocate a replacement packet */
1324         AVPacket new_pkt = *pkt;
1325         int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1326                                            &new_pkt.data, &new_pkt.size,
1327                                            pkt->data, pkt->size,
1328                                            pkt->flags & AV_PKT_FLAG_KEY);
/* presumably a > 0 means the filter allocated new data — free the old
 * packet and mark the new one as owning its buffer; TODO confirm (branching
 * elided in this excerpt) */
1330             av_free_packet(pkt);
1331             new_pkt.destruct = av_destruct_packet;
1333             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1334                    bsfc->filter->name, pkt->stream_index,
1335                    avctx->codec ? avctx->codec->name : "copy");
1345     pkt->stream_index = ost->index;
1346     ret = av_interleaved_write_frame(s, pkt);
1348         print_error("av_interleaved_write_frame()", ret);
/*
 * Check whether output stream 'ost' is still within the -t recording limit.
 * Marks the stream as past the limit when its sync_opts (relative to
 * first_pts, in the encoder time base) has reached of->recording_time.
 * NOTE(review): the return statements are elided in this excerpt —
 * presumably returns 0 when past the limit and 1 otherwise; confirm.
 */
1353 static int check_recording_time(OutputStream *ost)
1355     OutputFile *of = output_files[ost->file_index];
1357     if (of->recording_time != INT64_MAX &&
1358         av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1359                       AV_TIME_BASE_Q) >= 0) {
1360         ost->is_past_recording_time = 1;
/*
 * Encode one audio frame and write the resulting packet(s) to the muxer.
 * Packet timestamps are rescaled from the encoder time base to the stream
 * time base before writing.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1366 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1369     AVCodecContext *enc = ost->st->codec;
1373     av_init_packet(&pkt);
1377     if (!check_recording_time(ost))
/* without a valid pts (or with negative audio_sync_method) fall back to the
 * running sample counter */
1380     if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1381         frame->pts = ost->sync_opts;
1382     ost->sync_opts = frame->pts + frame->nb_samples;
1384     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1385         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* rescale packet timing from encoder time base to the stream time base */
1390     if (pkt.pts != AV_NOPTS_VALUE)
1391         pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1392     if (pkt.dts != AV_NOPTS_VALUE)
1393         pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1394     if (pkt.duration > 0)
1395         pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1397     write_frame(s, &pkt, ost);
1399     audio_size += pkt.size;
/*
 * Optionally deinterlace a decoded picture in place before it is resized or
 * encoded.  When -deinterlace is active, a temporary picture is allocated,
 * deinterlaced into, and copied back over the original on success.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers);
 * presumably *bufp receives the temporary buffer for later freeing —
 * the assignment is elided here, confirm against the full source.
 */
1403 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1405     AVCodecContext *dec;
1406     AVPicture *picture2;
1407     AVPicture picture_tmp;
1410     dec = ist->st->codec;
1412     /* deinterlace : must be done before any resize */
1413     if (do_deinterlace) {
1416         /* create temporary picture */
1417         size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1418         buf = av_malloc(size);
1422         picture2 = &picture_tmp;
1423         avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1425         if (avpicture_deinterlace(picture2, picture,
1426                                   dec->pix_fmt, dec->width, dec->height) < 0) {
1427             /* if error, do not deinterlace */
1428             av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
/* copy the deinterlaced result back over the caller's picture */
1437     if (picture != picture2)
1438         *picture = *picture2;
/*
 * Encode one decoded subtitle and write the resulting packet(s).
 * DVB subtitles are encoded twice (draw + clear), hence the 'nb' loop.
 * Timestamps come from the input stream time base; display times are folded
 * into the pts so start_display_time can be normalized to 0.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1442 static void do_subtitle_out(AVFormatContext *s,
1448     static uint8_t *subtitle_out = NULL;
1449     int subtitle_out_max_size = 1024 * 1024;
1450     int subtitle_out_size, nb, i;
1451     AVCodecContext *enc;
1454     if (pts == AV_NOPTS_VALUE) {
1455         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1461     enc = ost->st->codec;
/* lazily allocate the (static, reused) encode buffer */
1463     if (!subtitle_out) {
1464         subtitle_out = av_malloc(subtitle_out_max_size);
1467     /* Note: DVB subtitle need one packet to draw them and one other
1468        packet to clear them */
1469     /* XXX: signal it in the codec context ? */
1470     if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1475     for (i = 0; i < nb; i++) {
1476         ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1477         if (!check_recording_time(ost))
1480         sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1481         // start_display_time is required to be 0
1482         sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1483         sub->end_display_time  -= sub->start_display_time;
1484         sub->start_display_time = 0;
1485         subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1486                                                     subtitle_out_max_size, sub);
1487         if (subtitle_out_size < 0) {
1488             av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1492         av_init_packet(&pkt);
1493         pkt.data = subtitle_out;
1494         pkt.size = subtitle_out_size;
1495         pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1496         if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1497             /* XXX: the pts correction is handled here. Maybe handling
1498                it in the codec would be better */
/* 90 == 90kHz MPEG clock ticks per millisecond of display time */
1500                 pkt.pts += 90 * sub->start_display_time;
1502                 pkt.pts += 90 * sub->end_display_time;
1504         write_frame(s, &pkt, ost);
/*
 * Encode (or pass through, for AVFMT_RAWPICTURE + rawvideo) one video frame
 * and write the resulting packet.  Handles video-sync policy selection
 * (passthrough / CFR / VFR), frame dropping when timestamps run backwards,
 * forced keyframes and two-pass log output.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers) —
 * control flow below is fragmentary.
 */
1508 static void do_video_out(AVFormatContext *s,
1510                          AVFrame *in_picture,
1511                          int *frame_size, float quality)
1513     int ret, format_video_sync;
1515     AVCodecContext *enc = ost->st->codec;
/* resolve VSYNC_AUTO from the output format's capabilities */
1519     format_video_sync = video_sync_method;
1520     if (format_video_sync == VSYNC_AUTO)
1521         format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1522                             (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
/* drop frames whose pts is behind the running output clock */
1523     if (format_video_sync != VSYNC_PASSTHROUGH &&
1524         ost->frame_number &&
1525         in_picture->pts != AV_NOPTS_VALUE &&
1526         in_picture->pts < ost->sync_opts) {
1528         av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1532     if (in_picture->pts == AV_NOPTS_VALUE)
1533         in_picture->pts = ost->sync_opts;
1534     ost->sync_opts = in_picture->pts;
1537     if (!ost->frame_number)
1538         ost->first_pts = in_picture->pts;
1540     av_init_packet(&pkt);
1544     if (!check_recording_time(ost) ||
1545         ost->frame_number >= ost->max_frames)
/* raw-picture shortcut: the AVPicture struct itself is muxed, no encode */
1548     if (s->oformat->flags & AVFMT_RAWPICTURE &&
1549         enc->codec->id == CODEC_ID_RAWVIDEO) {
1550         /* raw pictures are written as AVPicture structure to
1551            avoid any copies. We support temporarily the older
1553         enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1554         enc->coded_frame->top_field_first  = in_picture->top_field_first;
1555         pkt.data   = (uint8_t *)in_picture;
1556         pkt.size   =  sizeof(AVPicture);
1557         pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1558         pkt.flags |= AV_PKT_FLAG_KEY;
1560         write_frame(s, &pkt, ost);
1563         AVFrame big_picture;
1565         big_picture = *in_picture;
1566         /* better than nothing: use input picture interlaced
1568         big_picture.interlaced_frame = in_picture->interlaced_frame;
1569         if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1570             if (ost->top_field_first == -1)
1571                 big_picture.top_field_first = in_picture->top_field_first;
1573                 big_picture.top_field_first = !!ost->top_field_first;
1576         /* handles same_quant here. This is not correct because it may
1577            not be a global option */
1578         big_picture.quality = quality;
1579         if (!enc->me_threshold)
1580             big_picture.pict_type = 0;
/* honor -force_key_frames timestamps */
1581         if (ost->forced_kf_index < ost->forced_kf_count &&
1582             big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1583             big_picture.pict_type = AV_PICTURE_TYPE_I;
1584             ost->forced_kf_index++;
1586         ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1588             av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* rescale packet timing from encoder time base to the stream time base */
1593             if (pkt.pts != AV_NOPTS_VALUE)
1594                 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1595             if (pkt.dts != AV_NOPTS_VALUE)
1596                 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1598             write_frame(s, &pkt, ost);
1599             *frame_size = pkt.size;
1600             video_size += pkt.size;
1602             /* if two pass, output log */
1603             if (ost->logfile && enc->stats_out) {
1604                 fprintf(ost->logfile, "%s", enc->stats_out);
1610      * For video, number of frames in == number of packets out.
1611      * But there may be reordering, so we can't throw away frames on encoder
1612      * flush, we need to limit them here, before they go into encoder.
1614     ost->frame_number++;
1617 static double psnr(double d)
1619 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame statistics (-vstats) to the global
 * vstats_file: frame number, quantizer, optional PSNR, frame size,
 * cumulative size, time, instantaneous and average bitrate, picture type.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1622 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1625     AVCodecContext *enc;
1627     double ti1, bitrate, avg_bitrate;
1629     /* this is executed just the first time do_video_stats is called */
1631         vstats_file = fopen(vstats_filename, "w");
1638     enc = ost->st->codec;
1639     if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1640         frame_number = ost->frame_number;
1641         fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1642         if (enc->flags&CODEC_FLAG_PSNR)
/* error[0] is the luma SSE; normalize by pixel count * 255^2 to get MSE */
1643             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1645         fprintf(vstats_file,"f_size= %6d ", frame_size);
1646         /* compute pts value */
1647         ti1 = ost->sync_opts * av_q2d(enc->time_base);
1651         bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1652         avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1653         fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1654                 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1655         fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1659 /* check for new output on any of the filtergraphs */
/*
 * Drain every output stream's filtergraph sink: pull filtered buffers,
 * convert them to AVFrames, rescale their pts into the encoder time base
 * (minus the output file's start_time), and dispatch to do_video_out() /
 * do_audio_out().  Returns 0 or a negative AVERROR.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1660 static int poll_filters(void)
1662     AVFilterBufferRef *picref;
1663     AVFrame *filtered_frame = NULL;
1666     for (i = 0; i < nb_output_streams; i++) {
1667         OutputStream *ost = output_streams[i];
1668         OutputFile    *of = output_files[ost->file_index];
/* lazily allocate the reusable frame holder for this stream */
1674         if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1675             return AVERROR(ENOMEM);
1677             avcodec_get_frame_defaults(ost->filtered_frame);
1678         filtered_frame = ost->filtered_frame;
1680         while (ret >= 0 && !ost->is_past_recording_time) {
/* fixed-frame-size audio encoders need exactly frame_size samples per pull */
1681             if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1682                 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1683                 ret = av_buffersink_read_samples(ost->filter->filter, &picref,
1684                                                  ost->st->codec->frame_size);
1686                 ret = av_buffersink_read(ost->filter->filter, &picref);
1691             avfilter_copy_buf_props(filtered_frame, picref);
1692             if (picref->pts != AV_NOPTS_VALUE)
1693                 filtered_frame->pts = av_rescale_q(picref->pts,
1694                                                    ost->filter->filter->inputs[0]->time_base,
1695                                                    ost->st->codec->time_base) -
1696                                       av_rescale_q(of->start_time,
1698                                                    ost->st->codec->time_base);
/* skip frames before -ss on the output side */
1700             if (of->start_time && filtered_frame->pts < of->start_time) {
1701                 avfilter_unref_buffer(picref);
1705             switch (ost->filter->filter->inputs[0]->type) {
1706             case AVMEDIA_TYPE_VIDEO:
1707                 if (!ost->frame_aspect_ratio)
1708                     ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1710                 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1711                              same_quant ? ost->last_quality :
1712                                           ost->st->codec->global_quality);
1713                 if (vstats_filename && frame_size)
1714                     do_video_stats(of->ctx, ost, frame_size);
1716             case AVMEDIA_TYPE_AUDIO:
1717                 do_audio_out(of->ctx, ost, filtered_frame);
1720                 // TODO support subtitle filters
1724             avfilter_unref_buffer(picref);
/*
 * Print the one-line progress report (frame count, fps, q, size, time,
 * bitrate, dup/drop counts) to the log, at most every 0.5 seconds, plus the
 * final per-type size summary when is_last_report is set.  Optionally emits
 * a quantizer histogram and PSNR stats when enabled.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1730 static void print_report(int is_last_report, int64_t timer_start)
1734     AVFormatContext *oc;
1736     AVCodecContext *enc;
1737     int frame_number, vid, i;
1738     double bitrate, ti1, pts;
1739     static int64_t last_time = -1;
1740     static int qp_histogram[52];
1742     if (!print_stats && !is_last_report)
1745     if (!is_last_report) {
1747         /* display the report every 0.5 seconds */
1748         cur_time = av_gettime();
1749         if (last_time == -1) {
1750             last_time = cur_time;
1753         if ((cur_time - last_time) < 500000)
1755         last_time = cur_time;
1759     oc = output_files[0]->ctx;
1761     total_size = avio_size(oc->pb);
1762     if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1763         total_size = avio_tell(oc->pb);
1768     for (i = 0; i < nb_output_streams; i++) {
1770         ost = output_streams[i];
1771         enc = ost->st->codec;
1772         if (!ost->stream_copy && enc->coded_frame)
1773             q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* NOTE(review): the two branches below test 'vid' and '!vid' — 'vid' marks
 * whether a video stream was already reported (set in elided lines) */
1774         if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1775             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1777         if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1778             float t = (av_gettime() - timer_start) / 1000000.0;
1780             frame_number = ost->frame_number;
1781             snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1782                      frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1784                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* accumulate and render a log2-scaled quantizer histogram (qp_hist) */
1788                 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1790                 for (j = 0; j < 32; j++)
1791                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1793             if (enc->flags&CODEC_FLAG_PSNR) {
1795                 double error, error_sum = 0;
1796                 double scale, scale_sum = 0;
1797                 char type[3] = { 'Y','U','V' };
1798                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1799                 for (j = 0; j < 3; j++) {
1800                     if (is_last_report) {
1801                         error = enc->error[j];
1802                         scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1804                         error = enc->coded_frame->error[j];
1805                         scale = enc->width * enc->height * 255.0 * 255.0;
1811                     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1813                 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1817         /* compute min output value */
1818         pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1819         if ((pts < ti1) && (pts > 0))
1825     bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1827     snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1828             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1829             (double)total_size / 1024, ti1, bitrate);
1831     if (nb_frames_dup || nb_frames_drop)
1832         snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1833                 nb_frames_dup, nb_frames_drop);
1835     av_log(NULL, AV_LOG_INFO, "%s    \r", buf);
/* final summary: per-type byte counts and muxing overhead percentage */
1839     if (is_last_report) {
1840         int64_t raw= audio_size + video_size + extra_size;
1841         av_log(NULL, AV_LOG_INFO, "\n");
1842         av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1843                video_size / 1024.0,
1844                audio_size / 1024.0,
1845                extra_size / 1024.0,
1846                100.0 * (total_size - raw) / raw
/*
 * Flush every encoding output stream at EOF: repeatedly call the encoder
 * with a NULL frame until it stops producing packets, writing each drained
 * packet (with rescaled timestamps) to the muxer.  Streams that don't
 * encode, 1-sample audio codecs and rawvideo passthrough are skipped.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1851 static void flush_encoders(void)
1855     for (i = 0; i < nb_output_streams; i++) {
1856         OutputStream   *ost = output_streams[i];
1857         AVCodecContext *enc = ost->st->codec;
1858         AVFormatContext *os = output_files[ost->file_index]->ctx;
1859         int stop_encoding = 0;
1861         if (!ost->encoding_needed)
1864         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1866         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
/* select the drain function matching the stream's media type */
1870             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1874             switch (ost->st->codec->codec_type) {
1875             case AVMEDIA_TYPE_AUDIO:
1876                 encode = avcodec_encode_audio2;
1880             case AVMEDIA_TYPE_VIDEO:
1881                 encode = avcodec_encode_video2;
1892                 av_init_packet(&pkt);
/* NULL frame == flush request to the encoder */
1896                 ret = encode(enc, &pkt, NULL, &got_packet);
1898                     av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1902                 if (ost->logfile && enc->stats_out) {
1903                     fprintf(ost->logfile, "%s", enc->stats_out);
1909                 if (pkt.pts != AV_NOPTS_VALUE)
1910                     pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1911                 if (pkt.dts != AV_NOPTS_VALUE)
1912                     pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1913                 write_frame(os, &pkt, ost);
1923  * Check whether a packet from ist should be written into ost at this time
/*
 * NOTE(review): the return statements are elided in this excerpt —
 * presumably returns 0 when the packet must not be written (wrong source
 * stream, or before the output -ss start time) and 1 otherwise; confirm.
 */
1925 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1927     OutputFile *of = output_files[ost->file_index];
1928     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
1930     if (ost->source_index != ist_index)
1933     if (of->start_time && ist->last_dts < of->start_time)
/*
 * Copy one input packet to the output without re-encoding (-codec copy).
 * Rescales timestamps into the output stream time base, subtracts the
 * output start time, applies the -t limit, and runs the parser-based
 * bitstream fixup for a few codecs (see FIXME below).
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
1939 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1941     OutputFile *of = output_files[ost->file_index];
1942     int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1945     av_init_packet(&opkt);
/* optionally skip leading non-keyframes so the copy starts decodable */
1947     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1948         !ost->copy_initial_nonkeyframes)
1951     if (of->recording_time != INT64_MAX &&
1952         ist->last_dts >= of->recording_time + of->start_time) {
1953         ost->is_past_recording_time = 1;
1957     /* force the input stream PTS */
1958     if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1959         audio_size += pkt->size;
1960     else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1961         video_size += pkt->size;
1965     if (pkt->pts != AV_NOPTS_VALUE)
1966         opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1968         opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked input dts when the packet carries none */
1970     if (pkt->dts == AV_NOPTS_VALUE)
1971         opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1973         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1974     opkt.dts -= ost_tb_start_time;
1976     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1977     opkt.flags    = pkt->flags;
1979     // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1980     if (  ost->st->codec->codec_id != CODEC_ID_H264
1981        && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1982        && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1983        && ost->st->codec->codec_id != CODEC_ID_VC1
1985         if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1986             opkt.destruct = av_destruct_packet;
1988         opkt.data = pkt->data;
1989         opkt.size = pkt->size;
1992     write_frame(of->ctx, &opkt, ost);
1993     ost->st->codec->frame_number++;
1994     av_free_packet(&opkt);
/*
 * When -re (rate emulation) is enabled for the input file, sleep so that
 * packets are consumed no faster than real time (comparing the stream's
 * last dts, rescaled to microseconds, against wall-clock elapsed time).
 * NOTE(review): the actual sleep call is elided in this excerpt.
 */
1997 static void rate_emu_sleep(InputStream *ist)
1999     if (input_files[ist->file_index]->rate_emu) {
2000         int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2001         int64_t now = av_gettime() - ist->start;
/*
 * If the decoder reported no channel layout, guess a default one from the
 * channel count and warn about the guess.
 * NOTE(review): return statements are elided in this excerpt — presumably
 * returns 0 when no default layout exists and 1 otherwise; confirm.
 */
2007 static int guess_input_channel_layout(InputStream *ist)
2009     AVCodecContext *dec = ist->st->codec;
2011     if (!dec->channel_layout) {
2012         char layout_name[256];
2014         dec->channel_layout = av_get_default_channel_layout(dec->channels);
2015         if (!dec->channel_layout)
2017         av_get_channel_layout_string(layout_name, sizeof(layout_name),
2018                                      dec->channels, dec->channel_layout);
2019         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2020                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Decode one audio packet into ist->decoded_frame, apply the -vol volume
 * adjustment in-place, detect sample-rate/format/channel-layout changes
 * (reconfiguring affected filtergraphs), and feed the frame to every
 * buffer-source filter attached to this input stream.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
2025 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2027     AVFrame *decoded_frame;
2028     AVCodecContext *avctx = ist->st->codec;
2029     int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2030     int i, ret, resample_changed;
2032     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2033         return AVERROR(ENOMEM);
2035         avcodec_get_frame_defaults(ist->decoded_frame);
2036     decoded_frame = ist->decoded_frame;
2038     ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2044         /* no audio frame */
/* a NULL buffer signals EOF/flush to each buffer source */
2046             for (i = 0; i < ist->nb_filters; i++)
2047                 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2051     /* if the decoder provides a pts, use it instead of the last packet pts.
2052        the decoder could be delaying output by a packet or more. */
2053     if (decoded_frame->pts != AV_NOPTS_VALUE)
2054         ist->next_dts = decoded_frame->pts;
2055     else if (pkt->pts != AV_NOPTS_VALUE) {
2056         decoded_frame->pts = pkt->pts;
2057         pkt->pts           = AV_NOPTS_VALUE;
2060     // preprocess audio (volume)
2061     if (audio_volume != 256) {
2062         int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2063         void *samples = decoded_frame->data[0];
/* per-format fixed-point scaling: sample * audio_volume / 256, rounded */
2064         switch (avctx->sample_fmt) {
2065         case AV_SAMPLE_FMT_U8:
2067             uint8_t *volp = samples;
2068             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2069                 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2070                 *volp++ = av_clip_uint8(v);
2074         case AV_SAMPLE_FMT_S16:
2076             int16_t *volp = samples;
2077             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2078                 int v = ((*volp) * audio_volume + 128) >> 8;
2079                 *volp++ = av_clip_int16(v);
2083         case AV_SAMPLE_FMT_S32:
2085             int32_t *volp = samples;
2086             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2087                 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2088                 *volp++ = av_clipl_int32(v);
2092         case AV_SAMPLE_FMT_FLT:
2094             float *volp = samples;
2095             float scale = audio_volume / 256.f;
2096             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2101         case AV_SAMPLE_FMT_DBL:
2103             double *volp = samples;
2104             double scale = audio_volume / 256.;
2105             for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2111             av_log(NULL, AV_LOG_FATAL,
2112                    "Audio volume adjustment on sample format %s is not supported.\n",
2113                    av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2118     rate_emu_sleep(ist);
/* detect any change in the decoded audio parameters vs. what the
 * filtergraph was configured for */
2120     resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
2121                        ist->resample_channels       != avctx->channels               ||
2122                        ist->resample_channel_layout != decoded_frame->channel_layout ||
2123                        ist->resample_sample_rate    != decoded_frame->sample_rate;
2124     if (resample_changed) {
2125         char layout1[64], layout2[64];
2127             if (!guess_input_channel_layout(ist)) {
2128                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2129                        "layout for  Input Stream #%d.%d\n", ist->file_index,
2133             decoded_frame->channel_layout = avctx->channel_layout;
2135         av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2136                                      ist->resample_channel_layout);
2137         av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2138                                      decoded_frame->channel_layout);
2140         av_log(NULL, AV_LOG_INFO,
2141                "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2142                ist->file_index, ist->st->index,
2143                ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
2144                ist->resample_channels, layout1,
2145                decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2146                avctx->channels, layout2);
2148         ist->resample_sample_fmt     = decoded_frame->format;
2149         ist->resample_sample_rate    = decoded_frame->sample_rate;
2150         ist->resample_channel_layout = decoded_frame->channel_layout;
2151         ist->resample_channels       = avctx->channels;
/* reconfigure every filtergraph that consumes this input stream */
2153         for (i = 0; i < nb_filtergraphs; i++)
2154             if (ist_in_filtergraph(filtergraphs[i], ist) &&
2155                 configure_filtergraph(filtergraphs[i]) < 0) {
2156                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2161     for (i = 0; i < ist->nb_filters; i++)
2162         av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
/*
 * Decode one video packet into ist->decoded_frame, fix up its pts via the
 * pts-correction context, optionally deinterlace, detect size/pixel-format
 * changes (reconfiguring affected filtergraphs), and push the frame — as a
 * zero-copy buffer ref when DR1 is in use, otherwise by copy — into every
 * buffer-source filter attached to this input stream.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
2167 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2169     AVFrame *decoded_frame;
2170     void *buffer_to_free = NULL;
2171     int i, ret = 0, resample_changed;
2174     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2175         return AVERROR(ENOMEM);
2177         avcodec_get_frame_defaults(ist->decoded_frame);
2178     decoded_frame = ist->decoded_frame;
2180     ret = avcodec_decode_video2(ist->st->codec,
2181                                 decoded_frame, got_output, pkt);
2185     quality = same_quant ? decoded_frame->quality : 0;
2187         /* no picture yet */
/* a NULL buffer signals EOF/flush to each buffer source */
2189             for (i = 0; i < ist->nb_filters; i++)
2190                 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2193     decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2194                                            decoded_frame->pkt_dts);
2196     pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2198     rate_emu_sleep(ist);
/* container-level aspect ratio overrides the codec-level one */
2200     if (ist->st->sample_aspect_ratio.num)
2201         decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2203     resample_changed = ist->resample_width   != decoded_frame->width  ||
2204                        ist->resample_height  != decoded_frame->height ||
2205                        ist->resample_pix_fmt != decoded_frame->format;
2206     if (resample_changed) {
2207         av_log(NULL, AV_LOG_INFO,
2208                "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2209                ist->file_index, ist->st->index,
2210                ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
2211                decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2213         ist->resample_width   = decoded_frame->width;
2214         ist->resample_height  = decoded_frame->height;
2215         ist->resample_pix_fmt = decoded_frame->format;
2217         for (i = 0; i < nb_filtergraphs; i++)
2218             if (ist_in_filtergraph(filtergraphs[i], ist) &&
2219                 configure_filtergraph(filtergraphs[i]) < 0) {
2220                 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2225     for (i = 0; i < ist->nb_filters; i++) {
2226         // XXX what an ugly hack
2227         if (ist->filters[i]->graph->nb_outputs == 1)
2228             ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
/* DR1 path: wrap the decoder's buffer in a filter buffer ref (no copy) */
2230         if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2231             FrameBuffer      *buf = decoded_frame->opaque;
2232             AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2233                                         decoded_frame->data, decoded_frame->linesize,
2234                                         AV_PERM_READ | AV_PERM_PRESERVE,
2235                                         ist->st->codec->width, ist->st->codec->height,
2236                                         ist->st->codec->pix_fmt);
2238             avfilter_copy_frame_props(fb, decoded_frame);
2239             fb->buf->priv           = buf;
2240             fb->buf->free           = filter_release_buffer;
2243             av_buffersrc_buffer(ist->filters[i]->filter, fb);
2245             av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2248     av_free(buffer_to_free);
/*
 * Decode one subtitle packet and immediately encode/write it to every
 * output stream that takes this input stream and needs encoding.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
2252 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2254     AVSubtitle subtitle;
2255     int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2256                                           &subtitle, got_output, pkt);
2262     rate_emu_sleep(ist);
2264     for (i = 0; i < nb_output_streams; i++) {
2265         OutputStream *ost = output_streams[i];
2267         if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2270         do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
/* the decoded subtitle owns allocated rects — always release it */
2273     avsubtitle_free(&subtitle);
2277 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central demux-side dispatcher: feed one input packet (or EOF) through the
 * appropriate decoder — a single packet may contain several frames — while
 * maintaining the stream's dts prediction (last_dts/next_dts), then handle
 * the stream-copy outputs for the same packet.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
2278 static int output_packet(InputStream *ist, const AVPacket *pkt)
2284     if (ist->next_dts == AV_NOPTS_VALUE)
2285         ist->next_dts = ist->last_dts;
2289     av_init_packet(&avpkt);
2297     if (pkt->dts != AV_NOPTS_VALUE)
2298         ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2300     // while we have more to decode or while the decoder did output something on EOF
2301     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2305         ist->last_dts = ist->next_dts;
2307         if (avpkt.size && avpkt.size != pkt->size) {
2308             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2309                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2310             ist->showed_multi_packet_warning = 1;
2313         switch (ist->st->codec->codec_type) {
2314         case AVMEDIA_TYPE_AUDIO:
2315             ret = decode_audio    (ist, &avpkt, &got_output);
2317         case AVMEDIA_TYPE_VIDEO:
2318             ret = decode_video    (ist, &avpkt, &got_output);
/* advance next_dts: packet duration, else r_frame_rate, else the codec
 * time base times ticks_per_frame (accounting for parser repeat_pict) */
2320                 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2321             else if (ist->st->r_frame_rate.num)
2322                 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2323                                                               ist->st->r_frame_rate.num},
2325             else if (ist->st->codec->time_base.num != 0) {
2326                 int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2327                                                    ist->st->codec->ticks_per_frame;
2328                 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2331         case AVMEDIA_TYPE_SUBTITLE:
2332             ret = transcode_subtitles(ist, &avpkt, &got_output);
2340         // touch data and size only if not EOF
2350     /* handle stream copy */
2351     if (!ist->decoding_needed) {
2352         rate_emu_sleep(ist);
2353         ist->last_dts = ist->next_dts;
2354         switch (ist->st->codec->codec_type) {
2355         case AVMEDIA_TYPE_AUDIO:
2356             ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2357                              ist->st->codec->sample_rate;
2359         case AVMEDIA_TYPE_VIDEO:
2360             if (ist->st->codec->time_base.num != 0) {
2361                 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2362                 ist->next_dts += ((int64_t)AV_TIME_BASE *
2363                                   ist->st->codec->time_base.num * ticks) /
2364                                   ist->st->codec->time_base.den;
2369     for (i = 0; pkt && i < nb_output_streams; i++) {
2370         OutputStream *ost = output_streams[i];
2372         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2375         do_streamcopy(ist, ost, pkt);
/*
 * Print an SDP session description covering all output files to stdout
 * (used for RTP streaming setups).
 * NOTE(review): the NULL check for the av_malloc() result and the free of
 * 'avc' are elided in this excerpt; confirm against the full source.
 */
2381 static void print_sdp(void)
2385     AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2389     for (i = 0; i < nb_output_files; i++)
2390         avc[i] = output_files[i]->ctx;
2392     av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2393     printf("SDP:\n%s\n", sdp);
/*
 * Open the decoder for one input stream (when decoding is needed):
 * propagate the preferred sample format from the matching encoder(s),
 * install the DR1 buffer callbacks for capable video decoders, open the
 * codec with per-stream options, and initialize the dts/pts bookkeeping.
 * Returns 0 or a negative AVERROR, filling 'error' with a message.
 * NOTE(review): excerpt is garbled (elided lines, fused line numbers).
 */
2398 static int init_input_stream(int ist_index, char *error, int error_len)
2401     InputStream *ist = input_streams[ist_index];
2402     if (ist->decoding_needed) {
2403         AVCodec *codec = ist->dec;
2405             snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2406                     ist->st->codec->codec_id, ist->file_index, ist->st->index);
2407             return AVERROR(EINVAL);
2410         /* update requested sample format for the decoder based on the
2411            corresponding encoder sample format */
2412         for (i = 0; i < nb_output_streams; i++) {
2413             OutputStream *ost = output_streams[i];
2414             if (ost->source_index == ist_index) {
2415                 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
2420         if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2421             ist->st->codec->get_buffer     = codec_get_buffer;
2422             ist->st->codec->release_buffer = codec_release_buffer;
2423             ist->st->codec->opaque         = ist;
/* default to automatic thread count unless the user set one */
2426         if (!av_dict_get(ist->opts, "threads", NULL, 0))
2427             av_dict_set(&ist->opts, "threads", "auto", 0);
2428         if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2429             snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2430                     ist->file_index, ist->st->index);
2431             return AVERROR(EINVAL);
2433         assert_codec_experimental(ist->st->codec, 0);
2434         assert_avoptions(ist->opts);
/* start dts behind by the decoder's reorder delay (B-frame count) */
2437     ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2438     ist->next_dts = AV_NOPTS_VALUE;
2439     init_pts_correction(&ist->pts_ctx);
/*
 * Return the input stream feeding this output stream: directly via
 * source_index when set, otherwise the first input of the output's
 * filtergraph with a matching media type.
 * NOTE(review): the fallthrough return (no match) is elided in this
 * excerpt — presumably NULL; confirm against the full source.
 */
2445 static InputStream *get_input_stream(OutputStream *ost)
2447     if (ost->source_index >= 0)
2448         return input_streams[ost->source_index];
2451         FilterGraph *fg = ost->filter->graph;
2454         for (i = 0; i < fg->nb_inputs; i++)
2455             if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2456                 return fg->inputs[i]->ist;
/*
 * Prepare every stream for transcoding before the main loop runs:
 * copy codec parameters for stream-copy outputs, derive encoder
 * parameters from filtergraph outputs for encoded streams, open all
 * encoders, initialize input streams, and write all output headers.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this listing is elided (the embedded line numbers jump),
 * so comments below describe only the visible statements.
 */
2462 static int transcode_init(void)
2464     int ret = 0, i, j, k;
2465     AVFormatContext *oc;
2466     AVCodecContext *codec, *icodec;
/* Framerate emulation (-re): stamp each input stream with the wall-clock
 * start time so reads can later be throttled to realtime. */
2472     /* init framerate emulation */
2473     for (i = 0; i < nb_input_files; i++) {
2474         InputFile *ifile = input_files[i];
2475         if (ifile->rate_emu)
2476             for (j = 0; j < ifile->nb_streams; j++)
2477                 input_streams[j + ifile->ist_index]->start = av_gettime();
/* Reject output files with no streams unless the muxer explicitly
 * supports stream-less output (AVFMT_NOSTREAMS). */
2480     /* output stream init */
2481     for (i = 0; i < nb_output_files; i++) {
2482         oc = output_files[i]->ctx;
2483         if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2484             av_dump_format(oc, i, oc->filename, 1);
2485             av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2486             return AVERROR(EINVAL);
2490     /* init complex filtergraphs */
2491     for (i = 0; i < nb_filtergraphs; i++)
2492         if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2495     /* for each output stream, we compute the right encoding parameters */
2496     for (i = 0; i < nb_output_streams; i++) {
2497         ost = output_streams[i];
2498         oc  = output_files[ost->file_index]->ctx;
2499         ist = get_input_stream(ost);
2501         if (ost->attachment_filename)
2504         codec  = ost->st->codec;
2507             icodec = ist->st->codec;
2509             ost->st->disposition          = ist->st->disposition;
2510             codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
2511             codec->chroma_sample_location = icodec->chroma_sample_location;
/* Stream copy: clone the input codec parameters verbatim; no decoder or
 * encoder is involved for this stream. */
2514         if (ost->stream_copy) {
2515             uint64_t extra_size;
2517             av_assert0(ist && !ost->filter);
/* Guard against integer overflow before allocating the extradata copy;
 * padding bytes are required by the decoder contract. */
2519             extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2521             if (extra_size > INT_MAX) {
2522                 return AVERROR(EINVAL);
2525             /* if stream_copy is selected, no need to decode or encode */
2526             codec->codec_id   = icodec->codec_id;
2527             codec->codec_type = icodec->codec_type;
/* Keep the input codec tag only when the output container either has no
 * tag table or cannot express this codec id with its own tag. */
2529             if (!codec->codec_tag) {
2530                 if (!oc->oformat->codec_tag ||
2531                      av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2532                      av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2533                     codec->codec_tag = icodec->codec_tag;
2536             codec->bit_rate       = icodec->bit_rate;
2537             codec->rc_max_rate    = icodec->rc_max_rate;
2538             codec->rc_buffer_size = icodec->rc_buffer_size;
2539             codec->field_order    = icodec->field_order;
2540             codec->extradata      = av_mallocz(extra_size);
2541             if (!codec->extradata) {
2542                 return AVERROR(ENOMEM);
2544             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2545             codec->extradata_size = icodec->extradata_size;
/* Pick a time base: the codec time base scaled by ticks_per_frame,
 * reduced; the ist->st->time_base assignment below is on an elided
 * alternate branch. */
2547                 codec->time_base = icodec->time_base;
2548                 codec->time_base.num *= icodec->ticks_per_frame;
2549                 av_reduce(&codec->time_base.num, &codec->time_base.den,
2550                           codec->time_base.num, codec->time_base.den, INT_MAX);
2552                 codec->time_base = ist->st->time_base;
2554             switch (codec->codec_type) {
2555             case AVMEDIA_TYPE_AUDIO:
/* -vol needs decoded samples, which stream copy never produces. */
2556                 if (audio_volume != 256) {
2557                     av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2560                 codec->channel_layout     = icodec->channel_layout;
2561                 codec->sample_rate        = icodec->sample_rate;
2562                 codec->channels           = icodec->channels;
2563                 codec->frame_size         = icodec->frame_size;
2564                 codec->audio_service_type = icodec->audio_service_type;
2565                 codec->block_align        = icodec->block_align;
2567             case AVMEDIA_TYPE_VIDEO:
2568                 codec->pix_fmt            = icodec->pix_fmt;
2569                 codec->width              = icodec->width;
2570                 codec->height             = icodec->height;
2571                 codec->has_b_frames       = icodec->has_b_frames;
/* Prefer the container-level aspect ratio, then the codec-level one,
 * else leave it unset (0/1). */
2572                 if (!codec->sample_aspect_ratio.num) {
2573                     codec->sample_aspect_ratio   =
2574                     ost->st->sample_aspect_ratio =
2575                         ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2576                         ist->st->codec->sample_aspect_ratio.num ?
2577                         ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2580             case AVMEDIA_TYPE_SUBTITLE:
2581                 codec->width  = icodec->width;
2582                 codec->height = icodec->height;
2584             case AVMEDIA_TYPE_DATA:
2585             case AVMEDIA_TYPE_ATTACHMENT:
/* Encoding path: a missing ost->enc means automatic selection failed. */
2592                 /* should only happen when a default codec is not present. */
2593                 snprintf(error, sizeof(error), "Automatic encoder selection "
2594                          "failed for output stream #%d:%d. Default encoder for "
2595                          "format %s is probably disabled. Please choose an "
2596                          "encoder manually.\n", ost->file_index, ost->index,
2598                 ret = AVERROR(EINVAL);
2603                 ist->decoding_needed = 1;
2604             ost->encoding_needed = 1;
2607              * We want CFR output if and only if one of those is true:
2608              * 1) user specified output framerate with -r
2609              * 2) user specified -vsync cfr
2610              * 3) output format is CFR and the user didn't force vsync to
2611              *    something else than CFR
2613              * in such a case, set ost->frame_rate
2615             if (codec->codec_type == AVMEDIA_TYPE_VIDEO &&
2616                 !ost->frame_rate.num && ist &&
2617                 (video_sync_method ==  VSYNC_CFR ||
2618                  (video_sync_method ==  VSYNC_AUTO &&
2619                   !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
/* Fall back to 25 fps when the input exposes no usable frame rate, then
 * snap to the nearest rate the encoder supports (unless -force_fps). */
2620                 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2621                 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2622                     int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2623                     ost->frame_rate = ost->enc->supported_framerates[idx];
/* Build a simple (one input, one output) filtergraph for A/V streams
 * that do not already feed from a complex graph (condition elided). */
2628                 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2629                  codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2631                     fg = init_simple_filtergraph(ist, ost);
2632                     if (configure_filtergraph(fg)) {
2633                         av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
/* Encoder parameters are taken from the filtergraph's sink input pad. */
2638             switch (codec->codec_type) {
2639             case AVMEDIA_TYPE_AUDIO:
2640                 codec->sample_fmt     = ost->filter->filter->inputs[0]->format;
2641                 codec->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
2642                 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2643                 codec->channels       = av_get_channel_layout_nb_channels(codec->channel_layout);
2644                 codec->time_base      = (AVRational){ 1, codec->sample_rate };
2646             case AVMEDIA_TYPE_VIDEO:
2647                 codec->time_base = ost->filter->filter->inputs[0]->time_base;
2649                 codec->width  = ost->filter->filter->inputs[0]->w;
2650                 codec->height = ost->filter->filter->inputs[0]->h;
2651                 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2652                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2653                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2654                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
2655                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
/* Any geometry or pixel-format change invalidates the raw-sample depth
 * inherited from the input. */
2657                 if (codec->width   != icodec->width  ||
2658                     codec->height  != icodec->height ||
2659                     codec->pix_fmt != icodec->pix_fmt) {
2660                     codec->bits_per_raw_sample = 0;
2664             case AVMEDIA_TYPE_SUBTITLE:
2665                 codec->time_base = (AVRational){1, 1000};
/* Two-pass encoding: set up the per-stream stats log file. */
2672             if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2673                 char logfilename[1024];
2676                 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2677                          pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
/* libx264 manages its own stats file via the "stats" private option. */
2679                 if (!strcmp(ost->enc->name, "libx264")) {
2680                     av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2682                     if (codec->flags & CODEC_FLAG_PASS1) {
2683                         f = fopen(logfilename, "wb");
2685                             av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2686                                    logfilename, strerror(errno));
/* Pass 2: read the whole first-pass log into codec->stats_in. */
2692                         size_t logbuffer_size;
2693                         if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2694                             av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2698                         codec->stats_in = logbuffer;
2705     /* open each encoder */
2706     for (i = 0; i < nb_output_streams; i++) {
2707         ost = output_streams[i];
2708         if (ost->encoding_needed) {
2709             AVCodec      *codec = ost->enc;
2710             AVCodecContext *dec = NULL;
/* Propagate the decoder's subtitle header (e.g. ASS styles) so the
 * encoder can reproduce it. */
2712             if ((ist = get_input_stream(ost)))
2713                 dec = ist->st->codec;
2714             if (dec && dec->subtitle_header) {
2715                 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2716                 if (!ost->st->codec->subtitle_header) {
2717                     ret = AVERROR(ENOMEM);
2720                 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2721                 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
/* Default to automatic thread-count detection unless the user set it. */
2723             if (!av_dict_get(ost->opts, "threads", NULL, 0))
2724                 av_dict_set(&ost->opts, "threads", "auto", 0);
2725             if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2726                 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2727                         ost->file_index, ost->index);
2728                 ret = AVERROR(EINVAL);
2731             assert_codec_experimental(ost->st->codec, 1);
2732             assert_avoptions(ost->opts);
/* Heuristic: a sub-1000 bitrate almost certainly means the user passed
 * kbit/s where bit/s was expected. */
2733             if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2734                 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2735                                              "It takes bits/s as argument, not kbits/s\n");
2736             extra_size += ost->st->codec->extradata_size;
2738             if (ost->st->codec->me_threshold)
2739                 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2743     /* init input streams */
2744     for (i = 0; i < nb_input_streams; i++)
2745         if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
/* A program is kept (AVDISCARD_DEFAULT) iff at least one of its streams
 * is actually used by some output. */
2748     /* discard unused programs */
2749     for (i = 0; i < nb_input_files; i++) {
2750         InputFile *ifile = input_files[i];
2751         for (j = 0; j < ifile->ctx->nb_programs; j++) {
2752             AVProgram *p = ifile->ctx->programs[j];
2753             int discard  = AVDISCARD_ALL;
2755             for (k = 0; k < p->nb_stream_indexes; k++)
2756                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2757                     discard = AVDISCARD_DEFAULT;
2760             p->discard = discard;
2764     /* open files and write file headers */
2765     for (i = 0; i < nb_output_files; i++) {
2766         oc = output_files[i]->ctx;
2767         oc->interrupt_callback = int_cb;
2768         if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
2770             const char *errbuf_ptr = errbuf;
2771             if (av_strerror(ret, errbuf, sizeof(errbuf)) < 0)
2772                 errbuf_ptr = strerror(AVUNERROR(ret));
2773             snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?): %s", i, errbuf_ptr);
2774             ret = AVERROR(EINVAL);
2777         assert_avoptions(output_files[i]->opts);
2778         if (strcmp(oc->oformat->name, "rtp")) {
2784     /* dump the file output parameters - cannot be done before in case
2786     for (i = 0; i < nb_output_files; i++) {
2787         av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
/* Informational mapping report: inputs feeding complex graphs first,
 * then one line per output stream. */
2790     /* dump the stream mapping */
2791     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2792     for (i = 0; i < nb_input_streams; i++) {
2793         ist = input_streams[i];
2795         for (j = 0; j < ist->nb_filters; j++) {
2796             if (ist->filters[j]->graph->graph_desc) {
2797                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
2798                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2799                        ist->filters[j]->name);
2800                 if (nb_filtergraphs > 1)
2801                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2802                 av_log(NULL, AV_LOG_INFO, "\n");
2807     for (i = 0; i < nb_output_streams; i++) {
2808         ost = output_streams[i];
2810         if (ost->attachment_filename) {
2811             /* an attached file */
2812             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
2813                    ost->attachment_filename, ost->file_index, ost->index);
2817         if (ost->filter && ost->filter->graph->graph_desc) {
2818             /* output from a complex graph */
2819             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
2820             if (nb_filtergraphs > 1)
2821                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2823             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2824                    ost->index, ost->enc ? ost->enc->name : "?");
2828         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
2829                input_streams[ost->source_index]->file_index,
2830                input_streams[ost->source_index]->st->index,
2833         if (ost->sync_ist != input_streams[ost->source_index])
2834             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2835                    ost->sync_ist->file_index,
2836                    ost->sync_ist->st->index);
2837         if (ost->stream_copy)
2838             av_log(NULL, AV_LOG_INFO, " (copy)");
2840             av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2841                    input_streams[ost->source_index]->dec->name : "?",
2842                    ost->enc ? ost->enc->name : "?");
2843         av_log(NULL, AV_LOG_INFO, "\n");
2847         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
/*
 * Decide whether the transcode loop still has work to do.
 * Marks a whole output file's streams as finished once any of its
 * streams exceeds its size or frame-count limit.
 */
2859  * @return  1 if there are still streams where more output is wanted,
2862 static int need_output(void)
2866     for (i = 0; i < nb_output_streams; i++) {
2867         OutputStream *ost    = output_streams[i];
2868         OutputFile *of       = output_files[ost->file_index];
2869         AVFormatContext *os  = output_files[ost->file_index]->ctx;
/* A stream is done when its recording window has passed or the muxed
 * file has reached -fs (limit_filesize). */
2871         if (ost->is_past_recording_time ||
2872             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* NOTE(review): '>' lets one extra frame through relative to a strict
 * max_frames cap; confirm upstream intends '>=' here. */
2874         if (ost->frame_number > ost->max_frames) {
/* -frames reached: retire every stream of this output file. */
2876             for (j = 0; j < of->ctx->nb_streams; j++)
2877                 output_streams[of->ost_index + j]->is_past_recording_time = 1;
/*
 * Pick the next input file to read from: the one whose active stream has
 * the smallest last decoded DTS, skipping files flagged in no_packet
 * (temporarily starved, EAGAIN) and files that already hit EOF.
 * Returns the file index, or -1 when no file is readable.
 */
2887 static int select_input_file(uint8_t *no_packet)
2889     int64_t ipts_min = INT64_MAX;
2890     int i, file_index = -1;
2892     for (i = 0; i < nb_input_streams; i++) {
2893         InputStream *ist = input_streams[i];
2894         int64_t ipts     = ist->last_dts;
2896         if (ist->discard || no_packet[ist->file_index])
2898         if (!input_files[ist->file_index]->eof_reached) {
/* Earliest-DTS-wins keeps the inputs roughly interleaved in time. */
2899             if (ipts < ipts_min) {
2901                 file_index = ist->file_index;
2910  * The following code is the main loop of the file converter
/*
 * Main demux/decode/encode/mux loop. Repeatedly selects the input file
 * with the lowest DTS, reads one packet, fixes up its timestamps, and
 * hands it to output_packet(); on exit it flushes decoders, writes
 * trailers and releases per-stream resources.
 *
 * NOTE(review): listing is elided (line numbers jump); comments cover
 * only the visible statements.
 */
2912 static int transcode(void)
2915     AVFormatContext *is, *os;
2919     int no_packet_count = 0;
2920     int64_t timer_start;
/* One flag byte per input file: set when av_read_frame returned EAGAIN. */
2922     if (!(no_packet = av_mallocz(nb_input_files)))
2925     ret = transcode_init();
2929     av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2932     timer_start = av_gettime();
/* Loop until a termination signal is received or no output is wanted. */
2934     for (; received_sigterm == 0;) {
2935         int file_index, ist_index;
2938         /* check if there's any stream where output is still needed */
2939         if (!need_output()) {
2940             av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
2944         /* select the stream that we must read now */
2945         file_index = select_input_file(no_packet);
2946         /* if none, if is finished */
2947         if (file_index < 0) {
/* All files starved: clear the EAGAIN flags and retry. */
2948             if (no_packet_count) {
2949                 no_packet_count = 0;
2950                 memset(no_packet, 0, nb_input_files);
2957         /* read a frame from it and output it in the fifo */
2958         is  = input_files[file_index]->ctx;
2959         ret = av_read_frame(is, &pkt);
2960         if (ret == AVERROR(EAGAIN)) {
2961             no_packet[file_index] = 1;
/* Read error / EOF path: mark the file finished and flush its decoders
 * by sending NULL packets. */
2966             input_files[file_index]->eof_reached = 1;
2968             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
2969                 ist = input_streams[input_files[file_index]->ist_index + i];
2970                 if (ist->decoding_needed)
2971                     output_packet(ist, NULL);
/* Successful read: reset the starvation bookkeeping. */
2980         no_packet_count = 0;
2981         memset(no_packet, 0, nb_input_files);
2984             av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2985                              is->streams[pkt.stream_index]);
2987         /* the following test is needed in case new streams appear
2988            dynamically in stream : we ignore them */
2989         if (pkt.stream_index >= input_files[file_index]->nb_streams)
2990             goto discard_packet;
2991         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
2992         ist = input_streams[ist_index];
2994             goto discard_packet;
/* Apply the per-file timestamp offset (input seeking / discontinuity
 * correction), converted into this stream's time base. */
2996         if (pkt.dts != AV_NOPTS_VALUE)
2997             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2998         if (pkt.pts != AV_NOPTS_VALUE)
2999             pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
/* -itsscale: user-requested timestamp scaling. */
3001         if (pkt.pts != AV_NOPTS_VALUE)
3002             pkt.pts *= ist->ts_scale;
3003         if (pkt.dts != AV_NOPTS_VALUE)
3004             pkt.dts *= ist->ts_scale;
3006         //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3008         //        pkt.dts, input_files[ist->file_index].ts_offset,
3009         //        ist->st->codec->codec_type);
/* Discontinuity handling for formats that declare AVFMT_TS_DISCONT
 * (e.g. MPEG-TS): fold large DTS jumps into ts_offset so downstream
 * timestamps stay monotonic. Skipped with -copyts. */
3010         if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3011             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3012             int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3013             int64_t delta   = pkt_dts - ist->next_dts;
3014             if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3015                 input_files[ist->file_index]->ts_offset -= delta;
3016                 av_log(NULL, AV_LOG_DEBUG,
3017                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3018                        delta, input_files[ist->file_index]->ts_offset);
3019                 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3020                 if (pkt.pts != AV_NOPTS_VALUE)
3021                     pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3025         // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
/* Decode/encode/mux this packet and drain any ready filter output. */
3026         if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3027             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3028                    ist->file_index, ist->st->index);
3031             av_free_packet(&pkt);
3036         av_free_packet(&pkt);
3038         /* dump report by using the output first video and audio streams */
3039         print_report(0, timer_start);
3042     /* at the end of stream, we must flush the decoder buffers */
3043     for (i = 0; i < nb_input_streams; i++) {
3044         ist = input_streams[i];
3045         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3046             output_packet(ist, NULL);
3054     /* write the trailer if needed and close file */
3055     for (i = 0; i < nb_output_files; i++) {
3056         os = output_files[i]->ctx;
3057         av_write_trailer(os);
3060     /* dump report by using the first video and audio streams */
3061     print_report(1, timer_start);
3063     /* close each encoder */
3064     for (i = 0; i < nb_output_streams; i++) {
3065         ost = output_streams[i];
3066         if (ost->encoding_needed) {
3067             av_freep(&ost->st->codec->stats_in);
3068             avcodec_close(ost->st->codec);
3072     /* close each decoder */
3073     for (i = 0; i < nb_input_streams; i++) {
3074         ist = input_streams[i];
3075         if (ist->decoding_needed) {
3076             avcodec_close(ist->st->codec);
/* Cleanup path: free per-stream allocations made during init. */
3084     av_freep(&no_packet);
3086     if (output_streams) {
3087         for (i = 0; i < nb_output_streams; i++) {
3088             ost = output_streams[i];
3090                 if (ost->stream_copy)
3091                     av_freep(&ost->st->codec->extradata);
3093                     fclose(ost->logfile);
3094                     ost->logfile = NULL;
3096                 av_freep(&ost->st->codec->subtitle_header);
3097                 av_free(ost->forced_kf_pts);
3098                 av_dict_free(&ost->opts);
/*
 * Parse an aspect-ratio argument given either as "num:den" or as a
 * plain decimal (e.g. "16:9" or "1.7777"). Logs a fatal error for
 * values rejected by the (elided) validation below.
 */
3105 static double parse_frame_aspect_ratio(const char *arg)
/* A ':' selects the integer num:den form; otherwise parse as double. */
3112     p = strchr(arg, ':');
3114         x = strtol(arg, &end, 10);
3116             y = strtol(end + 1, &end, 10);
3118             ar = (double)x / (double)y;
3120         ar = strtod(arg, NULL);
3123         av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
3129 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3131 return parse_option(o, "codec:a", arg, options);
3134 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3136 return parse_option(o, "codec:v", arg, options);
3139 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3141 return parse_option(o, "codec:s", arg, options);
3144 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3146 return parse_option(o, "codec:d", arg, options);
/*
 * Parse one -map argument and append StreamMap entries to the options
 * context. Supported forms: "[label]" (complex filtergraph output),
 * "file[:spec]" with optional leading '-' for a negative map, and an
 * optional ",syncfile[:syncspec]" suffix selecting the sync stream.
 */
3149 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3151     StreamMap *m = NULL;
3152     int i, negative = 0, file_idx;
3153     int sync_file_idx = -1, sync_stream_idx;
/* Work on a private copy; the parse mutates the string in place. */
3161     map = av_strdup(arg);
3163     /* parse sync stream first, just pick first matching stream */
3164     if (sync = strchr(map, ',')) {
3166         sync_file_idx = strtol(sync + 1, &sync, 0);
3167         if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3168             av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
/* First stream of the sync file matching the specifier wins. */
3173         for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3174             if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3175                                        input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3176                 sync_stream_idx = i;
3179         if (i == input_files[sync_file_idx]->nb_streams) {
3180             av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3181                                        "match any streams.\n", arg);
3187     if (map[0] == '[') {
3188         /* this mapping refers to lavfi output */
3189         const char *c = map + 1;
3190         o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3191                                     &o->nb_stream_maps, o->nb_stream_maps + 1);
3192         m = &o->stream_maps[o->nb_stream_maps - 1];
3193         m->linklabel = av_get_token(&c, "]");
3194         if (!m->linklabel) {
3195             av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
/* Plain "file[:spec]" form. */
3199         file_idx = strtol(map, &p, 0);
3200         if (file_idx >= nb_input_files || file_idx < 0) {
3201             av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
/* Negative map: deactivate earlier maps that match this specifier. */
3205             /* disable some already defined maps */
3206             for (i = 0; i < o->nb_stream_maps; i++) {
3207                 m = &o->stream_maps[i];
3208                 if (file_idx == m->file_index &&
3209                     check_stream_specifier(input_files[m->file_index]->ctx,
3210                                            input_files[m->file_index]->ctx->streams[m->stream_index],
3211                                            *p == ':' ? p + 1 : p) > 0)
/* Positive map: add one entry per matching stream of the file. */
3215             for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3216                 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3217                             *p == ':' ? p + 1 : p) <= 0)
3219                 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3220                                             &o->nb_stream_maps, o->nb_stream_maps + 1);
3221                 m = &o->stream_maps[o->nb_stream_maps - 1];
3223                 m->file_index   = file_idx;
3224                 m->stream_index = i;
/* Sync defaults to the mapped stream itself when no ",sync" suffix. */
3226                 if (sync_file_idx >= 0) {
3227                     m->sync_file_index   = sync_file_idx;
3228                     m->sync_stream_index = sync_stream_idx;
3230                     m->sync_file_index   = file_idx;
3231                     m->sync_stream_index = i;
3237             av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/*
 * -attach: remember a filename to be embedded as an attachment stream
 * in the output file; actual attachment happens when the output file
 * is opened.
 */
3245 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3247     o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3248                                 &o->nb_attachments, o->nb_attachments + 1);
3249     o->attachments[o->nb_attachments - 1] = arg;
3254  * Parse a metadata specifier in arg.
3255  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3256  * @param index for type c/p, chapter/program index is written here
3257  * @param stream_spec for type s, the stream specifier is written here
/* Dispatch on the leading type character; default stream specifier is
 * the empty string (matches all streams). */
3259 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
/* 's': the rest after ':' (if any) is the stream specifier. */
3267         if (*(++arg) && *arg != ':') {
3268             av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3271         *stream_spec = *arg == ':' ? arg + 1 : "";
/* 'c'/'p': an optional ":N" selects the chapter/program index. */
3275         if (*(++arg) == ':')
3276             *index = strtol(++arg, NULL, 0);
3279         av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/*
 * Implement -map_metadata: copy one metadata dictionary from the input
 * context to the output context. Source/destination are selected by
 * the parsed specifiers (global, per-stream, per-chapter, per-program).
 * Existing output entries are preserved (AV_DICT_DONT_OVERWRITE).
 */
3286 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3288     AVDictionary **meta_in = NULL;
3289     AVDictionary **meta_out;
3291     char  type_in, type_out;
3292     const char *istream_spec = NULL, *ostream_spec = NULL;
3293     int idx_in = 0, idx_out = 0;
3295     parse_meta_type(inspec,  &type_in,  &idx_in,  &istream_spec);
3296     parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
/* Remember that the user took manual control of this metadata class so
 * the automatic copy is suppressed later. */
3298     if (type_in == 'g' || type_out == 'g')
3299         o->metadata_global_manual = 1;
3300     if (type_in == 's' || type_out == 's')
3301         o->metadata_streams_manual = 1;
3302     if (type_in == 'c' || type_out == 'c')
3303         o->metadata_chapters_manual = 1;
/* Helper macros: bounds-check an index and bind 'meta' to the right
 * dictionary for the given type character. */
3305 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3306     if ((index) < 0 || (index) >= (nb_elems)) {\
3307         av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
3312 #define SET_DICT(type, meta, context, index)\
3315             meta = &context->metadata;\
3318             METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3319             meta = &context->chapters[index]->metadata;\
3322             METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3323             meta = &context->programs[index]->metadata;\
3327     SET_DICT(type_in, meta_in, ic, idx_in);
3328     SET_DICT(type_out, meta_out, oc, idx_out);
3330     /* for input streams choose first matching stream */
3331     if (type_in == 's') {
3332         for (i = 0; i < ic->nb_streams; i++) {
3333             if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3334                 meta_in = &ic->streams[i]->metadata;
3340             av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any  streams.\n", istream_spec);
/* For output streams the copy applies to every matching stream. */
3345     if (type_out == 's') {
3346         for (i = 0; i < oc->nb_streams; i++) {
3347             if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3348                 meta_out = &oc->streams[i]->metadata;
3349                 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3354         av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/*
 * Look up an encoder or decoder by name and abort with a fatal log if
 * it does not exist or is of the wrong media type (exit path elided).
 */
3359 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3361     const char *codec_string = encoder ? "encoder" : "decoder";
3365         avcodec_find_encoder_by_name(name) :
3366         avcodec_find_decoder_by_name(name);
3368         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
/* A name match with a mismatched media type (e.g. audio codec for a
 * video stream) is also fatal. */
3371     if (codec->type != type) {
3372         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/*
 * Select the decoder for an input stream: honour a per-stream -codec
 * option if given (forcing the stream's codec_id to match), otherwise
 * fall back to the default decoder for the probed codec_id.
 */
3378 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3380     char *codec_name = NULL;
3382     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3384         AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3385         st->codec->codec_id = codec->id;
3388         return avcodec_find_decoder(st->codec->codec_id);
3392  * Add all the streams from the given input file to the global
3393  * list of input streams.
/* For each demuxed stream: allocate an InputStream, attach per-stream
 * codec options, pick a decoder, and snapshot the decoder parameters
 * used later to detect mid-stream format changes (resample_*). */
3395 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3399     for (i = 0; i < ic->nb_streams; i++) {
3400         AVStream *st = ic->streams[i];
3401         AVCodecContext *dec = st->codec;
3402         InputStream *ist = av_mallocz(sizeof(*ist));
3403         char *framerate = NULL;
3408         input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3409         input_streams[nb_input_streams - 1] = ist;
3412         ist->file_index = nb_input_files;
/* Streams start discarded; mapping to an output re-enables them. */
3414         st->discard  = AVDISCARD_ALL;
3415         ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3417         ist->ts_scale = 1.0;
3418         MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3420         ist->dec = choose_decoder(o, ic, st);
3422         switch (dec->codec_type) {
3423         case AVMEDIA_TYPE_VIDEO:
/* Baseline geometry/pixel format for change detection. */
3424             ist->resample_height  = dec->height;
3425             ist->resample_width   = dec->width;
3426             ist->resample_pix_fmt = dec->pix_fmt;
3428             MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
3429             if (framerate && av_parse_video_rate(&ist->framerate,
3431                 av_log(NULL, AV_LOG_ERROR, "Error parsing framerate %s.\n",
3437         case AVMEDIA_TYPE_AUDIO:
/* Fill in a channel layout if the demuxer left it unset. */
3438             guess_input_channel_layout(ist);
3440             ist->resample_sample_fmt     = dec->sample_fmt;
3441             ist->resample_sample_rate    = dec->sample_rate;
3442             ist->resample_channels       = dec->channels;
3443             ist->resample_channel_layout = dec->channel_layout;
3446         case AVMEDIA_TYPE_DATA:
3447         case AVMEDIA_TYPE_SUBTITLE:
3448         case AVMEDIA_TYPE_ATTACHMENT:
3449         case AVMEDIA_TYPE_UNKNOWN:
/*
 * Refuse to clobber an existing output file unless -y was given.
 * Interactive sessions get a y/N prompt; otherwise existence is fatal.
 * Only plain file paths are checked (protocol URLs are skipped, except
 * "file:" and drive-letter style paths).
 */
3457 static void assert_file_overwrite(const char *filename)
3459     if (!file_overwrite &&
3460         (strchr(filename, ':') == NULL || filename[1] == ':' ||
3461          av_strstart(filename, "file:", NULL))) {
3462         if (avio_check(filename, 0) == 0) {
3464                 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3466                 if (!read_yesno()) {
3467                     fprintf(stderr, "Not overwriting - exiting\n");
/* Non-interactive (elided branch): existing file is a hard error. */
3472                 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/*
 * -dump_attachment: write an attachment stream's extradata to a file.
 * An empty filename falls back to the stream's "filename" metadata tag;
 * having neither is fatal. Prompts before overwriting existing files.
 */
3479 static void dump_attachment(AVStream *st, const char *filename)
3482     AVIOContext *out = NULL;
3483     AVDictionaryEntry *e;
3485     if (!st->codec->extradata_size) {
3486         av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3487                nb_input_files - 1, st->index);
3490     if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3491         filename = e->value;
3493         av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3494                "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3498     assert_file_overwrite(filename);
3500     if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3501         av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
/* The attachment payload lives entirely in the codec extradata. */
3506     avio_write(out, st->codec->extradata, st->codec->extradata_size);
/*
 * Open one input file (-i): apply format-level option overrides, probe
 * stream parameters, perform the initial -ss seek, register all of its
 * streams globally, and record the new InputFile entry.
 *
 * NOTE(review): listing is elided (line numbers jump); comments cover
 * only the visible statements.
 */
3511 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3513     AVFormatContext *ic;
3514     AVInputFormat *file_iformat = NULL;
3518     AVDictionary **opts;
3519     int orig_nb_streams;                     // number of streams before avformat_find_stream_info
/* -f: force a specific demuxer. */
3522         if (!(file_iformat = av_find_input_format(o->format))) {
3523             av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3528     if (!strcmp(filename, "-"))
3531     using_stdin |= !strncmp(filename, "pipe:", 5) ||
3532                     !strcmp(filename, "/dev/stdin");
3534     /* get default parameters from command line */
3535     ic = avformat_alloc_context();
3537         print_error(filename, AVERROR(ENOMEM));
/* Push relevant CLI options (-ar/-ac/-r/-s/-pix_fmt) down to the
 * demuxer as format options; mainly useful for capture devices. */
3540     if (o->nb_audio_sample_rate) {
3541         snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3542         av_dict_set(&format_opts, "sample_rate", buf, 0);
3544     if (o->nb_audio_channels) {
3545         /* because we set audio_channels based on both the "ac" and
3546          * "channel_layout" options, we need to check that the specified
3547          * demuxer actually has the "channels" option before setting it */
3548         if (file_iformat && file_iformat->priv_class &&
3549             av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3550                         AV_OPT_SEARCH_FAKE_OBJ)) {
3551             snprintf(buf, sizeof(buf), "%d",
3552                      o->audio_channels[o->nb_audio_channels - 1].u.i);
3553             av_dict_set(&format_opts, "channels", buf, 0);
3556     if (o->nb_frame_rates) {
3557         /* set the format-level framerate option;
3558          * this is important for video grabbers, e.g. x11 */
3559         if (file_iformat && file_iformat->priv_class &&
3560             av_opt_find(&file_iformat->priv_class, "framerate", NULL, 0,
3561                         AV_OPT_SEARCH_FAKE_OBJ)) {
3562             av_dict_set(&format_opts, "framerate",
3563                         o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3566     if (o->nb_frame_sizes) {
3567         av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3569     if (o->nb_frame_pix_fmts)
3570         av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3572     ic->flags |= AVFMT_FLAG_NONBLOCK;
3573     ic->interrupt_callback = int_cb;
3575     /* open the input file with generic libav function */
3576     err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3578         print_error(filename, err);
/* Any leftover entry in format_opts is an option the demuxer did not
 * consume -> fatal. */
3581     assert_avoptions(format_opts);
3583     /* apply forced codec ids */
3584     for (i = 0; i < ic->nb_streams; i++)
3585         choose_decoder(o, ic, ic->streams[i]);
3587     /* Set AVCodecContext options for avformat_find_stream_info */
3588     opts = setup_find_stream_info_opts(ic, codec_opts);
3589     orig_nb_streams = ic->nb_streams;
3591     /* If not enough info to get the stream parameters, we decode the
3592        first frames to get it. (used in mpeg case for example) */
3593     ret = avformat_find_stream_info(ic, opts);
3595         av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3596         avformat_close_input(&ic);
3600     timestamp = o->start_time;
3601     /* add the stream start time */
3602     if (ic->start_time != AV_NOPTS_VALUE)
3603         timestamp += ic->start_time;
3605     /* if seeking requested, we execute it */
3606     if (o->start_time != 0) {
3607         ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
/* Seek failure is non-fatal; decoding just starts from the beginning. */
3609             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3610                    filename, (double)timestamp / AV_TIME_BASE);
3614     /* update the current parameters so that they match the one of the input stream */
3615     add_input_streams(o, ic);
3617     /* dump the file content */
3618     av_dump_format(ic, nb_input_files, filename, 0);
3620     input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3621     if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3624     input_files[nb_input_files - 1]->ctx        = ic;
3625     input_files[nb_input_files - 1]->ist_index  = nb_input_streams - ic->nb_streams;
/* With -copyts the seek target is kept in the timestamps instead of
 * being folded into ts_offset. */
3626     input_files[nb_input_files - 1]->ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3627     input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3628     input_files[nb_input_files - 1]->rate_emu   = o->rate_emu;
3630     for (i = 0; i < o->nb_dump_attachment; i++) {
3633         for (j = 0; j < ic->nb_streams; j++) {
3634             AVStream *st = ic->streams[j];
3636             if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3637                 dump_attachment(st, o->dump_attachment[i].u.str);
/* Free the per-stream option dictionaries built for find_stream_info. */
3641     for (i = 0; i < orig_nb_streams; i++)
3642         av_dict_free(&opts[i]);
/*
 * Parse the -force_key_frames argument: a comma-separated list of
 * timestamps, each converted to the encoder's time base and stored in
 * ost->forced_kf_pts for the video encoder to honour.
 */
3649 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3650                                     AVCodecContext *avctx)
/* First pass (elided increment): count entries by scanning separators. */
3656     for (p = kf; *p; p++)
3659     ost->forced_kf_count = n;
3660     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3661     if (!ost->forced_kf_pts) {
3662         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3665     for (i = 0; i < n; i++) {
/* i == 0 starts at the string head; later entries resume after the
 * previous comma. */
3666         p = i ? strchr(p, ',') + 1 : kf;
3667         t = parse_time_or_die("force_key_frames", p, 1);
3668         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/*
 * Read one '\n'- or NUL-terminated line from an AVIOContext into a
 * freshly allocated buffer (via a dynamic buffer); the caller owns the
 * returned memory.
 */
3672 static uint8_t *get_line(AVIOContext *s)
3678     if (avio_open_dyn_buf(&line) < 0) {
3679         av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3683     while ((c = avio_r8(s)) && c != '\n')
3686     avio_close_dyn_buf(line, &buf);
/*
 * Locate and open an .avpreset file. Search order: $AVCONV_DATADIR,
 * then the user's ~/.avconv directory, then the build-time data dir;
 * within each, first "<codec>-<preset>.avpreset", then
 * "<preset>.avpreset". Returns the avio_open2 result; *s is the opened
 * context on success.
 */
3691 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3694     char filename[1000];
3695     const char *base[3] = { getenv("AVCONV_DATADIR"),
/* Stop as soon as one candidate opens successfully (ret becomes 0). */
3700     for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
/* base[1] is $HOME, so the ".avconv" subdirectory is appended there. */
3704             snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3705                      i != 1 ? "" : "/.avconv", codec_name, preset_name);
3706             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3709                 snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3710                          i != 1 ? "" : "/.avconv", preset_name);
3711                 ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/*
 * Select the encoder for an output stream. With no per-stream -codec
 * option, guess the codec from the output format and filename; "copy"
 * flags the stream for stream copy; any other name is looked up as an
 * encoder (fatal if unknown or of the wrong type).
 */
3717 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3719     char *codec_name = NULL;
3721     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3723         ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3724                                                   NULL, ost->st->codec->codec_type);
3725         ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3726     } else if (!strcmp(codec_name, "copy"))
3727         ost->stream_copy = 1;
3729         ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3730         ost->st->codec->codec_id = ost->enc->id;
/* Create a new output stream of the given media type in muxer context 'oc',
 * append it to the global output_streams[] array and apply all per-stream
 * options (preset file, max frames, bitstream filters, codec tag, qscale,
 * global-header flag, sws flags).  Returns the new OutputStream; exits
 * fatally on allocation or option errors. */
3734 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3737 AVStream *st = avformat_new_stream(oc, NULL);
3738 int idx = oc->nb_streams - 1, ret = 0;
3739 char *bsf = NULL, *next, *codec_tag = NULL;
3740 AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3742 char *buf = NULL, *arg = NULL, *preset = NULL;
3743 AVIOContext *s = NULL;
3746 av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* honor a -streamid mapping for this output stream index, if any */
3750 if (oc->nb_streams - 1 < o->nb_streamid_map)
3751 st->id = o->streamid_map[oc->nb_streams - 1];
3753 output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3754 nb_output_streams + 1);
3755 if (!(ost = av_mallocz(sizeof(*ost))))
3757 output_streams[nb_output_streams - 1] = ost;
3759 ost->file_index = nb_output_files;
3762 st->codec->codec_type = type;
3763 choose_encoder(o, oc, ost);
/* collect AVOptions that apply to the selected encoder */
3765 ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3768 avcodec_get_context_defaults3(st->codec, ost->enc);
3769 st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* apply a preset file, if one was requested: each non-comment line is a
 * "key=value" pair merged into ost->opts (without overwriting user opts) */
3771 MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3772 if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
/* skip empty lines and '#' comments */
3775 if (!buf[0] || buf[0] == '#') {
3779 if (!(arg = strchr(buf, '='))) {
3780 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
3784 av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3786 } while (!s->eof_reached);
3790 av_log(NULL, AV_LOG_FATAL,
3791 "Preset %s specified for stream %d:%d, but could not be opened.\n",
3792 preset, ost->file_index, ost->index);
3796 ost->max_frames = INT64_MAX;
3797 MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* build the chain of bitstream filters from the comma-separated -bsf list */
3799 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3801 if (next = strchr(bsf, ','))
3803 if (!(bsfc = av_bitstream_filter_init(bsf))) {
3804 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3808 bsfc_prev->next = bsfc;
3810 ost->bitstream_filters = bsfc;
/* -tag: numeric value if it parses as a number, otherwise the raw fourcc */
3816 MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3818 uint32_t tag = strtol(codec_tag, &next, 0);
3820 tag = AV_RL32(codec_tag);
3821 st->codec->codec_tag = tag;
/* fixed-quality encoding via -q / -same_quant */
3824 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3825 if (qscale >= 0 || same_quant) {
3826 st->codec->flags |= CODEC_FLAG_QSCALE;
3827 st->codec->global_quality = FF_QP2LAMBDA * qscale;
3830 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3831 st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3833 av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3835 ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of quantization-matrix coefficients from
 * 'str' into 'dest' (64 entries expected; parsing loop elided in this
 * view).  Fatal error if the string is malformed. */
3840 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3843 const char *p = str;
3850 av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream and, unless it is a streamcopy, apply all
 * video-specific options: frame rate, size, aspect ratio, pixel format,
 * intra/inter matrices, rc_override, two-pass flags, forced key frames,
 * filters, etc.  Returns the new OutputStream. */
3857 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3861 AVCodecContext *video_enc;
3863 ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3865 video_enc = st->codec;
3867 if (!ost->stream_copy) {
3868 const char *p = NULL;
3869 char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3870 char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3871 char *intra_matrix = NULL, *inter_matrix = NULL;
/* default filter graph is a pass-through */
3872 const char *filters = "null";
3875 MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3876 if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3877 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3881 MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3882 if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3883 av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3887 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3888 if (frame_aspect_ratio)
3889 ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3891 MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3892 if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3893 av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3896 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* custom intra quantization matrix (-intra_matrix) */
3898 MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3900 if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3901 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3904 parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
/* custom inter quantization matrix (-inter_matrix) */
3906 MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3908 if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3909 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3912 parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: per-interval quality overrides "start,end,q[/...]".
 * NOTE(review): the av_realloc result is assigned back directly, and the
 * allocation failure path is not visible in this view. */
3915 MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3916 for (i = 0; p; i++) {
3918 int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3920 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3923 video_enc->rc_override =
3924 av_realloc(video_enc->rc_override,
3925 sizeof(RcOverride) * (i + 1));
3926 video_enc->rc_override[i].start_frame = start;
3927 video_enc->rc_override[i].end_frame = end;
/* positive q: fixed qscale; negative q: quality factor in percent */
3929 video_enc->rc_override[i].qscale = q;
3930 video_enc->rc_override[i].quality_factor = 1.0;
3933 video_enc->rc_override[i].qscale = 0;
3934 video_enc->rc_override[i].quality_factor = -q/100.0;
3939 video_enc->rc_override_count = i;
3940 if (!video_enc->rc_initial_buffer_occupancy)
3941 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
3942 video_enc->intra_dc_precision = intra_dc_precision - 8;
/* two-pass encoding flags (pass selection logic elided here) */
3947 video_enc->flags |= CODEC_FLAG_PASS1;
3949 video_enc->flags |= CODEC_FLAG_PASS2;
3953 MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
3954 if (forced_key_frames)
3955 parse_forced_key_frames(forced_key_frames, ost, video_enc);
3957 MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 means "not specified"; the muxer/encoder decides field order */
3959 ost->top_field_first = -1;
3960 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
3962 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3963 ost->avfilter = av_strdup(filters);
3965 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/* Create an audio output stream and, unless it is a streamcopy, apply the
 * audio-specific options: channel count, sample format, sample rate and
 * the audio filter graph (default "anull" pass-through). */
3971 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
3975 AVCodecContext *audio_enc;
3977 ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
3980 audio_enc = st->codec;
3981 audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
3983 if (!ost->stream_copy) {
3984 char *sample_fmt = NULL;
3985 const char *filters = "anull";
3987 MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
3989 MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
3991 (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
3992 av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
3996 MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
3998 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3999 ost->avfilter = av_strdup(filters);
/* Create a data output stream.  Only streamcopy is supported for data
 * streams; requesting an encoder is a fatal error. */
4005 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4009 ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4010 if (!ost->stream_copy) {
4011 av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always streamcopied. */
4018 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4020 OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4021 ost->stream_copy = 1;
/* Create a subtitle output stream (no subtitle-specific options are
 * applied in the part of this function visible here). */
4025 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4029 AVCodecContext *subtitle_enc;
4031 ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4033 subtitle_enc = st->codec;
4035 subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4040 /* arg format is "output-stream-index:streamid-value". */
/* Parse a -streamid option: record that output stream 'index' should get
 * the given stream id.  The mapping is stored in o->streamid_map, grown
 * on demand; both numbers must be non-negative ints or we die. */
4041 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
4047 av_strlcpy(idx_str, arg, sizeof(idx_str));
4048 p = strchr(idx_str, ':');
4050 av_log(NULL, AV_LOG_FATAL,
4051 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4056 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4057 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4058 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/* Copy chapters from input file 'ifile' to output file 'ofile', shifting
 * their timestamps by the output start time / input ts offset and
 * clipping them to the output recording time.  Chapters that end before
 * the output starts, or start after it ends, are skipped.
 * Chapter metadata is copied when copy_metadata is set.
 * Returns 0 on success or AVERROR(ENOMEM). */
4062 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4064 AVFormatContext *is = ifile->ctx;
4065 AVFormatContext *os = ofile->ctx;
4068 for (i = 0; i < is->nb_chapters; i++) {
4069 AVChapter *in_ch = is->chapters[i], *out_ch;
/* offset between input and output timelines, in the chapter's time base */
4070 int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
4071 AV_TIME_BASE_Q, in_ch->time_base);
4072 int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4073 av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* skip chapters entirely outside the output time window */
4076 if (in_ch->end < ts_off)
4078 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4081 out_ch = av_mallocz(sizeof(AVChapter));
4083 return AVERROR(ENOMEM);
4085 out_ch->id = in_ch->id;
4086 out_ch->time_base = in_ch->time_base;
/* clamp the shifted start/end into the output window */
4087 out_ch->start = FFMAX(0, in_ch->start - ts_off);
4088 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
4091 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4094 os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4096 return AVERROR(ENOMEM);
4097 os->chapters[os->nb_chapters - 1] = out_ch;
/* Create an output stream fed by a complex filtergraph output pad.
 * Only video and audio pads are supported; the resulting stream has no
 * input-stream source (source_index = -1) and streamcopy is rejected
 * because filtering and streamcopy are mutually exclusive. */
4102 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4103 AVFormatContext *oc)
4107 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4108 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4109 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4111 av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
/* the stream is driven by the filter, not by a demuxed input stream */
4116 ost->source_index = -1;
4117 ost->filter = ofilter;
4121 if (ost->stream_copy) {
4122 av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4123 "which is fed from a complex filtergraph. Filtering and streamcopy "
4124 "cannot be used together.\n", ost->file_index, ost->index);
4128 if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4129 av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4132 avfilter_inout_free(&ofilter->out_tmp);
/* Open and configure one output file named on the command line.
 * Creates the muxer context, builds its output streams (from complex
 * filtergraph outputs, explicit -map entries, or automatic "best stream"
 * selection), handles attachments, registers the new OutputFile, opens
 * the output for writing, and applies metadata / chapter mappings.
 * All errors are fatal (exit paths mostly elided in this view). */
4135 static void opt_output_file(void *optctx, const char *filename)
4137 OptionsContext *o = optctx;
4138 AVFormatContext *oc;
4140 AVOutputFormat *file_oformat;
/* complex filtergraphs must be configured before streams are mapped */
4144 if (configure_complex_filters() < 0) {
4145 av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
/* "-" means standard output (translation to "pipe:" elided here) */
4149 if (!strcmp(filename, "-"))
4152 oc = avformat_alloc_context();
4154 print_error(filename, AVERROR(ENOMEM));
/* determine the container: explicit -f first, then guess from filename */
4159 file_oformat = av_guess_format(o->format, NULL, NULL);
4160 if (!file_oformat) {
4161 av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4165 file_oformat = av_guess_format(NULL, filename, NULL);
4166 if (!file_oformat) {
4167 av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4173 oc->oformat = file_oformat;
4174 oc->interrupt_callback = int_cb;
4175 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4177 /* create streams for all unlabeled output pads */
4178 for (i = 0; i < nb_filtergraphs; i++) {
4179 FilterGraph *fg = filtergraphs[i];
4180 for (j = 0; j < fg->nb_outputs; j++) {
4181 OutputFilter *ofilter = fg->outputs[j];
/* labeled pads are connected via -map instead */
4183 if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* a filter-fed stream of a given type disables auto-mapping of that type */
4186 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4187 case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
4188 case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
4189 case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4191 init_output_filter(ofilter, o, oc);
4195 if (!o->nb_stream_maps) {
4196 /* pick the "best" stream of each type */
4197 #define NEW_STREAM(type, index)\
4199 ost = new_ ## type ## _stream(o, oc);\
4200 ost->source_index = index;\
4201 ost->sync_ist = input_streams[index];\
4202 input_streams[index]->discard = 0;\
4203 input_streams[index]->st->discard = AVDISCARD_NONE;\
4206 /* video: highest resolution */
4207 if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4208 int area = 0, idx = -1;
4209 for (i = 0; i < nb_input_streams; i++) {
4210 ist = input_streams[i];
4211 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4212 ist->st->codec->width * ist->st->codec->height > area) {
4213 area = ist->st->codec->width * ist->st->codec->height;
4217 NEW_STREAM(video, idx);
4220 /* audio: most channels */
4221 if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4222 int channels = 0, idx = -1;
4223 for (i = 0; i < nb_input_streams; i++) {
4224 ist = input_streams[i];
4225 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4226 ist->st->codec->channels > channels) {
4227 channels = ist->st->codec->channels;
4231 NEW_STREAM(audio, idx);
4234 /* subtitles: pick first */
4235 if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4236 for (i = 0; i < nb_input_streams; i++)
4237 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4238 NEW_STREAM(subtitle, i);
4242 /* do something with data? */
/* explicit -map entries: either a filtergraph link label or an
 * input file:stream reference */
4244 for (i = 0; i < o->nb_stream_maps; i++) {
4245 StreamMap *map = &o->stream_maps[i];
4250 if (map->linklabel) {
4252 OutputFilter *ofilter = NULL;
4255 for (j = 0; j < nb_filtergraphs; j++) {
4256 fg = filtergraphs[j];
4257 for (k = 0; k < fg->nb_outputs; k++) {
4258 AVFilterInOut *out = fg->outputs[k]->out_tmp;
4259 if (out && !strcmp(out->name, map->linklabel)) {
4260 ofilter = fg->outputs[k];
4267 av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4268 "in any defined filter graph.\n", map->linklabel);
4271 init_output_filter(ofilter, o, oc);
4273 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4274 switch (ist->st->codec->codec_type) {
4275 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4276 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4277 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4278 case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
4279 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4281 av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4282 map->file_index, map->stream_index);
4286 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4287 ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
4288 map->sync_stream_index];
4290 ist->st->discard = AVDISCARD_NONE;
4295 /* handle attached files */
4296 for (i = 0; i < o->nb_attachments; i++) {
4298 uint8_t *attachment;
4302 if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4303 av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4307 if ((len = avio_size(pb)) <= 0) {
4308 av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4312 if (!(attachment = av_malloc(len))) {
4313 av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4317 avio_read(pb, attachment, len);
/* the attachment content travels as codec extradata of a dedicated stream */
4319 ost = new_attachment_stream(o, oc);
4320 ost->stream_copy = 0;
4321 ost->source_index = -1;
4322 ost->attachment_filename = o->attachments[i];
4323 ost->st->codec->extradata = attachment;
4324 ost->st->codec->extradata_size = len;
/* store only the basename as the "filename" metadata tag */
4326 p = strrchr(o->attachments[i], '/');
4327 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* register the new OutputFile in the global output_files[] array */
4331 output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4332 if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4335 output_files[nb_output_files - 1]->ctx = oc;
4336 output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
4337 output_files[nb_output_files - 1]->recording_time = o->recording_time;
4338 if (o->recording_time != INT64_MAX)
4339 oc->duration = o->recording_time;
4340 output_files[nb_output_files - 1]->start_time = o->start_time;
4341 output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4342 av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4344 /* check filename in case of an image number is expected */
4345 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4346 if (!av_filename_number_test(oc->filename)) {
4347 print_error(oc->filename, AVERROR(EINVAL));
4352 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4353 /* test if it already exists to avoid losing precious files */
4354 assert_file_overwrite(filename);
/* open the file for writing */
4357 if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4358 &oc->interrupt_callback,
4359 &output_files[nb_output_files - 1]->opts)) < 0) {
4360 print_error(filename, err);
4365 if (o->mux_preload) {
4367 snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4368 av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4370 oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4371 oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: copy metadata between files/streams as requested */
4374 for (i = 0; i < o->nb_metadata_map; i++) {
4376 int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4378 if (in_file_index < 0)
4380 if (in_file_index >= nb_input_files) {
4381 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4384 copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters: INT_MAX means "first input file that has chapters" */
4388 if (o->chapters_input_file >= nb_input_files) {
4389 if (o->chapters_input_file == INT_MAX) {
4390 /* copy chapters from the first input file that has them*/
4391 o->chapters_input_file = -1;
4392 for (i = 0; i < nb_input_files; i++)
4393 if (input_files[i]->ctx->nb_chapters) {
4394 o->chapters_input_file = i;
4398 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4399 o->chapters_input_file);
4403 if (o->chapters_input_file >= 0)
4404 copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4405 !o->metadata_chapters_manual);
4407 /* copy global metadata by default */
4408 if (!o->metadata_global_manual && nb_input_files)
4409 av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4410 AV_DICT_DONT_OVERWRITE);
4411 if (!o->metadata_streams_manual)
4412 for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4414 if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
4416 ist = input_streams[output_streams[i]->source_index];
4417 av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4420 /* process manually set metadata */
4421 for (i = 0; i < o->nb_metadata; i++) {
4424 const char *stream_spec;
4425 int index = 0, j, ret;
4427 val = strchr(o->metadata[i].u.str, '=');
4429 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4430 o->metadata[i].u.str);
4435 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
/* per-stream metadata: apply to every stream matching the specifier */
4437 for (j = 0; j < oc->nb_streams; j++) {
4438 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4439 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
/* NOTE(review): stray debug printf — presumably left over; verify */
4443 printf("ret %d, stream_spec %s\n", ret, stream_spec);
4451 if (index < 0 || index >= oc->nb_chapters) {
4452 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4455 m = &oc->chapters[index]->metadata;
4458 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
/* an empty value deletes the tag */
4461 av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4468 /* same option as mencoder */
/* -pass: select pass 1 or 2 of two-pass encoding. */
4469 static int opt_pass(const char *opt, const char *arg)
4471 do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return the user CPU time consumed by this process, in microseconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows, and
 * falls back to wall-clock time (av_gettime) elsewhere. */
4475 static int64_t getutime(void)
4478 struct rusage rusage;
4480 getrusage(RUSAGE_SELF, &rusage);
4481 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4482 #elif HAVE_GETPROCESSTIMES
4484 FILETIME c, e, k, u;
4485 proc = GetCurrentProcess();
4486 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100ns units; divide by 10 for microseconds */
4487 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4489 return av_gettime();
/* Return the peak memory usage of this process in bytes, for the
 * -benchmark report.  getrusage (ru_maxrss is in KiB, hence * 1024) or
 * GetProcessMemoryInfo on Windows; the fallback (elided) reports 0/unknown. */
4493 static int64_t getmaxrss(void)
4495 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4496 struct rusage rusage;
4497 getrusage(RUSAGE_SELF, &rusage);
4498 return (int64_t)rusage.ru_maxrss * 1024;
4499 #elif HAVE_GETPROCESSMEMORYINFO
4501 PROCESS_MEMORY_COUNTERS memcounters;
4502 proc = GetCurrentProcess();
4503 memcounters.cb = sizeof(memcounters);
4504 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4505 return memcounters.PeakPagefileUsage;
/* -aq: alias for the per-stream option "q:a". */
4511 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4513 return parse_option(o, "q:a", arg, options);
/* Print the one-line program usage summary to stdout. */
4516 static void show_usage(void)
4518 printf("Hyper fast Audio and Video encoder\n");
4519 printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/* Print the full help text: usage, the option table grouped by category
 * (main/advanced/video/audio/subtitle/grab), then the AVOptions of the
 * codec, format and swscale classes. */
4523 static void show_help(void)
4525 int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4526 av_log_set_callback(log_callback_help);
4528 show_help_options(options, "Main options:\n",
4529 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4530 show_help_options(options, "\nAdvanced options:\n",
4531 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4533 show_help_options(options, "\nVideo options:\n",
4534 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4536 show_help_options(options, "\nAdvanced Video options:\n",
4537 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4538 OPT_VIDEO | OPT_EXPERT);
4539 show_help_options(options, "\nAudio options:\n",
4540 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4542 show_help_options(options, "\nAdvanced Audio options:\n",
4543 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4544 OPT_AUDIO | OPT_EXPERT);
4545 show_help_options(options, "\nSubtitle options:\n",
4546 OPT_SUBTITLE | OPT_GRAB,
4548 show_help_options(options, "\nAudio/Video grab options:\n",
4552 show_help_children(avcodec_get_class(), flags);
4553 show_help_children(avformat_get_class(), flags);
4554 show_help_children(sws_get_class(), flags);
/* -target: configure a full preset for a standard target device/format
 * ("vcd", "svcd", "dvd", "dv"/"dv50", optionally prefixed with "pal-",
 * "ntsc-" or "film-").  Determines the TV norm either from the prefix or
 * by peeking at the frame rates of the input files, then sets codec,
 * container, size, rate, bitrate and mux options accordingly. */
4557 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4559 enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4560 static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
/* explicit norm prefix wins */
4562 if (!strncmp(arg, "pal-", 4)) {
4565 } else if (!strncmp(arg, "ntsc-", 5)) {
4568 } else if (!strncmp(arg, "film-", 5)) {
4572 /* Try to determine PAL/NTSC by peeking in the input files */
4573 if (nb_input_files) {
4575 for (j = 0; j < nb_input_files; j++) {
4576 for (i = 0; i < input_files[j]->nb_streams; i++) {
4577 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4578 if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* frame rate in millihertz, e.g. 25000 for PAL */
4580 fr = c->time_base.den * 1000 / c->time_base.num;
4584 } else if ((fr == 29970) || (fr == 23976)) {
4589 if (norm != UNKNOWN)
4593 if (norm != UNKNOWN)
4594 av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4597 if (norm == UNKNOWN) {
4598 av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4599 av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4600 av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
/* VCD: MPEG-1 video + MP2 audio in a VCD-compliant program stream */
4604 if (!strcmp(arg, "vcd")) {
4605 opt_video_codec(o, "c:v", "mpeg1video");
4606 opt_audio_codec(o, "c:a", "mp2");
4607 parse_option(o, "f", "vcd", options);
4609 parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4610 parse_option(o, "r", frame_rates[norm], options);
4611 opt_default("g", norm == PAL ? "15" : "18");
4613 opt_default("b", "1150000");
4614 opt_default("maxrate", "1150000");
4615 opt_default("minrate", "1150000");
4616 opt_default("bufsize", "327680"); // 40*1024*8;
4618 opt_default("b:a", "224000");
4619 parse_option(o, "ar", "44100", options);
4620 parse_option(o, "ac", "2", options);
4622 opt_default("packetsize", "2324");
4623 opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4625 /* We have to offset the PTS, so that it is consistent with the SCR.
4626 SCR starts at 36000, but the first two packs contain only padding
4627 and the first pack from the other stream, respectively, may also have
4628 been written before.
4629 So the real data starts at SCR 36000+3*1200. */
4630 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
/* SVCD: MPEG-2 video + MP2 audio */
4631 } else if (!strcmp(arg, "svcd")) {
4633 opt_video_codec(o, "c:v", "mpeg2video");
4634 opt_audio_codec(o, "c:a", "mp2");
4635 parse_option(o, "f", "svcd", options);
4637 parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4638 parse_option(o, "r", frame_rates[norm], options);
4639 opt_default("g", norm == PAL ? "15" : "18");
4641 opt_default("b", "2040000");
4642 opt_default("maxrate", "2516000");
4643 opt_default("minrate", "0"); // 1145000;
4644 opt_default("bufsize", "1835008"); // 224*1024*8;
4645 opt_default("flags", "+scan_offset");
4648 opt_default("b:a", "224000");
4649 parse_option(o, "ar", "44100", options);
4651 opt_default("packetsize", "2324");
/* DVD: MPEG-2 video + AC-3 audio */
4653 } else if (!strcmp(arg, "dvd")) {
4655 opt_video_codec(o, "c:v", "mpeg2video");
4656 opt_audio_codec(o, "c:a", "ac3");
4657 parse_option(o, "f", "dvd", options);
4659 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4660 parse_option(o, "r", frame_rates[norm], options);
4661 opt_default("g", norm == PAL ? "15" : "18");
4663 opt_default("b", "6000000");
4664 opt_default("maxrate", "9000000");
4665 opt_default("minrate", "0"); // 1500000;
4666 opt_default("bufsize", "1835008"); // 224*1024*8;
4668 opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4669 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4671 opt_default("b:a", "448000");
4672 parse_option(o, "ar", "48000", options);
/* DV / DV50: raw DV in the "dv" container */
4674 } else if (!strncmp(arg, "dv", 2)) {
4676 parse_option(o, "f", "dv", options);
4678 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4679 parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4680 norm == PAL ? "yuv420p" : "yuv411p", options);
4681 parse_option(o, "r", frame_rates[norm], options);
4683 parse_option(o, "ar", "48000", options);
4684 parse_option(o, "ac", "2", options);
4687 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4688 return AVERROR(EINVAL);
/* -vstats_file: remember the file name to write video statistics to. */
4693 static int opt_vstats_file(const char *opt, const char *arg)
4695 av_free (vstats_filename);
4696 vstats_filename = av_strdup (arg);
/* -vstats: enable video stats with an auto-generated file name based on
 * the current local time ("vstats_HHMMSS.log"). */
4700 static int opt_vstats(const char *opt, const char *arg)
4703 time_t today2 = time(NULL);
4704 struct tm *today = localtime(&today2);
4706 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4708 return opt_vstats_file(opt, filename);
/* -vframes: alias for the per-stream option "frames:v". */
4711 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4713 return parse_option(o, "frames:v", arg, options);
/* -aframes: alias for the per-stream option "frames:a". */
4716 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4718 return parse_option(o, "frames:a", arg, options);
/* -dframes: alias for the per-stream option "frames:d". */
4721 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4723 return parse_option(o, "frames:d", arg, options);
/* -vtag: alias for the per-stream option "tag:v". */
4726 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4728 return parse_option(o, "tag:v", arg, options);
/* -atag: alias for the per-stream option "tag:a". */
4731 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4733 return parse_option(o, "tag:a", arg, options);
/* -stag: alias for the per-stream option "tag:s". */
4736 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4738 return parse_option(o, "tag:s", arg, options);
/* -vf: alias for the per-stream option "filter:v". */
4741 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4743 return parse_option(o, "filter:v", arg, options);
/* -af: alias for the per-stream option "filter:a". */
4746 static int opt_audio_filters(OptionsContext *o, const char *opt, const char *arg)
4748 return parse_option(o, "filter:a", arg, options);
/* -vsync: set the video sync method by name ("cfr", "vfr", "passthrough")
 * or, for backward compatibility, by its numeric value. */
4751 static int opt_vsync(const char *opt, const char *arg)
4753 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4754 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4755 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
/* no keyword matched: fall back to parsing a numeric value */
4757 if (video_sync_method == VSYNC_AUTO)
4758 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* -deinterlace: deprecated flag; warns and (in elided code) enables the
 * equivalent behavior.  Users should use "-filter:v yadif" instead. */
4762 static int opt_deinterlace(const char *opt, const char *arg)
4764 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/* -cpuflags: parse a CPU-feature flag string and force libavutil's CPU
 * flag mask accordingly (error handling for a bad string is elided). */
4769 static int opt_cpuflags(const char *opt, const char *arg)
4771 int flags = av_parse_cpu_flags(arg);
4776 av_set_cpu_flags_mask(flags);
/* Scan argv for -cpuflags before normal option parsing so the CPU mask is
 * applied as early as possible. */
4780 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4782 int idx = locate_option(argc, argv, options, "cpuflags");
4783 if (idx && argv[idx + 1])
4784 opt_cpuflags("cpuflags", argv[idx + 1]);
/* -channel_layout: set the channel layout option (as its numeric value)
 * and additionally derive and set the matching "ac" (channel count)
 * option, preserving any ":stream_specifier" suffix from 'opt'. */
4787 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4789 char layout_str[32];
4792 int ret, channels, ac_str_size;
4795 layout = av_get_channel_layout(arg);
4797 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4798 return AVERROR(EINVAL);
/* pass the layout through as a plain number to the generic option code */
4800 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4801 ret = opt_default(opt, layout_str);
4805 /* set 'ac' option based on channel layout */
4806 channels = av_get_channel_layout_nb_channels(layout);
4807 snprintf(layout_str, sizeof(layout_str), "%d", channels);
/* keep the same stream specifier (e.g. ":a:0") on the synthesized "ac" */
4808 stream_str = strchr(opt, ':');
4809 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4810 ac_str = av_mallocz(ac_str_size);
4812 return AVERROR(ENOMEM);
4813 av_strlcpy(ac_str, "ac", 3);
4815 av_strlcat(ac_str, stream_str, ac_str_size);
4816 ret = parse_option(o, ac_str, layout_str, options);
/* -filter_complex: register a new complex filtergraph description; the
 * graph itself is parsed/configured later (configure_complex_filters). */
4822 static int opt_filter_complex(const char *opt, const char *arg)
4824 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4825 &nb_filtergraphs, nb_filtergraphs + 1);
4826 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4827 return AVERROR(ENOMEM);
4828 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4829 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4833 #define OFFSET(x) offsetof(OptionsContext, x)
/* Command-line option table for avconv, consumed by the cmdutils option
 * parser.  Each OptionDef entry maps an option name to its flag word and
 * its storage: a global variable via {(void*)&var}, an OptionsContext
 * field via OPT_OFFSET/OPT_SPEC + {.off = OFFSET(field)}, or a callback
 * via OPT_FUNC2 / a bare function pointer.
 *
 * Fix: the "copyts"/"copytb" entries had been corrupted by HTML-entity
 * decoding ("&copy" -> "©"), turning "&copy_ts"/"&copy_tb" into the
 * meaningless "©_ts"/"©_tb"; the address-of expressions for the
 * copy_ts / copy_tb globals are restored below. */
4834 static const OptionDef options[] = {
4836 #include "cmdutils_common_opts.h"
4837 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4838 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4839 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4840 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4841 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4842 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4843 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4844 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4845 "outfile[,metadata]:infile[,metadata]" },
4846 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4847 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4848 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4849 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4850 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4851 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4852 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4853 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4854 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4855 "add timings for benchmarking" },
4856 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4857 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4858 "dump each input packet" },
4859 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4860 "when dumping packets, also dump the payload" },
4861 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4862 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4863 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4864 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4865 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4866 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
4867 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
4868 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4869 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4870 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4871 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4872 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4873 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4874 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4875 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4876 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4877 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4878 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4879 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4880 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4881 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
/* video options */
4884 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4885 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4886 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4887 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4888 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4889 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4890 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4891 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4892 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4893 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4894 "use same quantizer as source (implies VBR)" },
4895 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4896 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4897 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4898 "this option is deprecated, use the yadif filter instead" },
4899 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4900 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4901 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4902 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4903 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4904 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4905 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4906 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4907 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4908 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4909 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4910 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
/* audio options */
4913 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4914 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4915 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4916 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4917 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4918 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4919 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4920 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4921 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4922 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
4923 { "af", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_filters}, "audio filters", "filter list" },
4925 /* subtitle options */
4926 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4927 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4928 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
/* grab (device capture) options */
4931 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
/* muxer options */
4934 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
4935 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
4937 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
4939 /* data codec support */
4940 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
4942 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
4946 int main(int argc, char **argv)
4948 OptionsContext o = { 0 };
4953 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4954 parse_loglevel(argc, argv, options);
4956 avcodec_register_all();
4958 avdevice_register_all();
4960 avfilter_register_all();
4962 avformat_network_init();
4966 parse_cpuflags(argc, argv, options);
4969 parse_options(&o, argc, argv, options, opt_output_file);
4971 if (nb_output_files <= 0 && nb_input_files == 0) {
4973 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4977 /* file converter / grab */
4978 if (nb_output_files <= 0) {
4979 fprintf(stderr, "At least one output file must be specified\n");
4983 if (nb_input_files == 0) {
4984 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4989 if (transcode() < 0)
4991 ti = getutime() - ti;
4993 int maxrss = getmaxrss() / 1024;
4994 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);