3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
/* Mapping tables relating input streams/files to output streams/files.
 * NOTE(review): this extract omits interior lines of both structs. */
79 #define VSYNC_PASSTHROUGH 0
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
86 /* select an input stream for an output stream */
87 typedef struct StreamMap {
88 int disabled; /** 1 if this mapping is disabled by a negative map */
92 int sync_stream_index;
93 char *linklabel; /** name of an output link, for mapping lavfi outputs */
97 * select an input file for an output file
99 typedef struct MetadataMap {
100 int file; ///< file index
101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
102 int index; ///< stream/chapter/program number
/* File-scope option state shared by the whole transcoder.
 * Defaults here are overridden by command-line parsing elsewhere. */
105 static const OptionDef options[];
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
145 typedef struct InputFilter {
146 AVFilterContext *filter;
147 struct InputStream *ist;
148 struct FilterGraph *graph;
151 typedef struct OutputFilter {
152 AVFilterContext *filter;
153 struct OutputStream *ost;
154 struct FilterGraph *graph;
156 /* temporary storage until stream maps are processed */
157 AVFilterInOut *out_tmp;
160 typedef struct FilterGraph {
162 const char *graph_desc;
164 AVFilterGraph *graph;
166 InputFilter **inputs;
168 OutputFilter **outputs;
172 typedef struct FrameBuffer {
178 enum PixelFormat pix_fmt;
181 struct InputStream *ist;
182 struct FrameBuffer *next;
/* Per-input-stream state: decoding flags, timestamps, resample parameters,
 * the free-buffer pool and the filters this stream feeds.
 * NOTE(review): several fields are omitted from this extract. */
185 typedef struct InputStream {
188 int discard; /* true if stream data should be discarded */
189 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
191 AVFrame *decoded_frame;
193 int64_t start; /* time when read started */
194 /* predicted dts of the next packet read for this stream or (when there are
195 * several frames in a packet) of the next frame in current packet */
197 /* dts of the last packet read for this stream */
199 PtsCorrectionContext pts_ctx;
201 int is_start; /* is 1 at the start and after a discontinuity */
202 int showed_multi_packet_warning;
207 int resample_pix_fmt;
209 int resample_sample_fmt;
210 int resample_sample_rate;
211 int resample_channels;
212 uint64_t resample_channel_layout;
214 /* a pool of free buffers for decoded data */
215 FrameBuffer *buffer_pool;
217 /* decoded data from this stream goes into all those filters
218 * currently video only */
219 InputFilter **filters;
/* Per-input-file state wrapping the demuxer context. */
223 typedef struct InputFile {
224 AVFormatContext *ctx;
225 int eof_reached; /* true if eof reached */
226 int ist_index; /* index of first stream in ist_table */
227 int buffer_size; /* current total buffer size */
229 int nb_streams; /* number of stream that avconv is aware of; may be different
230 from ctx.nb_streams if new streams appear during av_read_frame() */
/* Per-output-stream state: encoder, sync bookkeeping, filters and options.
 * NOTE(review): many fields are omitted from this extract. */
234 typedef struct OutputStream {
235 int file_index; /* file index */
236 int index; /* stream index in the output file */
237 int source_index; /* InputStream index */
238 AVStream *st; /* stream in the output file */
239 int encoding_needed; /* true if encoding needed for this stream */
241 /* input pts and corresponding output pts
243 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
244 struct InputStream *sync_ist; /* input stream to sync against */
245 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
246 /* pts of the first frame encoded for this stream, used for limiting
249 AVBitStreamFilterContext *bitstream_filters;
252 AVFrame *filtered_frame;
255 AVRational frame_rate;
259 float frame_aspect_ratio;
262 /* forced key frames */
263 int64_t *forced_kf_pts;
269 OutputFilter *filter;
274 int is_past_recording_time;
276 const char *attachment_filename;
277 int copy_initial_nonkeyframes;
279 enum PixelFormat pix_fmts[2];
/* Per-output-file state wrapping the muxer context. */
283 typedef struct OutputFile {
284 AVFormatContext *ctx;
286 int ost_index; /* index of the first stream in output_streams */
287 int64_t recording_time; /* desired length of the resulting file in microseconds */
288 int64_t start_time; /* start time in microseconds */
289 uint64_t limit_filesize;
/* Global dynamic arrays of all streams, files and filtergraphs,
 * each paired with its element count. */
292 static InputStream **input_streams = NULL;
293 static int nb_input_streams = 0;
294 static InputFile **input_files = NULL;
295 static int nb_input_files = 0;
297 static OutputStream **output_streams = NULL;
298 static int nb_output_streams = 0;
299 static OutputFile **output_files = NULL;
300 static int nb_output_files = 0;
302 static FilterGraph **filtergraphs;
/* Collected command-line options for one input or output file.
 * Each SpecifierOpt array is paired with an nb_* count; stream-specifier
 * matching is done by MATCH_PER_STREAM_OPT below.
 * NOTE(review): many members are omitted from this extract. */
305 typedef struct OptionsContext {
306 /* input/output options */
310 SpecifierOpt *codec_names;
312 SpecifierOpt *audio_channels;
313 int nb_audio_channels;
314 SpecifierOpt *audio_sample_rate;
315 int nb_audio_sample_rate;
316 SpecifierOpt *frame_rates;
318 SpecifierOpt *frame_sizes;
320 SpecifierOpt *frame_pix_fmts;
321 int nb_frame_pix_fmts;
324 int64_t input_ts_offset;
327 SpecifierOpt *ts_scale;
329 SpecifierOpt *dump_attachment;
330 int nb_dump_attachment;
333 StreamMap *stream_maps;
335 /* first item specifies output metadata, second is input */
336 MetadataMap (*meta_data_maps)[2];
337 int nb_meta_data_maps;
338 int metadata_global_manual;
339 int metadata_streams_manual;
340 int metadata_chapters_manual;
341 const char **attachments;
344 int chapters_input_file;
346 int64_t recording_time;
347 uint64_t limit_filesize;
353 int subtitle_disable;
356 /* indexed by output file stream index */
360 SpecifierOpt *metadata;
362 SpecifierOpt *max_frames;
364 SpecifierOpt *bitstream_filters;
365 int nb_bitstream_filters;
366 SpecifierOpt *codec_tags;
368 SpecifierOpt *sample_fmts;
370 SpecifierOpt *qscale;
372 SpecifierOpt *forced_key_frames;
373 int nb_forced_key_frames;
374 SpecifierOpt *force_fps;
376 SpecifierOpt *frame_aspect_ratios;
377 int nb_frame_aspect_ratios;
378 SpecifierOpt *rc_overrides;
380 SpecifierOpt *intra_matrices;
381 int nb_intra_matrices;
382 SpecifierOpt *inter_matrices;
383 int nb_inter_matrices;
384 SpecifierOpt *top_field_first;
385 int nb_top_field_first;
386 SpecifierOpt *metadata_map;
388 SpecifierOpt *presets;
390 SpecifierOpt *copy_initial_nonkeyframes;
391 int nb_copy_initial_nonkeyframes;
392 SpecifierOpt *filters;
/* Scan o->name[] for the entry whose specifier matches (fmtctx, st) and
 * copy its value into outvar. Multiple-evaluation macro: last match wins. */
396 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
399 for (i = 0; i < o->nb_ ## name; i++) {\
400 char *spec = o->name[i].specifier;\
401 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
402 outvar = o->name[i].u.type;\
/* Free all option storage held by *o (SpecifierOpt arrays, stream maps,
 * metadata maps) and reset it to the built-in defaults.
 * NOTE(review): interior lines are missing from this extract; comments
 * describe only the visible code. */
408 static void reset_options(OptionsContext *o)
410 const OptionDef *po = options;
413 /* all OPT_SPEC and OPT_STRING can be freed in generic way */
415 void *dst = (uint8_t*)o + po->u.off;
417 if (po->flags & OPT_SPEC) {
418 SpecifierOpt **so = dst;
/* count lives immediately after the SpecifierOpt* in the struct */
419 int i, *count = (int*)(so + 1);
420 for (i = 0; i < *count; i++) {
421 av_freep(&(*so)[i].specifier);
422 if (po->flags & OPT_STRING)
423 av_freep(&(*so)[i].u.str);
427 } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
432 for (i = 0; i < o->nb_stream_maps; i++)
433 av_freep(&o->stream_maps[i].linklabel);
434 av_freep(&o->stream_maps);
435 av_freep(&o->meta_data_maps);
436 av_freep(&o->streamid_map);
/* wipe everything, then restore the non-zero defaults */
438 memset(o, 0, sizeof(*o));
440 o->mux_max_delay = 0.7;
441 o->recording_time = INT64_MAX;
442 o->limit_filesize = UINT64_MAX;
443 o->chapters_input_file = INT_MAX;
/* Allocate one FrameBuffer sized for the codec context 's' and store it in
 * *pbuf, padding for edge emulation unless CODEC_FLAG_EMU_EDGE is set.
 * Returns 0 on success or a negative AVERROR.
 * NOTE(review): interior lines (declarations of ret/i, some assignments,
 * error cleanup) are missing from this extract. */
449 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
451 FrameBuffer *buf = av_mallocz(sizeof(*buf));
453 const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
454 int h_chroma_shift, v_chroma_shift;
455 int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
456 int w = s->width, h = s->height;
459 return AVERROR(ENOMEM);
461 if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
466 avcodec_align_dimensions(s, &w, &h);
467 if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
468 s->pix_fmt, 32)) < 0) {
472 /* XXX this shouldn't be needed, but some tests break without this line
473 * those decoders are buggy and need to be fixed.
474 * the following tests fail:
475 * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
477 memset(buf->base[0], 128, ret);
479 avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
480 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
481 const int h_shift = i==0 ? 0 : h_chroma_shift;
482 const int v_shift = i==0 ? 0 : v_chroma_shift;
483 if (s->flags & CODEC_FLAG_EMU_EDGE)
484 buf->data[i] = buf->base[i];
/* offset past the edge border, 32-byte aligned */
486 buf->data[i] = buf->base[i] +
487 FFALIGN((buf->linesize[i]*edge >> v_shift) +
488 (pixel_size*edge >> h_shift), 32);
492 buf->pix_fmt = s->pix_fmt;
/* Release every FrameBuffer in the stream's free-buffer pool.
 * NOTE(review): loop structure lines are missing from this extract. */
499 static void free_buffer_pool(InputStream *ist)
501 FrameBuffer *buf = ist->buffer_pool;
503 ist->buffer_pool = buf->next;
504 av_freep(&buf->base[0]);
506 buf = ist->buffer_pool;
/* Drop one reference from buf; when the count reaches zero, push it back
 * onto the stream's free-buffer pool for reuse. */
510 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
512 av_assert0(buf->refcount);
514 if (!buf->refcount) {
515 buf->next = ist->buffer_pool;
516 ist->buffer_pool = buf;
/* AVCodecContext.get_buffer callback: hand the decoder a frame backed by a
 * pooled FrameBuffer, reallocating if dimensions/format changed.
 * NOTE(review): interior lines (declarations, refcount update) are missing
 * from this extract. */
520 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
522 InputStream *ist = s->opaque;
526 if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
529 buf = ist->buffer_pool;
530 ist->buffer_pool = buf->next;
/* pooled buffer no longer matches the stream parameters: replace it */
532 if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
533 av_freep(&buf->base[0]);
535 if ((ret = alloc_buffer(ist, s, &buf)) < 0)
541 frame->type = FF_BUFFER_TYPE_USER;
542 frame->extended_data = frame->data;
543 frame->pkt_pts = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
544 frame->width = buf->w;
545 frame->height = buf->h;
546 frame->format = buf->pix_fmt;
547 frame->sample_aspect_ratio = s->sample_aspect_ratio;
549 for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
550 frame->base[i] = buf->base[i]; // XXX h264.c uses base though it shouldn't
551 frame->data[i] = buf->data[i];
552 frame->linesize[i] = buf->linesize[i];
/* AVCodecContext.release_buffer callback: clear the frame's data pointers
 * and return the backing FrameBuffer to the pool via unref_buffer(). */
558 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
560 InputStream *ist = s->opaque;
561 FrameBuffer *buf = frame->opaque;
564 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
565 frame->data[i] = NULL;
567 unref_buffer(ist, buf);
/* AVFilterBuffer free callback: return the underlying FrameBuffer
 * (stashed in fb->priv) to its owning stream's pool. */
570 static void filter_release_buffer(AVFilterBuffer *fb)
572 FrameBuffer *buf = fb->priv;
574 unref_buffer(buf->ist, buf);
578 * Define a function for building a string containing a list of
/* Expands to choose_<var>s(ost): returns a malloc'ed string naming either
 * the explicitly requested value or all encoder-supported values joined by
 * 'separator'. Caller frees. NOTE(review): some macro lines are missing
 * from this extract. */
581 #define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator) \
582 static char *choose_ ## var ## s(OutputStream *ost) \
584 if (ost->st->codec->var != none) { \
585 get_name(ost->st->codec->var); \
586 return av_strdup(name); \
587 } else if (ost->enc->supported_list) { \
589 AVIOContext *s = NULL; \
593 if (avio_open_dyn_buf(&s) < 0) \
596 for (p = ost->enc->supported_list; *p != none; p++) { \
598 avio_printf(s, "%s" separator, name); \
600 len = avio_close_dyn_buf(s, &ret); \
/* helper that binds 'name' for the macro above */
607 #define GET_PIX_FMT_NAME(pix_fmt)\
608 const char *name = av_get_pix_fmt_name(pix_fmt);
610 DEF_CHOOSE_FORMAT(enum PixelFormat, pix_fmt, pix_fmts, PIX_FMT_NONE,
611 GET_PIX_FMT_NAME, ":")
613 #define GET_SAMPLE_FMT_NAME(sample_fmt)\
614 const char *name = av_get_sample_fmt_name(sample_fmt)
616 DEF_CHOOSE_FORMAT(enum AVSampleFormat, sample_fmt, sample_fmts,
617 AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME, ",")
619 #define GET_SAMPLE_RATE_NAME(rate)\
621 snprintf(name, sizeof(name), "%d", rate);
623 DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
624 GET_SAMPLE_RATE_NAME, ",")
626 #define GET_CH_LAYOUT_NAME(ch_layout)\
628 snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
630 DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
631 GET_CH_LAYOUT_NAME, ",")
/* Build the fixed endpoints of a simple audio filtergraph: an "abuffer"
 * source fed from the input stream, an "abuffersink" output, and an
 * optional "aformat" filter constraining sample fmt/rate/layout to what
 * the encoder accepts. Returns 0 or a negative AVERROR.
 * NOTE(review): error-check lines after each create/link call are missing
 * from this extract. */
633 static int configure_audio_filters(FilterGraph *fg, AVFilterContext **in_filter,
634 AVFilterContext **out_filter)
636 InputStream *ist = fg->inputs[0]->ist;
637 OutputStream *ost = fg->outputs[0]->ost;
638 AVCodecContext *codec = ost->st->codec;
639 AVCodecContext *icodec = ist->st->codec;
640 char *sample_fmts, *sample_rates, *channel_layouts;
644 avfilter_graph_free(&fg->graph);
645 if (!(fg->graph = avfilter_graph_alloc()))
646 return AVERROR(ENOMEM);
/* describe the input audio to the abuffer source */
648 snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:"
649 "channel_layout=0x%"PRIx64, ist->st->time_base.num,
650 ist->st->time_base.den, icodec->sample_rate,
651 av_get_sample_fmt_name(icodec->sample_fmt), icodec->channel_layout);
652 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
653 avfilter_get_by_name("abuffer"),
654 "src", args, NULL, fg->graph);
658 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
659 avfilter_get_by_name("abuffersink"),
660 "out", NULL, NULL, fg->graph);
664 *in_filter = fg->inputs[0]->filter;
665 *out_filter = fg->outputs[0]->filter;
/* derive a layout when only the channel count is known */
667 if (codec->channels && !codec->channel_layout)
668 codec->channel_layout = av_get_default_channel_layout(codec->channels);
670 sample_fmts = choose_sample_fmts(ost);
671 sample_rates = choose_sample_rates(ost);
672 channel_layouts = choose_channel_layouts(ost);
673 if (sample_fmts || sample_rates || channel_layouts) {
674 AVFilterContext *format;
679 len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
682 len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
685 len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
689 av_freep(&sample_fmts);
690 av_freep(&sample_rates);
691 av_freep(&channel_layouts);
693 ret = avfilter_graph_create_filter(&format,
694 avfilter_get_by_name("aformat"),
695 "aformat", args, NULL, fg->graph);
/* insert aformat directly before the sink */
699 ret = avfilter_link(format, 0, fg->outputs[0]->filter, 0);
703 *out_filter = format;
/* Build the fixed endpoints of a simple video filtergraph: a "buffer"
 * source describing the input video, a "buffersink" output, plus optional
 * "scale" (when the encoder has fixed dimensions) and "format" filters.
 * Returns 0 or a negative AVERROR.
 * NOTE(review): error-check lines after create/link calls are missing
 * from this extract. */
709 static int configure_video_filters(FilterGraph *fg, AVFilterContext **in_filter,
710 AVFilterContext **out_filter)
712 InputStream *ist = fg->inputs[0]->ist;
713 OutputStream *ost = fg->outputs[0]->ost;
714 AVFilterContext *filter;
715 AVCodecContext *codec = ost->st->codec;
717 AVRational sample_aspect_ratio;
/* prefer the container-level SAR; fall back to the codec's */
721 if (ist->st->sample_aspect_ratio.num) {
722 sample_aspect_ratio = ist->st->sample_aspect_ratio;
724 sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
726 snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
727 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
728 sample_aspect_ratio.num, sample_aspect_ratio.den);
730 ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
731 avfilter_get_by_name("buffer"),
732 "src", args, NULL, fg->graph);
735 ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
736 avfilter_get_by_name("buffersink"),
737 "out", NULL, NULL, fg->graph);
740 *in_filter = fg->inputs[0]->filter;
741 *out_filter = fg->outputs[0]->filter;
/* encoder requested an output size: insert a scale filter */
743 if (codec->width || codec->height) {
744 snprintf(args, 255, "%d:%d:flags=0x%X",
747 (unsigned)ost->sws_flags);
748 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
749 NULL, args, NULL, fg->graph)) < 0)
751 if ((ret = avfilter_link(*in_filter, 0, filter, 0)) < 0)
/* constrain pixel formats to the encoder's supported set */
756 if ((pix_fmts = choose_pix_fmts(ost))) {
757 if ((ret = avfilter_graph_create_filter(&filter,
758 avfilter_get_by_name("format"),
759 "format", pix_fmts, NULL,
762 if ((ret = avfilter_link(filter, 0, *out_filter, 0)) < 0)
765 *out_filter = filter;
769 snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
770 fg->graph->scale_sws_opts = av_strdup(args);
/* (Re)configure a one-input/one-output filtergraph for ost: build the
 * type-specific endpoints, then either parse the user's filter string
 * (ost->avfilter) between them or link them directly.
 * NOTE(review): interior lines (declarations, error handling, the branch
 * between parse and direct link) are missing from this extract. */
775 static int configure_simple_filtergraph(FilterGraph *fg)
777 OutputStream *ost = fg->outputs[0]->ost;
778 AVFilterContext *in_filter, *out_filter;
781 avfilter_graph_free(&fg->graph);
782 fg->graph = avfilter_graph_alloc();
784 switch (ost->st->codec->codec_type) {
785 case AVMEDIA_TYPE_VIDEO:
786 ret = configure_video_filters(fg, &in_filter, &out_filter);
788 case AVMEDIA_TYPE_AUDIO:
789 ret = configure_audio_filters(fg, &in_filter, &out_filter);
791 default: av_assert0(0);
797 AVFilterInOut *outputs = avfilter_inout_alloc();
798 AVFilterInOut *inputs = avfilter_inout_alloc();
/* "in"/"out" are the labels the user's filter string connects to */
800 outputs->name = av_strdup("in");
801 outputs->filter_ctx = in_filter;
802 outputs->pad_idx = 0;
803 outputs->next = NULL;
805 inputs->name = av_strdup("out");
806 inputs->filter_ctx = out_filter;
810 if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
813 if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
817 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
820 ost->filter = fg->outputs[0];
/* Allocate a FilterGraph connecting exactly one input stream to one output
 * stream, register it in the stream's filter list and the global
 * filtergraphs array, and return it.
 * NOTE(review): NULL-check and return lines are missing from this extract. */
825 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
827 FilterGraph *fg = av_mallocz(sizeof(*fg));
831 fg->index = nb_filtergraphs;
833 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
835 if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
837 fg->outputs[0]->ost = ost;
838 fg->outputs[0]->graph = fg;
840 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
842 if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
844 fg->inputs[0]->ist = ist;
845 fg->inputs[0]->graph = fg;
/* let the input stream know it feeds this graph */
847 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
848 &ist->nb_filters, ist->nb_filters + 1);
849 ist->filters[ist->nb_filters - 1] = fg->inputs[0];
851 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
852 &nb_filtergraphs, nb_filtergraphs + 1);
853 filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected input pad of a complex filtergraph to an input
 * stream: either the stream named by the pad's "file:spec" label, or the
 * first unused stream of the matching media type. Marks the stream as
 * needing decoding and records the new InputFilter.
 * NOTE(review): several lines (exit calls, loop breaks) are missing from
 * this extract. */
858 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
861 enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
864 // TODO: support other filter types
865 if (type != AVMEDIA_TYPE_VIDEO) {
866 av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
/* pad is labeled: parse "file_index[:stream_spec]" */
874 int file_idx = strtol(in->name, &p, 0);
876 if (file_idx < 0 || file_idx >= nb_input_files) {
877 av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
878 file_idx, fg->graph_desc);
881 s = input_files[file_idx]->ctx;
883 for (i = 0; i < s->nb_streams; i++) {
884 if (s->streams[i]->codec->codec_type != type)
886 if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
892 av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
893 "matches no streams.\n", p, fg->graph_desc);
896 ist = input_streams[input_files[file_idx]->ist_index + st->index];
898 /* find the first unused stream of corresponding type */
899 for (i = 0; i < nb_input_streams; i++) {
900 ist = input_streams[i];
901 if (ist->st->codec->codec_type == type && ist->discard)
904 if (i == nb_input_streams) {
905 av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
906 "unlabeled input pad %d on filter %s", in->pad_idx,
907 in->filter_ctx->name);
/* the chosen stream must now be decoded, not discarded */
912 ist->decoding_needed = 1;
913 ist->st->discard = AVDISCARD_NONE;
915 fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
916 &fg->nb_inputs, fg->nb_inputs + 1);
917 if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
919 fg->inputs[fg->nb_inputs - 1]->ist = ist;
920 fg->inputs[fg->nb_inputs - 1]->graph = fg;
922 ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
923 &ist->nb_filters, ist->nb_filters + 1);
924 ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Connect one lavfi output pad ('out') of a complex graph to its
 * OutputStream: create a buffersink, optionally inserting "scale" and
 * "format" filters between the last graph filter and the sink.
 * Returns 0 or a negative AVERROR.
 * NOTE(review): error-check lines after create/link calls are missing
 * from this extract. */
927 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
930 AVCodecContext *codec = ofilter->ost->st->codec;
931 AVFilterContext *last_filter = out->filter_ctx;
932 int pad_idx = out->pad_idx;
936 ret = avfilter_graph_create_filter(&ofilter->filter,
937 avfilter_get_by_name("buffersink"),
938 "out", NULL, pix_fmts, fg->graph);
/* encoder requested an output size: insert a scale filter */
942 if (codec->width || codec->height) {
944 AVFilterContext *filter;
946 snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
949 (unsigned)ofilter->ost->sws_flags);
950 if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
951 NULL, args, NULL, fg->graph)) < 0)
953 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
956 last_filter = filter;
/* constrain pixel formats to the encoder's supported set */
960 if ((pix_fmts = choose_pix_fmts(ofilter->ost))) {
961 AVFilterContext *filter;
962 if ((ret = avfilter_graph_create_filter(&filter,
963 avfilter_get_by_name("format"),
964 "format", pix_fmts, NULL,
967 if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
970 last_filter = filter;
975 if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* (Re)configure a complex (user-described, possibly multi-in/multi-out)
 * filtergraph: parse the description, bind input pads to streams (first
 * time only, via init_input_filter), create buffer sources, and either
 * finish output wiring or stash unmatched outputs in out_tmp until stream
 * maps are processed.
 * NOTE(review): interior lines (declarations, error handling, the branch
 * separating the two output paths) are missing from this extract. */
981 static int configure_complex_filter(FilterGraph *fg)
983 AVFilterInOut *inputs, *outputs, *cur;
984 int ret, i, init = !fg->graph;
986 avfilter_graph_free(&fg->graph);
987 if (!(fg->graph = avfilter_graph_alloc()))
988 return AVERROR(ENOMEM);
990 if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
/* bind pads to streams only on the first configuration */
993 for (cur = inputs; init && cur; cur = cur->next)
994 init_input_filter(fg, cur);
996 for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
997 InputFilter *ifilter = fg->inputs[i];
998 InputStream *ist = ifilter->ist;
1002 sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
1003 ist->st->codec->sample_aspect_ratio;
1004 snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
1005 ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
1008 if ((ret = avfilter_graph_create_filter(&ifilter->filter,
1009 avfilter_get_by_name("buffer"), cur->name,
1010 args, NULL, fg->graph)) < 0)
1012 if ((ret = avfilter_link(ifilter->filter, 0,
1013 cur->filter_ctx, cur->pad_idx)) < 0)
1016 avfilter_inout_free(&inputs);
1019 /* we already know the mappings between lavfi outputs and output streams,
1020 * so we can finish the setup */
1021 for (cur = outputs, i = 0; cur; cur = cur->next, i++)
1022 configure_output_filter(fg, fg->outputs[i], cur);
1023 avfilter_inout_free(&outputs);
1025 if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
1028 /* wait until output mappings are processed */
1029 for (cur = outputs; cur;) {
1030 fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
1031 &fg->nb_outputs, fg->nb_outputs + 1);
1032 if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
1034 fg->outputs[fg->nb_outputs - 1]->graph = fg;
1035 fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
1037 fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
/* Configure every not-yet-configured complex filtergraph; stops at the
 * first failing graph. */
1044 static int configure_complex_filters(void)
1048 for (i = 0; i < nb_filtergraphs; i++)
1049 if (!filtergraphs[i]->graph &&
1050 (ret = configure_complex_filter(filtergraphs[i])) < 0)
/* Dispatch: a graph with a textual description is complex, otherwise it is
 * a simple one-in/one-out graph. */
1055 static int configure_filtergraph(FilterGraph *fg)
1057 return fg->graph_desc ? configure_complex_filter(fg) :
1058 configure_simple_filtergraph(fg);
/* Return whether input stream 'ist' feeds any input pad of graph 'fg'. */
1061 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
1064 for (i = 0; i < fg->nb_inputs; i++)
1065 if (fg->inputs[i]->ist == ist)
/* Terminal cleanup hook run at exit; the empty QUIET log flushes state. */
1070 static void term_exit(void)
1072 av_log(NULL, AV_LOG_QUIET, "");
/* Signal state: last signal received and how many arrived; volatile since
 * written from the async signal handler below. */
1075 static volatile int received_sigterm = 0;
1076 static volatile int received_nb_signals = 0;
/* Async-signal handler: record the signal; the main loop polls the flags. */
1079 sigterm_handler(int sig)
1081 received_sigterm = sig;
1082 received_nb_signals++;
/* Install sigterm_handler for the signals that should stop transcoding.
 * NOTE(review): the SIGXCPU registration is presumably inside an #ifdef
 * omitted from this extract — confirm against the full file. */
1086 static void term_init(void)
1088 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
1089 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
1091 signal(SIGXCPU, sigterm_handler);
/* AVIO interrupt callback: abort blocking I/O after a second signal
 * (the first signal requests a clean shutdown instead). */
1095 static int decode_interrupt_cb(void *ctx)
1097 return received_nb_signals > 1;
1100 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Free all global state (filtergraphs, output files/streams, input
 * files/streams, vstats), deinit networking, report a terminating signal
 * if one was received, and exit with 'ret'.
 * NOTE(review): interior lines (loop bodies, the final exit call) are
 * missing from this extract. */
1102 void exit_program(int ret)
1106 for (i = 0; i < nb_filtergraphs; i++) {
1107 avfilter_graph_free(&filtergraphs[i]->graph);
1108 for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
1109 av_freep(&filtergraphs[i]->inputs[j]);
1110 av_freep(&filtergraphs[i]->inputs);
1111 for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
1112 av_freep(&filtergraphs[i]->outputs[j]);
1113 av_freep(&filtergraphs[i]->outputs);
1114 av_freep(&filtergraphs[i]);
1116 av_freep(&filtergraphs);
/* close muxer outputs before freeing their contexts */
1119 for (i = 0; i < nb_output_files; i++) {
1120 AVFormatContext *s = output_files[i]->ctx;
1121 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1123 avformat_free_context(s);
1124 av_dict_free(&output_files[i]->opts);
1125 av_freep(&output_files[i]);
1127 for (i = 0; i < nb_output_streams; i++) {
1128 AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1130 AVBitStreamFilterContext *next = bsfc->next;
1131 av_bitstream_filter_close(bsfc);
1134 output_streams[i]->bitstream_filters = NULL;
1136 av_freep(&output_streams[i]->avfilter);
1137 av_freep(&output_streams[i]->filtered_frame);
1138 av_freep(&output_streams[i]);
1140 for (i = 0; i < nb_input_files; i++) {
1141 avformat_close_input(&input_files[i]->ctx);
1142 av_freep(&input_files[i]);
1144 for (i = 0; i < nb_input_streams; i++) {
1145 av_freep(&input_streams[i]->decoded_frame);
1146 av_dict_free(&input_streams[i]->opts);
1147 free_buffer_pool(input_streams[i]);
1148 av_freep(&input_streams[i]->filters);
1149 av_freep(&input_streams[i]);
1153 fclose(vstats_file);
1154 av_free(vstats_filename);
1156 av_freep(&input_streams);
1157 av_freep(&input_files);
1158 av_freep(&output_streams);
1159 av_freep(&output_files);
1164 avformat_network_deinit();
1166 if (received_sigterm) {
1167 av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1168 (int) received_sigterm);
/* Abort with a fatal log if any option in 'm' was not consumed by the
 * library (i.e. the user passed an unknown option). */
1175 static void assert_avoptions(AVDictionary *m)
1177 AVDictionaryEntry *t;
1178 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1179 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
/* Abort if 'c' uses an experimental codec without -strict experimental,
 * suggesting a non-experimental alternative codec if one exists.
 * NOTE(review): the 'codec' declaration and exit call are missing from
 * this extract. */
1184 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1186 const char *codec_string = encoder ? "encoder" : "decoder";
1188 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1189 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1190 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1191 "results.\nAdd '-strict experimental' if you want to use it.\n",
1192 codec_string, c->codec->name);
1193 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1194 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1195 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1196 codec_string, codec->name);
1202 * Update the requested input sample format based on the output sample format.
1203 * This is currently only used to request float output from decoders which
1204 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1205 * Ideally this will be removed in the future when decoders do not do format
1206 * conversion and only output in their native format.
1208 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1209 AVCodecContext *enc)
1211 /* if sample formats match or a decoder sample format has already been
1212 requested, just return */
1213 if (enc->sample_fmt == dec->sample_fmt ||
1214 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1217 /* if decoder supports more than one output format */
1218 if (dec_codec && dec_codec->sample_fmts &&
1219 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1220 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1221 const enum AVSampleFormat *p;
1222 int min_dec = -1, min_inc = -1;
1224 /* find a matching sample format in the encoder */
1225 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1226 if (*p == enc->sample_fmt) {
1227 dec->request_sample_fmt = *p;
/* track the smallest distance above/below the encoder format */
1229 } else if (*p > enc->sample_fmt) {
1230 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1232 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1235 /* if none match, provide the one that matches quality closest */
1236 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1237 enc->sample_fmt - min_dec;
/* Convert an AV_TIME_BASE pts to seconds relative to the output file's
 * start time, for A/V sync computations. */
1242 get_sync_ipts(const OutputStream *ost, int64_t pts)
1244 OutputFile *of = output_files[ost->file_index];
1245 return (double)(pts - of->start_time) / AV_TIME_BASE;
/* Run the stream's bitstream-filter chain over 'pkt' (if any), enforce the
 * per-stream max_frames limit for non-video streams, and hand the packet
 * to the muxer via av_interleaved_write_frame().
 * NOTE(review): interior lines (the bsf loop header, branch structure and
 * exit paths) are missing from this extract. */
1248 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1250 AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1251 AVCodecContext *avctx = ost->st->codec;
1255 * Audio encoders may split the packets -- #frames in != #packets out.
1256 * But there is no reordering, so we can limit the number of output packets
1257 * by simply dropping them here.
1258 * Counting encoded video frames needs to be done separately because of
1259 * reordering, see do_video_out()
1261 if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1262 if (ost->frame_number >= ost->max_frames) {
1263 av_free_packet(pkt);
1266 ost->frame_number++;
1270 AVPacket new_pkt = *pkt;
1271 int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1272 &new_pkt.data, &new_pkt.size,
1273 pkt->data, pkt->size,
1274 pkt->flags & AV_PKT_FLAG_KEY);
/* filter produced a new buffer: drop the old packet, own the new data */
1276 av_free_packet(pkt);
1277 new_pkt.destruct = av_destruct_packet;
1279 av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1280 bsfc->filter->name, pkt->stream_index,
1281 avctx->codec ? avctx->codec->name : "copy");
1291 pkt->stream_index = ost->index;
1292 ret = av_interleaved_write_frame(s, pkt);
1294 print_error("av_interleaved_write_frame()", ret);
/* Mark the stream as past its output file's -t limit (and report it) once
 * the encoded timestamp reaches recording_time. */
1299 static int check_recording_time(OutputStream *ost)
1301 OutputFile *of = output_files[ost->file_index];
1303 if (of->recording_time != INT64_MAX &&
1304 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1305 AV_TIME_BASE_Q) >= 0) {
1306 ost->is_past_recording_time = 1;
/* Encode one filtered audio frame for 'ost', rescale the resulting
 * packet's timestamps from encoder to stream time base, and mux it.
 * NOTE(review): interior lines (parameter list tail, declarations,
 * got_packet branch) are missing from this extract. */
1312 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1315 AVCodecContext *enc = ost->st->codec;
1319 av_init_packet(&pkt);
1323 if (!check_recording_time(ost))
/* keep sync_opts advancing by the number of samples encoded */
1326 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1327 frame->pts = ost->sync_opts;
1328 ost->sync_opts = frame->pts + frame->nb_samples;
1330 if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1331 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1336 if (pkt.pts != AV_NOPTS_VALUE)
1337 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1338 if (pkt.dts != AV_NOPTS_VALUE)
1339 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1340 if (pkt.duration > 0)
1341 pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1343 write_frame(s, &pkt, ost);
1345 audio_size += pkt.size;
/*
 * Optional pre-processing of a decoded picture before encoding:
 * currently only deinterlacing (when -deinterlace is set), done into a
 * temporary buffer that is then copied back over *picture.
 * *bufp presumably receives the temporary buffer so the caller can free
 * it after use -- TODO confirm, the assignment is outside this excerpt.
 */
1349 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1351 AVCodecContext *dec;
1352 AVPicture *picture2;
1353 AVPicture picture_tmp;
1356 dec = ist->st->codec;
1358 /* deinterlace : must be done before any resize */
1359 if (do_deinterlace) {
1362 /* create temporary picture */
1363 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1364 buf = av_malloc(size);
1368 picture2 = &picture_tmp;
1369 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1371 if (avpicture_deinterlace(picture2, picture,
1372 dec->pix_fmt, dec->width, dec->height) < 0) {
1373 /* if error, do not deinterlace */
1374 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
/* deinterlaced into a temporary: overwrite the caller's picture */
1383 if (picture != picture2)
1384 *picture = *picture2;
/*
 * Encode an AVSubtitle and mux the resulting packet(s).
 * DVB subtitles are emitted twice (draw + clear), presumably via nb == 2
 * set in the branch below -- TODO confirm, that line is outside this
 * excerpt. Uses a static, lazily allocated 1 MiB encode buffer.
 */
1388 static void do_subtitle_out(AVFormatContext *s,
1394 static uint8_t *subtitle_out = NULL;
1395 int subtitle_out_max_size = 1024 * 1024;
1396 int subtitle_out_size, nb, i;
1397 AVCodecContext *enc;
1400 if (pts == AV_NOPTS_VALUE) {
1401 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1407 enc = ost->st->codec;
1409 if (!subtitle_out) {
1410 subtitle_out = av_malloc(subtitle_out_max_size);
1413 /* Note: DVB subtitle need one packet to draw them and one other
1414 packet to clear them */
1415 /* XXX: signal it in the codec context ? */
1416 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1421 for (i = 0; i < nb; i++) {
1422 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1423 if (!check_recording_time(ost))
/* normalize display timing: fold start_display_time into pts since
 * start_display_time is required to be 0 */
1426 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1427 // start_display_time is required to be 0
1428 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1429 sub->end_display_time -= sub->start_display_time;
1430 sub->start_display_time = 0;
1431 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1432 subtitle_out_max_size, sub);
1433 if (subtitle_out_size < 0) {
1434 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1438 av_init_packet(&pkt);
1439 pkt.data = subtitle_out;
1440 pkt.size = subtitle_out_size;
1441 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1442 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1443 /* XXX: the pts correction is handled here. Maybe handling
1444 it in the codec would be better */
/* draw packet keeps start time, clear packet gets end time
 * (90 kHz units per millisecond) */
1446 pkt.pts += 90 * sub->start_display_time;
1448 pkt.pts += 90 * sub->end_display_time;
1450 write_frame(s, &pkt, ost);
/*
 * Encode one (possibly duplicated or dropped) video frame and mux it.
 * Applies the selected video sync method (CFR duplicates/drops to keep a
 * constant rate, VFR/passthrough adjust sync_opts instead), handles the
 * AVFMT_RAWPICTURE shortcut, forced keyframes and same_quant, and
 * reports the encoded size back through *frame_size.
 */
1454 static void do_video_out(AVFormatContext *s,
1456 AVFrame *in_picture,
1457 int *frame_size, float quality)
1459 int nb_frames, i, ret, format_video_sync;
1460 AVCodecContext *enc;
1461 double sync_ipts, delta;
1463 enc = ost->st->codec;
/* input pts expressed in encoder frame units; delta is the drift
 * against the frames emitted so far */
1465 sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
1466 delta = sync_ipts - ost->sync_opts;
1468 /* by default, we output a single frame */
1473 format_video_sync = video_sync_method;
1474 if (format_video_sync == VSYNC_AUTO)
1475 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1476 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1478 switch (format_video_sync) {
1480 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
/* CFR: duplicate when the input runs ahead by more than ~one frame */
1483 else if (delta > 1.1)
1484 nb_frames = lrintf(delta);
1489 else if (delta > 0.6)
1490 ost->sync_opts = lrint(sync_ipts);
1492 case VSYNC_PASSTHROUGH:
1493 ost->sync_opts = lrint(sync_ipts);
/* never exceed the -frames limit */
1499 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1500 if (nb_frames == 0) {
1502 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1504 } else if (nb_frames > 1) {
1505 nb_frames_dup += nb_frames - 1;
1506 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1509 if (!ost->frame_number)
1510 ost->first_pts = ost->sync_opts;
1512 /* duplicates frame if needed */
1513 for (i = 0; i < nb_frames; i++) {
1515 av_init_packet(&pkt);
1519 if (!check_recording_time(ost))
1522 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1523 enc->codec->id == CODEC_ID_RAWVIDEO) {
1524 /* raw pictures are written as AVPicture structure to
1525 avoid any copies. We support temporarily the older
1527 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1528 enc->coded_frame->top_field_first = in_picture->top_field_first;
1529 pkt.data = (uint8_t *)in_picture;
1530 pkt.size = sizeof(AVPicture);
1531 pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1532 pkt.flags |= AV_PKT_FLAG_KEY;
1534 write_frame(s, &pkt, ost);
1537 AVFrame big_picture;
1539 big_picture = *in_picture;
1540 /* better than nothing: use input picture interlaced
1542 big_picture.interlaced_frame = in_picture->interlaced_frame;
1543 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1544 if (ost->top_field_first == -1)
1545 big_picture.top_field_first = in_picture->top_field_first;
1547 big_picture.top_field_first = !!ost->top_field_first;
1550 /* handles same_quant here. This is not correct because it may
1551 not be a global option */
1552 big_picture.quality = quality;
/* let the encoder pick the picture type unless me_threshold is set */
1553 if (!enc->me_threshold)
1554 big_picture.pict_type = 0;
1555 big_picture.pts = ost->sync_opts;
/* honor -force_key_frames timestamps */
1556 if (ost->forced_kf_index < ost->forced_kf_count &&
1557 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1558 big_picture.pict_type = AV_PICTURE_TYPE_I;
1559 ost->forced_kf_index++;
1561 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1563 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
/* rescale packet timing from encoder to stream time base */
1568 if (pkt.pts != AV_NOPTS_VALUE)
1569 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1570 if (pkt.dts != AV_NOPTS_VALUE)
1571 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1573 write_frame(s, &pkt, ost);
1574 *frame_size = pkt.size;
1575 video_size += pkt.size;
1577 /* if two pass, output log */
1578 if (ost->logfile && enc->stats_out) {
1579 fprintf(ost->logfile, "%s", enc->stats_out);
1585 * For video, number of frames in == number of packets out.
1586 * But there may be reordering, so we can't throw away frames on encoder
1587 * flush, we need to limit them here, before they go into encoder.
1589 ost->frame_number++;
1593 static double psnr(double d)
1595 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame video statistics (-vstats) to vstats_file:
 * frame number, quantizer, optional PSNR, frame size, elapsed time and
 * instantaneous/average bitrate, plus the picture type.
 */
1598 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1601 AVCodecContext *enc;
1603 double ti1, bitrate, avg_bitrate;
1605 /* this is executed just the first time do_video_stats is called */
1607 vstats_file = fopen(vstats_filename, "w");
1614 enc = ost->st->codec;
1615 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1616 frame_number = ost->frame_number;
1617 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1618 if (enc->flags&CODEC_FLAG_PSNR)
1619 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1621 fprintf(vstats_file,"f_size= %6d ", frame_size);
1622 /* compute pts value */
1623 ti1 = ost->sync_opts * av_q2d(enc->time_base);
/* instantaneous bitrate assumes one frame per encoder tick;
 * average uses total video bytes over elapsed time */
1627 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1628 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1629 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1630 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1631 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1635 /* check for new output on any of the filtergraphs */
/*
 * Pull every available frame out of each output stream's filtergraph
 * sink and dispatch it to the appropriate encoder (do_video_out /
 * do_audio_out). Rescales filter timestamps to the encoder time base
 * and honors the output file's start_time. Returns 0 or an AVERROR.
 */
1636 static int poll_filters(void)
1638 AVFilterBufferRef *picref;
1639 AVFrame *filtered_frame = NULL;
1642 for (i = 0; i < nb_output_streams; i++) {
1643 OutputStream *ost = output_streams[i];
1644 OutputFile *of = output_files[ost->file_index];
/* only filtered streams that are still within the -t limit */
1647 if (!ost->filter || ost->is_past_recording_time)
/* lazily allocate a reusable frame for the filter output */
1650 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1651 return AVERROR(ENOMEM);
1653 avcodec_get_frame_defaults(ost->filtered_frame);
1654 filtered_frame = ost->filtered_frame;
/* fixed-frame-size audio encoders need exactly frame_size samples
 * per read; everything else reads whatever the sink has */
1657 if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
1658 !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
1659 ret = av_buffersink_read_samples(ost->filter->filter, &picref,
1660 ost->st->codec->frame_size);
1662 ret = av_buffersink_read(ost->filter->filter, &picref);
1667 avfilter_copy_buf_props(filtered_frame, picref);
/* video keeps the filter's time base; audio additionally subtracts
 * the output file's start_time */
1668 if (ost->enc->type == AVMEDIA_TYPE_VIDEO)
1669 filtered_frame->pts = av_rescale_q(picref->pts,
1670 ost->filter->filter->inputs[0]->time_base,
1672 else if (picref->pts != AV_NOPTS_VALUE)
1673 filtered_frame->pts = av_rescale_q(picref->pts,
1674 ost->filter->filter->inputs[0]->time_base,
1675 ost->st->codec->time_base) -
1676 av_rescale_q(of->start_time,
1678 ost->st->codec->time_base);
/* skip frames that predate -ss on the output */
1680 if (of->start_time && filtered_frame->pts < of->start_time)
1683 switch (ost->filter->filter->inputs[0]->type) {
1684 case AVMEDIA_TYPE_VIDEO:
1685 if (!ost->frame_aspect_ratio)
1686 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1688 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1689 same_quant ? ost->last_quality :
1690 ost->st->codec->global_quality);
1691 if (vstats_filename && frame_size)
1692 do_video_stats(of->ctx, ost, frame_size);
1694 case AVMEDIA_TYPE_AUDIO:
1695 do_audio_out(of->ctx, ost, filtered_frame);
1698 // TODO support subtitle filters
1702 avfilter_unref_buffer(picref);
/*
 * Print the periodic (every 0.5 s) or final progress report: output
 * size, time, bitrate, per-video-stream fps/q/PSNR, dup/drop counters,
 * and on the last call a size breakdown with muxing overhead.
 */
1708 static void print_report(int is_last_report, int64_t timer_start)
1712 AVFormatContext *oc;
1714 AVCodecContext *enc;
1715 int frame_number, vid, i;
1716 double bitrate, ti1, pts;
1717 static int64_t last_time = -1;
1718 static int qp_histogram[52];
1720 if (!print_stats && !is_last_report)
1723 if (!is_last_report) {
1725 /* display the report every 0.5 seconds */
1726 cur_time = av_gettime();
1727 if (last_time == -1) {
1728 last_time = cur_time;
1731 if ((cur_time - last_time) < 500000)
1733 last_time = cur_time;
1737 oc = output_files[0]->ctx;
1739 total_size = avio_size(oc->pb);
1740 if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1741 total_size = avio_tell(oc->pb);
1746 for (i = 0; i < nb_output_streams; i++) {
1748 ost = output_streams[i];
1749 enc = ost->st->codec;
1750 if (!ost->stream_copy && enc->coded_frame)
1751 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
/* additional video streams only contribute their quantizer */
1752 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1753 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
/* first video stream: frame count, fps and quantizer */
1755 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1756 float t = (av_gettime() - timer_start) / 1000000.0;
1758 frame_number = ost->frame_number;
1759 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1760 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1762 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qphist: log2-scaled histogram of quantizers seen so far */
1766 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1768 for (j = 0; j < 32; j++)
1769 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1771 if (enc->flags&CODEC_FLAG_PSNR) {
1773 double error, error_sum = 0;
1774 double scale, scale_sum = 0;
1775 char type[3] = { 'Y','U','V' };
1776 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1777 for (j = 0; j < 3; j++) {
/* last report uses the codec's accumulated error, periodic
 * reports use the most recent coded frame's */
1778 if (is_last_report) {
1779 error = enc->error[j];
1780 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1782 error = enc->coded_frame->error[j];
1783 scale = enc->width * enc->height * 255.0 * 255.0;
1789 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1791 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1795 /* compute min output value */
1796 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1797 if ((pts < ti1) && (pts > 0))
1803 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1805 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1806 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1807 (double)total_size / 1024, ti1, bitrate);
1809 if (nb_frames_dup || nb_frames_drop)
1810 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1811 nb_frames_dup, nb_frames_drop);
/* '\r' keeps the periodic report on a single console line */
1813 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
1817 if (is_last_report) {
1818 int64_t raw= audio_size + video_size + extra_size;
1819 av_log(NULL, AV_LOG_INFO, "\n");
1820 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
1821 video_size / 1024.0,
1822 audio_size / 1024.0,
1823 extra_size / 1024.0,
1824 100.0 * (total_size - raw) / raw
/*
 * Drain every active encoder at end of stream: repeatedly call the
 * encoder with a NULL frame until it stops producing packets, muxing
 * each delayed packet. Streams that cannot buffer (PCM-style audio with
 * frame_size <= 1, raw video with AVFMT_RAWPICTURE) are skipped.
 */
1829 static void flush_encoders(void)
1833 for (i = 0; i < nb_output_streams; i++) {
1834 OutputStream *ost = output_streams[i];
1835 AVCodecContext *enc = ost->st->codec;
1836 AVFormatContext *os = output_files[ost->file_index]->ctx;
1837 int stop_encoding = 0;
1839 if (!ost->encoding_needed)
/* these encoders hold no delayed frames, nothing to flush */
1842 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1844 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
1848 int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1852 switch (ost->st->codec->codec_type) {
1853 case AVMEDIA_TYPE_AUDIO:
1854 encode = avcodec_encode_audio2;
1858 case AVMEDIA_TYPE_VIDEO:
1859 encode = avcodec_encode_video2;
1870 av_init_packet(&pkt);
/* NULL frame signals the encoder to emit buffered packets */
1874 ret = encode(enc, &pkt, NULL, &got_packet);
1876 av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1880 if (ost->logfile && enc->stats_out) {
1881 fprintf(ost->logfile, "%s", enc->stats_out);
/* rescale packet timing from encoder to stream time base */
1887 if (pkt.pts != AV_NOPTS_VALUE)
1888 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1889 if (pkt.dts != AV_NOPTS_VALUE)
1890 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1891 write_frame(os, &pkt, ost);
1901 * Check whether a packet from ist should be written into ost at this time
/*
 * True only when ost is fed by exactly this input stream and the input
 * has progressed past the output file's start_time (-ss).
 * NOTE(review): the return statements are outside this excerpt.
 */
1903 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1905 OutputFile *of = output_files[ost->file_index];
1906 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1908 if (ost->source_index != ist_index)
1911 if (of->start_time && ist->last_dts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding (-codec
 * copy): rescale timestamps to the output stream's time base (shifted by
 * the output start time), optionally run the packet through the stream's
 * parser for codecs that need it, and mux.
 */
1917 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1919 OutputFile *of = output_files[ost->file_index];
1920 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
1923 av_init_packet(&opkt);
/* by default do not start a copied stream on a non-keyframe */
1925 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1926 !ost->copy_initial_nonkeyframes)
/* enforce -t on copied streams */
1929 if (of->recording_time != INT64_MAX &&
1930 ist->last_dts >= of->recording_time + of->start_time) {
1931 ost->is_past_recording_time = 1;
1935 /* force the input stream PTS */
1936 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1937 audio_size += pkt->size;
1938 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1939 video_size += pkt->size;
1943 if (pkt->pts != AV_NOPTS_VALUE)
1944 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1946 opkt.pts = AV_NOPTS_VALUE;
/* missing dts: fall back to the input stream's last known dts */
1948 if (pkt->dts == AV_NOPTS_VALUE)
1949 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
1951 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1952 opkt.dts -= ost_tb_start_time;
1954 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1955 opkt.flags = pkt->flags;
1957 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1958 if ( ost->st->codec->codec_id != CODEC_ID_H264
1959 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
1960 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
1961 && ost->st->codec->codec_id != CODEC_ID_VC1
/* parser may rewrite the bitstream into a new buffer we then own */
1963 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
1964 opkt.destruct = av_destruct_packet;
1966 opkt.data = pkt->data;
1967 opkt.size = pkt->size;
1970 write_frame(of->ctx, &opkt, ost);
1971 ost->st->codec->frame_number++;
1972 av_free_packet(&opkt);
/*
 * -re (rate emulation): compare the stream's last dts, converted to
 * microseconds, with wall-clock time elapsed since the stream started;
 * presumably sleeps for the difference when ahead of real time -- TODO
 * confirm, the sleep call is outside this excerpt.
 */
1975 static void rate_emu_sleep(InputStream *ist)
1977 if (input_files[ist->file_index]->rate_emu) {
1978 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
1979 int64_t now = av_gettime() - ist->start;
/*
 * Fill in a missing input channel layout from the channel count via
 * av_get_default_channel_layout(), warning the user about the guess.
 * Returns 0 on failure to guess (no default for that channel count);
 * the success return is outside this excerpt.
 */
1985 static int guess_input_channel_layout(InputStream *ist)
1987 AVCodecContext *dec = ist->st->codec;
1989 if (!dec->channel_layout) {
1990 char layout_name[256];
1992 dec->channel_layout = av_get_default_channel_layout(dec->channels);
1993 if (!dec->channel_layout)
1995 av_get_channel_layout_string(layout_name, sizeof(layout_name),
1996 dec->channels, dec->channel_layout);
1997 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1998 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/*
 * Decode one audio packet, apply -vol volume scaling in place on the
 * decoded samples, reconfigure the filtergraphs when the sample format /
 * rate / channel layout changes mid-stream, and feed the decoded frame
 * into every filter attached to this input stream.
 * Sets *got_output when a frame was produced.
 */
2003 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2005 AVFrame *decoded_frame;
2006 AVCodecContext *avctx = ist->st->codec;
2007 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2008 int i, ret, resample_changed;
/* reuse one decode frame per input stream, allocated lazily */
2010 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2011 return AVERROR(ENOMEM);
2013 avcodec_get_frame_defaults(ist->decoded_frame);
2014 decoded_frame = ist->decoded_frame;
2016 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2022 /* no audio frame */
/* signal EOF to the filters on this path */
2024 for (i = 0; i < ist->nb_filters; i++)
2025 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2029 /* if the decoder provides a pts, use it instead of the last packet pts.
2030 the decoder could be delaying output by a packet or more. */
2031 if (decoded_frame->pts != AV_NOPTS_VALUE)
2032 ist->next_dts = decoded_frame->pts;
2033 else if (pkt->pts != AV_NOPTS_VALUE) {
2034 decoded_frame->pts = pkt->pts;
2035 pkt->pts = AV_NOPTS_VALUE;
2038 // preprocess audio (volume)
/* in-place volume scaling, one case per supported sample format;
 * integer formats use fixed-point (x * vol + 128) >> 8 with clipping */
2039 if (audio_volume != 256) {
2040 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2041 void *samples = decoded_frame->data[0];
2042 switch (avctx->sample_fmt) {
2043 case AV_SAMPLE_FMT_U8:
2045 uint8_t *volp = samples;
2046 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2047 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2048 *volp++ = av_clip_uint8(v);
2052 case AV_SAMPLE_FMT_S16:
2054 int16_t *volp = samples;
2055 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2056 int v = ((*volp) * audio_volume + 128) >> 8;
2057 *volp++ = av_clip_int16(v);
2061 case AV_SAMPLE_FMT_S32:
2063 int32_t *volp = samples;
2064 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2065 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2066 *volp++ = av_clipl_int32(v);
2070 case AV_SAMPLE_FMT_FLT:
2072 float *volp = samples;
2073 float scale = audio_volume / 256.f;
2074 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2079 case AV_SAMPLE_FMT_DBL:
2081 double *volp = samples;
2082 double scale = audio_volume / 256.;
2083 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2089 av_log(NULL, AV_LOG_FATAL,
2090 "Audio volume adjustment on sample format %s is not supported.\n",
2091 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2096 rate_emu_sleep(ist);
/* detect mid-stream parameter changes that require filter reinit */
2098 resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2099 ist->resample_channels != avctx->channels ||
2100 ist->resample_channel_layout != decoded_frame->channel_layout ||
2101 ist->resample_sample_rate != decoded_frame->sample_rate;
2102 if (resample_changed) {
2103 char layout1[64], layout2[64];
2105 if (!guess_input_channel_layout(ist)) {
2106 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2107 "layout for Input Stream #%d.%d\n", ist->file_index,
2111 decoded_frame->channel_layout = avctx->channel_layout;
2113 av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2114 ist->resample_channel_layout);
2115 av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2116 decoded_frame->channel_layout);
2118 av_log(NULL, AV_LOG_INFO,
2119 "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2120 ist->file_index, ist->st->index,
2121 ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2122 ist->resample_channels, layout1,
2123 decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2124 avctx->channels, layout2);
/* remember the new parameters and rebuild affected filtergraphs */
2126 ist->resample_sample_fmt = decoded_frame->format;
2127 ist->resample_sample_rate = decoded_frame->sample_rate;
2128 ist->resample_channel_layout = decoded_frame->channel_layout;
2129 ist->resample_channels = avctx->channels;
2131 for (i = 0; i < nb_filtergraphs; i++)
2132 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2133 configure_filtergraph(filtergraphs[i]) < 0) {
2134 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2139 for (i = 0; i < ist->nb_filters; i++)
2140 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
/*
 * Decode one video packet, correct its pts via guess_correct_pts(),
 * optionally deinterlace, reconfigure filtergraphs on size/pixel-format
 * changes, and push the decoded frame into every attached filter --
 * zero-copy via a buffer ref when the decoder uses our DR1 buffers,
 * otherwise by copying the frame.
 */
2145 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
2147 AVFrame *decoded_frame;
2148 void *buffer_to_free = NULL;
2149 int i, ret = 0, resample_changed;
2152 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2153 return AVERROR(ENOMEM);
2155 avcodec_get_frame_defaults(ist->decoded_frame);
2156 decoded_frame = ist->decoded_frame;
/* feed the stashed packet pts and our dts estimate to the decoder */
2157 pkt->pts = *pkt_pts;
2158 pkt->dts = ist->last_dts;
2159 *pkt_pts = AV_NOPTS_VALUE;
2161 ret = avcodec_decode_video2(ist->st->codec,
2162 decoded_frame, got_output, pkt);
2166 quality = same_quant ? decoded_frame->quality : 0;
2168 /* no picture yet */
/* signal EOF to the filters on this path */
2170 for (i = 0; i < ist->nb_filters; i++)
2171 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2174 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2175 decoded_frame->pkt_dts);
2177 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2179 rate_emu_sleep(ist);
/* container-level aspect ratio overrides the codec-level one */
2181 if (ist->st->sample_aspect_ratio.num)
2182 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream size/format changes that require filter reinit */
2184 resample_changed = ist->resample_width != decoded_frame->width ||
2185 ist->resample_height != decoded_frame->height ||
2186 ist->resample_pix_fmt != decoded_frame->format;
2187 if (resample_changed) {
2188 av_log(NULL, AV_LOG_INFO,
2189 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2190 ist->file_index, ist->st->index,
2191 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2192 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2194 ist->resample_width = decoded_frame->width;
2195 ist->resample_height = decoded_frame->height;
2196 ist->resample_pix_fmt = decoded_frame->format;
2198 for (i = 0; i < nb_filtergraphs; i++)
2199 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2200 configure_filtergraph(filtergraphs[i]) < 0) {
2201 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2206 for (i = 0; i < ist->nb_filters; i++) {
2207 // XXX what an ugly hack
2208 if (ist->filters[i]->graph->nb_outputs == 1)
2209 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
/* DR1: wrap the decoder's buffer in a filter buffer ref (no copy);
 * the custom free callback releases our FrameBuffer */
2211 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2212 FrameBuffer *buf = decoded_frame->opaque;
2213 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2214 decoded_frame->data, decoded_frame->linesize,
2215 AV_PERM_READ | AV_PERM_PRESERVE,
2216 ist->st->codec->width, ist->st->codec->height,
2217 ist->st->codec->pix_fmt);
2219 avfilter_copy_frame_props(fb, decoded_frame);
2220 fb->buf->priv = buf;
2221 fb->buf->free = filter_release_buffer;
2224 av_buffersrc_buffer(ist->filters[i]->filter, fb);
2226 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2229 av_free(buffer_to_free);
/*
 * Decode one subtitle packet and send the decoded AVSubtitle directly to
 * every output stream fed by this input (no subtitle filters yet).
 * Frees the subtitle before returning.
 */
2233 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2235 AVSubtitle subtitle;
2236 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2237 &subtitle, got_output, pkt);
2243 rate_emu_sleep(ist);
2245 for (i = 0; i < nb_output_streams; i++) {
2246 OutputStream *ost = output_streams[i];
/* only encoders fed by this stream; copies are handled elsewhere */
2248 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2251 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
2254 avsubtitle_free(&subtitle);
2258 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one demuxed packet for an input stream: decode it (possibly in
 * several iterations when one packet contains multiple frames), keep
 * last_dts/next_dts estimates up to date, and for non-decoded streams
 * hand the packet to do_streamcopy() for each matching output.
 */
2259 static int output_packet(InputStream *ist, const AVPacket *pkt)
2263 int64_t pkt_pts = AV_NOPTS_VALUE;
2266 if (ist->next_dts == AV_NOPTS_VALUE)
2267 ist->next_dts = ist->last_dts;
2271 av_init_packet(&avpkt);
/* seed the dts/pts estimates from the packet, in AV_TIME_BASE units */
2279 if (pkt->dts != AV_NOPTS_VALUE)
2280 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2281 if (pkt->pts != AV_NOPTS_VALUE)
2282 pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2284 // while we have more to decode or while the decoder did output something on EOF
2285 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2289 ist->last_dts = ist->next_dts;
2291 if (avpkt.size && avpkt.size != pkt->size) {
2292 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2293 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2294 ist->showed_multi_packet_warning = 1;
2297 switch (ist->st->codec->codec_type) {
2298 case AVMEDIA_TYPE_AUDIO:
2299 ret = transcode_audio (ist, &avpkt, &got_output);
2301 case AVMEDIA_TYPE_VIDEO:
2302 ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
/* advance next_dts by the best frame-duration estimate available:
 * packet duration, then stream frame rate, then codec time base */
2304 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2305 else if (ist->st->r_frame_rate.num)
2306 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2307 ist->st->r_frame_rate.num},
2309 else if (ist->st->codec->time_base.num != 0) {
2310 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2311 ist->st->codec->ticks_per_frame;
2312 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2315 case AVMEDIA_TYPE_SUBTITLE:
2316 ret = transcode_subtitles(ist, &avpkt, &got_output);
2324 // touch data and size only if not EOF
2334 /* handle stream copy */
2335 if (!ist->decoding_needed) {
2336 rate_emu_sleep(ist);
2337 ist->last_dts = ist->next_dts;
/* estimate the next dts from codec parameters since we do not decode */
2338 switch (ist->st->codec->codec_type) {
2339 case AVMEDIA_TYPE_AUDIO:
2340 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2341 ist->st->codec->sample_rate;
2343 case AVMEDIA_TYPE_VIDEO:
2344 if (ist->st->codec->time_base.num != 0) {
2345 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2346 ist->next_dts += ((int64_t)AV_TIME_BASE *
2347 ist->st->codec->time_base.num * ticks) /
2348 ist->st->codec->time_base.den;
/* forward the raw packet to every copy output fed by this stream */
2353 for (i = 0; pkt && i < nb_output_streams; i++) {
2354 OutputStream *ost = output_streams[i];
2356 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2359 do_streamcopy(ist, ost, pkt);
/*
 * Print an SDP session description covering all output files to stdout
 * (used for streaming setups, e.g. RTP).
 */
2365 static void print_sdp(void)
2369 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2373 for (i = 0; i < nb_output_files; i++)
2374 avc[i] = output_files[i]->ctx;
2376 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2377 printf("SDP:\n%s\n", sdp);
/*
 * Open the decoder for one input stream (when decoding is needed):
 * propagate the encoders' preferred sample formats to the decoder,
 * install the custom DR1 buffer callbacks for capable video decoders,
 * default to threaded decoding, and open the codec. Also initializes
 * the stream's dts bookkeeping and pts-correction state.
 * On failure writes a message into 'error' and returns an AVERROR.
 */
2382 static int init_input_stream(int ist_index, char *error, int error_len)
2385 InputStream *ist = input_streams[ist_index];
2386 if (ist->decoding_needed) {
2387 AVCodec *codec = ist->dec;
2389 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2390 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2391 return AVERROR(EINVAL);
2394 /* update requested sample format for the decoder based on the
2395 corresponding encoder sample format */
2396 for (i = 0; i < nb_output_streams; i++) {
2397 OutputStream *ost = output_streams[i];
2398 if (ost->source_index == ist_index) {
2399 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
/* direct rendering: decoder writes into buffers we manage */
2404 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2405 ist->st->codec->get_buffer = codec_get_buffer;
2406 ist->st->codec->release_buffer = codec_release_buffer;
2407 ist->st->codec->opaque = ist;
2410 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2411 av_dict_set(&ist->opts, "threads", "auto", 0);
2412 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2413 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2414 ist->file_index, ist->st->index);
2415 return AVERROR(EINVAL);
2417 assert_codec_experimental(ist->st->codec, 0);
2418 assert_avoptions(ist->opts);
/* start dts slightly negative to account for B-frame reordering delay */
2421 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2422 ist->next_dts = AV_NOPTS_VALUE;
2423 init_pts_correction(&ist->pts_ctx);
/*
 * Return the input stream feeding 'ost': its direct source when one is
 * set, otherwise the first input of its filtergraph with a matching
 * media type. NOTE(review): the no-match return is outside this excerpt.
 */
2429 static InputStream *get_input_stream(OutputStream *ost)
2431 if (ost->source_index >= 0)
2432 return input_streams[ost->source_index];
2435 FilterGraph *fg = ost->filter->graph;
2438 for (i = 0; i < fg->nb_inputs; i++)
2439 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2440 return fg->inputs[i]->ist;
2446 static int transcode_init(void)
2448 int ret = 0, i, j, k;
2449 AVFormatContext *oc;
2450 AVCodecContext *codec, *icodec;
2456 /* init framerate emulation */
2457 for (i = 0; i < nb_input_files; i++) {
2458 InputFile *ifile = input_files[i];
2459 if (ifile->rate_emu)
2460 for (j = 0; j < ifile->nb_streams; j++)
2461 input_streams[j + ifile->ist_index]->start = av_gettime();
2464 /* output stream init */
2465 for (i = 0; i < nb_output_files; i++) {
2466 oc = output_files[i]->ctx;
2467 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2468 av_dump_format(oc, i, oc->filename, 1);
2469 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2470 return AVERROR(EINVAL);
2474 /* init complex filtergraphs */
2475 for (i = 0; i < nb_filtergraphs; i++)
2476 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2479 /* for each output stream, we compute the right encoding parameters */
2480 for (i = 0; i < nb_output_streams; i++) {
2481 ost = output_streams[i];
2482 oc = output_files[ost->file_index]->ctx;
2483 ist = get_input_stream(ost);
2485 if (ost->attachment_filename)
2488 codec = ost->st->codec;
2491 icodec = ist->st->codec;
2493 ost->st->disposition = ist->st->disposition;
2494 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2495 codec->chroma_sample_location = icodec->chroma_sample_location;
2498 if (ost->stream_copy) {
2499 uint64_t extra_size;
2501 av_assert0(ist && !ost->filter);
2503 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2505 if (extra_size > INT_MAX) {
2506 return AVERROR(EINVAL);
2509 /* if stream_copy is selected, no need to decode or encode */
2510 codec->codec_id = icodec->codec_id;
2511 codec->codec_type = icodec->codec_type;
2513 if (!codec->codec_tag) {
2514 if (!oc->oformat->codec_tag ||
2515 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2516 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2517 codec->codec_tag = icodec->codec_tag;
2520 codec->bit_rate = icodec->bit_rate;
2521 codec->rc_max_rate = icodec->rc_max_rate;
2522 codec->rc_buffer_size = icodec->rc_buffer_size;
2523 codec->field_order = icodec->field_order;
2524 codec->extradata = av_mallocz(extra_size);
2525 if (!codec->extradata) {
2526 return AVERROR(ENOMEM);
2528 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2529 codec->extradata_size = icodec->extradata_size;
2531 codec->time_base = icodec->time_base;
2532 codec->time_base.num *= icodec->ticks_per_frame;
2533 av_reduce(&codec->time_base.num, &codec->time_base.den,
2534 codec->time_base.num, codec->time_base.den, INT_MAX);
2536 codec->time_base = ist->st->time_base;
2538 switch (codec->codec_type) {
2539 case AVMEDIA_TYPE_AUDIO:
2540 if (audio_volume != 256) {
2541 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2544 codec->channel_layout = icodec->channel_layout;
2545 codec->sample_rate = icodec->sample_rate;
2546 codec->channels = icodec->channels;
2547 codec->frame_size = icodec->frame_size;
2548 codec->audio_service_type = icodec->audio_service_type;
2549 codec->block_align = icodec->block_align;
2551 case AVMEDIA_TYPE_VIDEO:
2552 codec->pix_fmt = icodec->pix_fmt;
2553 codec->width = icodec->width;
2554 codec->height = icodec->height;
2555 codec->has_b_frames = icodec->has_b_frames;
2556 if (!codec->sample_aspect_ratio.num) {
2557 codec->sample_aspect_ratio =
2558 ost->st->sample_aspect_ratio =
2559 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2560 ist->st->codec->sample_aspect_ratio.num ?
2561 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2564 case AVMEDIA_TYPE_SUBTITLE:
2565 codec->width = icodec->width;
2566 codec->height = icodec->height;
2568 case AVMEDIA_TYPE_DATA:
2569 case AVMEDIA_TYPE_ATTACHMENT:
2576 /* should only happen when a default codec is not present. */
2577 snprintf(error, sizeof(error), "Automatic encoder selection "
2578 "failed for output stream #%d:%d. Default encoder for "
2579 "format %s is probably disabled. Please choose an "
2580 "encoder manually.\n", ost->file_index, ost->index,
2582 ret = AVERROR(EINVAL);
2587 ist->decoding_needed = 1;
2588 ost->encoding_needed = 1;
2591 (codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2592 codec->codec_type == AVMEDIA_TYPE_AUDIO)) {
2594 fg = init_simple_filtergraph(ist, ost);
2595 if (configure_simple_filtergraph(fg)) {
2596 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2601 switch (codec->codec_type) {
2602 case AVMEDIA_TYPE_AUDIO:
2603 codec->sample_fmt = ost->filter->filter->inputs[0]->format;
2604 codec->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2605 codec->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2606 codec->channels = av_get_channel_layout_nb_channels(codec->channel_layout);
2607 codec->time_base = (AVRational){ 1, codec->sample_rate };
2609 case AVMEDIA_TYPE_VIDEO:
2611 * We want CFR output if and only if one of those is true:
2612 * 1) user specified output framerate with -r
2613 * 2) user specified -vsync cfr
2614 * 3) output format is CFR and the user didn't force vsync to
2615 * something else than CFR
2617 * in such a case, set ost->frame_rate
2619 if (!ost->frame_rate.num && ist &&
2620 (video_sync_method == VSYNC_CFR ||
2621 (video_sync_method == VSYNC_AUTO &&
2622 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2623 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
2624 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2625 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2626 ost->frame_rate = ost->enc->supported_framerates[idx];
2629 if (ost->frame_rate.num) {
2630 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2631 video_sync_method = VSYNC_CFR;
2633 codec->time_base = ist->st->time_base;
2635 codec->time_base = ost->filter->filter->inputs[0]->time_base;
2637 codec->width = ost->filter->filter->inputs[0]->w;
2638 codec->height = ost->filter->filter->inputs[0]->h;
2639 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2640 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2641 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2642 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2643 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
2645 if (codec->width != icodec->width ||
2646 codec->height != icodec->height ||
2647 codec->pix_fmt != icodec->pix_fmt) {
2648 codec->bits_per_raw_sample = 0;
2652 case AVMEDIA_TYPE_SUBTITLE:
2653 codec->time_base = (AVRational){1, 1000};
2660 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2661 char logfilename[1024];
2664 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2665 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
2667 if (!strcmp(ost->enc->name, "libx264")) {
2668 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2670 if (codec->flags & CODEC_FLAG_PASS1) {
2671 f = fopen(logfilename, "wb");
2673 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2674 logfilename, strerror(errno));
2680 size_t logbuffer_size;
2681 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2682 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2686 codec->stats_in = logbuffer;
2693 /* open each encoder */
2694 for (i = 0; i < nb_output_streams; i++) {
2695 ost = output_streams[i];
2696 if (ost->encoding_needed) {
2697 AVCodec *codec = ost->enc;
2698 AVCodecContext *dec = NULL;
2700 if ((ist = get_input_stream(ost)))
2701 dec = ist->st->codec;
2702 if (dec && dec->subtitle_header) {
2703 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2704 if (!ost->st->codec->subtitle_header) {
2705 ret = AVERROR(ENOMEM);
2708 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2709 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2711 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2712 av_dict_set(&ost->opts, "threads", "auto", 0);
2713 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2714 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2715 ost->file_index, ost->index);
2716 ret = AVERROR(EINVAL);
2719 assert_codec_experimental(ost->st->codec, 1);
2720 assert_avoptions(ost->opts);
2721 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2722 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2723 "It takes bits/s as argument, not kbits/s\n");
2724 extra_size += ost->st->codec->extradata_size;
2726 if (ost->st->codec->me_threshold)
2727 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2731 /* init input streams */
2732 for (i = 0; i < nb_input_streams; i++)
2733 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2736 /* discard unused programs */
2737 for (i = 0; i < nb_input_files; i++) {
2738 InputFile *ifile = input_files[i];
2739 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2740 AVProgram *p = ifile->ctx->programs[j];
2741 int discard = AVDISCARD_ALL;
2743 for (k = 0; k < p->nb_stream_indexes; k++)
2744 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2745 discard = AVDISCARD_DEFAULT;
2748 p->discard = discard;
2752 /* open files and write file headers */
2753 for (i = 0; i < nb_output_files; i++) {
2754 oc = output_files[i]->ctx;
2755 oc->interrupt_callback = int_cb;
2756 if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2757 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2758 ret = AVERROR(EINVAL);
2761 assert_avoptions(output_files[i]->opts);
2762 if (strcmp(oc->oformat->name, "rtp")) {
2768 /* dump the file output parameters - cannot be done before in case
2770 for (i = 0; i < nb_output_files; i++) {
2771 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2774 /* dump the stream mapping */
2775 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2776 for (i = 0; i < nb_input_streams; i++) {
2777 ist = input_streams[i];
2779 for (j = 0; j < ist->nb_filters; j++) {
2780 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2781 if (ist->filters[j]->graph->graph_desc) {
2782 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2783 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2784 link->dst->filter->name);
2785 if (link->dst->input_count > 1)
2786 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2787 if (nb_filtergraphs > 1)
2788 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2789 av_log(NULL, AV_LOG_INFO, "\n");
2794 for (i = 0; i < nb_output_streams; i++) {
2795 ost = output_streams[i];
2797 if (ost->attachment_filename) {
2798 /* an attached file */
2799 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2800 ost->attachment_filename, ost->file_index, ost->index);
2804 if (ost->filter && ost->filter->graph->graph_desc) {
2805 /* output from a complex graph */
2806 AVFilterLink *link = ost->filter->filter->inputs[0];
2807 av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name);
2808 if (link->src->output_count > 1)
2809 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2810 if (nb_filtergraphs > 1)
2811 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2813 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2814 ost->index, ost->enc ? ost->enc->name : "?");
2818 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2819 input_streams[ost->source_index]->file_index,
2820 input_streams[ost->source_index]->st->index,
2823 if (ost->sync_ist != input_streams[ost->source_index])
2824 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2825 ost->sync_ist->file_index,
2826 ost->sync_ist->st->index);
2827 if (ost->stream_copy)
2828 av_log(NULL, AV_LOG_INFO, " (copy)");
2830 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2831 input_streams[ost->source_index]->dec->name : "?",
2832 ost->enc ? ost->enc->name : "?");
2833 av_log(NULL, AV_LOG_INFO, "\n");
2837 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
2849 * The following code is the main loop of the file converter
/*
 * Main conversion loop: repeatedly select the input stream with the
 * smallest last DTS, read one packet from that file, apply the per-file
 * timestamp offset / per-stream ts_scale and discontinuity correction,
 * and feed the packet to output_packet()/poll_filters(); on end of
 * input, flush decoders, write trailers, close codecs and free
 * per-output-stream state.
 * NOTE(review): this listing is elided (original line numbers embedded);
 * comments describe only the visible statements.
 */
2851 static int transcode(void)
2854     AVFormatContext *is, *os;
2858     int no_packet_count = 0;
2859     int64_t timer_start;
/* one "no packet currently available" flag per input file (EAGAIN tracking) */
2861     if (!(no_packet = av_mallocz(nb_input_files)))
2864     ret = transcode_init();
2868     av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
2871     timer_start = av_gettime();
/* loop until a termination signal is received */
2873     for (; received_sigterm == 0;) {
2874         int file_index, ist_index, past_recording_time = 1;
2878         ipts_min = INT64_MAX;
2880         /* check if there's any stream where output is still needed */
2881         for (i = 0; i < nb_output_streams; i++) {
2883             ost = output_streams[i];
2884             of = output_files[ost->file_index];
2885             os = output_files[ost->file_index]->ctx;
2886             if (ost->is_past_recording_time ||
2887                 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* reaching -frames limit marks every stream of this output file as finished */
2889             if (ost->frame_number > ost->max_frames) {
2891                 for (j = 0; j < of->ctx->nb_streams; j++)
2892                     output_streams[of->ost_index + j]->is_past_recording_time = 1;
2895             past_recording_time = 0;
2897         if (past_recording_time)
2900         /* select the stream that we must read now by looking at the
2901            smallest output pts */
2903         for (i = 0; i < nb_input_streams; i++) {
2905             ist = input_streams[i];
2906             ipts = ist->last_dts;
2907             if (ist->discard || no_packet[ist->file_index])
2909             if (!input_files[ist->file_index]->eof_reached) {
2910                 if (ipts < ipts_min) {
2912                     file_index = ist->file_index;
2916         /* if none, if is finished */
2917         if (file_index < 0) {
/* all files stalled on EAGAIN: clear the flags and retry */
2918             if (no_packet_count) {
2919                 no_packet_count = 0;
2920                 memset(no_packet, 0, nb_input_files);
2927         /* read a frame from it and output it in the fifo */
2928         is = input_files[file_index]->ctx;
2929         ret = av_read_frame(is, &pkt);
2930         if (ret == AVERROR(EAGAIN)) {
2931             no_packet[file_index] = 1;
/* any other read error: mark the file as finished and flush its decoders */
2936             input_files[file_index]->eof_reached = 1;
2938             for (i = 0; i < input_files[file_index]->nb_streams; i++) {
2939                 ist = input_streams[input_files[file_index]->ist_index + i];
2940                 if (ist->decoding_needed)
2941                     output_packet(ist, NULL);
/* a packet was obtained: reset the EAGAIN bookkeeping */
2950         no_packet_count = 0;
2951         memset(no_packet, 0, nb_input_files);
2954         av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
2955                          is->streams[pkt.stream_index]);
2957         /* the following test is needed in case new streams appear
2958            dynamically in stream : we ignore them */
2959         if (pkt.stream_index >= input_files[file_index]->nb_streams)
2960             goto discard_packet;
2961         ist_index = input_files[file_index]->ist_index + pkt.stream_index;
2962         ist = input_streams[ist_index];
2964             goto discard_packet;
/* shift timestamps by the per-file offset, then apply -itsscale */
2966         if (pkt.dts != AV_NOPTS_VALUE)
2967             pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2968         if (pkt.pts != AV_NOPTS_VALUE)
2969             pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
2971         if (pkt.pts != AV_NOPTS_VALUE)
2972             pkt.pts *= ist->ts_scale;
2973         if (pkt.dts != AV_NOPTS_VALUE)
2974             pkt.dts *= ist->ts_scale;
2976         //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
2978         //        pkt.dts, input_files[ist->file_index].ts_offset,
2979         //        ist->st->codec->codec_type);
/* for formats with timestamp discontinuities (e.g. MPEG-TS), fold large
 * DTS jumps back into the per-file ts_offset unless -copyts was given */
2980         if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
2981             && (is->iformat->flags & AVFMT_TS_DISCONT)) {
2982             int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
2983             int64_t delta = pkt_dts - ist->next_dts;
2984             if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
2985                 input_files[ist->file_index]->ts_offset -= delta;
2986                 av_log(NULL, AV_LOG_DEBUG,
2987                        "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
2988                        delta, input_files[ist->file_index]->ts_offset);
2989                 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2990                 if (pkt.pts != AV_NOPTS_VALUE)
2991                     pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
2995         // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
2996         if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
2997             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
2998                    ist->file_index, ist->st->index);
3001             av_free_packet(&pkt);
3006         av_free_packet(&pkt);
3008         /* dump report by using the output first video and audio streams */
3009         print_report(0, timer_start);
3012     /* at the end of stream, we must flush the decoder buffers */
3013     for (i = 0; i < nb_input_streams; i++) {
3014         ist = input_streams[i];
3015         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3016             output_packet(ist, NULL);
3024     /* write the trailer if needed and close file */
3025     for (i = 0; i < nb_output_files; i++) {
3026         os = output_files[i]->ctx;
3027         av_write_trailer(os);
3030     /* dump report by using the first video and audio streams */
3031     print_report(1, timer_start);
3033     /* close each encoder */
3034     for (i = 0; i < nb_output_streams; i++) {
3035         ost = output_streams[i];
3036         if (ost->encoding_needed) {
3037             av_freep(&ost->st->codec->stats_in);
3038             avcodec_close(ost->st->codec);
3042     /* close each decoder */
3043     for (i = 0; i < nb_input_streams; i++) {
3044         ist = input_streams[i];
3045         if (ist->decoding_needed) {
3046             avcodec_close(ist->st->codec);
/* cleanup: release per-stream buffers and option dictionaries */
3054     av_freep(&no_packet);
3056     if (output_streams) {
3057         for (i = 0; i < nb_output_streams; i++) {
3058             ost = output_streams[i];
3060             if (ost->stream_copy)
3061                 av_freep(&ost->st->codec->extradata);
3063                 fclose(ost->logfile);
3064                 ost->logfile = NULL;
3066             av_freep(&ost->st->codec->subtitle_header);
3067             av_free(ost->forced_kf_pts);
3068             av_dict_free(&ost->opts);
/*
 * Parse a "-aspect" argument: either "X:Y" (two integers separated by a
 * colon) or a plain floating-point value.  Logs a fatal error on an
 * invalid specification.  Returns the aspect ratio as a double.
 */
3075 static double parse_frame_aspect_ratio(const char *arg)
3082     p = strchr(arg, ':');
/* "X:Y" form: parse numerator and denominator around the colon */
3084         x = strtol(arg, &end, 10);
3086             y = strtol(end + 1, &end, 10);
3088             ar = (double)x / (double)y;
/* plain number form */
3090         ar = strtod(arg, NULL);
3093         av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
/* -acodec: alias for -codec:a */
3099 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3101     return parse_option(o, "codec:a", arg, options);
/* -vcodec: alias for -codec:v */
3104 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3106     return parse_option(o, "codec:v", arg, options);
/* -scodec: alias for -codec:s */
3109 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3111     return parse_option(o, "codec:s", arg, options);
/* -dcodec: alias for -codec:d */
3114 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3116     return parse_option(o, "codec:d", arg, options);
/*
 * Parse a -map argument and append the resulting StreamMap entries to
 * o->stream_maps.  Supported forms (as visible here): an optional
 * ",sync_spec" suffix selecting a sync stream, a "[label]" form that
 * refers to a complex-filtergraph output, and "file_idx[:stream_spec]"
 * which maps every matching stream of that input file.
 * NOTE(review): elided listing; the '-' negative-map prefix handling is
 * not visible in this excerpt.
 */
3119 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3121     StreamMap *m = NULL;
3122     int i, negative = 0, file_idx;
3123     int sync_file_idx = -1, sync_stream_idx;
3131     map = av_strdup(arg);
3133     /* parse sync stream first, just pick first matching stream */
3134     if (sync = strchr(map, ',')) {
3136         sync_file_idx = strtol(sync + 1, &sync, 0);
3137         if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3138             av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3143         for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3144             if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3145                                        input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3146                 sync_stream_idx = i;
3149         if (i == input_files[sync_file_idx]->nb_streams) {
3150             av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3151                    "match any streams.\n", arg);
3157     if (map[0] == '[') {
3158         /* this mapping refers to lavfi output */
3159         const char *c = map + 1;
3160         o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3161                                     &o->nb_stream_maps, o->nb_stream_maps + 1);
3162         m = &o->stream_maps[o->nb_stream_maps - 1];
3163         m->linklabel = av_get_token(&c, "]");
3164         if (!m->linklabel) {
3165             av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
/* plain "file_idx[:stream_spec]" form */
3169         file_idx = strtol(map, &p, 0);
3170         if (file_idx >= nb_input_files || file_idx < 0) {
3171             av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3175         /* disable some already defined maps */
3176         for (i = 0; i < o->nb_stream_maps; i++) {
3177             m = &o->stream_maps[i];
3178             if (file_idx == m->file_index &&
3179                 check_stream_specifier(input_files[m->file_index]->ctx,
3180                                        input_files[m->file_index]->ctx->streams[m->stream_index],
3181                                        *p == ':' ? p + 1 : p) > 0)
/* add one map entry per stream of the file that matches the specifier */
3185         for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3186             if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3187                                        *p == ':' ? p + 1 : p) <= 0)
3189             o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3190                                         &o->nb_stream_maps, o->nb_stream_maps + 1);
3191             m = &o->stream_maps[o->nb_stream_maps - 1];
3193             m->file_index = file_idx;
3194             m->stream_index = i;
/* sync defaults to the mapped stream itself when no ",sync" was given */
3196             if (sync_file_idx >= 0) {
3197                 m->sync_file_index = sync_file_idx;
3198                 m->sync_stream_index = sync_stream_idx;
3200                 m->sync_file_index = file_idx;
3201                 m->sync_stream_index = i;
3207         av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/* -attach: remember a filename to be embedded as an attachment stream */
3215 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3217     o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3218                                 &o->nb_attachments, o->nb_attachments + 1);
3219     o->attachments[o->nb_attachments - 1] = arg;
3224  * Parse a metadata specifier in arg.
3225  * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3226  * @param index for type c/p, chapter/program index is written here
3227  * @param stream_spec for type s, the stream specifier is written here
3229 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
/* 's' type: everything after the ':' is the stream specifier */
3237             if (*(++arg) && *arg != ':') {
3238                 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3241             *stream_spec = *arg == ':' ? arg + 1 : "";
/* 'c'/'p' types: an optional ":N" selects the chapter/program index */
3245             if (*(++arg) == ':')
3246                 *index = strtol(++arg, NULL, 0);
3249             av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/*
 * Implement -map_metadata: copy a metadata dictionary selected by inspec
 * (from input context ic) into the one selected by outspec (in output
 * context oc), without overwriting existing keys.  Marks the matching
 * manual-metadata flags in *o so defaults are not applied on top.
 */
3256 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3258     AVDictionary **meta_in = NULL;
3259     AVDictionary **meta_out;
3261     char type_in, type_out;
3262     const char *istream_spec = NULL, *ostream_spec = NULL;
3263     int idx_in = 0, idx_out = 0;
3265     parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3266     parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
/* record that the user mapped metadata manually at these levels */
3268     if (type_in == 'g' || type_out == 'g')
3269         o->metadata_global_manual = 1;
3270     if (type_in == 's' || type_out == 's')
3271         o->metadata_streams_manual = 1;
3272     if (type_in == 'c' || type_out == 'c')
3273         o->metadata_chapters_manual = 1;
3275 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3276     if ((index) < 0 || (index) >= (nb_elems)) {\
3277         av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
/* SET_DICT resolves a (type, index) pair to the matching dictionary */
3282 #define SET_DICT(type, meta, context, index)\
3285             meta = &context->metadata;\
3288             METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3289             meta = &context->chapters[index]->metadata;\
3292             METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3293             meta = &context->programs[index]->metadata;\
3297     SET_DICT(type_in, meta_in, ic, idx_in);
3298     SET_DICT(type_out, meta_out, oc, idx_out);
3300     /* for input streams choose first matching stream */
3301     if (type_in == 's') {
3302         for (i = 0; i < ic->nb_streams; i++) {
3303             if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3304                 meta_in = &ic->streams[i]->metadata;
3310             av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
/* for output streams, copy into every stream matching the specifier */
3315     if (type_out == 's') {
3316         for (i = 0; i < oc->nb_streams; i++) {
3317             if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3318                 meta_out = &oc->streams[i]->metadata;
3319                 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3324         av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/*
 * Look up an encoder (encoder != 0) or decoder by name and verify it has
 * the requested media type; exits fatally on an unknown name or a type
 * mismatch.  Returns the matching AVCodec.
 */
3329 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3331     const char *codec_string = encoder ? "encoder" : "decoder";
3335         avcodec_find_encoder_by_name(name) :
3336         avcodec_find_decoder_by_name(name);
3338         av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3341     if (codec->type != type) {
3342         av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/*
 * Pick the decoder for a stream: if the user forced one with -codec for
 * this stream, use it (and force the stream's codec_id to match);
 * otherwise fall back to the default decoder for the stream's codec_id.
 */
3348 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3350     char *codec_name = NULL;
3352     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3354         AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
3355         st->codec->codec_id = codec->id;
3358         return avcodec_find_decoder(st->codec->codec_id);
3362  * Add all the streams from the given input file to the global
3363  * list of input streams.
3365 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3369     for (i = 0; i < ic->nb_streams; i++) {
3370         AVStream *st = ic->streams[i];
3371         AVCodecContext *dec = st->codec;
3372         InputStream *ist = av_mallocz(sizeof(*ist));
3377         input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3378         input_streams[nb_input_streams - 1] = ist;
3381         ist->file_index = nb_input_files;
/* discard everything by default; stream selection enables what is used */
3383         st->discard  = AVDISCARD_ALL;
3384         ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3386         ist->ts_scale = 1.0;
3387         MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3389         ist->dec = choose_decoder(o, ic, st);
/* remember the initial parameters so later resampling/rescaling can
 * detect mid-stream changes */
3391         switch (dec->codec_type) {
3392         case AVMEDIA_TYPE_VIDEO:
3393             ist->resample_height  = dec->height;
3394             ist->resample_width   = dec->width;
3395             ist->resample_pix_fmt = dec->pix_fmt;
3398         case AVMEDIA_TYPE_AUDIO:
3399             guess_input_channel_layout(ist);
3401             ist->resample_sample_fmt     = dec->sample_fmt;
3402             ist->resample_sample_rate    = dec->sample_rate;
3403             ist->resample_channels       = dec->channels;
3404             ist->resample_channel_layout = dec->channel_layout;
3407         case AVMEDIA_TYPE_DATA:
3408         case AVMEDIA_TYPE_SUBTITLE:
3409         case AVMEDIA_TYPE_ATTACHMENT:
3410         case AVMEDIA_TYPE_UNKNOWN:
/*
 * Abort (or prompt, when stdin is usable and -y was not given) before
 * overwriting an existing output file.  Only local file paths are
 * checked: URLs with a protocol other than "file:" are skipped
 * (the "filename[1] == ':'" test keeps Windows drive letters local).
 */
3418 static void assert_file_overwrite(const char *filename)
3420     if (!file_overwrite &&
3421         (strchr(filename, ':') == NULL || filename[1] == ':' ||
3422          av_strstart(filename, "file:", NULL))) {
3423         if (avio_check(filename, 0) == 0) {
3425                 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3427                 if (!read_yesno()) {
3428                     fprintf(stderr, "Not overwriting - exiting\n");
3433                 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/*
 * Implement -dump_attachment: write the extradata of an attachment
 * stream to a file.  When no filename was given on the command line,
 * the stream's "filename" metadata tag is used; a missing name or an
 * unwritable destination is fatal.
 */
3440 static void dump_attachment(AVStream *st, const char *filename)
3443     AVIOContext *out = NULL;
3444     AVDictionaryEntry *e;
3446     if (!st->codec->extradata_size) {
3447         av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3448                nb_input_files - 1, st->index);
3451     if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3452         filename = e->value;
3454         av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3455                "in stream #%d:%d.\n", nb_input_files - 1, st->index);
3459     assert_file_overwrite(filename);
3461     if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3462         av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3467     avio_write(out, st->codec->extradata, st->codec->extradata_size);
/*
 * Open one input file named on the command line: apply -f/-ar/-ac/
 * -framerate/-video_size/-pix_fmt demuxer options, open and probe the
 * file, perform the -ss seek, register all its streams via
 * add_input_streams(), and append a new InputFile entry to the global
 * input_files array.  Exits fatally on unrecoverable errors.
 */
3472 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3474     AVFormatContext *ic;
3475     AVInputFormat *file_iformat = NULL;
3479     AVDictionary **opts;
3480     int orig_nb_streams;                     // number of streams before avformat_find_stream_info
/* honor a user-forced input format (-f) */
3483         if (!(file_iformat = av_find_input_format(o->format))) {
3484             av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
3489     if (!strcmp(filename, "-"))
3492     using_stdin |= !strncmp(filename, "pipe:", 5) ||
3493                    !strcmp(filename, "/dev/stdin");
3495     /* get default parameters from command line */
3496     ic = avformat_alloc_context();
3498         print_error(filename, AVERROR(ENOMEM));
3501     if (o->nb_audio_sample_rate) {
3502         snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3503         av_dict_set(&format_opts, "sample_rate", buf, 0);
3505     if (o->nb_audio_channels) {
3506         /* because we set audio_channels based on both the "ac" and
3507          * "channel_layout" options, we need to check that the specified
3508          * demuxer actually has the "channels" option before setting it */
3509         if (file_iformat && file_iformat->priv_class &&
3510             av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3511                         AV_OPT_SEARCH_FAKE_OBJ)) {
3512             snprintf(buf, sizeof(buf), "%d",
3513                      o->audio_channels[o->nb_audio_channels - 1].u.i);
3514             av_dict_set(&format_opts, "channels", buf, 0);
3517     if (o->nb_frame_rates) {
3518         av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3520     if (o->nb_frame_sizes) {
3521         av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3523     if (o->nb_frame_pix_fmts)
3524         av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3526     ic->flags |= AVFMT_FLAG_NONBLOCK;
3527     ic->interrupt_callback = int_cb;
3529     /* open the input file with generic libav function */
3530     err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3532         print_error(filename, err);
3535     assert_avoptions(format_opts);
3537     /* apply forced codec ids */
3538     for (i = 0; i < ic->nb_streams; i++)
3539         choose_decoder(o, ic, ic->streams[i]);
3541     /* Set AVCodecContext options for avformat_find_stream_info */
3542     opts = setup_find_stream_info_opts(ic, codec_opts);
3543     orig_nb_streams = ic->nb_streams;
3545     /* If not enough info to get the stream parameters, we decode the
3546        first frames to get it. (used in mpeg case for example) */
3547     ret = avformat_find_stream_info(ic, opts);
3549         av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3550         avformat_close_input(&ic);
3554     timestamp = o->start_time;
3555     /* add the stream start time */
3556     if (ic->start_time != AV_NOPTS_VALUE)
3557         timestamp += ic->start_time;
3559     /* if seeking requested, we execute it */
3560     if (o->start_time != 0) {
3561         ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
/* seek failure is non-fatal: warn and decode from the start instead */
3563             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3564                    filename, (double)timestamp / AV_TIME_BASE);
3568     /* update the current parameters so that they match the one of the input stream */
3569     add_input_streams(o, ic);
3571     /* dump the file content */
3572     av_dump_format(ic, nb_input_files, filename, 0);
3574     input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3575     if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3578     input_files[nb_input_files - 1]->ctx        = ic;
3579     input_files[nb_input_files - 1]->ist_index  = nb_input_streams - ic->nb_streams;
3580     input_files[nb_input_files - 1]->ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3581     input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3582     input_files[nb_input_files - 1]->rate_emu   = o->rate_emu;
/* dump any attachments the user asked for with -dump_attachment */
3584     for (i = 0; i < o->nb_dump_attachment; i++) {
3587         for (j = 0; j < ic->nb_streams; j++) {
3588             AVStream *st = ic->streams[j];
3590             if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3591                 dump_attachment(st, o->dump_attachment[i].u.str);
3595     for (i = 0; i < orig_nb_streams; i++)
3596         av_dict_free(&opts[i]);
/*
 * Parse the -force_key_frames argument: a comma-separated list of
 * timestamps.  Each is converted with parse_time_or_die() and rescaled
 * into the encoder time base, filling ost->forced_kf_pts/forced_kf_count.
 */
3603 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3604                                     AVCodecContext *avctx)
/* count list entries (visible scan over the string) */
3610     for (p = kf; *p; p++)
3613     ost->forced_kf_count = n;
3614     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3615     if (!ost->forced_kf_pts) {
3616         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3619     for (i = 0; i < n; i++) {
3620         p = i ? strchr(p, ',') + 1 : kf;
3621         t = parse_time_or_die("force_key_frames", p, 1);
3622         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/*
 * Read one '\n'- (or EOF-) terminated line from s into a freshly
 * allocated buffer using a dynamic buffer.  Caller owns the returned
 * memory.  Fatal on allocation failure.
 */
3626 static uint8_t *get_line(AVIOContext *s)
3632     if (avio_open_dyn_buf(&line) < 0) {
3633         av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
3637     while ((c = avio_r8(s)) && c != '\n')
3640     avio_close_dyn_buf(line, &buf);
/*
 * Locate and open a preset file, searching the directories in base[]
 * ($AVCONV_DATADIR first).  For each directory it tries the
 * codec-specific "<codec>-<preset>.avpreset" name before the generic
 * "<preset>.avpreset"; base[1] gets a "/.avconv" suffix.  Returns the
 * avio_open2() result, leaving *s open on success.
 */
3645 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3648     char filename[1000];
3649     const char *base[3] = { getenv("AVCONV_DATADIR"),
3654     for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3658             snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3659                      i != 1 ? "" : "/.avconv", codec_name, preset_name);
3660             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
3663             snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3664                      i != 1 ? "" : "/.avconv", preset_name);
3665             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/*
 * Pick the encoder for an output stream: with no per-stream -codec
 * option, guess from the output format and filename; "copy" selects
 * stream copy; any other name is looked up (fatal if unknown) and the
 * stream's codec_id is forced to the chosen encoder's id.
 */
3671 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3673     char *codec_name = NULL;
3675     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
3677         ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3678                                                   NULL, ost->st->codec->codec_type);
3679         ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3680     } else if (!strcmp(codec_name, "copy"))
3681         ost->stream_copy = 1;
3683         ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3684         ost->st->codec->codec_id = ost->enc->id;
/* Create a new output stream of the given media type in 'oc':
 * allocates the AVStream and the wrapping OutputStream, picks the
 * encoder (or streamcopy), applies a preset file if one was requested,
 * and parses the generic per-stream options (max frames, bitstream
 * filters, codec tag, fixed qscale).  Aborts via av_log(FATAL) on any
 * unrecoverable error.  Returns the new OutputStream. */
3688 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3691 AVStream *st = avformat_new_stream(oc, NULL);
3692 int idx = oc->nb_streams - 1, ret = 0;
3693 char *bsf = NULL, *next, *codec_tag = NULL;
3694 AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3696 char *buf = NULL, *arg = NULL, *preset = NULL;
3697 AVIOContext *s = NULL;
3700 av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* Honor an explicit -streamid mapping for this output index, if any. */
3704 if (oc->nb_streams - 1 < o->nb_streamid_map)
3705 st->id = o->streamid_map[oc->nb_streams - 1];
3707 output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3708 nb_output_streams + 1);
3709 if (!(ost = av_mallocz(sizeof(*ost))))
3711 output_streams[nb_output_streams - 1] = ost;
3713 ost->file_index = nb_output_files;
3716 st->codec->codec_type = type;
3717 choose_encoder(o, oc, ost);
3719 ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3722 avcodec_get_context_defaults3(st->codec, ost->enc);
3723 st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* Apply a preset file (key=value lines, '#' comments) into ost->opts. */
3725 MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3726 if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
3729 if (!buf[0] || buf[0] == '#') {
3733 if (!(arg = strchr(buf, '='))) {
3734 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
/* DONT_OVERWRITE: explicit command-line options win over preset values. */
3738 av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3740 } while (!s->eof_reached);
3744 av_log(NULL, AV_LOG_FATAL,
3745 "Preset %s specified for stream %d:%d, but could not be opened.\n",
3746 preset, ost->file_index, ost->index);
3750 ost->max_frames = INT64_MAX;
3751 MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* Build the chain of bitstream filters from a comma-separated list. */
3753 MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3755 if (next = strchr(bsf, ',')) /* intentional assignment: split at first ',' */
3757 if (!(bsfc = av_bitstream_filter_init(bsf))) {
3758 av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3762 bsfc_prev->next = bsfc;
3764 ost->bitstream_filters = bsfc;
/* Codec tag: numeric string parsed as int, otherwise raw fourcc bytes. */
3770 MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3772 uint32_t tag = strtol(codec_tag, &next, 0);
3774 tag = AV_RL32(codec_tag);
3775 st->codec->codec_tag = tag;
3778 MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3779 if (qscale >= 0 || same_quant) {
3780 st->codec->flags |= CODEC_FLAG_QSCALE;
3781 st->codec->global_quality = FF_QP2LAMBDA * qscale;
/* Formats with global headers need the codec to emit them, too. */
3784 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3785 st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3787 av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3789 ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of quantizer-matrix coefficients from
 * 'str' into 'dest'; aborts with a fatal error on malformed input. */
3794 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3797 const char *p = str;
3804 av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream and, unless it is a streamcopy, parse
 * all video-specific per-stream options into the encoder context:
 * frame rate, frame size, aspect ratio, pixel format, custom intra and
 * inter quantizer matrices, rate-control overrides, multi-pass flags,
 * forced key frames and the per-stream filter chain. */
3811 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3815 AVCodecContext *video_enc;
3817 ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3819 video_enc = st->codec;
3821 if (!ost->stream_copy) {
3822 const char *p = NULL;
3823 char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3824 char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3825 char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3828 MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3829 if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3830 av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3834 MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3835 if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3836 av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
3840 MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
3841 if (frame_aspect_ratio)
3842 ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
3844 MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
3845 if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
3846 av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
3849 st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* Custom quantizer matrices: 64 coefficients each, freed by the codec. */
3851 MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
3853 if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
3854 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
3857 parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
3859 MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
3861 if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
3862 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
3865 parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: "start,end,q[,...]" entries; q > 0 is a fixed qscale,
 * q <= 0 is interpreted as a negative percentage quality factor. */
3868 MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
3869 for (i = 0; p; i++) {
3871 int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
3873 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
3876 video_enc->rc_override =
3877 av_realloc(video_enc->rc_override,
3878 sizeof(RcOverride) * (i + 1));
3879 video_enc->rc_override[i].start_frame = start;
3880 video_enc->rc_override[i].end_frame = end;
3882 video_enc->rc_override[i].qscale = q;
3883 video_enc->rc_override[i].quality_factor = 1.0;
3886 video_enc->rc_override[i].qscale = 0;
3887 video_enc->rc_override[i].quality_factor = -q/100.0;
3892 video_enc->rc_override_count = i;
3893 if (!video_enc->rc_initial_buffer_occupancy)
3894 video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
3895 video_enc->intra_dc_precision = intra_dc_precision - 8;
/* Two-pass encoding flags (set from the global do_pass). */
3900 video_enc->flags |= CODEC_FLAG_PASS1;
3902 video_enc->flags |= CODEC_FLAG_PASS2;
3906 MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
3907 if (forced_key_frames)
3908 parse_forced_key_frames(forced_key_frames, ost, video_enc);
3910 MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 means "not specified": let the encoder decide the field order. */
3912 ost->top_field_first = -1;
3913 MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
3915 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3917 ost->avfilter = av_strdup(filters);
3919 MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/* Create an audio output stream and, unless it is a streamcopy, parse
 * the audio-specific per-stream options: channel count, sample format,
 * sample rate and the per-stream filter chain. */
3925 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
3929 AVCodecContext *audio_enc;
3931 ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
3934 audio_enc = st->codec;
3935 audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
3937 if (!ost->stream_copy) {
3938 char *sample_fmt = NULL, *filters = NULL;
3940 MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
3942 MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
3944 (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
3945 av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
3949 MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
3951 MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
3953 ost->avfilter = av_strdup(filters);
/* Create a data output stream.  Encoding data streams is unsupported:
 * anything other than streamcopy is a fatal error. */
3959 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
3963 ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
3964 if (!ost->stream_copy) {
3965 av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always copied. */
3972 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
3974 OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
3975 ost->stream_copy = 1;
/* Create a subtitle output stream; no subtitle-specific options are
 * parsed here beyond forcing the codec type. */
3979 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
3983 AVCodecContext *subtitle_enc;
3985 ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
3987 subtitle_enc = st->codec;
3989 subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
3994 /* arg format is "output-stream-index:streamid-value". */
/* -streamid handler: records an explicit stream id for one output
 * stream index in o->streamid_map, growing the map as needed. */
3995 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
4001 av_strlcpy(idx_str, arg, sizeof(idx_str));
4002 p = strchr(idx_str, ':');
4004 av_log(NULL, AV_LOG_FATAL,
4005 "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4010 idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4011 o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4012 o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/* Copy chapters from an input file to an output file, shifting their
 * timestamps by the output start time / input ts offset and dropping
 * chapters that fall entirely outside the recording window.  Chapter
 * start/end are clipped into the window.  Metadata is copied when
 * 'copy_metadata' is set.  Returns 0 or AVERROR(ENOMEM). */
4016 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4018 AVFormatContext *is = ifile->ctx;
4019 AVFormatContext *os = ofile->ctx;
4022 for (i = 0; i < is->nb_chapters; i++) {
4023 AVChapter *in_ch = is->chapters[i], *out_ch;
/* Offset of the output timeline, expressed in this chapter's time base. */
4024 int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
4025 AV_TIME_BASE_Q, in_ch->time_base);
4026 int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4027 av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* Skip chapters that end before the window starts or start after it ends. */
4030 if (in_ch->end < ts_off)
4032 if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4035 out_ch = av_mallocz(sizeof(AVChapter));
4037 return AVERROR(ENOMEM);
4039 out_ch->id = in_ch->id;
4040 out_ch->time_base = in_ch->time_base;
4041 out_ch->start = FFMAX(0, in_ch->start - ts_off);
4042 out_ch->end = FFMIN(rt, in_ch->end - ts_off);
4045 av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4048 os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4050 return AVERROR(ENOMEM);
4051 os->chapters[os->nb_chapters - 1] = out_ch;
/* Bind a complex-filtergraph output pad to a newly created output
 * stream.  Only video pads are supported; the new stream has no input
 * source (source_index = -1) because it is fed by the filtergraph, so
 * requesting streamcopy for it is an error. */
4056 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4057 AVFormatContext *oc)
4061 if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
4062 av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
4066 ost = new_video_stream(o, oc);
4067 ost->source_index = -1;
4068 ost->filter = ofilter;
4072 if (ost->stream_copy) {
4073 av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4074 "which is fed from a complex filtergraph. Filtering and streamcopy "
4075 "cannot be used together.\n", ost->file_index, ost->index);
4079 if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4080 av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4083 avfilter_inout_free(&ofilter->out_tmp);
/* Handle one output file argument: guess or force the output format,
 * create output streams (from complex filtergraph pads, automatic
 * "best stream" selection, or explicit -map entries), attach files,
 * open the output AVIOContext, and apply muxer options, chapter and
 * metadata mappings.  All failures abort via av_log(FATAL)/exit paths. */
4086 static void opt_output_file(void *optctx, const char *filename)
4088 OptionsContext *o = optctx;
4089 AVFormatContext *oc;
4091 AVOutputFormat *file_oformat;
4095 if (configure_complex_filters() < 0) {
4096 av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4100 if (!strcmp(filename, "-"))
4103 oc = avformat_alloc_context();
4105 print_error(filename, AVERROR(ENOMEM));
/* Explicit -f option takes priority; otherwise guess from the filename. */
4110 file_oformat = av_guess_format(o->format, NULL, NULL);
4111 if (!file_oformat) {
4112 av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4116 file_oformat = av_guess_format(NULL, filename, NULL);
4117 if (!file_oformat) {
4118 av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4124 oc->oformat = file_oformat;
4125 oc->interrupt_callback = int_cb;
4126 av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4128 /* create streams for all unlabeled output pads */
4129 for (i = 0; i < nb_filtergraphs; i++) {
4130 FilterGraph *fg = filtergraphs[i];
4131 for (j = 0; j < fg->nb_outputs; j++) {
4132 OutputFilter *ofilter = fg->outputs[j];
4134 if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* A filtergraph output of a given type disables auto-selection of that type. */
4137 switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4138 case AVMEDIA_TYPE_VIDEO: o->video_disable = 1; break;
4139 case AVMEDIA_TYPE_AUDIO: o->audio_disable = 1; break;
4140 case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4142 init_output_filter(ofilter, o, oc);
4146 if (!o->nb_stream_maps) {
4147 /* pick the "best" stream of each type */
4148 #define NEW_STREAM(type, index)\
4150 ost = new_ ## type ## _stream(o, oc);\
4151 ost->source_index = index;\
4152 ost->sync_ist = input_streams[index];\
4153 input_streams[index]->discard = 0;\
4154 input_streams[index]->st->discard = AVDISCARD_NONE;\
4157 /* video: highest resolution */
4158 if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4159 int area = 0, idx = -1;
4160 for (i = 0; i < nb_input_streams; i++) {
4161 ist = input_streams[i];
4162 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4163 ist->st->codec->width * ist->st->codec->height > area) {
4164 area = ist->st->codec->width * ist->st->codec->height;
4168 NEW_STREAM(video, idx);
4171 /* audio: most channels */
4172 if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4173 int channels = 0, idx = -1;
4174 for (i = 0; i < nb_input_streams; i++) {
4175 ist = input_streams[i];
4176 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4177 ist->st->codec->channels > channels) {
4178 channels = ist->st->codec->channels;
4182 NEW_STREAM(audio, idx);
4185 /* subtitles: pick first */
4186 if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4187 for (i = 0; i < nb_input_streams; i++)
4188 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4189 NEW_STREAM(subtitle, i);
4193 /* do something with data? */
4195 for (i = 0; i < o->nb_stream_maps; i++) {
4196 StreamMap *map = &o->stream_maps[i];
/* A [linklabel] map refers to a complex-filtergraph output, not an input stream. */
4201 if (map->linklabel) {
4203 OutputFilter *ofilter = NULL;
4206 for (j = 0; j < nb_filtergraphs; j++) {
4207 fg = filtergraphs[j];
4208 for (k = 0; k < fg->nb_outputs; k++) {
4209 AVFilterInOut *out = fg->outputs[k]->out_tmp;
4210 if (out && !strcmp(out->name, map->linklabel)) {
4211 ofilter = fg->outputs[k];
4218 av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4219 "in any defined filter graph.\n", map->linklabel);
4222 init_output_filter(ofilter, o, oc);
4224 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4225 switch (ist->st->codec->codec_type) {
4226 case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
4227 case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
4228 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4229 case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
4230 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4232 av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4233 map->file_index, map->stream_index);
4237 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4238 ost->sync_ist = input_streams[input_files[map->sync_file_index]->ist_index +
4239 map->sync_stream_index];
4241 ist->st->discard = AVDISCARD_NONE;
4246 /* handle attached files */
4247 for (i = 0; i < o->nb_attachments; i++) {
4249 uint8_t *attachment;
4253 if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4254 av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4258 if ((len = avio_size(pb)) <= 0) {
4259 av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4263 if (!(attachment = av_malloc(len))) {
4264 av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4268 avio_read(pb, attachment, len);
4270 ost = new_attachment_stream(o, oc);
4271 ost->stream_copy = 0;
4272 ost->source_index = -1;
4273 ost->attachment_filename = o->attachments[i];
4274 ost->st->codec->extradata = attachment;
4275 ost->st->codec->extradata_size = len;
/* Strip the directory part for the stored attachment filename. */
4277 p = strrchr(o->attachments[i], '/');
4278 av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
4282 output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4283 if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4286 output_files[nb_output_files - 1]->ctx = oc;
4287 output_files[nb_output_files - 1]->ost_index = nb_output_streams - oc->nb_streams;
4288 output_files[nb_output_files - 1]->recording_time = o->recording_time;
4289 if (o->recording_time != INT64_MAX)
4290 oc->duration = o->recording_time;
4291 output_files[nb_output_files - 1]->start_time = o->start_time;
4292 output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4293 av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4295 /* check filename in case of an image number is expected */
4296 if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4297 if (!av_filename_number_test(oc->filename)) {
4298 print_error(oc->filename, AVERROR(EINVAL));
4303 if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4304 /* test if it already exists to avoid losing precious files */
4305 assert_file_overwrite(filename);
4308 if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4309 &oc->interrupt_callback,
4310 &output_files[nb_output_files - 1]->opts)) < 0) {
4311 print_error(filename, err);
4316 if (o->mux_preload) {
4318 snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4319 av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4321 oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4322 oc->flags |= AVFMT_FLAG_NONBLOCK;
4325 for (i = 0; i < o->nb_metadata_map; i++) {
4327 int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4329 if (in_file_index < 0)
4331 if (in_file_index >= nb_input_files) {
4332 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4335 copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
4339 if (o->chapters_input_file >= nb_input_files) {
4340 if (o->chapters_input_file == INT_MAX) {
4341 /* copy chapters from the first input file that has them*/
4342 o->chapters_input_file = -1;
4343 for (i = 0; i < nb_input_files; i++)
4344 if (input_files[i]->ctx->nb_chapters) {
4345 o->chapters_input_file = i;
4349 av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4350 o->chapters_input_file);
4354 if (o->chapters_input_file >= 0)
4355 copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4356 !o->metadata_chapters_manual);
4358 /* copy global metadata by default */
4359 if (!o->metadata_global_manual && nb_input_files)
4360 av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4361 AV_DICT_DONT_OVERWRITE);
4362 if (!o->metadata_streams_manual)
4363 for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4365 if (output_streams[i]->source_index < 0) /* this is true e.g. for attached files */
4367 ist = input_streams[output_streams[i]->source_index];
4368 av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4371 /* process manually set metadata */
4372 for (i = 0; i < o->nb_metadata; i++) {
4375 const char *stream_spec;
4376 int index = 0, j, ret;
4378 val = strchr(o->metadata[i].u.str, '=');
4380 av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4381 o->metadata[i].u.str);
4386 parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
4388 for (j = 0; j < oc->nb_streams; j++) {
4389 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4390 av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
4402 if (index < 0 || index >= oc->nb_chapters) {
4403 av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4406 m = &oc->chapters[index]->metadata;
4409 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4412 av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4419 /* same option as mencoder */
/* -pass handler: select pass 1 or 2 of two-pass encoding. */
4420 static int opt_pass(const char *opt, const char *arg)
4422 do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return the user CPU time consumed by this process, in microseconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows, and
 * falls back to wall-clock time (av_gettime) otherwise. */
4426 static int64_t getutime(void)
4429 struct rusage rusage;
4431 getrusage(RUSAGE_SELF, &rusage);
4432 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4433 #elif HAVE_GETPROCESSTIMES
4435 FILETIME c, e, k, u;
4436 proc = GetCurrentProcess();
4437 GetProcessTimes(proc, &c, &e, &k, &u);
/* FILETIME is in 100 ns units; divide by 10 to get microseconds. */
4438 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4440 return av_gettime();
/* Return the peak memory usage of this process in bytes, using
 * getrusage() (ru_maxrss is in KiB, hence * 1024) or, on Windows,
 * GetProcessMemoryInfo().  The fallback path (elided here) reports 0. */
4444 static int64_t getmaxrss(void)
4446 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4447 struct rusage rusage;
4448 getrusage(RUSAGE_SELF, &rusage);
4449 return (int64_t)rusage.ru_maxrss * 1024;
4450 #elif HAVE_GETPROCESSMEMORYINFO
4452 PROCESS_MEMORY_COUNTERS memcounters;
4453 proc = GetCurrentProcess();
4454 memcounters.cb = sizeof(memcounters);
4455 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4456 return memcounters.PeakPagefileUsage;
/* -aq: alias for "-q:a". */
4462 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4464 return parse_option(o, "q:a", arg, options);
/* Print the one-line usage synopsis to stdout. */
4467 static void show_usage(void)
4469 printf("Hyper fast Audio and Video encoder\n");
4470 printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/* Print the full help text: the option table grouped into main,
 * advanced, per-media-type and grab sections, followed by the AVOption
 * help of the codec, format and swscale classes. */
4474 static void show_help(void)
4476 int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
4477 av_log_set_callback(log_callback_help);
4479 show_help_options(options, "Main options:\n",
4480 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4481 show_help_options(options, "\nAdvanced options:\n",
4482 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4484 show_help_options(options, "\nVideo options:\n",
4485 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4487 show_help_options(options, "\nAdvanced Video options:\n",
4488 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4489 OPT_VIDEO | OPT_EXPERT);
4490 show_help_options(options, "\nAudio options:\n",
4491 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4493 show_help_options(options, "\nAdvanced Audio options:\n",
4494 OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4495 OPT_AUDIO | OPT_EXPERT);
4496 show_help_options(options, "\nSubtitle options:\n",
4497 OPT_SUBTITLE | OPT_GRAB,
4499 show_help_options(options, "\nAudio/Video grab options:\n",
4503 show_help_children(avcodec_get_class(), flags);
4504 show_help_children(avformat_get_class(), flags);
4505 show_help_children(sws_get_class(), flags);
/* -target handler: configure a full set of options for a canned target
 * ("vcd", "svcd", "dvd", "dv"/"dv50", optionally prefixed with "pal-",
 * "ntsc-" or "film-").  If no prefix is given, the norm is guessed from
 * the frame rates of the already-opened input files; failing that, a
 * fatal error asks the user to specify it explicitly. */
4508 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4510 enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4511 static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4513 if (!strncmp(arg, "pal-", 4)) {
4516 } else if (!strncmp(arg, "ntsc-", 5)) {
4519 } else if (!strncmp(arg, "film-", 5)) {
4523 /* Try to determine PAL/NTSC by peeking in the input files */
4524 if (nb_input_files) {
4526 for (j = 0; j < nb_input_files; j++) {
4527 for (i = 0; i < input_files[j]->nb_streams; i++) {
4528 AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4529 if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* Frame rate in millihertz: 25000 -> PAL, 29970/23976 -> NTSC/film. */
4531 fr = c->time_base.den * 1000 / c->time_base.num;
4535 } else if ((fr == 29970) || (fr == 23976)) {
4540 if (norm != UNKNOWN)
4544 if (norm != UNKNOWN)
4545 av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4548 if (norm == UNKNOWN) {
4549 av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4550 av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4551 av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
4555 if (!strcmp(arg, "vcd")) {
4556 opt_video_codec(o, "c:v", "mpeg1video");
4557 opt_audio_codec(o, "c:a", "mp2");
4558 parse_option(o, "f", "vcd", options);
4560 parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4561 parse_option(o, "r", frame_rates[norm], options);
4562 opt_default("g", norm == PAL ? "15" : "18");
4564 opt_default("b", "1150000");
4565 opt_default("maxrate", "1150000");
4566 opt_default("minrate", "1150000");
4567 opt_default("bufsize", "327680"); // 40*1024*8;
4569 opt_default("b:a", "224000");
4570 parse_option(o, "ar", "44100", options);
4571 parse_option(o, "ac", "2", options);
4573 opt_default("packetsize", "2324");
4574 opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4576 /* We have to offset the PTS, so that it is consistent with the SCR.
4577 SCR starts at 36000, but the first two packs contain only padding
4578 and the first pack from the other stream, respectively, may also have
4579 been written before.
4580 So the real data starts at SCR 36000+3*1200. */
4581 o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
4582 } else if (!strcmp(arg, "svcd")) {
4584 opt_video_codec(o, "c:v", "mpeg2video");
4585 opt_audio_codec(o, "c:a", "mp2");
4586 parse_option(o, "f", "svcd", options);
4588 parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4589 parse_option(o, "r", frame_rates[norm], options);
4590 opt_default("g", norm == PAL ? "15" : "18");
4592 opt_default("b", "2040000");
4593 opt_default("maxrate", "2516000");
4594 opt_default("minrate", "0"); // 1145000;
4595 opt_default("bufsize", "1835008"); // 224*1024*8;
4596 opt_default("flags", "+scan_offset");
4599 opt_default("b:a", "224000");
4600 parse_option(o, "ar", "44100", options);
4602 opt_default("packetsize", "2324");
4604 } else if (!strcmp(arg, "dvd")) {
4606 opt_video_codec(o, "c:v", "mpeg2video");
4607 opt_audio_codec(o, "c:a", "ac3");
4608 parse_option(o, "f", "dvd", options);
4610 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4611 parse_option(o, "r", frame_rates[norm], options);
4612 opt_default("g", norm == PAL ? "15" : "18");
4614 opt_default("b", "6000000");
4615 opt_default("maxrate", "9000000");
4616 opt_default("minrate", "0"); // 1500000;
4617 opt_default("bufsize", "1835008"); // 224*1024*8;
4619 opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4620 opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4622 opt_default("b:a", "448000");
4623 parse_option(o, "ar", "48000", options);
4625 } else if (!strncmp(arg, "dv", 2)) {
4627 parse_option(o, "f", "dv", options);
4629 parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4630 parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4631 norm == PAL ? "yuv420p" : "yuv411p", options);
4632 parse_option(o, "r", frame_rates[norm], options);
4634 parse_option(o, "ar", "48000", options);
4635 parse_option(o, "ac", "2", options);
4638 av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4639 return AVERROR(EINVAL);
/* -vstats_file: set (replacing any previous value) the filename used
 * for per-frame video statistics. */
4644 static int opt_vstats_file(const char *opt, const char *arg)
4646 av_free (vstats_filename);
4647 vstats_filename = av_strdup (arg);
/* -vstats: enable video statistics with an auto-generated filename
 * derived from the current local time ("vstats_HHMMSS.log"). */
4651 static int opt_vstats(const char *opt, const char *arg)
4654 time_t today2 = time(NULL);
4655 struct tm *today = localtime(&today2);
4657 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4659 return opt_vstats_file(opt, filename);
/* -vframes: alias for "-frames:v". */
4662 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4664 return parse_option(o, "frames:v", arg, options);
/* -aframes: alias for "-frames:a". */
4667 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4669 return parse_option(o, "frames:a", arg, options);
/* -dframes: alias for "-frames:d". */
4672 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4674 return parse_option(o, "frames:d", arg, options);
/* -vtag: alias for "-tag:v". */
4677 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4679 return parse_option(o, "tag:v", arg, options);
/* -atag: alias for "-tag:a". */
4682 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4684 return parse_option(o, "tag:a", arg, options);
/* -stag: alias for "-tag:s". */
4687 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4689 return parse_option(o, "tag:s", arg, options);
/* -vf: alias for "-filter:v". */
4692 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4694 return parse_option(o, "filter:v", arg, options);
/* -af: alias for "-filter:a". */
4697 static int opt_audio_filters(OptionsContext *o, const char *opt, const char *arg)
4699 return parse_option(o, "filter:a", arg, options);
/* -vsync handler: accept the symbolic names "cfr"/"vfr"/"passthrough",
 * otherwise fall back to parsing the argument as a number in the
 * [VSYNC_AUTO, VSYNC_VFR] range. */
4702 static int opt_vsync(const char *opt, const char *arg)
4704 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4705 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4706 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4708 if (video_sync_method == VSYNC_AUTO)
4709 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* -deinterlace: deprecated; warns and points users at "-filter:v yadif". */
4713 static int opt_deinterlace(const char *opt, const char *arg)
4715 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/* -cpuflags: parse a CPU-flags specification and force the mask used
 * by libavutil's CPU-capability detection. */
4720 static int opt_cpuflags(const char *opt, const char *arg)
4722 int flags = av_parse_cpu_flags(arg);
4727 av_set_cpu_flags_mask(flags);
/* Scan argv for -cpuflags before regular option parsing so the CPU
 * mask takes effect for everything that follows. */
4731 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4733 int idx = locate_option(argc, argv, options, "cpuflags");
4734 if (idx && argv[idx + 1])
4735 opt_cpuflags("cpuflags", argv[idx + 1]);
/* -channel_layout handler: resolve the named layout, set the option as
 * its numeric value, and additionally derive and set the matching "ac"
 * (channel count) option, preserving any ":stream" specifier suffix
 * from the original option name. */
4738 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4740 char layout_str[32];
4743 int ret, channels, ac_str_size;
4746 layout = av_get_channel_layout(arg);
4748 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4749 return AVERROR(EINVAL);
4751 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4752 ret = opt_default(opt, layout_str);
4756 /* set 'ac' option based on channel layout */
4757 channels = av_get_channel_layout_nb_channels(layout);
4758 snprintf(layout_str, sizeof(layout_str), "%d", channels);
4759 stream_str = strchr(opt, ':');
/* "ac" + optional ":spec" + NUL; av_strlcpy/av_strlcat bound the writes. */
4760 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4761 ac_str = av_mallocz(ac_str_size);
4763 return AVERROR(ENOMEM);
4764 av_strlcpy(ac_str, "ac", 3);
4766 av_strlcat(ac_str, stream_str, ac_str_size);
4767 ret = parse_option(o, ac_str, layout_str, options);
/* -filter_complex: append a new FilterGraph holding the given graph
 * description; it is configured later, once inputs/outputs are known. */
4773 static int opt_filter_complex(const char *opt, const char *arg)
4775 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4776 &nb_filtergraphs, nb_filtergraphs + 1);
4777 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4778 return AVERROR(ENOMEM);
4779 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4780 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
4784 #define OFFSET(x) offsetof(OptionsContext, x)
4785 static const OptionDef options[] = {
4787 #include "cmdutils_common_opts.h"
4788 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4789 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4790 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4791 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4792 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4793 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4794 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4795 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4796 "outfile[,metadata]:infile[,metadata]" },
4797 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4798 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4799 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4800 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4801 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4802 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4803 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4804 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4805 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4806 "add timings for benchmarking" },
4807 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4808 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4809 "dump each input packet" },
4810 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4811 "when dumping packets, also dump the payload" },
4812 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4813 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4814 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4815 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4816 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4817 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
4818 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
4819 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4820 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4821 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4822 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4823 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4824 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4825 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4826 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4827 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4828 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4829 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4830 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4831 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4832 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
4835 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4836 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4837 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4838 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4839 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4840 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4841 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4842 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4843 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4844 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4845 "use same quantizer as source (implies VBR)" },
4846 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4847 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4848 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
4849 "this option is deprecated, use the yadif filter instead" },
4850 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
4851 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
4852 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
4853 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
4854 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
4855 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
4856 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
4857 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
4858 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
4859 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
4860 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
4861 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
4864 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
4865 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
4866 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
4867 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
4868 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
4869 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
4870 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
4871 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
4872 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
4873 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
4874 { "af", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_filters}, "audio filters", "filter list" },
4876 /* subtitle options */
4877 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
4878 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
4879 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
4882 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
4885 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
4886 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
4888 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
4890 /* data codec support */
4891 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
4893 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
4897 int main(int argc, char **argv)
4899 OptionsContext o = { 0 };
4904 av_log_set_flags(AV_LOG_SKIP_REPEATED);
4905 parse_loglevel(argc, argv, options);
4907 avcodec_register_all();
4909 avdevice_register_all();
4911 avfilter_register_all();
4913 avformat_network_init();
4917 parse_cpuflags(argc, argv, options);
4920 parse_options(&o, argc, argv, options, opt_output_file);
4922 if (nb_output_files <= 0 && nb_input_files == 0) {
4924 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4928 /* file converter / grab */
4929 if (nb_output_files <= 0) {
4930 fprintf(stderr, "At least one output file must be specified\n");
4934 if (nb_input_files == 0) {
4935 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4940 if (transcode() < 0)
4942 ti = getutime() - ti;
4944 int maxrss = getmaxrss() / 1024;
4945 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);