3 * Copyright (c) 2000-2011 The libav developers.
5 * This file is part of Libav.
7 * Libav is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * Libav is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with Libav; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
31 #include "libavformat/avformat.h"
32 #include "libavdevice/avdevice.h"
33 #include "libswscale/swscale.h"
34 #include "libavresample/avresample.h"
35 #include "libavutil/opt.h"
36 #include "libavutil/audioconvert.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/colorspace.h"
40 #include "libavutil/fifo.h"
41 #include "libavutil/intreadwrite.h"
42 #include "libavutil/dict.h"
43 #include "libavutil/mathematics.h"
44 #include "libavutil/pixdesc.h"
45 #include "libavutil/avstring.h"
46 #include "libavutil/libm.h"
47 #include "libavutil/imgutils.h"
48 #include "libavformat/os_support.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/avfiltergraph.h"
52 # include "libavfilter/buffersrc.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/vsrc_buffer.h"
56 #if HAVE_SYS_RESOURCE_H
57 #include <sys/types.h>
59 #include <sys/resource.h>
60 #elif HAVE_GETPROCESSTIMES
63 #if HAVE_GETPROCESSMEMORYINFO
69 #include <sys/select.h>
76 #include "libavutil/avassert.h"
79 #define VSYNC_PASSTHROUGH 0
83 const char program_name[] = "avconv";
84 const int program_birth_year = 2000;
86 /* select an input stream for an output stream */
87 typedef struct StreamMap {
88 int disabled; /** 1 is this mapping is disabled by a negative map */
92 int sync_stream_index;
93 char *linklabel; /** name of an output link, for mapping lavfi outputs */
97 * select an input file for an output file
99 typedef struct MetadataMap {
100 int file; ///< file index
101 char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
102 int index; ///< stream/chapter/program number
105 static const OptionDef options[];
107 static int video_discard = 0;
108 static int same_quant = 0;
109 static int do_deinterlace = 0;
110 static int intra_dc_precision = 8;
111 static int qp_hist = 0;
113 static int file_overwrite = 0;
114 static int do_benchmark = 0;
115 static int do_hex_dump = 0;
116 static int do_pkt_dump = 0;
117 static int do_pass = 0;
118 static char *pass_logfilename_prefix = NULL;
119 static int video_sync_method = VSYNC_AUTO;
120 static int audio_sync_method = 0;
121 static float audio_drift_threshold = 0.1;
122 static int copy_ts = 0;
123 static int copy_tb = 1;
124 static int opt_shortest = 0;
125 static char *vstats_filename;
126 static FILE *vstats_file;
128 static int audio_volume = 256;
130 static int exit_on_error = 0;
131 static int using_stdin = 0;
132 static int64_t video_size = 0;
133 static int64_t audio_size = 0;
134 static int64_t extra_size = 0;
135 static int nb_frames_dup = 0;
136 static int nb_frames_drop = 0;
137 static int input_sync;
139 static float dts_delta_threshold = 10;
141 static int print_stats = 1;
143 static uint8_t *audio_buf;
144 static unsigned int allocated_audio_buf_size;
145 static uint8_t *async_buf;
146 static unsigned int allocated_async_buf_size;
148 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
150 typedef struct InputFilter {
151 AVFilterContext *filter;
152 struct InputStream *ist;
153 struct FilterGraph *graph;
156 typedef struct OutputFilter {
157 AVFilterContext *filter;
158 struct OutputStream *ost;
159 struct FilterGraph *graph;
161 /* temporary storage until stream maps are processed */
162 AVFilterInOut *out_tmp;
165 typedef struct FilterGraph {
167 const char *graph_desc;
169 AVFilterGraph *graph;
171 InputFilter **inputs;
173 OutputFilter **outputs;
177 typedef struct FrameBuffer {
183 enum PixelFormat pix_fmt;
186 struct InputStream *ist;
187 struct FrameBuffer *next;
190 typedef struct InputStream {
193 int discard; /* true if stream data should be discarded */
194 int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */
196 AVFrame *decoded_frame;
198 int64_t start; /* time when read started */
199 /* predicted dts of the next packet read for this stream or (when there are
200 * several frames in a packet) of the next frame in current packet */
202 /* dts of the last packet read for this stream */
204 PtsCorrectionContext pts_ctx;
206 int is_start; /* is 1 at the start and after a discontinuity */
207 int showed_multi_packet_warning;
212 int resample_pix_fmt;
214 /* a pool of free buffers for decoded data */
215 FrameBuffer *buffer_pool;
217 /* decoded data from this stream goes into all those filters
218 * currently video only */
219 InputFilter **filters;
223 typedef struct InputFile {
224 AVFormatContext *ctx;
225 int eof_reached; /* true if eof reached */
226 int ist_index; /* index of first stream in ist_table */
227 int buffer_size; /* current total buffer size */
229 int nb_streams; /* number of stream that avconv is aware of; may be different
230 from ctx.nb_streams if new streams appear during av_read_frame() */
234 typedef struct OutputStream {
235 int file_index; /* file index */
236 int index; /* stream index in the output file */
237 int source_index; /* InputStream index */
238 AVStream *st; /* stream in the output file */
239 int encoding_needed; /* true if encoding needed for this stream */
241 /* input pts and corresponding output pts
243 // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
244 struct InputStream *sync_ist; /* input stream to sync against */
245 int64_t sync_opts; /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
246 /* pts of the first frame encoded for this stream, used for limiting
249 AVBitStreamFilterContext *bitstream_filters;
252 AVFrame *output_frame;
253 AVFrame *filtered_frame;
256 AVRational frame_rate;
260 float frame_aspect_ratio;
263 /* forced key frames */
264 int64_t *forced_kf_pts;
270 AVAudioResampleContext *avr;
271 int resample_sample_fmt;
272 int resample_channels;
273 uint64_t resample_channel_layout;
274 int resample_sample_rate;
275 AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
278 OutputFilter *filter;
283 int is_past_recording_time;
285 const char *attachment_filename;
286 int copy_initial_nonkeyframes;
288 enum PixelFormat pix_fmts[2];
292 typedef struct OutputFile {
293 AVFormatContext *ctx;
295 int ost_index; /* index of the first stream in output_streams */
296 int64_t recording_time; /* desired length of the resulting file in microseconds */
297 int64_t start_time; /* start time in microseconds */
298 uint64_t limit_filesize;
301 static InputStream **input_streams = NULL;
302 static int nb_input_streams = 0;
303 static InputFile **input_files = NULL;
304 static int nb_input_files = 0;
306 static OutputStream **output_streams = NULL;
307 static int nb_output_streams = 0;
308 static OutputFile **output_files = NULL;
309 static int nb_output_files = 0;
311 static FilterGraph **filtergraphs;
314 typedef struct OptionsContext {
315 /* input/output options */
319 SpecifierOpt *codec_names;
321 SpecifierOpt *audio_channels;
322 int nb_audio_channels;
323 SpecifierOpt *audio_sample_rate;
324 int nb_audio_sample_rate;
325 SpecifierOpt *frame_rates;
327 SpecifierOpt *frame_sizes;
329 SpecifierOpt *frame_pix_fmts;
330 int nb_frame_pix_fmts;
333 int64_t input_ts_offset;
336 SpecifierOpt *ts_scale;
338 SpecifierOpt *dump_attachment;
339 int nb_dump_attachment;
342 StreamMap *stream_maps;
344 /* first item specifies output metadata, second is input */
345 MetadataMap (*meta_data_maps)[2];
346 int nb_meta_data_maps;
347 int metadata_global_manual;
348 int metadata_streams_manual;
349 int metadata_chapters_manual;
350 const char **attachments;
353 int chapters_input_file;
355 int64_t recording_time;
356 uint64_t limit_filesize;
362 int subtitle_disable;
365 /* indexed by output file stream index */
369 SpecifierOpt *metadata;
371 SpecifierOpt *max_frames;
373 SpecifierOpt *bitstream_filters;
374 int nb_bitstream_filters;
375 SpecifierOpt *codec_tags;
377 SpecifierOpt *sample_fmts;
379 SpecifierOpt *qscale;
381 SpecifierOpt *forced_key_frames;
382 int nb_forced_key_frames;
383 SpecifierOpt *force_fps;
385 SpecifierOpt *frame_aspect_ratios;
386 int nb_frame_aspect_ratios;
387 SpecifierOpt *rc_overrides;
389 SpecifierOpt *intra_matrices;
390 int nb_intra_matrices;
391 SpecifierOpt *inter_matrices;
392 int nb_inter_matrices;
393 SpecifierOpt *top_field_first;
394 int nb_top_field_first;
395 SpecifierOpt *metadata_map;
397 SpecifierOpt *presets;
399 SpecifierOpt *copy_initial_nonkeyframes;
400 int nb_copy_initial_nonkeyframes;
401 SpecifierOpt *filters;
405 #define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
408 for (i = 0; i < o->nb_ ## name; i++) {\
409 char *spec = o->name[i].specifier;\
410 if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
411 outvar = o->name[i].u.type;\
/* Free all per-file option state in *o and restore defaults so the same
 * OptionsContext can be reused for the next input/output file on the
 * command line.
 * NOTE(review): this listing has lines elided (inner numbering is
 * non-contiguous); loop headers and closing braces are missing here. */
417 static void reset_options(OptionsContext *o)
419     const OptionDef *po = options;
422     /* all OPT_SPEC and OPT_STRING can be freed in generic way */
424         void *dst = (uint8_t*)o + po->u.off;
426         if (po->flags & OPT_SPEC) {
427             SpecifierOpt **so = dst;
/* for OPT_SPEC options the element count lives immediately after the
 * array pointer inside OptionsContext (the matching nb_* field) */
428             int i, *count = (int*)(so + 1);
429             for (i = 0; i < *count; i++) {
430                 av_freep(&(*so)[i].specifier);
431                 if (po->flags & OPT_STRING)
432                     av_freep(&(*so)[i].u.str);
436         } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
/* fields not covered by the generic OPT_SPEC/OPT_STRING walk above */
441     for (i = 0; i < o->nb_stream_maps; i++)
442         av_freep(&o->stream_maps[i].linklabel);
443     av_freep(&o->stream_maps);
444     av_freep(&o->meta_data_maps);
445     av_freep(&o->streamid_map);
/* wipe everything, then re-apply the non-zero defaults */
447     memset(o, 0, sizeof(*o));
449     o->mux_max_delay  = 0.7;
450     o->recording_time = INT64_MAX;
451     o->limit_filesize = UINT64_MAX;
452     o->chapters_input_file = INT_MAX;
/* Allocate one FrameBuffer (image planes + bookkeeping node) for the
 * pooled get_buffer implementation and store it in *pbuf.
 * Returns 0 on success or a negative AVERROR code.
 * NOTE(review): lines are elided in this listing (numbering is
 * non-contiguous); error paths and closing braces are missing here. */
458 static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
460     FrameBuffer *buf = av_mallocz(sizeof(*buf));
462     const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
463     int h_chroma_shift, v_chroma_shift;
464     int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
465     int w = s->width, h = s->height;
468         return AVERROR(ENOMEM);
/* unless the codec draws into the edge itself, pad the picture so there
 * is room for the edge area around the visible frame */
470     if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
475     avcodec_align_dimensions(s, &w, &h);
476     if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
477                               s->pix_fmt, 32)) < 0) {
481     /* XXX this shouldn't be needed, but some tests break without this line
482      * those decoders are buggy and need to be fixed.
483      * the following tests fail:
484      * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
486     memset(buf->base[0], 128, ret);
488     avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* data[] points past the edge padding; base[] keeps the allocation start */
489     for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
490         const int h_shift = i==0 ? 0 : h_chroma_shift;
491         const int v_shift = i==0 ? 0 : v_chroma_shift;
492         if (s->flags & CODEC_FLAG_EMU_EDGE)
493             buf->data[i] = buf->base[i];
495             buf->data[i] = buf->base[i] +
496                 FFALIGN((buf->linesize[i]*edge >> v_shift) +
497                         (pixel_size*edge >> h_shift), 32);
501     buf->pix_fmt = s->pix_fmt;
508 static void free_buffer_pool(InputStream *ist)
510 FrameBuffer *buf = ist->buffer_pool;
512 ist->buffer_pool = buf->next;
513 av_freep(&buf->base[0]);
515 buf = ist->buffer_pool;
519 static void unref_buffer(InputStream *ist, FrameBuffer *buf)
521 av_assert0(buf->refcount);
523 if (!buf->refcount) {
524 buf->next = ist->buffer_pool;
525 ist->buffer_pool = buf;
/* AVCodecContext.get_buffer callback: hand the decoder a frame backed by
 * the per-stream FrameBuffer pool, reallocating when the video parameters
 * (size / pixel format) changed since the buffer was pooled.
 * NOTE(review): lines are elided in this listing (numbering is
 * non-contiguous); declarations, error returns and braces are missing. */
529 static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
531     InputStream *ist = s->opaque;
/* lazily create the first pool entry on demand */
535     if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
538     buf              = ist->buffer_pool;
539     ist->buffer_pool = buf->next;
/* pooled buffer no longer matches the stream parameters: re-allocate */
541     if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
542         av_freep(&buf->base[0]);
544         if ((ret = alloc_buffer(ist, s, &buf)) < 0)
550     frame->type          = FF_BUFFER_TYPE_USER;
551     frame->extended_data = frame->data;
552     frame->pkt_pts       = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
553     frame->width         = buf->w;
554     frame->height        = buf->h;
555     frame->format        = buf->pix_fmt;
556     frame->sample_aspect_ratio = s->sample_aspect_ratio;
558     for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
559         frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
560         frame->data[i]     = buf->data[i];
561         frame->linesize[i] = buf->linesize[i];
567 static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
569 InputStream *ist = s->opaque;
570 FrameBuffer *buf = frame->opaque;
573 for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
574 frame->data[i] = NULL;
576 unref_buffer(ist, buf);
579 static void filter_release_buffer(AVFilterBuffer *fb)
581 FrameBuffer *buf = fb->priv;
583 unref_buffer(buf->ist, buf);
/* Build the pixel-format list handed to the "format" filter: either the
 * single format forced on the output stream, or a ':'-separated list of
 * every format the encoder supports. Returns a freshly allocated string
 * (caller frees) — presumably NULL means "no constraint"; confirm at the
 * (elided) return sites.
 * NOTE(review): lines are elided here (numbering is non-contiguous). */
586 static char *choose_pixel_fmts(OutputStream *ost)
588     if (ost->st->codec->pix_fmt != PIX_FMT_NONE) {
589         return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt));
590     } else if (ost->enc->pix_fmts) {
591         const enum PixelFormat *p;
/* build the list in a dynamic AVIO buffer to avoid manual sizing */
592         AVIOContext *s = NULL;
596         if (avio_open_dyn_buf(&s) < 0)
599         for (p = ost->enc->pix_fmts; *p != PIX_FMT_NONE; p++)
600             avio_printf(s, "%s:", av_get_pix_fmt_name(*p));
601         len = avio_close_dyn_buf(s, &ret);
/* Set up the simple (1 input -> 1 output) video filter graph for an output
 * stream: a "buffer" source fed from the input stream, optional "scale"
 * and "format" filters, the user's -vf chain (ost->avfilter) if any, and a
 * "buffersink" output. Returns 0 or a negative AVERROR code.
 * NOTE(review): this listing has lines elided (numbering is
 * non-contiguous); error checks, else-branches and braces are missing. */
608 static int configure_video_filters(FilterGraph *fg)
610     InputStream *ist = fg->inputs[0]->ist;
611     OutputStream *ost = fg->outputs[0]->ost;
612     AVFilterContext *in_filter, *out_filter, *filter;
613     AVCodecContext *codec = ost->st->codec;
615     AVRational sample_aspect_ratio;
/* rebuild the graph from scratch on (re)configuration */
619     avfilter_graph_free(&fg->graph);
620     fg->graph = avfilter_graph_alloc();
/* container-level SAR wins over codec-level SAR when set */
622     if (ist->st->sample_aspect_ratio.num) {
623         sample_aspect_ratio = ist->st->sample_aspect_ratio;
625         sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
/* buffer source args: w:h:pix_fmt:tb_num:tb_den:sar_num:sar_den */
627     snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
628              ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
629              sample_aspect_ratio.num, sample_aspect_ratio.den);
631     ret = avfilter_graph_create_filter(&fg->inputs[0]->filter,
632                                        avfilter_get_by_name("buffer"),
633                                        "src", args, NULL, fg->graph);
636     ret = avfilter_graph_create_filter(&fg->outputs[0]->filter,
637                                        avfilter_get_by_name("buffersink"),
638                                        "out", NULL, NULL, fg->graph);
641     in_filter  = fg->inputs[0]->filter;
642     out_filter = fg->outputs[0]->filter;
/* insert a scaler when the encoder demands a fixed frame size */
644     if (codec->width || codec->height) {
645         snprintf(args, 255, "%d:%d:flags=0x%X",
648                  (unsigned)ost->sws_flags);
649         if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
650                                                 NULL, args, NULL, fg->graph)) < 0)
652         if ((ret = avfilter_link(in_filter, 0, filter, 0)) < 0)
/* constrain the sink's pixel format(s) to what the encoder accepts */
657     if ((pix_fmts = choose_pixel_fmts(ost))) {
658         if ((ret = avfilter_graph_create_filter(&filter,
659                                                 avfilter_get_by_name("format"),
660                                                 "format", pix_fmts, NULL,
663         if ((ret = avfilter_link(filter, 0, out_filter, 0)) < 0)
670     snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
671     fg->graph->scale_sws_opts = av_strdup(args);
/* parse the user-specified -vf chain between our endpoints */
674         AVFilterInOut *outputs = avfilter_inout_alloc();
675         AVFilterInOut *inputs  = avfilter_inout_alloc();
677         outputs->name    = av_strdup("in");
678         outputs->filter_ctx = in_filter;
679         outputs->pad_idx = 0;
680         outputs->next    = NULL;
682         inputs->name    = av_strdup("out");
683         inputs->filter_ctx = out_filter;
687         if ((ret = avfilter_graph_parse(fg->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
/* no user chain: connect source directly to sink */
690         if ((ret = avfilter_link(in_filter, 0, out_filter, 0)) < 0)
694     if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
697     ost->filter = fg->outputs[0];
/* Allocate a FilterGraph with exactly one input (ist) and one output (ost),
 * register the input with the stream's filter list, and append the graph to
 * the global filtergraphs array. Returns the new graph (error paths elided).
 * NOTE(review): lines are elided here (numbering is non-contiguous). */
702 static FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
704     FilterGraph *fg = av_mallocz(sizeof(*fg));
708     fg->index = nb_filtergraphs;
710     fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs), &fg->nb_outputs,
712     if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
714     fg->outputs[0]->ost   = ost;
715     fg->outputs[0]->graph = fg;
717     fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs), &fg->nb_inputs,
719     if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
721     fg->inputs[0]->ist   = ist;
722     fg->inputs[0]->graph = fg;
/* let the input stream know it feeds this graph */
724     ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
725                               &ist->nb_filters, ist->nb_filters + 1);
726     ist->filters[ist->nb_filters - 1] = fg->inputs[0];
728     filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
729                               &nb_filtergraphs, nb_filtergraphs + 1);
730     filtergraphs[nb_filtergraphs - 1] = fg;
/* Bind one unconnected input pad of a complex filtergraph to an input
 * stream: a labeled pad ("[file:spec]") is resolved via the stream
 * specifier, an unlabeled pad takes the first unused stream of the right
 * type. The chosen stream is marked for decoding and linked to the graph.
 * NOTE(review): lines are elided here (numbering is non-contiguous);
 * exit_program() calls, else-branches and braces are missing. */
735 static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
738     enum AVMediaType type = in->filter_ctx->input_pads[in->pad_idx].type;
741     // TODO: support other filter types
742     if (type != AVMEDIA_TYPE_VIDEO) {
743         av_log(NULL, AV_LOG_FATAL, "Only video filters supported currently.\n");
/* labeled pad: "file_index[:stream_specifier]" */
751         int file_idx = strtol(in->name, &p, 0);
753         if (file_idx < 0 || file_idx >= nb_input_files) {
754             av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtegraph description %s.\n",
755                    file_idx, fg->graph_desc);
758         s = input_files[file_idx]->ctx;
760         for (i = 0; i < s->nb_streams; i++) {
761             if (s->streams[i]->codec->codec_type != type)
763             if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
769             av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
770                    "matches no streams.\n", p, fg->graph_desc);
773         ist = input_streams[input_files[file_idx]->ist_index + st->index];
775         /* find the first unused stream of corresponding type */
776         for (i = 0; i < nb_input_streams; i++) {
777             ist = input_streams[i];
778             if (ist->st->codec->codec_type == type && ist->discard)
781         if (i == nb_input_streams) {
782             av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
783                    "unlabeled input pad %d on filter %s", in->pad_idx,
784                    in->filter_ctx->name);
/* the stream now has a consumer: decode it, do not discard packets */
789     ist->decoding_needed = 1;
790     ist->st->discard = AVDISCARD_NONE;
792     fg->inputs = grow_array(fg->inputs, sizeof(*fg->inputs),
793                             &fg->nb_inputs, fg->nb_inputs + 1);
794     if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
796     fg->inputs[fg->nb_inputs - 1]->ist   = ist;
797     fg->inputs[fg->nb_inputs - 1]->graph = fg;
799     ist->filters = grow_array(ist->filters, sizeof(*ist->filters),
800                               &ist->nb_filters, ist->nb_filters + 1);
801     ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
/* Attach a buffersink (plus optional scale/format filters) to one output
 * pad of a complex filtergraph and link it to the output stream's encoder
 * constraints. Returns 0 or a negative AVERROR code.
 * NOTE(review): lines are elided here (numbering is non-contiguous);
 * error gotos and closing braces are missing from this view. */
804 static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
807     AVCodecContext *codec  = ofilter->ost->st->codec;
808     AVFilterContext *last_filter = out->filter_ctx;
809     int pad_idx = out->pad_idx;
813     ret = avfilter_graph_create_filter(&ofilter->filter,
814                                        avfilter_get_by_name("buffersink"),
815                                        "out", NULL, pix_fmts, fg->graph);
/* force the encoder's frame size with a scaler when one is set */
819     if (codec->width || codec->height) {
821         AVFilterContext *filter;
823         snprintf(args, sizeof(args), "%d:%d:flags=0x%X",
826                  (unsigned)ofilter->ost->sws_flags);
827         if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
828                                                 NULL, args, NULL, fg->graph)) < 0)
830         if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
833         last_filter = filter;
/* constrain pixel formats to what the encoder can take */
837     if ((pix_fmts = choose_pixel_fmts(ofilter->ost))) {
838         AVFilterContext *filter;
839         if ((ret = avfilter_graph_create_filter(&filter,
840                                                 avfilter_get_by_name("format"),
841                                                 "format", pix_fmts, NULL,
844         if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
847         last_filter = filter;
852     if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
/* Parse and configure a -filter_complex graph: create buffer sources for
 * every input pad, finish output pads whose stream mapping is known, and
 * queue still-unmapped outputs on out_tmp for later processing.
 * Returns 0 or a negative AVERROR code.
 * NOTE(review): lines are elided here (numbering is non-contiguous);
 * the if/else around the two output loops and braces are missing. */
858 static int configure_complex_filter(FilterGraph *fg)
860     AVFilterInOut *inputs, *outputs, *cur;
/* 'init' is true only the first time this graph is configured */
861     int ret, i, init = !fg->graph;
863     avfilter_graph_free(&fg->graph);
864     if (!(fg->graph = avfilter_graph_alloc()))
865         return AVERROR(ENOMEM);
867     if ((ret = avfilter_graph_parse2(fg->graph, fg->graph_desc, &inputs, &outputs)) < 0)
870     for (cur = inputs; init && cur; cur = cur->next)
871         init_input_filter(fg, cur);
/* create a buffer source for each graph input and link it in */
873     for (cur = inputs, i = 0; cur; cur = cur->next, i++) {
874         InputFilter *ifilter = fg->inputs[i];
875         InputStream     *ist = ifilter->ist;
879         sar = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
880                                                  ist->st->codec->sample_aspect_ratio;
881         snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
882                  ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
885         if ((ret = avfilter_graph_create_filter(&ifilter->filter,
886                                                 avfilter_get_by_name("buffer"), cur->name,
887                                                 args, NULL, fg->graph)) < 0)
889         if ((ret = avfilter_link(ifilter->filter, 0,
890                                  cur->filter_ctx, cur->pad_idx)) < 0)
893     avfilter_inout_free(&inputs);
896     /* we already know the mappings between lavfi outputs and output streams,
897      * so we can finish the setup */
898     for (cur = outputs, i = 0; cur; cur = cur->next, i++)
899         configure_output_filter(fg, fg->outputs[i], cur);
900     avfilter_inout_free(&outputs);
902     if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
905     /* wait until output mappings are processed */
906     for (cur = outputs; cur;) {
907         fg->outputs = grow_array(fg->outputs, sizeof(*fg->outputs),
908                                  &fg->nb_outputs, fg->nb_outputs + 1);
909         if (!(fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]))))
911         fg->outputs[fg->nb_outputs - 1]->graph   = fg;
912         fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
914         fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
921 static int configure_complex_filters(void)
925 for (i = 0; i < nb_filtergraphs; i++)
926 if (!filtergraphs[i]->graph &&
927 (ret = configure_complex_filter(filtergraphs[i])) < 0)
932 static int configure_filtergraph(FilterGraph *fg)
934 return fg->graph_desc ? configure_complex_filter(fg) : configure_video_filters(fg);
937 static int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
940 for (i = 0; i < fg->nb_inputs; i++)
941 if (fg->inputs[i]->ist == ist)
946 static void term_exit(void)
948 av_log(NULL, AV_LOG_QUIET, "");
951 static volatile int received_sigterm = 0;
952 static volatile int received_nb_signals = 0;
955 sigterm_handler(int sig)
957 received_sigterm = sig;
958 received_nb_signals++;
962 static void term_init(void)
964 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
965 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
967 signal(SIGXCPU, sigterm_handler);
971 static int decode_interrupt_cb(void *ctx)
973 return received_nb_signals > 1;
976 static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown and process exit: free all filtergraphs, output files
 * and streams, input files and streams, the vstats file, global arrays and
 * scratch buffers, then report a received signal (if any) and exit.
 * NOTE(review): this listing has lines elided (numbering is
 * non-contiguous); loop braces, avio_close() and exit() are missing. */
978 void exit_program(int ret)
982     for (i = 0; i < nb_filtergraphs; i++) {
983         avfilter_graph_free(&filtergraphs[i]->graph);
984         for (j = 0; j < filtergraphs[i]->nb_inputs; j++)
985             av_freep(&filtergraphs[i]->inputs[j]);
986         av_freep(&filtergraphs[i]->inputs);
987         for (j = 0; j < filtergraphs[i]->nb_outputs; j++)
988             av_freep(&filtergraphs[i]->outputs[j]);
989         av_freep(&filtergraphs[i]->outputs);
990         av_freep(&filtergraphs[i]);
992     av_freep(&filtergraphs);
/* close output files (their AVIOContexts unless AVFMT_NOFILE) */
995     for (i = 0; i < nb_output_files; i++) {
996         AVFormatContext *s = output_files[i]->ctx;
997         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
999         avformat_free_context(s);
1000         av_dict_free(&output_files[i]->opts);
1001         av_freep(&output_files[i]);
1003     for (i = 0; i < nb_output_streams; i++) {
1004         AVBitStreamFilterContext *bsfc = output_streams[i]->bitstream_filters;
1006             AVBitStreamFilterContext *next = bsfc->next;
1007             av_bitstream_filter_close(bsfc);
1010         output_streams[i]->bitstream_filters = NULL;
1012         if (output_streams[i]->output_frame) {
1013             AVFrame *frame = output_streams[i]->output_frame;
/* extended_data is only separately allocated when it differs from data */
1014             if (frame->extended_data != frame->data)
1015                 av_freep(&frame->extended_data);
1019         av_freep(&output_streams[i]->avfilter);
1020         av_freep(&output_streams[i]->filtered_frame);
1021         av_freep(&output_streams[i]);
1023     for (i = 0; i < nb_input_files; i++) {
1024         avformat_close_input(&input_files[i]->ctx);
1025         av_freep(&input_files[i]);
1027     for (i = 0; i < nb_input_streams; i++) {
1028         av_freep(&input_streams[i]->decoded_frame);
1029         av_dict_free(&input_streams[i]->opts);
1030         free_buffer_pool(input_streams[i]);
1031         av_freep(&input_streams[i]->filters);
1032         av_freep(&input_streams[i]);
1036         fclose(vstats_file);
1037     av_free(vstats_filename);
1039     av_freep(&input_streams);
1040     av_freep(&input_files);
1041     av_freep(&output_streams);
1042     av_freep(&output_files);
1046     allocated_audio_buf_size = 0;
1048     allocated_async_buf_size = 0;
1051     avformat_network_deinit();
1053     if (received_sigterm) {
1054         av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
1055                (int) received_sigterm);
1062 static void assert_avoptions(AVDictionary *m)
1064 AVDictionaryEntry *t;
1065 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1066 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
1071 static void assert_codec_experimental(AVCodecContext *c, int encoder)
1073 const char *codec_string = encoder ? "encoder" : "decoder";
1075 if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
1076 c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1077 av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
1078 "results.\nAdd '-strict experimental' if you want to use it.\n",
1079 codec_string, c->codec->name);
1080 codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
1081 if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
1082 av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
1083 codec_string, codec->name);
1088 static void choose_sample_fmt(AVStream *st, AVCodec *codec)
1090 if (codec && codec->sample_fmts) {
1091 const enum AVSampleFormat *p = codec->sample_fmts;
1092 for (; *p != -1; p++) {
1093 if (*p == st->codec->sample_fmt)
1097 av_log(NULL, AV_LOG_WARNING,
1098 "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
1099 av_get_sample_fmt_name(st->codec->sample_fmt),
1101 av_get_sample_fmt_name(codec->sample_fmts[0]));
1102 st->codec->sample_fmt = codec->sample_fmts[0];
1108 * Update the requested input sample format based on the output sample format.
1109 * This is currently only used to request float output from decoders which
1110 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
1111 * Ideally this will be removed in the future when decoders do not do format
1112 * conversion and only output in their native format.
1114 static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
1115 AVCodecContext *enc)
1117 /* if sample formats match or a decoder sample format has already been
1118 requested, just return */
1119 if (enc->sample_fmt == dec->sample_fmt ||
1120 dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
1123 /* if decoder supports more than one output format */
1124 if (dec_codec && dec_codec->sample_fmts &&
1125 dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
1126 dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
1127 const enum AVSampleFormat *p;
1128 int min_dec = -1, min_inc = -1;
1130 /* find a matching sample format in the encoder */
1131 for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
1132 if (*p == enc->sample_fmt) {
1133 dec->request_sample_fmt = *p;
1135 } else if (*p > enc->sample_fmt) {
1136 min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
1138 min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
1141 /* if none match, provide the one that matches quality closest */
1142 dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
1143 enc->sample_fmt - min_dec;
1147 static void choose_sample_rate(AVStream *st, AVCodec *codec)
1149 if (codec && codec->supported_samplerates) {
1150 const int *p = codec->supported_samplerates;
1152 int best_dist = INT_MAX;
1154 int dist = abs(st->codec->sample_rate - *p);
1155 if (dist < best_dist) {
1161 av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
1163 st->codec->sample_rate = best;
1168 get_sync_ipts(const OutputStream *ost, int64_t pts)
1170 OutputFile *of = output_files[ost->file_index];
1171 return (double)(pts - of->start_time) / AV_TIME_BASE;
/* Run the stream's bitstream-filter chain over the packet, enforce the
 * per-stream -frames limit for non-video streams, and hand the packet to
 * the muxer via av_interleaved_write_frame().
 * NOTE(review): lines are elided here (numbering is non-contiguous); the
 * bsfc loop header, early return, error handling and braces are missing. */
1174 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
1176     AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
1177     AVCodecContext          *avctx = ost->st->codec;
1181      * Audio encoders may split the packets -- #frames in != #packets out.
1182      * But there is no reordering, so we can limit the number of output packets
1183      * by simply dropping them here.
1184      * Counting encoded video frames needs to be done separately because of
1185      * reordering, see do_video_out()
1187     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
1188         if (ost->frame_number >= ost->max_frames) {
1189             av_free_packet(pkt);
1192         ost->frame_number++;
/* apply each bitstream filter in turn; a > 0 return means the filter
 * allocated a new payload that the packet must now own */
1196         AVPacket new_pkt = *pkt;
1197         int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
1198                                            &new_pkt.data, &new_pkt.size,
1199                                            pkt->data, pkt->size,
1200                                            pkt->flags & AV_PKT_FLAG_KEY);
1202             av_free_packet(pkt);
1203             new_pkt.destruct = av_destruct_packet;
1205             av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
1206                    bsfc->filter->name, pkt->stream_index,
1207                    avctx->codec ? avctx->codec->name : "copy");
1217     pkt->stream_index = ost->index;
1218     ret = av_interleaved_write_frame(s, pkt);
1220         print_error("av_interleaved_write_frame()", ret);
1225 static int check_recording_time(OutputStream *ost)
1227 OutputFile *of = output_files[ost->file_index];
1229 if (of->recording_time != INT64_MAX &&
1230 av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
1231 AV_TIME_BASE_Q) >= 0) {
1232 ost->is_past_recording_time = 1;
/* Fill in missing or invalid channel layouts on the decoder and encoder
 * contexts: discard a layout inconsistent with the channel count, borrow
 * the other side's layout when channel counts match, otherwise fall back
 * to av_get_default_channel_layout(). Logs what was guessed.
 * NOTE(review): lines are elided here (numbering is non-contiguous);
 * else-branches, exit calls and braces are missing from this view. */
1238 static void get_default_channel_layouts(OutputStream *ost, InputStream *ist)
1240     char layout_name[256];
1241     AVCodecContext *enc = ost->st->codec;
1242     AVCodecContext *dec = ist->st->codec;
/* a layout whose channel count disagrees with 'channels' is bogus */
1244     if (dec->channel_layout &&
1245         av_get_channel_layout_nb_channels(dec->channel_layout) != dec->channels) {
1246         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1247                                      dec->channels, dec->channel_layout);
1248         av_log(NULL, AV_LOG_ERROR, "New channel layout (%s) is invalid\n",
1250         dec->channel_layout = 0;
1252     if (!dec->channel_layout) {
1253         if (enc->channel_layout && dec->channels == enc->channels) {
1254             dec->channel_layout = enc->channel_layout;
1256             dec->channel_layout = av_get_default_channel_layout(dec->channels);
1258             if (!dec->channel_layout) {
1259                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1260                        "layout for Input Stream #%d.%d\n", ist->file_index,
1265         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1266                                      dec->channels, dec->channel_layout);
1267         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1268                "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
/* mirror of the block above, for the encoder side */
1270     if (!enc->channel_layout) {
1271         if (dec->channels == enc->channels) {
1272             enc->channel_layout = dec->channel_layout;
1275             enc->channel_layout = av_get_default_channel_layout(enc->channels);
1277             if (!enc->channel_layout) {
1278                 av_log(NULL, AV_LOG_FATAL, "Unable to find default channel layout "
1279                        "for Output Stream #%d.%d\n", ost->file_index,
1283         av_get_channel_layout_string(layout_name, sizeof(layout_name),
1284                                      enc->channels, enc->channel_layout);
1285         av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Output Stream "
1286                "#%d.%d : %s\n", ost->file_index, ost->st->index, layout_name);
1290 static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
1292 int fill_char = 0x00;
1293 if (sample_fmt == AV_SAMPLE_FMT_U8)
1295 memset(buf, fill_char, size);
/* Wrap buf/buf_size in the stream's reusable AVFrame, stamp it with the
 * running sample counter (sync_opts), encode it with
 * avcodec_encode_audio2(), rescale the packet timestamps to the stream
 * time base and mux it via write_frame(). Honors the -t recording window.
 * NOTE(review): lines are elided here (numbering is non-contiguous);
 * the buf != NULL guards, exits, returns and braces are missing. */
1298 static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
1299                               const uint8_t *buf, int buf_size)
1301     AVCodecContext *enc = ost->st->codec;
1302     AVFrame *frame = NULL;
1304     int ret, got_packet;
1306     av_init_packet(&pkt);
/* lazily allocate one reusable frame per output stream */
1311     if (!ost->output_frame) {
1312         ost->output_frame = avcodec_alloc_frame();
1313         if (!ost->output_frame) {
1314             av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
1318     frame = ost->output_frame;
1319     if (frame->extended_data != frame->data)
1320         av_freep(&frame->extended_data);
1321     avcodec_get_frame_defaults(frame);
1323     frame->nb_samples  = buf_size /
1324                          (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
1325     if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
1326                                         buf, buf_size, 1)) < 0) {
1327         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1331     if (!check_recording_time(ost))
/* sync_opts counts samples for audio; advance it by this frame */
1334     frame->pts = ost->sync_opts;
1335     ost->sync_opts += frame->nb_samples;
1339     if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
1340         av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
/* rescale encoder time base timestamps to the muxer's stream time base */
1345         if (pkt.pts != AV_NOPTS_VALUE)
1346             pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1347         if (pkt.dts != AV_NOPTS_VALUE)
1348             pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1349         if (pkt.duration > 0)
1350             pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
1352         write_frame(s, &pkt, ost);
1354         audio_size += pkt.size;
/*
 * (Re)allocate the global audio output buffer ('audio_buf') so it can
 * hold 'nb_samples' decoded samples after resampling to the encoder's
 * rate/format, with generous safety margin.  On success *buf_linesize
 * receives the plane line size.  Returns 0 on success or a negative
 * AVERROR code.  (Excerpt note: the divisor of the rate-conversion
 * expression and some intermediate lines are omitted from this view.)
 */
1360 static int alloc_audio_output_buf(AVCodecContext *dec, AVCodecContext *enc,
1361 int nb_samples, int *buf_linesize)
1363 int64_t audio_buf_samples;
1366 /* calculate required number of samples to allocate */
1367 audio_buf_samples = ((int64_t)nb_samples * enc->sample_rate + dec->sample_rate) /
1369 audio_buf_samples = 4 * audio_buf_samples + 16; // safety factors for resampling
1370 audio_buf_samples = FFMAX(audio_buf_samples, enc->frame_size);
/* reject sizes that cannot be represented as an int buffer size */
1371 if (audio_buf_samples > INT_MAX)
1372 return AVERROR(EINVAL);
1374 audio_buf_size = av_samples_get_buffer_size(buf_linesize, enc->channels,
1376 enc->sample_fmt, 0);
1377 if (audio_buf_size < 0)
1378 return audio_buf_size;
/* grow-only allocation: reuses the buffer when already large enough */
1380 av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
1382 return AVERROR(ENOMEM);
/*
 * Take one decoded audio frame from 'ist', optionally resample it and
 * apply A/V-sync drift correction, then feed fixed-size chunks (or the
 * whole buffer, for variable-frame-size codecs) to encode_audio_frame().
 * Uses several globals: audio_sync_method, audio_drift_threshold,
 * audio_buf/allocated_audio_buf_size, async_buf/allocated_async_buf_size.
 * (Excerpt note: many interior lines — exit() calls, closing braces, the
 * non-resample else-branch assigning buftmp — are omitted from this view.)
 */
1387 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
1388 InputStream *ist, AVFrame *decoded_frame)
1392 int size_out, frame_bytes, resample_changed, ret;
1393 AVCodecContext *enc = ost->st->codec;
1394 AVCodecContext *dec = ist->st->codec;
1395 int osize = av_get_bytes_per_sample(enc->sample_fmt);
1396 int isize = av_get_bytes_per_sample(dec->sample_fmt);
1397 uint8_t *buf = decoded_frame->data[0];
/* byte size of the decoded interleaved input */
1398 int size = decoded_frame->nb_samples * dec->channels * isize;
1399 int out_linesize = 0;
1400 int buf_linesize = decoded_frame->linesize[0];
1402 get_default_channel_layouts(ost, ist);
1404 if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples, &out_linesize) < 0) {
1405 av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
/* resampling is needed whenever any audio parameter differs, or when
 * drift compensation (audio_sync_method > 1) is requested */
1409 if (audio_sync_method > 1 ||
1410 enc->channels != dec->channels ||
1411 enc->channel_layout != dec->channel_layout ||
1412 enc->sample_rate != dec->sample_rate ||
1413 dec->sample_fmt != enc->sample_fmt)
1414 ost->audio_resample = 1;
/* detect mid-stream input parameter changes since the last frame */
1416 resample_changed = ost->resample_sample_fmt != dec->sample_fmt ||
1417 ost->resample_channels != dec->channels ||
1418 ost->resample_channel_layout != dec->channel_layout ||
1419 ost->resample_sample_rate != dec->sample_rate;
1421 if ((ost->audio_resample && !ost->avr) || resample_changed) {
1422 if (resample_changed) {
1423 av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:0x%"PRIx64" to rate:%d fmt:%s ch:%d chl:0x%"PRIx64"\n",
1424 ist->file_index, ist->st->index,
1425 ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt),
1426 ost->resample_channels, ost->resample_channel_layout,
1427 dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt),
1428 dec->channels, dec->channel_layout);
1429 ost->resample_sample_fmt = dec->sample_fmt;
1430 ost->resample_channels = dec->channels;
1431 ost->resample_channel_layout = dec->channel_layout;
1432 ost->resample_sample_rate = dec->sample_rate;
/* tear down the stale resampler before deciding whether a new one is needed */
1434 avresample_close(ost->avr);
1436 /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
1437 if (audio_sync_method <= 1 &&
1438 ost->resample_sample_fmt == enc->sample_fmt &&
1439 ost->resample_channels == enc->channels &&
1440 ost->resample_channel_layout == enc->channel_layout &&
1441 ost->resample_sample_rate == enc->sample_rate) {
1442 ost->audio_resample = 0;
1443 } else if (ost->audio_resample) {
1445 ost->avr = avresample_alloc_context();
1447 av_log(NULL, AV_LOG_FATAL, "Error allocating context for libavresample\n");
/* configure the resampler from decoder (in) and encoder (out) parameters */
1452 av_opt_set_int(ost->avr, "in_channel_layout", dec->channel_layout, 0);
1453 av_opt_set_int(ost->avr, "in_sample_fmt", dec->sample_fmt, 0);
1454 av_opt_set_int(ost->avr, "in_sample_rate", dec->sample_rate, 0);
1455 av_opt_set_int(ost->avr, "out_channel_layout", enc->channel_layout, 0);
1456 av_opt_set_int(ost->avr, "out_sample_fmt", enc->sample_fmt, 0);
1457 av_opt_set_int(ost->avr, "out_sample_rate", enc->sample_rate, 0);
1458 if (audio_sync_method > 1)
1459 av_opt_set_int(ost->avr, "force_resampling", 1, 0);
1461 /* if both the input and output formats are s16 or u8, use s16 as
1462 the internal sample format */
1463 if (av_get_bytes_per_sample(dec->sample_fmt) <= 2 &&
1464 av_get_bytes_per_sample(enc->sample_fmt) <= 2) {
1465 av_opt_set_int(ost->avr, "internal_sample_fmt", AV_SAMPLE_FMT_S16P, 0);
1468 ret = avresample_open(ost->avr);
1470 av_log(NULL, AV_LOG_FATAL, "Error opening libavresample\n");
/* ---- A/V sync: measure drift in encoder-rate samples ---- */
1476 if (audio_sync_method > 0) {
1477 double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
1478 av_fifo_size(ost->fifo) / (enc->channels * osize);
1479 int idelta = delta * dec->sample_rate / enc->sample_rate;
1480 int byte_delta = idelta * isize * dec->channels;
1482 // FIXME resample delay
1483 if (fabs(delta) > 50) {
1484 if (ist->is_start || fabs(delta) > audio_drift_threshold*enc->sample_rate) {
/* negative drift: drop samples from the front of the input */
1485 if (byte_delta < 0) {
1486 byte_delta = FFMAX(byte_delta, -size);
1489 av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
1490 -byte_delta / (isize * dec->channels));
/* positive drift: prepend silence in a separately grown buffer */
1495 av_fast_malloc(&async_buf, &allocated_async_buf_size,
1498 av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
1502 if (alloc_audio_output_buf(dec, enc, decoded_frame->nb_samples + idelta, &out_linesize) < 0) {
1503 av_log(NULL, AV_LOG_FATAL, "Error allocating audio buffer\n");
1508 generate_silence(async_buf, dec->sample_fmt, byte_delta);
1509 memcpy(async_buf + byte_delta, buf, size);
1512 buf_linesize = allocated_async_buf_size;
1513 av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
/* small drift: let libavresample stretch/squeeze instead */
1515 } else if (audio_sync_method > 1) {
1516 int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
1517 av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
1518 delta, comp, enc->sample_rate);
1519 // fprintf(stderr, "drift:%f len:%d opts:%"PRId64" ipts:%"PRId64" fifo:%d\n", delta, -1, ost->sync_opts, (int64_t)(get_sync_ipts(ost) * enc->sample_rate), av_fifo_size(ost->fifo)/(ost->st->codec->channels * 2));
1520 avresample_set_compensation(ost->avr, comp, enc->sample_rate);
/* sync disabled: slave the output clock directly to input timestamps */
1523 } else if (audio_sync_method == 0)
1524 ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
1525 av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong
1527 if (ost->audio_resample) {
1529 size_out = avresample_convert(ost->avr, (void **)&buftmp,
1530 allocated_audio_buf_size, out_linesize,
1531 (void **)&buf, buf_linesize,
1532 size / (dec->channels * isize));
1533 size_out = size_out * enc->channels * osize;
1539 /* now encode as many frames as possible */
1540 if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
1541 /* output resampled raw samples */
1542 if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
1543 av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
1546 av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);
/* fixed-frame-size codec: drain the FIFO one full frame at a time */
1548 frame_bytes = enc->frame_size * osize * enc->channels;
1550 while (av_fifo_size(ost->fifo) >= frame_bytes) {
1551 av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
1552 encode_audio_frame(s, ost, audio_buf, frame_bytes);
/* variable-frame-size codec: encode everything in one call */
1555 encode_audio_frame(s, ost, buftmp, size_out);
/*
 * Optionally deinterlace a decoded video frame in place (controlled by
 * the global 'do_deinterlace').  When deinterlacing succeeds the result
 * is copied back over *picture; *bufp receives the temporary buffer so
 * the caller can free it after use.  (Excerpt note: the malloc failure
 * path and the *bufp assignment are on lines omitted from this view.)
 */
1559 static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
1561 AVCodecContext *dec;
1562 AVPicture *picture2;
1563 AVPicture picture_tmp;
1566 dec = ist->st->codec;
1568 /* deinterlace : must be done before any resize */
1569 if (do_deinterlace) {
1572 /* create temporary picture */
1573 size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
1574 buf = av_malloc(size);
1578 picture2 = &picture_tmp;
1579 avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
1581 if (avpicture_deinterlace(picture2, picture,
1582 dec->pix_fmt, dec->width, dec->height) < 0) {
1583 /* if error, do not deinterlace */
1584 av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
/* shallow struct copy: only plane pointers/linesizes, not pixel data */
1593 if (picture != picture2)
1594 *picture = *picture2;
/*
 * Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles
 * are encoded twice — one packet to draw and one to clear — hence the
 * 'nb' loop.  'pts' is in the input stream's time base.  (Excerpt note:
 * the function's parameter list and some lines, e.g. the nb assignment,
 * are omitted from this view.)
 */
1598 static void do_subtitle_out(AVFormatContext *s,
1604 static uint8_t *subtitle_out = NULL;
1605 int subtitle_out_max_size = 1024 * 1024;
1606 int subtitle_out_size, nb, i;
1607 AVCodecContext *enc;
1610 if (pts == AV_NOPTS_VALUE) {
1611 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1617 enc = ost->st->codec;
/* lazily allocate the shared 1 MiB encode buffer on first use */
1619 if (!subtitle_out) {
1620 subtitle_out = av_malloc(subtitle_out_max_size);
1623 /* Note: DVB subtitle need one packet to draw them and one other
1624 packet to clear them */
1625 /* XXX: signal it in the codec context ? */
1626 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
1631 for (i = 0; i < nb; i++) {
1632 ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
1633 if (!check_recording_time(ost))
1636 sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
1637 // start_display_time is required to be 0
1638 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1639 sub->end_display_time -= sub->start_display_time;
1640 sub->start_display_time = 0;
1641 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1642 subtitle_out_max_size, sub);
1643 if (subtitle_out_size < 0) {
1644 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1648 av_init_packet(&pkt);
1649 pkt.data = subtitle_out;
1650 pkt.size = subtitle_out_size;
1651 pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
1652 if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
1653 /* XXX: the pts correction is handled here. Maybe handling
1654 it in the codec would be better */
/* 90: conversion from display-time milliseconds to 90 kHz MPEG ticks */
1656 pkt.pts += 90 * sub->start_display_time;
1658 pkt.pts += 90 * sub->end_display_time;
1660 write_frame(s, &pkt, ost);
/*
 * Encode one video frame, applying the selected frame-rate conversion
 * policy (CFR duplicates/drops frames, VFR retimes, PASSTHROUGH keeps
 * timestamps).  Writes the encoded packet size to *frame_size for the
 * vstats log.  (Excerpt note: the parameter list, several branch bodies
 * and closing braces are omitted from this sampled view.)
 */
1664 static void do_video_out(AVFormatContext *s,
1666 AVFrame *in_picture,
1667 int *frame_size, float quality)
1669 int nb_frames, i, ret, format_video_sync;
1670 AVCodecContext *enc;
1671 double sync_ipts, delta;
1673 enc = ost->st->codec;
/* drift between the desired and the actual output frame count */
1675 sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);
1676 delta = sync_ipts - ost->sync_opts;
1678 /* by default, we output a single frame */
/* resolve VSYNC_AUTO from the muxer's timestamp capabilities */
1683 format_video_sync = video_sync_method;
1684 if (format_video_sync == VSYNC_AUTO)
1685 format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
1686 (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
1688 switch (format_video_sync) {
1690 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1693 else if (delta > 1.1)
/* CFR: duplicate frames when we fell behind */
1694 nb_frames = lrintf(delta);
1699 else if (delta > 0.6)
1700 ost->sync_opts = lrint(sync_ipts);
1702 case VSYNC_PASSTHROUGH:
1703 ost->sync_opts = lrint(sync_ipts);
/* never exceed the user-requested frame limit */
1709 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1710 if (nb_frames == 0) {
1712 av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
1714 } else if (nb_frames > 1) {
1715 nb_frames_dup += nb_frames - 1;
1716 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1719 if (!ost->frame_number)
1720 ost->first_pts = ost->sync_opts;
1722 /* duplicates frame if needed */
1723 for (i = 0; i < nb_frames; i++) {
1725 av_init_packet(&pkt);
1729 if (!check_recording_time(ost))
/* raw-video shortcut: pass the AVPicture through without encoding */
1732 if (s->oformat->flags & AVFMT_RAWPICTURE &&
1733 enc->codec->id == CODEC_ID_RAWVIDEO) {
1734 /* raw pictures are written as AVPicture structure to
1735 avoid any copies. We support temporarily the older
1737 enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
1738 enc->coded_frame->top_field_first = in_picture->top_field_first;
1739 pkt.data = (uint8_t *)in_picture;
1740 pkt.size = sizeof(AVPicture);
1741 pkt.pts = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
1742 pkt.flags |= AV_PKT_FLAG_KEY;
1744 write_frame(s, &pkt, ost);
1747 AVFrame big_picture;
/* shallow copy so per-frame metadata can be tweaked before encoding */
1749 big_picture = *in_picture;
1750 /* better than nothing: use input picture interlaced
1752 big_picture.interlaced_frame = in_picture->interlaced_frame;
1753 if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
1754 if (ost->top_field_first == -1)
1755 big_picture.top_field_first = in_picture->top_field_first;
1757 big_picture.top_field_first = !!ost->top_field_first;
1760 /* handles same_quant here. This is not correct because it may
1761 not be a global option */
1762 big_picture.quality = quality;
1763 if (!enc->me_threshold)
1764 big_picture.pict_type = 0;
1765 big_picture.pts = ost->sync_opts;
/* honor user-forced keyframe timestamps */
1766 if (ost->forced_kf_index < ost->forced_kf_count &&
1767 big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1768 big_picture.pict_type = AV_PICTURE_TYPE_I;
1769 ost->forced_kf_index++;
1771 ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
1773 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1778 if (pkt.pts != AV_NOPTS_VALUE)
1779 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
1780 if (pkt.dts != AV_NOPTS_VALUE)
1781 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
1783 write_frame(s, &pkt, ost);
1784 *frame_size = pkt.size;
1785 video_size += pkt.size;
1787 /* if two pass, output log */
1788 if (ost->logfile && enc->stats_out) {
1789 fprintf(ost->logfile, "%s", enc->stats_out);
1795 * For video, number of frames in == number of packets out.
1796 * But there may be reordering, so we can't throw away frames on encoder
1797 * flush, we need to limit them here, before they go into encoder.
1799 ost->frame_number++;
1803 static double psnr(double d)
1805 return -10.0 * log(d) / log(10.0);
/*
 * Append one line of per-frame video statistics (quantizer, PSNR, size,
 * bitrate, picture type) to the global vstats file, opening it lazily on
 * first call.  (Excerpt note: error handling for fopen and several lines
 * are omitted from this sampled view.)
 */
1808 static void do_video_stats(AVFormatContext *os, OutputStream *ost,
1811 AVCodecContext *enc;
1813 double ti1, bitrate, avg_bitrate;
1815 /* this is executed just the first time do_video_stats is called */
1817 vstats_file = fopen(vstats_filename, "w");
1824 enc = ost->st->codec;
1825 if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1826 frame_number = ost->frame_number;
1827 fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
1828 if (enc->flags&CODEC_FLAG_PSNR)
1829 fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1831 fprintf(vstats_file,"f_size= %6d ", frame_size);
1832 /* compute pts value */
1833 ti1 = ost->sync_opts * av_q2d(enc->time_base);
/* instantaneous bitrate from this frame; average from total bytes so far */
1837 bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1838 avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
1839 fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1840 (double)video_size / 1024, ti1, bitrate, avg_bitrate);
1841 fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
1845 /* check for new output on any of the filtergraphs */
/*
 * Drain every output stream's filtergraph sink: pull all available
 * filtered buffers, convert them to AVFrames and hand video frames to
 * do_video_out().  Returns 0 on success or a negative AVERROR.
 * (Excerpt note: loop declarations and some lines are omitted here.)
 */
1846 static int poll_filters(void)
1848 AVFilterBufferRef *picref;
1849 AVFrame *filtered_frame = NULL;
1852 for (i = 0; i < nb_output_streams; i++) {
1853 OutputStream *ost = output_streams[i];
1854 OutputFile *of = output_files[ost->file_index];
/* skip streams without a filtergraph or already past the recording limit */
1856 if (!ost->filter || ost->is_past_recording_time)
1859 if (!ost->filtered_frame && !(ost->filtered_frame = avcodec_alloc_frame())) {
1860 return AVERROR(ENOMEM);
1862 avcodec_get_frame_defaults(ost->filtered_frame);
1863 filtered_frame = ost->filtered_frame;
/* pull buffers until the sink has nothing more to offer */
1865 while (av_buffersink_read(ost->filter->filter, &picref) >= 0) {
1866 avfilter_copy_buf_props(filtered_frame, picref);
1867 filtered_frame->pts = av_rescale_q(picref->pts,
1868 ost->filter->filter->inputs[0]->time_base,
/* discard frames before the requested output start time */
1871 if (of->start_time && filtered_frame->pts < of->start_time)
1874 switch (ost->filter->filter->inputs[0]->type) {
1875 case AVMEDIA_TYPE_VIDEO:
1876 if (!ost->frame_aspect_ratio)
1877 ost->st->codec->sample_aspect_ratio = picref->video->pixel_aspect;
1879 do_video_out(of->ctx, ost, filtered_frame, &frame_size,
1880 same_quant ? ost->last_quality :
1881 ost->st->codec->global_quality);
1882 if (vstats_filename && frame_size)
1883 do_video_stats(of->ctx, ost, frame_size);
1886 // TODO support audio/subtitle filters
1890 avfilter_unref_buffer(picref);
/*
 * Print the one-line progress report (frame count, fps, q, size, time,
 * bitrate, dup/drop counts) to the log, throttled to twice per second,
 * plus a final summary when 'is_last_report' is set.  Uses static state
 * ('last_time', 'qp_histogram') across calls.  (Excerpt note: several
 * declarations and branch bodies are omitted from this sampled view.)
 */
1896 static void print_report(int is_last_report, int64_t timer_start)
1900 AVFormatContext *oc;
1902 AVCodecContext *enc;
1903 int frame_number, vid, i;
1904 double bitrate, ti1, pts;
1905 static int64_t last_time = -1;
1906 static int qp_histogram[52];
1908 if (!print_stats && !is_last_report)
1911 if (!is_last_report) {
1913 /* display the report every 0.5 seconds */
1914 cur_time = av_gettime();
1915 if (last_time == -1) {
1916 last_time = cur_time;
1919 if ((cur_time - last_time) < 500000)
1921 last_time = cur_time;
/* the report is computed against the first output file only */
1925 oc = output_files[0]->ctx;
1927 total_size = avio_size(oc->pb);
1928 if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
1929 total_size = avio_tell(oc->pb);
1934 for (i = 0; i < nb_output_streams; i++) {
1936 ost = output_streams[i];
1937 enc = ost->st->codec;
1938 if (!ost->stream_copy && enc->coded_frame)
1939 q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1940 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1941 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
/* only the first video stream contributes frame/fps statistics */
1943 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1944 float t = (av_gettime() - timer_start) / 1000000.0;
1946 frame_number = ost->frame_number;
1947 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
1948 frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
1950 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1954 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
/* log2-compressed quantizer histogram, one hex digit per bucket */
1956 for (j = 0; j < 32; j++)
1957 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
1959 if (enc->flags&CODEC_FLAG_PSNR) {
1961 double error, error_sum = 0;
1962 double scale, scale_sum = 0;
1963 char type[3] = { 'Y','U','V' };
1964 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1965 for (j = 0; j < 3; j++) {
/* final report accumulates total error; periodic reports use per-frame error */
1966 if (is_last_report) {
1967 error = enc->error[j];
1968 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1970 error = enc->coded_frame->error[j];
1971 scale = enc->width * enc->height * 255.0 * 255.0;
1977 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
1979 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1983 /* compute min output value */
1984 pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
1985 if ((pts < ti1) && (pts > 0))
1991 bitrate = (double)(total_size * 8) / ti1 / 1000.0;
1993 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1994 "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
1995 (double)total_size / 1024, ti1, bitrate);
1997 if (nb_frames_dup || nb_frames_drop)
1998 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1999 nb_frames_dup, nb_frames_drop);
/* '\r' keeps the periodic report on one console line */
2001 av_log(NULL, AV_LOG_INFO, "%s \r", buf);
2005 if (is_last_report) {
2006 int64_t raw= audio_size + video_size + extra_size;
2007 av_log(NULL, AV_LOG_INFO, "\n");
2008 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
2009 video_size / 1024.0,
2010 audio_size / 1024.0,
2011 extra_size / 1024.0,
2012 100.0 * (total_size - raw) / raw
/*
 * At end of input, drain every encoder: flush remaining FIFO audio, then
 * repeatedly feed NULL frames until each encoder stops producing
 * packets.  Raw-passthrough streams and PCM-ish audio (frame_size <= 1)
 * need no flushing.  (Excerpt note: 'continue'/'break' lines and loop
 * braces are omitted from this sampled view.)
 */
2017 static void flush_encoders(void)
2021 for (i = 0; i < nb_output_streams; i++) {
2022 OutputStream *ost = output_streams[i];
2023 AVCodecContext *enc = ost->st->codec;
2024 AVFormatContext *os = output_files[ost->file_index]->ctx;
2025 int stop_encoding = 0;
2027 if (!ost->encoding_needed)
2030 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
2032 if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
2037 int fifo_bytes, got_packet;
2038 av_init_packet(&pkt);
2042 switch (ost->st->codec->codec_type) {
2043 case AVMEDIA_TYPE_AUDIO:
2044 fifo_bytes = av_fifo_size(ost->fifo);
2045 if (fifo_bytes > 0) {
2046 /* encode any samples remaining in fifo */
2047 int frame_bytes = fifo_bytes;
2049 av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
2051 encode_audio_frame(os, ost, audio_buf, frame_bytes);
2053 /* flush encoder with NULL frames until it is done
2054 returning packets */
2055 if (encode_audio_frame(os, ost, NULL, 0) == 0) {
2061 case AVMEDIA_TYPE_VIDEO:
/* NULL frame signals the encoder to emit buffered (reordered) packets */
2062 ret = avcodec_encode_video2(enc, &pkt, NULL, &got_packet);
2064 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
2068 if (ost->logfile && enc->stats_out) {
2069 fprintf(ost->logfile, "%s", enc->stats_out);
2075 if (pkt.pts != AV_NOPTS_VALUE)
2076 pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
2077 if (pkt.dts != AV_NOPTS_VALUE)
2078 pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
2079 write_frame(os, &pkt, ost);
2091 * Check whether a packet from ist should be written into ost at this time
/*
 * Returns nonzero when 'ost' is fed by 'ist' and the packet's timestamp
 * is past the output file's start time.  (Excerpt note: the return
 * statements themselves are on lines omitted from this view.)
 */
2093 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2095 OutputFile *of = output_files[ost->file_index];
/* global stream index of 'ist' across all input files */
2096 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2098 if (ost->source_index != ist_index)
2101 if (of->start_time && ist->last_dts < of->start_time)
/*
 * Copy one input packet to an output stream without re-encoding:
 * rescale timestamps into the output time base (offset by the output
 * start time), optionally run the parser's bitstream fixups, and mux.
 * (Excerpt note: early 'return' lines and some braces are omitted from
 * this sampled view.)
 */
2107 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2109 OutputFile *of = output_files[ost->file_index];
2110 int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
2113 av_init_packet(&opkt);
/* by default don't emit anything before the first keyframe */
2115 if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2116 !ost->copy_initial_nonkeyframes)
2119 if (of->recording_time != INT64_MAX &&
2120 ist->last_dts >= of->recording_time + of->start_time) {
2121 ost->is_past_recording_time = 1;
2125 /* force the input stream PTS */
2126 if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
2127 audio_size += pkt->size;
2128 else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2129 video_size += pkt->size;
2133 if (pkt->pts != AV_NOPTS_VALUE)
2134 opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
2136 opkt.pts = AV_NOPTS_VALUE;
/* fall back to the tracked last_dts when the packet carries no dts */
2138 if (pkt->dts == AV_NOPTS_VALUE)
2139 opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
2141 opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
2142 opkt.dts -= ost_tb_start_time;
2144 opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
2145 opkt.flags = pkt->flags;
2147 // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2148 if ( ost->st->codec->codec_id != CODEC_ID_H264
2149 && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
2150 && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
2151 && ost->st->codec->codec_id != CODEC_ID_VC1
2153 if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
/* parser allocated new data: give the packet ownership of it */
2154 opkt.destruct = av_destruct_packet;
2156 opkt.data = pkt->data;
2157 opkt.size = pkt->size;
2160 write_frame(of->ctx, &opkt, ost);
2161 ost->st->codec->frame_number++;
2162 av_free_packet(&opkt);
/*
 * When -re (rate emulation) is active for this input file, sleep so
 * packets are consumed no faster than real time.  Compares the stream's
 * last dts (converted to microseconds) against wall-clock time elapsed
 * since the stream started; the actual sleep call is on a line omitted
 * from this excerpt.
 */
2165 static void rate_emu_sleep(InputStream *ist)
2167 if (input_files[ist->file_index]->rate_emu) {
2168 int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
2169 int64_t now = av_gettime() - ist->start;
/*
 * Decode one audio packet from 'ist', update the stream's timestamp
 * bookkeeping, apply the software volume scaler when audio_volume != 256
 * (unity in Q8), and feed the decoded frame to every output stream that
 * encodes from this input.  *got_output is set by the decoder.
 * (Excerpt note: return statements, loop braces and the sample_rate
 * divisor of the next_dts expression are omitted from this view.)
 */
2175 static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2177 AVFrame *decoded_frame;
2178 AVCodecContext *avctx = ist->st->codec;
2179 int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
2182 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2183 return AVERROR(ENOMEM);
2185 avcodec_get_frame_defaults(ist->decoded_frame);
2186 decoded_frame = ist->decoded_frame;
2188 ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
2194 /* no audio frame */
2198 /* if the decoder provides a pts, use it instead of the last packet pts.
2199 the decoder could be delaying output by a packet or more. */
2200 if (decoded_frame->pts != AV_NOPTS_VALUE)
2201 ist->next_dts = decoded_frame->pts;
2203 /* increment next_dts to use for the case where the input stream does not
2204 have timestamps or there are multiple frames in the packet */
2205 ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2208 // preprocess audio (volume)
2209 if (audio_volume != 256) {
2210 int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
2211 void *samples = decoded_frame->data[0];
/* per-format scaling: integer formats use fixed-point (>>8 == /256) with
 * rounding, float/double scale by audio_volume/256 */
2212 switch (avctx->sample_fmt) {
2213 case AV_SAMPLE_FMT_U8:
2215 uint8_t *volp = samples;
2216 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2217 int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
2218 *volp++ = av_clip_uint8(v);
2222 case AV_SAMPLE_FMT_S16:
2224 int16_t *volp = samples;
2225 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2226 int v = ((*volp) * audio_volume + 128) >> 8;
2227 *volp++ = av_clip_int16(v);
2231 case AV_SAMPLE_FMT_S32:
2233 int32_t *volp = samples;
2234 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
/* 64-bit intermediate avoids overflow of s32 * volume */
2235 int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
2236 *volp++ = av_clipl_int32(v);
2240 case AV_SAMPLE_FMT_FLT:
2242 float *volp = samples;
2243 float scale = audio_volume / 256.f;
2244 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2249 case AV_SAMPLE_FMT_DBL:
2251 double *volp = samples;
2252 double scale = audio_volume / 256.;
2253 for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
2259 av_log(NULL, AV_LOG_FATAL,
2260 "Audio volume adjustment on sample format %s is not supported.\n",
2261 av_get_sample_fmt_name(ist->st->codec->sample_fmt));
2266 rate_emu_sleep(ist);
2268 for (i = 0; i < nb_output_streams; i++) {
2269 OutputStream *ost = output_streams[i];
2271 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2273 do_audio_out(output_files[ost->file_index]->ctx, ost, ist, decoded_frame);
/*
 * Decode one video packet from 'ist', guess a corrected pts, optionally
 * deinterlace, reconfigure filtergraphs when the frame geometry/format
 * changed mid-stream, and push the frame into every attached input
 * filter (zero-copy via a buffer ref when the decoder used DR1, else by
 * copying).  (Excerpt note: return statements and several braces are
 * omitted from this sampled view.)
 */
2279 static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
2281 AVFrame *decoded_frame;
2282 void *buffer_to_free = NULL;
2283 int i, ret = 0, resample_changed;
2286 if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
2287 return AVERROR(ENOMEM);
2289 avcodec_get_frame_defaults(ist->decoded_frame);
2290 decoded_frame = ist->decoded_frame;
/* the caller saves the packet pts across calls; consume it once */
2291 pkt->pts = *pkt_pts;
2292 pkt->dts = ist->last_dts;
2293 *pkt_pts = AV_NOPTS_VALUE;
2295 ret = avcodec_decode_video2(ist->st->codec,
2296 decoded_frame, got_output, pkt);
2300 quality = same_quant ? decoded_frame->quality : 0;
2302 /* no picture yet */
/* on EOF-flush with no picture, signal end-of-stream to the filters */
2304 for (i = 0; i < ist->nb_filters; i++)
2305 av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2308 decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
2309 decoded_frame->pkt_dts);
2311 pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
2313 rate_emu_sleep(ist);
2315 if (ist->st->sample_aspect_ratio.num)
2316 decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
/* detect mid-stream geometry or pixel format changes */
2318 resample_changed = ist->resample_width != decoded_frame->width ||
2319 ist->resample_height != decoded_frame->height ||
2320 ist->resample_pix_fmt != decoded_frame->format;
2321 if (resample_changed) {
2322 av_log(NULL, AV_LOG_INFO,
2323 "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2324 ist->file_index, ist->st->index,
2325 ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2326 decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2328 ist->resample_width = decoded_frame->width;
2329 ist->resample_height = decoded_frame->height;
2330 ist->resample_pix_fmt = decoded_frame->format;
/* rebuild every filtergraph this stream feeds, with the new parameters */
2332 for (i = 0; i < nb_filtergraphs; i++)
2333 if (ist_in_filtergraph(filtergraphs[i], ist) &&
2334 configure_filtergraph(filtergraphs[i]) < 0) {
2335 av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2340 for (i = 0; i < ist->nb_filters; i++) {
2341 // XXX what an ugly hack
2342 if (ist->filters[i]->graph->nb_outputs == 1)
2343 ist->filters[i]->graph->outputs[0]->ost->last_quality = quality;
/* DR1 path: wrap the decoder's buffer in a filter ref, avoiding a copy */
2345 if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
2346 FrameBuffer *buf = decoded_frame->opaque;
2347 AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
2348 decoded_frame->data, decoded_frame->linesize,
2349 AV_PERM_READ | AV_PERM_PRESERVE,
2350 ist->st->codec->width, ist->st->codec->height,
2351 ist->st->codec->pix_fmt);
2353 avfilter_copy_frame_props(fb, decoded_frame);
2354 fb->buf->priv = buf;
2355 fb->buf->free = filter_release_buffer;
2358 av_buffersrc_buffer(ist->filters[i]->filter, fb);
/* non-DR1 fallback: the filter copies the frame data */
2360 av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
2363 av_free(buffer_to_free);
/*
 * Decode one subtitle packet and pass the decoded AVSubtitle to every
 * output stream that encodes from this input, then free it.  Returns
 * the decoder's error code on failure.  (Excerpt note: the early-return
 * on decode failure and loop braces are omitted from this view.)
 */
2367 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2369 AVSubtitle subtitle;
2370 int i, ret = avcodec_decode_subtitle2(ist->st->codec,
2371 &subtitle, got_output, pkt);
2377 rate_emu_sleep(ist);
2379 for (i = 0; i < nb_output_streams; i++) {
2380 OutputStream *ost = output_streams[i];
2382 if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
2385 do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle, pkt->pts);
/* the subtitle's rects were allocated by the decoder; release them */
2388 avsubtitle_free(&subtitle);
2392 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Central demux-side dispatcher: decode the packet (possibly containing
 * several frames) via the per-media-type transcode_* helpers, maintain
 * last_dts/next_dts estimates, and for non-decoded streams perform
 * stream copy.  (Excerpt note: declarations, 'break's and the
 * avpkt-consumption code after decoding are omitted from this view.)
 */
2393 static int output_packet(InputStream *ist, const AVPacket *pkt)
2397 int64_t pkt_pts = AV_NOPTS_VALUE;
2400 if (ist->next_dts == AV_NOPTS_VALUE)
2401 ist->next_dts = ist->last_dts;
2405 av_init_packet(&avpkt);
/* seed timestamp tracking from the packet, converted to AV_TIME_BASE */
2413 if (pkt->dts != AV_NOPTS_VALUE)
2414 ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2415 if (pkt->pts != AV_NOPTS_VALUE)
2416 pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2418 // while we have more to decode or while the decoder did output something on EOF
2419 while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2423 ist->last_dts = ist->next_dts;
2425 if (avpkt.size && avpkt.size != pkt->size) {
2426 av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2427 "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2428 ist->showed_multi_packet_warning = 1;
2431 switch (ist->st->codec->codec_type) {
2432 case AVMEDIA_TYPE_AUDIO:
2433 ret = transcode_audio (ist, &avpkt, &got_output);
2435 case AVMEDIA_TYPE_VIDEO:
2436 ret = transcode_video (ist, &avpkt, &got_output, &pkt_pts);
/* best-effort next_dts: packet duration, then r_frame_rate, then the
 * codec time base scaled by repeat_pict/ticks_per_frame */
2438 ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2439 else if (ist->st->r_frame_rate.num)
2440 ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
2441 ist->st->r_frame_rate.num},
2443 else if (ist->st->codec->time_base.num != 0) {
2444 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
2445 ist->st->codec->ticks_per_frame;
2446 ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
2449 case AVMEDIA_TYPE_SUBTITLE:
2450 ret = transcode_subtitles(ist, &avpkt, &got_output);
2458 // touch data and size only if not EOF
2468 /* handle stream copy */
2469 if (!ist->decoding_needed) {
2470 rate_emu_sleep(ist);
2471 ist->last_dts = ist->next_dts;
2472 switch (ist->st->codec->codec_type) {
2473 case AVMEDIA_TYPE_AUDIO:
2474 ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
2475 ist->st->codec->sample_rate;
2477 case AVMEDIA_TYPE_VIDEO:
2478 if (ist->st->codec->time_base.num != 0) {
2479 int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
2480 ist->next_dts += ((int64_t)AV_TIME_BASE *
2481 ist->st->codec->time_base.num * ticks) /
2482 ist->st->codec->time_base.den;
/* copy the untouched packet to each matching copy-mode output */
2487 for (i = 0; pkt && i < nb_output_streams; i++) {
2488 OutputStream *ost = output_streams[i];
2490 if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2493 do_streamcopy(ist, ost, pkt);
/*
 * Build and print an SDP description covering all output files
 * (used for RTP streaming).
 * NOTE(review): the allocation-failure check for 'avc' is not visible
 * in this excerpt — confirm it exists in the full source.
 */
2499 static void print_sdp(void)
2503 AVFormatContext **avc = av_malloc(sizeof(*avc) * nb_output_files);
2507 for (i = 0; i < nb_output_files; i++)
2508 avc[i] = output_files[i]->ctx;
2510 av_sdp_create(avc, nb_output_files, sdp, sizeof(sdp));
2511 printf("SDP:\n%s\n", sdp);
/*
 * Open the decoder for input stream 'ist_index' (if decoding is needed)
 * and initialize its timestamp state. On failure, writes a human-readable
 * message into 'error' (up to error_len bytes) and returns a negative
 * AVERROR code.
 */
2516 static int init_input_stream(int ist_index, char *error, int error_len)
2519 InputStream *ist = input_streams[ist_index];
2520 if (ist->decoding_needed) {
2521 AVCodec *codec = ist->dec;
2523 snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
2524 ist->st->codec->codec_id, ist->file_index, ist->st->index);
2525 return AVERROR(EINVAL);
2528 /* update requested sample format for the decoder based on the
2529 corresponding encoder sample format */
2530 for (i = 0; i < nb_output_streams; i++) {
2531 OutputStream *ost = output_streams[i];
2532 if (ost->source_index == ist_index) {
2533 update_sample_fmt(ist->st->codec, codec, ost->st->codec);
/* direct-rendering video decoders get our buffer callbacks */
2538 if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
2539 ist->st->codec->get_buffer = codec_get_buffer;
2540 ist->st->codec->release_buffer = codec_release_buffer;
2541 ist->st->codec->opaque = ist;
/* default to automatic thread count unless the user set one */
2544 if (!av_dict_get(ist->opts, "threads", NULL, 0))
2545 av_dict_set(&ist->opts, "threads", "auto", 0);
2546 if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
2547 snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
2548 ist->file_index, ist->st->index);
2549 return AVERROR(EINVAL);
2551 assert_codec_experimental(ist->st->codec, 0);
2552 assert_avoptions(ist->opts);
/* fill in a default channel layout when either side lacks one */
2554 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2555 for (i = 0; i < nb_output_streams; i++) {
2556 OutputStream *ost = output_streams[i];
2557 if (ost->source_index == ist_index) {
2558 if (!ist->st->codec->channel_layout || !ost->st->codec->channel_layout)
2559 get_default_channel_layouts(ost, ist);
/* start dts compensates for B-frame delay when a frame rate is known */
2566 ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2567 ist->next_dts = AV_NOPTS_VALUE;
2568 init_pts_correction(&ist->pts_ctx);
/*
 * Return the input stream feeding 'ost': either its directly mapped
 * source, or (for filter-fed outputs) the first input of the filtergraph
 * whose media type matches the output stream.
 */
2574 static InputStream *get_input_stream(OutputStream *ost)
2576 if (ost->source_index >= 0)
2577 return input_streams[ost->source_index];
2580 FilterGraph *fg = ost->filter->graph;
2583 for (i = 0; i < fg->nb_inputs; i++)
2584 if (fg->inputs[i]->ist->st->codec->codec_type == ost->st->codec->codec_type)
2585 return fg->inputs[i]->ist;
/*
 * One-time setup before the main transcode loop: derive encoder
 * parameters for every output stream (copying from the input stream for
 * -codec copy), open all encoders and decoders, write output file
 * headers, and print the stream mapping.
 * NOTE(review): this excerpt omits many lines (error labels, breaks,
 * closing braces); comments annotate only the visible logic.
 */
2591 static int transcode_init(void)
2593 int ret = 0, i, j, k;
2594 AVFormatContext *oc;
2595 AVCodecContext *codec, *icodec;
2601 /* init framerate emulation */
2602 for (i = 0; i < nb_input_files; i++) {
2603 InputFile *ifile = input_files[i];
2604 if (ifile->rate_emu)
2605 for (j = 0; j < ifile->nb_streams; j++)
2606 input_streams[j + ifile->ist_index]->start = av_gettime();
2609 /* output stream init */
2610 for (i = 0; i < nb_output_files; i++) {
2611 oc = output_files[i]->ctx;
2612 if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2613 av_dump_format(oc, i, oc->filename, 1);
2614 av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2615 return AVERROR(EINVAL);
2619 /* init complex filtergraphs */
2620 for (i = 0; i < nb_filtergraphs; i++)
2621 if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2624 /* for each output stream, we compute the right encoding parameters */
2625 for (i = 0; i < nb_output_streams; i++) {
2626 ost = output_streams[i];
2627 oc = output_files[ost->file_index]->ctx;
2628 ist = get_input_stream(ost);
2630 if (ost->attachment_filename)
2633 codec = ost->st->codec;
2636 icodec = ist->st->codec;
2638 ost->st->disposition = ist->st->disposition;
2639 codec->bits_per_raw_sample = icodec->bits_per_raw_sample;
2640 codec->chroma_sample_location = icodec->chroma_sample_location;
/* --- stream copy: mirror the input codec parameters, no re-encode --- */
2643 if (ost->stream_copy) {
2644 uint64_t extra_size;
2646 av_assert0(ist && !ost->filter);
2648 extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2650 if (extra_size > INT_MAX) {
2651 return AVERROR(EINVAL);
2654 /* if stream_copy is selected, no need to decode or encode */
2655 codec->codec_id = icodec->codec_id;
2656 codec->codec_type = icodec->codec_type;
/* keep the input codec tag only when the output container accepts it */
2658 if (!codec->codec_tag) {
2659 if (!oc->oformat->codec_tag ||
2660 av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
2661 av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
2662 codec->codec_tag = icodec->codec_tag;
2665 codec->bit_rate = icodec->bit_rate;
2666 codec->rc_max_rate = icodec->rc_max_rate;
2667 codec->rc_buffer_size = icodec->rc_buffer_size;
2668 codec->field_order = icodec->field_order;
/* duplicate extradata (zero-padded to extra_size for safe parsing) */
2669 codec->extradata = av_mallocz(extra_size);
2670 if (!codec->extradata) {
2671 return AVERROR(ENOMEM);
2673 memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
2674 codec->extradata_size = icodec->extradata_size;
2676 codec->time_base = icodec->time_base;
2677 codec->time_base.num *= icodec->ticks_per_frame;
2678 av_reduce(&codec->time_base.num, &codec->time_base.den,
2679 codec->time_base.num, codec->time_base.den, INT_MAX);
2681 codec->time_base = ist->st->time_base;
2683 switch (codec->codec_type) {
2684 case AVMEDIA_TYPE_AUDIO:
/* -vol needs decoded samples, impossible with -acodec copy */
2685 if (audio_volume != 256) {
2686 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2689 codec->channel_layout = icodec->channel_layout;
2690 codec->sample_rate = icodec->sample_rate;
2691 codec->channels = icodec->channels;
2692 codec->frame_size = icodec->frame_size;
2693 codec->audio_service_type = icodec->audio_service_type;
2694 codec->block_align = icodec->block_align;
2696 case AVMEDIA_TYPE_VIDEO:
2697 codec->pix_fmt = icodec->pix_fmt;
2698 codec->width = icodec->width;
2699 codec->height = icodec->height;
2700 codec->has_b_frames = icodec->has_b_frames;
/* SAR preference order: stream SAR, then codec SAR, then unknown (0/1) */
2701 if (!codec->sample_aspect_ratio.num) {
2702 codec->sample_aspect_ratio =
2703 ost->st->sample_aspect_ratio =
2704 ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
2705 ist->st->codec->sample_aspect_ratio.num ?
2706 ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
2709 case AVMEDIA_TYPE_SUBTITLE:
2710 codec->width = icodec->width;
2711 codec->height = icodec->height;
2713 case AVMEDIA_TYPE_DATA:
2714 case AVMEDIA_TYPE_ATTACHMENT:
/* --- re-encode path: pick encoder defaults from the input --- */
2721 /* should only happen when a default codec is not present. */
2722 snprintf(error, sizeof(error), "Automatic encoder selection "
2723 "failed for output stream #%d:%d. Default encoder for "
2724 "format %s is probably disabled. Please choose an "
2725 "encoder manually.\n", ost->file_index, ost->index,
2727 ret = AVERROR(EINVAL);
2732 ist->decoding_needed = 1;
2733 ost->encoding_needed = 1;
2735 switch (codec->codec_type) {
2736 case AVMEDIA_TYPE_AUDIO:
2737 ost->fifo = av_fifo_alloc(1024);
2739 return AVERROR(ENOMEM);
2742 if (!codec->sample_rate)
2743 codec->sample_rate = icodec->sample_rate;
2744 choose_sample_rate(ost->st, ost->enc);
2745 codec->time_base = (AVRational){ 1, codec->sample_rate };
2747 if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
2748 codec->sample_fmt = icodec->sample_fmt;
2749 choose_sample_fmt(ost->st, ost->enc);
2751 if (!codec->channels)
2752 codec->channels = icodec->channels;
2753 if (!codec->channel_layout)
2754 codec->channel_layout = icodec->channel_layout;
/* drop an inconsistent layout rather than keep a wrong one */
2755 if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
2756 codec->channel_layout = 0;
2758 icodec->request_channels = codec-> channels;
/* remember input audio parameters to detect when resampling is needed */
2759 ost->resample_sample_fmt = icodec->sample_fmt;
2760 ost->resample_sample_rate = icodec->sample_rate;
2761 ost->resample_channels = icodec->channels;
2762 ost->resample_channel_layout = icodec->channel_layout;
2764 case AVMEDIA_TYPE_VIDEO:
2767 fg = init_simple_filtergraph(ist, ost);
2768 if (configure_video_filters(fg)) {
2769 av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2775 * We want CFR output if and only if one of those is true:
2776 * 1) user specified output framerate with -r
2777 * 2) user specified -vsync cfr
2778 * 3) output format is CFR and the user didn't force vsync to
2779 * something else than CFR
2781 * in such a case, set ost->frame_rate
2783 if (!ost->frame_rate.num && ist &&
2784 (video_sync_method == VSYNC_CFR ||
2785 (video_sync_method == VSYNC_AUTO &&
2786 !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
2787 ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
/* snap to the nearest frame rate the encoder supports */
2788 if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2789 int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2790 ost->frame_rate = ost->enc->supported_framerates[idx];
2793 if (ost->frame_rate.num) {
2794 codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
2795 video_sync_method = VSYNC_CFR;
2797 codec->time_base = ist->st->time_base;
2799 codec->time_base = ost->filter->filter->inputs[0]->time_base;
/* geometry/format come from the filtergraph's output link */
2801 codec->width = ost->filter->filter->inputs[0]->w;
2802 codec->height = ost->filter->filter->inputs[0]->h;
2803 codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2804 ost->frame_aspect_ratio ? // overridden by the -aspect cli option
2805 av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
2806 ost->filter->filter->inputs[0]->sample_aspect_ratio;
2807 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
/* raw-sample metadata is meaningless once the picture is transformed */
2809 if (codec->width != icodec->width ||
2810 codec->height != icodec->height ||
2811 codec->pix_fmt != icodec->pix_fmt) {
2812 codec->bits_per_raw_sample = 0;
2816 case AVMEDIA_TYPE_SUBTITLE:
2817 codec->time_base = (AVRational){1, 1000};
/* two-pass encoding: set up the pass log file */
2824 if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
2825 char logfilename[1024];
2828 snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2829 pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
/* libx264 reads/writes its own stats file via a private option */
2831 if (!strcmp(ost->enc->name, "libx264")) {
2832 av_dict_set(&ost->opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2834 if (codec->flags & CODEC_FLAG_PASS1) {
2835 f = fopen(logfilename, "wb");
2837 av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2838 logfilename, strerror(errno));
2844 size_t logbuffer_size;
2845 if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2846 av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2850 codec->stats_in = logbuffer;
2857 /* open each encoder */
2858 for (i = 0; i < nb_output_streams; i++) {
2859 ost = output_streams[i];
2860 if (ost->encoding_needed) {
2861 AVCodec *codec = ost->enc;
2862 AVCodecContext *dec = NULL;
2864 if ((ist = get_input_stream(ost)))
2865 dec = ist->st->codec;
/* propagate the subtitle header (e.g. ASS styles) to the encoder */
2866 if (dec && dec->subtitle_header) {
2867 ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
2868 if (!ost->st->codec->subtitle_header) {
2869 ret = AVERROR(ENOMEM);
2872 memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2873 ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
2875 if (!av_dict_get(ost->opts, "threads", NULL, 0))
2876 av_dict_set(&ost->opts, "threads", "auto", 0);
2877 if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
2878 snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
2879 ost->file_index, ost->index);
2880 ret = AVERROR(EINVAL);
2883 assert_codec_experimental(ost->st->codec, 1);
2884 assert_avoptions(ost->opts);
2885 if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
2886 av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2887 "It takes bits/s as argument, not kbits/s\n");
2888 extra_size += ost->st->codec->extradata_size;
2890 if (ost->st->codec->me_threshold)
2891 input_streams[ost->source_index]->st->codec->debug |= FF_DEBUG_MV;
2895 /* init input streams */
2896 for (i = 0; i < nb_input_streams; i++)
2897 if ((ret = init_input_stream(i, error, sizeof(error))) < 0)
2900 /* discard unused programs */
2901 for (i = 0; i < nb_input_files; i++) {
2902 InputFile *ifile = input_files[i];
2903 for (j = 0; j < ifile->ctx->nb_programs; j++) {
2904 AVProgram *p = ifile->ctx->programs[j];
2905 int discard = AVDISCARD_ALL;
/* keep a program if any of its streams is actually used */
2907 for (k = 0; k < p->nb_stream_indexes; k++)
2908 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
2909 discard = AVDISCARD_DEFAULT;
2912 p->discard = discard;
2916 /* open files and write file headers */
2917 for (i = 0; i < nb_output_files; i++) {
2918 oc = output_files[i]->ctx;
2919 oc->interrupt_callback = int_cb;
2920 if (avformat_write_header(oc, &output_files[i]->opts) < 0) {
2921 snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
2922 ret = AVERROR(EINVAL);
2925 assert_avoptions(output_files[i]->opts);
2926 if (strcmp(oc->oformat->name, "rtp")) {
2932 /* dump the file output parameters - cannot be done before in case
2934 for (i = 0; i < nb_output_files; i++) {
2935 av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
2938 /* dump the stream mapping */
2939 av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
2940 for (i = 0; i < nb_input_streams; i++) {
2941 ist = input_streams[i];
2943 for (j = 0; j < ist->nb_filters; j++) {
2944 AVFilterLink *link = ist->filters[j]->filter->outputs[0];
2945 if (ist->filters[j]->graph->graph_desc) {
2946 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
2947 ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
2948 link->dst->filter->name);
2949 if (link->dst->input_count > 1)
2950 av_log(NULL, AV_LOG_INFO, ":%s", link->dstpad->name);
2951 if (nb_filtergraphs > 1)
2952 av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
2953 av_log(NULL, AV_LOG_INFO, "\n");
2958 for (i = 0; i < nb_output_streams; i++) {
2959 ost = output_streams[i];
2961 if (ost->attachment_filename) {
2962 /* an attached file */
2963 av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
2964 ost->attachment_filename, ost->file_index, ost->index);
2968 if (ost->filter && ost->filter->graph->graph_desc) {
2969 /* output from a complex graph */
2970 AVFilterLink *link = ost->filter->filter->inputs[0];
2971 av_log(NULL, AV_LOG_INFO, " %s", link->src->filter->name);
2972 if (link->src->output_count > 1)
2973 av_log(NULL, AV_LOG_INFO, ":%s", link->srcpad->name);
2974 if (nb_filtergraphs > 1)
2975 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
2977 av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
2978 ost->index, ost->enc ? ost->enc->name : "?");
2982 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
2983 input_streams[ost->source_index]->file_index,
2984 input_streams[ost->source_index]->st->index,
2987 if (ost->sync_ist != input_streams[ost->source_index])
2988 av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
2989 ost->sync_ist->file_index,
2990 ost->sync_ist->st->index);
2991 if (ost->stream_copy)
2992 av_log(NULL, AV_LOG_INFO, " (copy)");
2994 av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index]->dec ?
2995 input_streams[ost->source_index]->dec->name : "?",
2996 ost->enc ? ost->enc->name : "?");
2997 av_log(NULL, AV_LOG_INFO, "\n");
3001 av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3013 * The following code is the main loop of the file converter
/*
 * Main conversion loop: repeatedly pick the input file whose stream has
 * the smallest dts, read one packet, apply timestamp offsets/scaling and
 * discontinuity correction, then hand it to output_packet(). Afterwards
 * flush decoders, write trailers and release all per-stream resources.
 * NOTE(review): many lines (labels, breaks, closing braces) are elided
 * in this excerpt.
 */
3015 static int transcode(void)
3018 AVFormatContext *is, *os;
3022 int no_packet_count = 0;
3023 int64_t timer_start;
/* per-input-file EAGAIN flags: skip files that had no packet ready */
3025 if (!(no_packet = av_mallocz(nb_input_files)))
3028 ret = transcode_init();
3032 av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
3035 timer_start = av_gettime();
3037 for (; received_sigterm == 0;) {
3038 int file_index, ist_index, past_recording_time = 1;
3042 ipts_min = INT64_MAX;
3044 /* check if there's any stream where output is still needed */
3045 for (i = 0; i < nb_output_streams; i++) {
3047 ost = output_streams[i];
3048 of = output_files[ost->file_index];
3049 os = output_files[ost->file_index]->ctx;
3050 if (ost->is_past_recording_time ||
3051 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
/* frame limit reached: mark the whole output file as finished */
3053 if (ost->frame_number > ost->max_frames) {
3055 for (j = 0; j < of->ctx->nb_streams; j++)
3056 output_streams[of->ost_index + j]->is_past_recording_time = 1;
3059 past_recording_time = 0;
3061 if (past_recording_time)
3064 /* select the stream that we must read now by looking at the
3065 smallest output pts */
3067 for (i = 0; i < nb_input_streams; i++) {
3069 ist = input_streams[i];
3070 ipts = ist->last_dts;
3071 if (ist->discard || no_packet[ist->file_index])
3073 if (!input_files[ist->file_index]->eof_reached) {
3074 if (ipts < ipts_min) {
3076 file_index = ist->file_index;
3080 /* if none, if is finished */
3081 if (file_index < 0) {
/* all remaining files returned EAGAIN: clear flags and retry */
3082 if (no_packet_count) {
3083 no_packet_count = 0;
3084 memset(no_packet, 0, nb_input_files);
3091 /* read a frame from it and output it in the fifo */
3092 is = input_files[file_index]->ctx;
3093 ret = av_read_frame(is, &pkt);
3094 if (ret == AVERROR(EAGAIN)) {
3095 no_packet[file_index] = 1;
/* EOF on this file: flush its decoders with NULL packets */
3100 input_files[file_index]->eof_reached = 1;
3102 for (i = 0; i < input_files[file_index]->nb_streams; i++) {
3103 ist = input_streams[input_files[file_index]->ist_index + i];
3104 if (ist->decoding_needed)
3105 output_packet(ist, NULL);
3114 no_packet_count = 0;
3115 memset(no_packet, 0, nb_input_files);
3118 av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3119 is->streams[pkt.stream_index]);
3121 /* the following test is needed in case new streams appear
3122 dynamically in stream : we ignore them */
3123 if (pkt.stream_index >= input_files[file_index]->nb_streams)
3124 goto discard_packet;
3125 ist_index = input_files[file_index]->ist_index + pkt.stream_index;
3126 ist = input_streams[ist_index];
3128 goto discard_packet;
/* apply the per-file timestamp offset, then the -itsscale factor */
3130 if (pkt.dts != AV_NOPTS_VALUE)
3131 pkt.dts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3132 if (pkt.pts != AV_NOPTS_VALUE)
3133 pkt.pts += av_rescale_q(input_files[ist->file_index]->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3135 if (pkt.pts != AV_NOPTS_VALUE)
3136 pkt.pts *= ist->ts_scale;
3137 if (pkt.dts != AV_NOPTS_VALUE)
3138 pkt.dts *= ist->ts_scale;
3140 //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
3142 // pkt.dts, input_files[ist->file_index].ts_offset,
3143 // ist->st->codec->codec_type);
/* timestamp discontinuity handling (MPEG-TS style wraps): fold large
 * dts jumps into the file's ts_offset, unless -copyts is set */
3144 if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
3145 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3146 int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3147 int64_t delta = pkt_dts - ist->next_dts;
3148 if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
3149 input_files[ist->file_index]->ts_offset -= delta;
3150 av_log(NULL, AV_LOG_DEBUG,
3151 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3152 delta, input_files[ist->file_index]->ts_offset);
3153 pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3154 if (pkt.pts != AV_NOPTS_VALUE)
3155 pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3159 // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
3160 if (output_packet(ist, &pkt) < 0 || poll_filters() < 0) {
3161 av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
3162 ist->file_index, ist->st->index);
3165 av_free_packet(&pkt);
3170 av_free_packet(&pkt);
3172 /* dump report by using the output first video and audio streams */
3173 print_report(0, timer_start);
3176 /* at the end of stream, we must flush the decoder buffers */
3177 for (i = 0; i < nb_input_streams; i++) {
3178 ist = input_streams[i];
3179 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3180 output_packet(ist, NULL);
3188 /* write the trailer if needed and close file */
3189 for (i = 0; i < nb_output_files; i++) {
3190 os = output_files[i]->ctx;
3191 av_write_trailer(os);
3194 /* dump report by using the first video and audio streams */
3195 print_report(1, timer_start);
3197 /* close each encoder */
3198 for (i = 0; i < nb_output_streams; i++) {
3199 ost = output_streams[i];
3200 if (ost->encoding_needed) {
3201 av_freep(&ost->st->codec->stats_in);
3202 avcodec_close(ost->st->codec);
3206 /* close each decoder */
3207 for (i = 0; i < nb_input_streams; i++) {
3208 ist = input_streams[i];
3209 if (ist->decoding_needed) {
3210 avcodec_close(ist->st->codec);
/* final cleanup of per-stream allocations */
3218 av_freep(&no_packet);
3220 if (output_streams) {
3221 for (i = 0; i < nb_output_streams; i++) {
3222 ost = output_streams[i];
3224 if (ost->stream_copy)
3225 av_freep(&ost->st->codec->extradata);
3227 fclose(ost->logfile);
3228 ost->logfile = NULL;
3230 av_fifo_free(ost->fifo); /* works even if fifo is not
3231 initialized but set to zero */
3232 av_freep(&ost->st->codec->subtitle_header);
3233 av_free(ost->forced_kf_pts);
3235 avresample_free(&ost->avr);
3236 av_dict_free(&ost->opts);
/*
 * Parse an aspect-ratio argument (-aspect): either "num:den" or a plain
 * floating-point value. Logs a fatal error on invalid input.
 */
3243 static double parse_frame_aspect_ratio(const char *arg)
3250 p = strchr(arg, ':');
3252 x = strtol(arg, &end, 10);
3254 y = strtol(end + 1, &end, 10);
3256 ar = (double)x / (double)y;
3258 ar = strtod(arg, NULL);
3261 av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
/* -acodec: alias for -codec:a */
3267 static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
3269 return parse_option(o, "codec:a", arg, options);
/* -vcodec: alias for -codec:v */
3272 static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
3274 return parse_option(o, "codec:v", arg, options);
/* -scodec: alias for -codec:s */
3277 static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
3279 return parse_option(o, "codec:s", arg, options);
/* -dcodec: alias for -codec:d */
3282 static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
3284 return parse_option(o, "codec:d", arg, options);
/*
 * Parse a -map argument and append the resulting StreamMap(s) to
 * o->stream_maps. Supports an optional ",sync-spec" suffix (first
 * matching stream is used for sync), "[label]" references to lavfi
 * outputs, and negative maps that disable earlier ones.
 * NOTE(review): several lines of the original body are elided in this
 * excerpt; only the visible logic is annotated.
 */
3287 static int opt_map(OptionsContext *o, const char *opt, const char *arg)
3289 StreamMap *m = NULL;
3290 int i, negative = 0, file_idx;
3291 int sync_file_idx = -1, sync_stream_idx;
3299 map = av_strdup(arg);
3301 /* parse sync stream first, just pick first matching stream */
/* extra parentheses make the intentional assignment-in-condition
 * explicit (silences -Wparentheses); behavior is unchanged */
3302 if ((sync = strchr(map, ','))) {
3304 sync_file_idx = strtol(sync + 1, &sync, 0);
3305 if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
3306 av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
3311 for (i = 0; i < input_files[sync_file_idx]->nb_streams; i++)
3312 if (check_stream_specifier(input_files[sync_file_idx]->ctx,
3313 input_files[sync_file_idx]->ctx->streams[i], sync) == 1) {
3314 sync_stream_idx = i;
3317 if (i == input_files[sync_file_idx]->nb_streams) {
3318 av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
3319 "match any streams.\n", arg);
3325 if (map[0] == '[') {
3326 /* this mapping refers to lavfi output */
3327 const char *c = map + 1;
3328 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3329 &o->nb_stream_maps, o->nb_stream_maps + 1);
3330 m = &o->stream_maps[o->nb_stream_maps - 1];
3331 m->linklabel = av_get_token(&c, "]");
3332 if (!m->linklabel) {
3333 av_log(NULL, AV_LOG_ERROR, "Invalid output link label: %s.\n", map);
3337 file_idx = strtol(map, &p, 0);
3338 if (file_idx >= nb_input_files || file_idx < 0) {
3339 av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
3343 /* disable some already defined maps */
3344 for (i = 0; i < o->nb_stream_maps; i++) {
3345 m = &o->stream_maps[i];
3346 if (file_idx == m->file_index &&
3347 check_stream_specifier(input_files[m->file_index]->ctx,
3348 input_files[m->file_index]->ctx->streams[m->stream_index],
3349 *p == ':' ? p + 1 : p) > 0)
/* add one map entry per input stream matching the specifier */
3353 for (i = 0; i < input_files[file_idx]->nb_streams; i++) {
3354 if (check_stream_specifier(input_files[file_idx]->ctx, input_files[file_idx]->ctx->streams[i],
3355 *p == ':' ? p + 1 : p) <= 0)
3357 o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
3358 &o->nb_stream_maps, o->nb_stream_maps + 1);
3359 m = &o->stream_maps[o->nb_stream_maps - 1];
3361 m->file_index = file_idx;
3362 m->stream_index = i;
/* sync defaults to the mapped stream itself when no sync spec given */
3364 if (sync_file_idx >= 0) {
3365 m->sync_file_index = sync_file_idx;
3366 m->sync_stream_index = sync_stream_idx;
3368 m->sync_file_index = file_idx;
3369 m->sync_stream_index = i;
3375 av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
/*
 * -attach: record the filename of a file to be attached to the next
 * output file.
 */
3383 static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
3385 o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
3386 &o->nb_attachments, o->nb_attachments + 1);
3387 o->attachments[o->nb_attachments - 1] = arg;
3392 * Parse a metadata specifier in arg.
3393 * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
3394 * @param index for type c/p, chapter/program index is written here
3395 * @param stream_spec for type s, the stream specifier is written here
/* Fatal-logs and (presumably) exits on a malformed specifier. */
3397 static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
/* 's' must be followed by ':' and a stream specifier (may be empty) */
3405 if (*(++arg) && *arg != ':') {
3406 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
3409 *stream_spec = *arg == ':' ? arg + 1 : "";
/* 'c'/'p' take an optional ":index" suffix */
3413 if (*(++arg) == ':')
3414 *index = strtol(++arg, NULL, 0);
3417 av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
/*
 * Implement -map_metadata: copy a metadata dictionary from the input
 * context 'ic' to the output context 'oc' according to the in/out
 * specifiers (global, per-stream, per-chapter, per-program). Existing
 * output entries are preserved (AV_DICT_DONT_OVERWRITE).
 */
3424 static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
3426 AVDictionary **meta_in = NULL;
3427 AVDictionary **meta_out;
3429 char type_in, type_out;
3430 const char *istream_spec = NULL, *ostream_spec = NULL;
3431 int idx_in = 0, idx_out = 0;
3433 parse_meta_type(inspec, &type_in, &idx_in, &istream_spec);
3434 parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);
/* note that a manual mapping was given, so automatic copying is skipped */
3436 if (type_in == 'g' || type_out == 'g')
3437 o->metadata_global_manual = 1;
3438 if (type_in == 's' || type_out == 's')
3439 o->metadata_streams_manual = 1;
3440 if (type_in == 'c' || type_out == 'c')
3441 o->metadata_chapters_manual = 1;
3443 #define METADATA_CHECK_INDEX(index, nb_elems, desc)\
3444 if ((index) < 0 || (index) >= (nb_elems)) {\
3445 av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
/* pick the dictionary for a g/c/p specifier on the given context */
3450 #define SET_DICT(type, meta, context, index)\
3453 meta = &context->metadata;\
3456 METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
3457 meta = &context->chapters[index]->metadata;\
3460 METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
3461 meta = &context->programs[index]->metadata;\
3465 SET_DICT(type_in, meta_in, ic, idx_in);
3466 SET_DICT(type_out, meta_out, oc, idx_out);
3468 /* for input streams choose first matching stream */
3469 if (type_in == 's') {
3470 for (i = 0; i < ic->nb_streams; i++) {
3471 if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
3472 meta_in = &ic->streams[i]->metadata;
3478 av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match any streams.\n", istream_spec);
/* for output streams, copy into every matching stream */
3483 if (type_out == 's') {
3484 for (i = 0; i < oc->nb_streams; i++) {
3485 if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
3486 meta_out = &oc->streams[i]->metadata;
3487 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
3492 av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
/*
 * Look up an encoder or decoder by name and verify its media type;
 * logs a fatal error (and presumably exits) if the codec is unknown
 * or of the wrong type.
 */
3497 static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
3499 const char *codec_string = encoder ? "encoder" : "decoder";
3503 avcodec_find_encoder_by_name(name) :
3504 avcodec_find_decoder_by_name(name);
3506 av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
3509 if (codec->type != type) {
3510 av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
/*
 * Pick the decoder for stream 'st': a user-forced -codec name if one
 * matches this stream, otherwise the default decoder for its codec id.
 */
3516 static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
3518 char *codec_name = NULL;
3520 MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
3522 AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
/* forcing a decoder also forces the codec id used downstream */
3523 st->codec->codec_id = codec->id;
3526 return avcodec_find_decoder(st->codec->codec_id);
3530 * Add all the streams from the given input file to the global
3531 * list of input streams.
3533 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
3537 for (i = 0; i < ic->nb_streams; i++) {
3538 AVStream *st = ic->streams[i];
3539 AVCodecContext *dec = st->codec;
3540 InputStream *ist = av_mallocz(sizeof(*ist));
3545 input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
3546 input_streams[nb_input_streams - 1] = ist;
3549 ist->file_index = nb_input_files;
/* discard by default; the mapping step re-enables used streams */
3551 st->discard = AVDISCARD_ALL;
3552 ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
3554 ist->ts_scale = 1.0;
3555 MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
3557 ist->dec = choose_decoder(o, ic, st);
3559 switch (dec->codec_type) {
3560 case AVMEDIA_TYPE_VIDEO:
/* remember input geometry/format to detect later resampling needs */
3561 ist->resample_height = dec->height;
3562 ist->resample_width = dec->width;
3563 ist->resample_pix_fmt = dec->pix_fmt;
3566 case AVMEDIA_TYPE_AUDIO:
3567 case AVMEDIA_TYPE_DATA:
3568 case AVMEDIA_TYPE_SUBTITLE:
3569 case AVMEDIA_TYPE_ATTACHMENT:
3570 case AVMEDIA_TYPE_UNKNOWN:
/*
 * If 'filename' refers to a local file that already exists and -y was
 * not given, ask the user for confirmation (or fail when stdin is not
 * available for prompting).
 */
3578 static void assert_file_overwrite(const char *filename)
/* only check plain paths: no protocol prefix (except "file:"),
 * allowing for Windows drive letters like "c:" */
3580 if (!file_overwrite &&
3581 (strchr(filename, ':') == NULL || filename[1] == ':' ||
3582 av_strstart(filename, "file:", NULL))) {
3583 if (avio_check(filename, 0) == 0) {
3585 fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
3587 if (!read_yesno()) {
3588 fprintf(stderr, "Not overwriting - exiting\n");
3593 fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
/*
 * -dump_attachment: write an attachment stream's extradata to a file.
 * When no filename is given, falls back to the stream's "filename"
 * metadata tag; fatal error if neither is available.
 */
3600 static void dump_attachment(AVStream *st, const char *filename)
3603 AVIOContext *out = NULL;
3604 AVDictionaryEntry *e;
3606 if (!st->codec->extradata_size) {
3607 av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
3608 nb_input_files - 1, st->index);
3611 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
3612 filename = e->value;
3614 av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
3615 "in stream #%d:%d.\n", nb_input_files - 1, st->index);
/* prompt before clobbering an existing file */
3619 assert_file_overwrite(filename);
3621 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
3622 av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
3627 avio_write(out, st->codec->extradata, st->codec->extradata_size);
/* Open one input file (-i): resolve the forced demuxer, apply command-line
 * demuxer options, open and probe the file, create InputStreams for all of
 * its streams, seek to -ss if requested, and append an InputFile record to
 * the global input_files array.
 * NOTE(review): elided listing — error-exit lines, some declarations and
 * closing braces between the visible statements are not shown. */
3632 static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
3634     AVFormatContext *ic;
3635     AVInputFormat *file_iformat = NULL;
3639     AVDictionary **opts;
3640     int orig_nb_streams;                     // number of streams before avformat_find_stream_info
/* A demuxer was forced with -f: it must exist. */
3643         if (!(file_iformat = av_find_input_format(o->format))) {
3644             av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
/* "-" is shorthand for stdin; also detect explicit pipe:/dev/stdin inputs
 * so interactive prompts can be disabled later. */
3649     if (!strcmp(filename, "-"))
3652     using_stdin |= !strncmp(filename, "pipe:", 5) ||
3653                     !strcmp(filename, "/dev/stdin");
3655     /* get default parameters from command line */
3656     ic = avformat_alloc_context();
3658         print_error(filename, AVERROR(ENOMEM));
/* Forward per-file -ar / -ac / -r / -s / -pix_fmt to the demuxer via
 * format_opts (only the last occurrence of each option counts). */
3661     if (o->nb_audio_sample_rate) {
3662         snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
3663         av_dict_set(&format_opts, "sample_rate", buf, 0);
3665     if (o->nb_audio_channels) {
3666         /* because we set audio_channels based on both the "ac" and
3667          * "channel_layout" options, we need to check that the specified
3668          * demuxer actually has the "channels" option before setting it */
3669         if (file_iformat && file_iformat->priv_class &&
3670             av_opt_find(&file_iformat->priv_class, "channels", NULL, 0,
3671                         AV_OPT_SEARCH_FAKE_OBJ)) {
3672             snprintf(buf, sizeof(buf), "%d",
3673                      o->audio_channels[o->nb_audio_channels - 1].u.i);
3674             av_dict_set(&format_opts, "channels", buf, 0);
3677     if (o->nb_frame_rates) {
3678         av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
3680     if (o->nb_frame_sizes) {
3681         av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
3683     if (o->nb_frame_pix_fmts)
3684         av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);
3686     ic->flags |= AVFMT_FLAG_NONBLOCK;
3687     ic->interrupt_callback = int_cb;
3689     /* open the input file with generic libav function */
3690     err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
3692         print_error(filename, err);
/* Any option left in format_opts was not consumed => unknown option. */
3695     assert_avoptions(format_opts);
3697     /* apply forced codec ids */
3698     for (i = 0; i < ic->nb_streams; i++)
3699         choose_decoder(o, ic, ic->streams[i]);
3701     /* Set AVCodecContext options for avformat_find_stream_info */
3702     opts = setup_find_stream_info_opts(ic, codec_opts);
3703     orig_nb_streams = ic->nb_streams;
3705     /* If not enough info to get the stream parameters, we decode the
3706        first frames to get it. (used in mpeg case for example) */
3707     ret = avformat_find_stream_info(ic, opts);
3709         av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
3710         avformat_close_input(&ic);
3714     timestamp = o->start_time;
3715     /* add the stream start time */
3716     if (ic->start_time != AV_NOPTS_VALUE)
3717         timestamp += ic->start_time;
3719     /* if seeking requested, we execute it */
3720     if (o->start_time != 0) {
3721         ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
/* Seek failure is non-fatal: warn and decode from the beginning. */
3723             av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
3724                    filename, (double)timestamp / AV_TIME_BASE);
3728     /* update the current parameters so that they match the one of the input stream */
3729     add_input_streams(o, ic);
3731     /* dump the file content */
3732     av_dump_format(ic, nb_input_files, filename, 0);
/* Register the new InputFile; ist_index points at the first of this
 * file's entries in the global input_streams array. */
3734     input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
3735     if (!(input_files[nb_input_files - 1] = av_mallocz(sizeof(*input_files[0]))))
3738     input_files[nb_input_files - 1]->ctx        = ic;
3739     input_files[nb_input_files - 1]->ist_index  = nb_input_streams - ic->nb_streams;
3740     input_files[nb_input_files - 1]->ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
3741     input_files[nb_input_files - 1]->nb_streams = ic->nb_streams;
3742     input_files[nb_input_files - 1]->rate_emu   = o->rate_emu;
/* -dump_attachment: write matching streams' extradata to files. */
3744     for (i = 0; i < o->nb_dump_attachment; i++) {
3747         for (j = 0; j < ic->nb_streams; j++) {
3748             AVStream *st = ic->streams[j];
3750             if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
3751                 dump_attachment(st, o->dump_attachment[i].u.str);
/* Free the per-stream option dicts created for find_stream_info. */
3755     for (i = 0; i < orig_nb_streams; i++)
3756         av_dict_free(&opts[i]);
/* Parse the -force_key_frames option: a comma-separated list of timestamps,
 * converted to the encoder time base and stored in ost->forced_kf_pts.
 * NOTE(review): elided listing — the loop counting commas into n and some
 * braces are not shown. */
3763 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3764                                     AVCodecContext *avctx)
/* Count entries (presumably one per comma-separated field). */
3770     for (p = kf; *p; p++)
3773     ost->forced_kf_count = n;
3774     ost->forced_kf_pts   = av_malloc(sizeof(*ost->forced_kf_pts) * n);
3775     if (!ost->forced_kf_pts) {
3776         av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3779     for (i = 0; i < n; i++) {
/* First field starts at kf; subsequent fields follow the previous comma. */
3780         p = i ? strchr(p, ',') + 1 : kf;
3781         t = parse_time_or_die("force_key_frames", p, 1);
/* Rescale from AV_TIME_BASE (microseconds) to the encoder time base. */
3782         ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
/* Read one '\n'- or EOF-terminated line from s into a freshly allocated
 * buffer (via a dynamic AVIO buffer).  Caller owns the returned memory.
 * NOTE(review): elided listing — the avio_w8 body of the loop and the
 * return statement are not shown. */
3786 static uint8_t *get_line(AVIOContext *s)
3792     if (avio_open_dyn_buf(&line) < 0) {
3793         av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
/* Copy bytes until NUL (avio_r8 returns 0 at EOF) or end of line. */
3797     while ((c = avio_r8(s)) && c != '\n')
3800     avio_close_dyn_buf(line, &buf);
/* Locate and open a codec preset file.  Searches, in order, the directories
 * in base[] ($AVCONV_DATADIR first; the i == 1 entry gets "/.avconv"
 * appended, presumably $HOME), trying "<codec>-<preset>.avpreset" first and
 * then "<preset>.avpreset".  Returns the avio_open2 result; *s is the
 * opened context on success.
 * NOTE(review): elided listing — the remaining base[] entries and the
 * ret < 0 retry condition lines are not shown. */
3805 static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
3808     char filename[1000];
3809     const char *base[3] = { getenv("AVCONV_DATADIR"),
/* Keep trying directories while ret is still non-zero (not yet opened). */
3814     for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
3818         snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
3819                  i != 1 ? "" : "/.avconv", codec_name, preset_name);
3820         ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* Fall back to the codec-independent preset name. */
3823             snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
3824                      i != 1 ? "" : "/.avconv", preset_name);
3825             ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
/* Resolve the encoder for an output stream from the per-stream -c/-codec
 * option: no option => guess from the output format/filename; "copy" =>
 * streamcopy; otherwise look the named encoder up (fatal if unknown).
 * NOTE(review): elided listing — the branch structure between the visible
 * lines (the no-codec-name case) is partly missing. */
3831 static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
3833     char *codec_name = NULL;
3835     MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
/* No explicit codec: derive the default codec id for this muxer. */
3837         ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
3838                                                   NULL, ost->st->codec->codec_type);
3839         ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
3840     } else if (!strcmp(codec_name, "copy"))
3841         ost->stream_copy = 1;
3843         ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
3844         ost->st->codec->codec_id = ost->enc->id;
/* Create a new output stream of the given media type in muxer context oc:
 * allocates the AVStream and the OutputStream wrapper, picks the encoder,
 * applies -pre preset files, bitstream filters, codec tag, qscale and
 * global-header flags.  Common code shared by all new_*_stream() helpers.
 * NOTE(review): elided listing — several braces, the preset read loop
 * details and error exits are not shown. */
3848 static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
3851     AVStream *st = avformat_new_stream(oc, NULL);
3852     int idx      = oc->nb_streams - 1, ret = 0;
3853     char *bsf = NULL, *next, *codec_tag = NULL;
3854     AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
3856     char *buf = NULL, *arg = NULL, *preset = NULL;
3857     AVIOContext *s = NULL;
3860         av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
/* Apply a user-provided stream id (-streamid) if one maps to this index. */
3864     if (oc->nb_streams - 1 < o->nb_streamid_map)
3865         st->id = o->streamid_map[oc->nb_streams - 1];
3867     output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
3868                                 nb_output_streams + 1);
3869     if (!(ost = av_mallocz(sizeof(*ost))))
3871     output_streams[nb_output_streams - 1] = ost;
3873     ost->file_index = nb_output_files;
3876     st->codec->codec_type = type;
3877     choose_encoder(o, oc, ost);
/* Filter the global codec_opts down to those applying to this encoder. */
3879         ost->opts  = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
3882     avcodec_get_context_defaults3(st->codec, ost->enc);
3883     st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
/* -pre: read "key=value" lines from a preset file into ost->opts. */
3885     MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
3886     if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
/* Skip blank lines and '#' comments. */
3889             if (!buf[0] || buf[0] == '#') {
3893             if (!(arg = strchr(buf, '='))) {
3894                 av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
/* User-set options win over preset values (DONT_OVERWRITE). */
3898             av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
3900         } while (!s->eof_reached);
3904         av_log(NULL, AV_LOG_FATAL,
3905                "Preset %s specified for stream %d:%d, but could not be opened.\n",
3906                preset, ost->file_index, ost->index);
3910     ost->max_frames = INT64_MAX;
3911     MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
/* -bsf: build the chain of bitstream filters, comma-separated. */
3913     MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
3915         if (next = strchr(bsf, ','))
3917         if (!(bsfc = av_bitstream_filter_init(bsf))) {
3918             av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
3922             bsfc_prev->next = bsfc;
3924             ost->bitstream_filters = bsfc;
/* -tag: numeric value or up to 4 literal characters (read little-endian). */
3930     MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
3932         uint32_t tag = strtol(codec_tag, &next, 0);
3934             tag = AV_RL32(codec_tag);
3935         st->codec->codec_tag = tag;
3938     MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
3939     if (qscale >= 0 || same_quant) {
3940         st->codec->flags |= CODEC_FLAG_QSCALE;
3941         st->codec->global_quality = FF_QP2LAMBDA * qscale;
3944     if (oc->oformat->flags & AVFMT_GLOBALHEADER)
3945         st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
3947     av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
3949     ost->pix_fmts[0] = ost->pix_fmts[1] = PIX_FMT_NONE;
/* Parse a comma-separated list of integer quantizer-matrix coefficients
 * from str into dest.  Fatal error on malformed input.
 * NOTE(review): elided listing — the parsing loop body is not shown. */
3954 static void parse_matrix_coeffs(uint16_t *dest, const char *str)
3957     const char *p = str;
3964             av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
/* Create a video output stream: delegates common setup to
 * new_output_stream(), then (unless streamcopying) applies the per-stream
 * video options: frame rate, size, aspect, pixel format, quant matrices,
 * rc_override, two-pass flags, forced keyframes and filters.
 * NOTE(review): elided listing — error exits, rc_override string advance
 * and several braces are not shown. */
3971 static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
3975     AVCodecContext *video_enc;
3977     ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
3979     video_enc = st->codec;
3981     if (!ost->stream_copy) {
3982         const char *p = NULL;
3983         char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
3984         char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
3985         char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
3988         MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
3989         if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
3990             av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
3994         MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
3995         if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
3996             av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
4000         MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
4001         if (frame_aspect_ratio)
4002             ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);
4004         MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
4005         if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
4006             av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
4009         st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
/* -intra_matrix / -inter_matrix: 64-entry custom quantizer matrices. */
4011         MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
4013             if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
4014                 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
4017             parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
4019         MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
4021             if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
4022                 av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
4025             parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
/* -rc_override: "start,end,q[/...]" triplets; positive q is a fixed
 * qscale, negative is interpreted as a quality factor percentage. */
4028         MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
4029         for (i = 0; p; i++) {
4031             int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
4033                 av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
4036             video_enc->rc_override =
4037                 av_realloc(video_enc->rc_override,
4038                            sizeof(RcOverride) * (i + 1));
4039             video_enc->rc_override[i].start_frame = start;
4040             video_enc->rc_override[i].end_frame   = end;
4042                 video_enc->rc_override[i].qscale         = q;
4043                 video_enc->rc_override[i].quality_factor = 1.0;
4046                 video_enc->rc_override[i].qscale         = 0;
4047                 video_enc->rc_override[i].quality_factor = -q/100.0;
4052         video_enc->rc_override_count = i;
4053         if (!video_enc->rc_initial_buffer_occupancy)
4054             video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
4055         video_enc->intra_dc_precision = intra_dc_precision - 8;
/* Two-pass encoding: set PASS1/PASS2 flags from do_pass. */
4060                 video_enc->flags |= CODEC_FLAG_PASS1;
4062                 video_enc->flags |= CODEC_FLAG_PASS2;
4066         MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
4067         if (forced_key_frames)
4068             parse_forced_key_frames(forced_key_frames, ost, video_enc);
4070         MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
/* -1 means "not specified"; resolved later from the input stream. */
4072         ost->top_field_first = -1;
4073         MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
4075         MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
4077             ost->avfilter = av_strdup(filters);
4079     MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
/* Create an audio output stream: common setup via new_output_stream(),
 * then (unless streamcopying) apply per-stream -ac, -sample_fmt and -ar.
 * NOTE(review): elided listing — error exits and the return are not shown. */
4085 static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
4089     AVCodecContext *audio_enc;
4091     ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
4094     audio_enc = st->codec;
4095     audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
4097     if (!ost->stream_copy) {
4098         char *sample_fmt = NULL;
4100         MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
4102         MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
4104             (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
4105             av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
4109         MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
/* Create a data output stream.  Data streams can only be streamcopied;
 * requesting an encoder is a fatal error. */
4115 static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
4119     ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
4120     if (!ost->stream_copy) {
4121         av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
/* Create an attachment output stream; attachments are always streamcopied
 * (the payload lives in extradata, there is nothing to encode). */
4128 static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
4130     OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
4131     ost->stream_copy = 1;
/* Create a subtitle output stream; only common setup plus the codec type,
 * no subtitle-specific options at this point. */
4135 static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
4139     AVCodecContext *subtitle_enc;
4141     ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
4143     subtitle_enc = st->codec;
4145     subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
4150 /* arg format is "output-stream-index:streamid-value". */
/* -streamid handler: records "index:value" into o->streamid_map, growing
 * the map as needed; applied later in new_output_stream().
 * NOTE(review): elided listing — the p == NULL check and return are not
 * shown. */
4151 static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
/* Work on a local copy so the ':' can be cut out of the index part. */
4157     av_strlcpy(idx_str, arg, sizeof(idx_str));
4158     p = strchr(idx_str, ':');
4160         av_log(NULL, AV_LOG_FATAL,
4161                "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
4166     idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
4167     o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
4168     o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
/* Copy the chapters of an input file into an output file, shifting them by
 * the output start time / input ts offset and clipping them to the output
 * recording time.  Chapters entirely outside the output range are skipped.
 * Metadata is copied only when copy_metadata is set.
 * NOTE(review): elided listing — `continue` statements, some braces and
 * the final return are not shown. */
4172 static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
4174     AVFormatContext *is = ifile->ctx;
4175     AVFormatContext *os = ofile->ctx;
4178     for (i = 0; i < is->nb_chapters; i++) {
4179         AVChapter *in_ch = is->chapters[i], *out_ch;
/* Output start time expressed in this chapter's time base. */
4180         int64_t ts_off   = av_rescale_q(ofile->start_time - ifile->ts_offset,
4181                                        AV_TIME_BASE_Q, in_ch->time_base);
4182         int64_t rt       = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
4183                            av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
/* Chapter ends before the output starts, or starts after it ends: skip. */
4186         if (in_ch->end < ts_off)
4188         if (rt != INT64_MAX && in_ch->start > rt + ts_off)
4191         out_ch = av_mallocz(sizeof(AVChapter));
4193             return AVERROR(ENOMEM);
4195         out_ch->id        = in_ch->id;
4196         out_ch->time_base = in_ch->time_base;
/* Shift into the output time range and clamp at both ends. */
4197         out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
4198         out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
4201             av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
4204         os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
4206             return AVERROR(ENOMEM);
4207         os->chapters[os->nb_chapters - 1] = out_ch;
/* Create an output stream fed directly by a complex filtergraph output pad
 * (no input stream: source_index = -1), wire the stream and the filter
 * together, and configure the filter.  Only video pads are supported.
 * NOTE(review): elided listing — exit calls and some braces are not shown. */
4212 static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
4213                                AVFormatContext *oc)
4217     if (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type != AVMEDIA_TYPE_VIDEO) {
4218         av_log(NULL, AV_LOG_FATAL, "Only video filters are supported currently.\n");
4222     ost               = new_video_stream(o, oc);
4223     ost->source_index = -1;
4224     ost->filter       = ofilter;
/* A filter-fed stream cannot be streamcopied — there is no input packet
 * stream to copy from. */
4228     if (ost->stream_copy) {
4229         av_log(NULL, AV_LOG_ERROR, "Streamcopy requested for output stream %d:%d, "
4230                "which is fed from a complex filtergraph. Filtering and streamcopy "
4231                "cannot be used together.\n", ost->file_index, ost->index);
4235     if (configure_output_filter(ofilter->graph, ofilter, ofilter->out_tmp) < 0) {
4236         av_log(NULL, AV_LOG_FATAL, "Error configuring filter.\n");
4239     avfilter_inout_free(&ofilter->out_tmp);
/* Open one output file: guess/force the muxer, create output streams —
 * either automatically ("best" stream of each type), from -map options, or
 * from complex filtergraph outputs — handle -attach, register the
 * OutputFile, open the actual file, and apply metadata/chapter mapping.
 * NOTE(review): elided listing — many error exits, braces and a few
 * statements between the visible lines are not shown. */
4242 static void opt_output_file(void *optctx, const char *filename)
4244     OptionsContext *o = optctx;
4245     AVFormatContext *oc;
4247     AVOutputFormat *file_oformat;
/* Complex filtergraphs (-filter_complex) must be set up before mapping. */
4251         if (configure_complex_filters() < 0) {
4252             av_log(NULL, AV_LOG_FATAL, "Error configuring filters.\n");
4256     if (!strcmp(filename, "-"))
4259     oc = avformat_alloc_context();
4261         print_error(filename, AVERROR(ENOMEM));
/* -f given: the named muxer must exist; otherwise guess from filename. */
4266         file_oformat = av_guess_format(o->format, NULL, NULL);
4267         if (!file_oformat) {
4268             av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
4272         file_oformat = av_guess_format(NULL, filename, NULL);
4273         if (!file_oformat) {
4274             av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
4280     oc->oformat = file_oformat;
4281     oc->interrupt_callback = int_cb;
4282     av_strlcpy(oc->filename, filename, sizeof(oc->filename));
4284     /* create streams for all unlabeled output pads */
4285     for (i = 0; i < nb_filtergraphs; i++) {
4286         FilterGraph *fg = filtergraphs[i];
4287         for (j = 0; j < fg->nb_outputs; j++) {
4288             OutputFilter *ofilter = fg->outputs[j];
/* Labeled pads are handled via -map; only unlabeled ones here. */
4290             if (!ofilter->out_tmp || ofilter->out_tmp->name)
/* A filter output of a given type disables automatic stream selection
 * for that type below. */
4293             switch (ofilter->out_tmp->filter_ctx->output_pads[ofilter->out_tmp->pad_idx].type) {
4294             case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
4295             case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
4296             case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
4298             init_output_filter(ofilter, o, oc);
4302     if (!o->nb_stream_maps) {
4303         /* pick the "best" stream of each type */
4304 #define NEW_STREAM(type, index)\
4306             ost = new_ ## type ## _stream(o, oc);\
4307             ost->source_index = index;\
4308             ost->sync_ist     = input_streams[index];\
4309             input_streams[index]->discard = 0;\
4310             input_streams[index]->st->discard = AVDISCARD_NONE;\
4313         /* video: highest resolution */
4314         if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
4315             int area = 0, idx = -1;
4316             for (i = 0; i < nb_input_streams; i++) {
4317                 ist = input_streams[i];
4318                 if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
4319                     ist->st->codec->width * ist->st->codec->height > area) {
4320                     area = ist->st->codec->width * ist->st->codec->height;
4324             NEW_STREAM(video, idx);
4327         /* audio: most channels */
4328         if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
4329             int channels = 0, idx = -1;
4330             for (i = 0; i < nb_input_streams; i++) {
4331                 ist = input_streams[i];
4332                 if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
4333                     ist->st->codec->channels > channels) {
4334                     channels = ist->st->codec->channels;
4338             NEW_STREAM(audio, idx);
4341         /* subtitles: pick first */
4342         if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
4343             for (i = 0; i < nb_input_streams; i++)
4344                 if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
4345                     NEW_STREAM(subtitle, i);
4349         /* do something with data? */
/* Explicit -map options: each entry is either a filtergraph output label
 * or an input file:stream reference. */
4351         for (i = 0; i < o->nb_stream_maps; i++) {
4352             StreamMap *map = &o->stream_maps[i];
4357             if (map->linklabel) {
4359                 OutputFilter *ofilter = NULL;
4362                 for (j = 0; j < nb_filtergraphs; j++) {
4363                     fg = filtergraphs[j];
4364                     for (k = 0; k < fg->nb_outputs; k++) {
4365                         AVFilterInOut *out = fg->outputs[k]->out_tmp;
4366                         if (out && !strcmp(out->name, map->linklabel)) {
4367                             ofilter = fg->outputs[k];
4374                     av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
4375                            "in any defined filter graph.\n", map->linklabel);
4378                 init_output_filter(ofilter, o, oc);
4380                 ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
4381                 switch (ist->st->codec->codec_type) {
4382                 case AVMEDIA_TYPE_VIDEO:    ost = new_video_stream(o, oc);    break;
4383                 case AVMEDIA_TYPE_AUDIO:    ost = new_audio_stream(o, oc);    break;
4384                 case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
4385                 case AVMEDIA_TYPE_DATA:     ost = new_data_stream(o, oc);     break;
4386                 case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
4388                     av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
4389                            map->file_index, map->stream_index);
4393                 ost->source_index = input_files[map->file_index]->ist_index + map->stream_index;
4394                 ost->sync_ist     = input_streams[input_files[map->sync_file_index]->ist_index +
4395                                                map->sync_stream_index];
4397                 ist->st->discard = AVDISCARD_NONE;
4402     /* handle attached files */
4403     for (i = 0; i < o->nb_attachments; i++) {
4405         uint8_t *attachment;
4409         if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
4410             av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
4414         if ((len = avio_size(pb)) <= 0) {
4415             av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
4419         if (!(attachment = av_malloc(len))) {
4420             av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
4424         avio_read(pb, attachment, len);
/* Attached files are encoded (extradata written), not streamcopied. */
4426         ost = new_attachment_stream(o, oc);
4427         ost->stream_copy               = 0;
4428         ost->source_index              = -1;
4429         ost->attachment_filename       = o->attachments[i];
4430         ost->st->codec->extradata      = attachment;
4431         ost->st->codec->extradata_size = len;
/* Default the "filename" tag to the basename of the attachment path. */
4433         p = strrchr(o->attachments[i], '/');
4434         av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
/* Register the OutputFile record for this muxer context. */
4438     output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
4439     if (!(output_files[nb_output_files - 1] = av_mallocz(sizeof(*output_files[0]))))
4442     output_files[nb_output_files - 1]->ctx            = oc;
4443     output_files[nb_output_files - 1]->ost_index      = nb_output_streams - oc->nb_streams;
4444     output_files[nb_output_files - 1]->recording_time = o->recording_time;
4445     if (o->recording_time != INT64_MAX)
4446         oc->duration = o->recording_time;
4447     output_files[nb_output_files - 1]->start_time     = o->start_time;
4448     output_files[nb_output_files - 1]->limit_filesize = o->limit_filesize;
4449     av_dict_copy(&output_files[nb_output_files - 1]->opts, format_opts, 0);
4451     /* check filename in case of an image number is expected */
4452     if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
4453         if (!av_filename_number_test(oc->filename)) {
4454             print_error(oc->filename, AVERROR(EINVAL));
4459     if (!(oc->oformat->flags & AVFMT_NOFILE)) {
4460         /* test if it already exists to avoid losing precious files */
4461         assert_file_overwrite(filename);
4464         if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
4465                               &oc->interrupt_callback,
4466                               &output_files[nb_output_files - 1]->opts)) < 0) {
4467             print_error(filename, err);
4472     if (o->mux_preload) {
4474         snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
4475         av_dict_set(&output_files[nb_output_files - 1]->opts, "preload", buf, 0);
4477     oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
4478     oc->flags |= AVFMT_FLAG_NONBLOCK;
/* -map_metadata: "infile_index[:specifier]" per mapping. */
4481     for (i = 0; i < o->nb_metadata_map; i++) {
4483         int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
4485         if (in_file_index < 0)
4487         if (in_file_index >= nb_input_files) {
4488             av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
4491         copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index]->ctx, o);
/* -map_chapters: INT_MAX sentinel means "first input that has chapters". */
4495     if (o->chapters_input_file >= nb_input_files) {
4496         if (o->chapters_input_file == INT_MAX) {
4497             /* copy chapters from the first input file that has them*/
4498             o->chapters_input_file = -1;
4499             for (i = 0; i < nb_input_files; i++)
4500                 if (input_files[i]->ctx->nb_chapters) {
4501                     o->chapters_input_file = i;
4505             av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
4506                    o->chapters_input_file);
4510     if (o->chapters_input_file >= 0)
4511         copy_chapters(input_files[o->chapters_input_file], output_files[nb_output_files - 1],
4512                       !o->metadata_chapters_manual);
4514     /* copy global metadata by default */
4515     if (!o->metadata_global_manual && nb_input_files)
4516         av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
4517                      AV_DICT_DONT_OVERWRITE);
4518     if (!o->metadata_streams_manual)
4519         for (i = output_files[nb_output_files - 1]->ost_index; i < nb_output_streams; i++) {
4521             if (output_streams[i]->source_index < 0)         /* this is true e.g. for attached files */
4523             ist = input_streams[output_streams[i]->source_index];
4524             av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
4527     /* process manually set metadata */
4528     for (i = 0; i < o->nb_metadata; i++) {
4531         const char *stream_spec;
4532         int index = 0, j, ret;
/* Each -metadata argument must be "key=value"; empty value deletes. */
4534         val = strchr(o->metadata[i].u.str, '=');
4536             av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
4537                    o->metadata[i].u.str);
4542         parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
/* Stream-type target: apply to every stream matching the specifier. */
4544             for (j = 0; j < oc->nb_streams; j++) {
4545                 if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
4546                     av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
/* NOTE(review): debug printf left in — presumably a leftover; stderr
 * av_log would be more consistent with the rest of the file. */
4550                 printf("ret %d, stream_spec %s\n", ret, stream_spec);
4558                 if (index < 0 || index >= oc->nb_chapters) {
4559                     av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
4562                 m = &oc->chapters[index]->metadata;
4565                 av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
4568             av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
4575 /* same option as mencoder */
/* -pass handler: stores 1 or 2 into the global do_pass. */
4576 static int opt_pass(const char *opt, const char *arg)
4578     do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
/* Return the user CPU time consumed by this process, in microseconds.
 * Uses getrusage() where available, GetProcessTimes() on Windows
 * (FILETIME units of 100 ns, hence / 10), and falls back to wall-clock
 * av_gettime() otherwise. */
4582 static int64_t getutime(void)
4585     struct rusage rusage;
4587     getrusage(RUSAGE_SELF, &rusage);
4588     return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4589 #elif HAVE_GETPROCESSTIMES
4591     FILETIME c, e, k, u;
4592     proc = GetCurrentProcess();
4593     GetProcessTimes(proc, &c, &e, &k, &u);
4594     return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4596     return av_gettime();
/* Return the peak memory usage of this process in bytes: ru_maxrss (in
 * KiB, hence * 1024) via getrusage() where available, PeakPagefileUsage
 * via GetProcessMemoryInfo() on Windows.  NOTE(review): elided listing —
 * the final fallback branch is not shown. */
4600 static int64_t getmaxrss(void)
4602 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4603     struct rusage rusage;
4604     getrusage(RUSAGE_SELF, &rusage);
4605     return (int64_t)rusage.ru_maxrss * 1024;
4606 #elif HAVE_GETPROCESSMEMORYINFO
4608     PROCESS_MEMORY_COUNTERS memcounters;
4609     proc = GetCurrentProcess();
4610     memcounters.cb = sizeof(memcounters);
4611     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4612     return memcounters.PeakPagefileUsage;
/* -aq handler: alias for the per-stream "q:a" option. */
4618 static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
4620     return parse_option(o, "q:a", arg, options);
/* Print the one-line usage banner. */
4623 static void show_usage(void)
4625     printf("Hyper fast Audio and Video encoder\n");
4626     printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
/* -h handler: print usage plus the categorized option listing (main,
 * advanced, per-media-type) followed by the generic AVOptions of the codec,
 * format and swscale classes.  The two flag arguments to each
 * show_help_options() call are the mask and the required value.
 * NOTE(review): elided listing — some mask arguments between the visible
 * lines are not shown. */
4630 static void show_help(void)
4632     int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
/* Route the help output through the plain-help log callback. */
4633     av_log_set_callback(log_callback_help);
4635     show_help_options(options, "Main options:\n",
4636                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
4637     show_help_options(options, "\nAdvanced options:\n",
4638                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
4640     show_help_options(options, "\nVideo options:\n",
4641                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4643     show_help_options(options, "\nAdvanced Video options:\n",
4644                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4645                       OPT_VIDEO | OPT_EXPERT);
4646     show_help_options(options, "\nAudio options:\n",
4647                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4649     show_help_options(options, "\nAdvanced Audio options:\n",
4650                       OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
4651                       OPT_AUDIO | OPT_EXPERT);
4652     show_help_options(options, "\nSubtitle options:\n",
4653                       OPT_SUBTITLE | OPT_GRAB,
4655     show_help_options(options, "\nAudio/Video grab options:\n",
4659     show_help_children(avcodec_get_class(), flags);
4660     show_help_children(avformat_get_class(), flags);
4661     show_help_children(sws_get_class(), flags);
/* -target handler: configure a full preset for vcd/svcd/dvd/dv output.
 * The TV norm (PAL/NTSC/FILM) may be given as a prefix ("pal-vcd"),
 * otherwise it is guessed from the frame rates of the already-opened
 * input files.  frame_rates[] is indexed by the norm enum.
 * NOTE(review): elided listing — arg advancement after the prefix match,
 * the fr-based norm assignments, and many closing braces are not shown. */
4664 static int opt_target(OptionsContext *o, const char *opt, const char *arg)
4666     enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
4667     static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
4669     if (!strncmp(arg, "pal-", 4)) {
4672     } else if (!strncmp(arg, "ntsc-", 5)) {
4675     } else if (!strncmp(arg, "film-", 5)) {
4679         /* Try to determine PAL/NTSC by peeking in the input files */
4680         if (nb_input_files) {
4682             for (j = 0; j < nb_input_files; j++) {
4683                 for (i = 0; i < input_files[j]->nb_streams; i++) {
4684                     AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
4685                     if (c->codec_type != AVMEDIA_TYPE_VIDEO)
/* Frame rate in millihertz; 25000 => PAL, 29970/23976 => NTSC/film. */
4687                     fr = c->time_base.den * 1000 / c->time_base.num;
4691                     } else if ((fr == 29970) || (fr == 23976)) {
4696                 if (norm != UNKNOWN)
4700         if (norm != UNKNOWN)
4701             av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
4704     if (norm == UNKNOWN) {
4705         av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
4706         av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
4707         av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
/* VCD: MPEG-1 video + MP2 audio at fixed CBR parameters. */
4711     if (!strcmp(arg, "vcd")) {
4712         opt_video_codec(o, "c:v", "mpeg1video");
4713         opt_audio_codec(o, "c:a", "mp2");
4714         parse_option(o, "f", "vcd", options);
4716         parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
4717         parse_option(o, "r", frame_rates[norm], options);
4718         opt_default("g", norm == PAL ? "15" : "18");
4720         opt_default("b", "1150000");
4721         opt_default("maxrate", "1150000");
4722         opt_default("minrate", "1150000");
4723         opt_default("bufsize", "327680"); // 40*1024*8;
4725         opt_default("b:a", "224000");
4726         parse_option(o, "ar", "44100", options);
4727         parse_option(o, "ac", "2", options);
4729         opt_default("packetsize", "2324");
4730         opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
4732         /* We have to offset the PTS, so that it is consistent with the SCR.
4733            SCR starts at 36000, but the first two packs contain only padding
4734            and the first pack from the other stream, respectively, may also have
4735            been written before.
4736            So the real data starts at SCR 36000+3*1200. */
4737         o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
/* SVCD: MPEG-2 video, scan_offset flag, higher bitrates. */
4738     } else if (!strcmp(arg, "svcd")) {
4740         opt_video_codec(o, "c:v", "mpeg2video");
4741         opt_audio_codec(o, "c:a", "mp2");
4742         parse_option(o, "f", "svcd", options);
4744         parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
4745         parse_option(o, "r", frame_rates[norm], options);
4746         opt_default("g", norm == PAL ? "15" : "18");
4748         opt_default("b", "2040000");
4749         opt_default("maxrate", "2516000");
4750         opt_default("minrate", "0"); // 1145000;
4751         opt_default("bufsize", "1835008"); // 224*1024*8;
4752         opt_default("flags", "+scan_offset");
4755         opt_default("b:a", "224000");
4756         parse_option(o, "ar", "44100", options);
4758         opt_default("packetsize", "2324");
/* DVD: MPEG-2 video + AC-3 audio. */
4760     } else if (!strcmp(arg, "dvd")) {
4762         opt_video_codec(o, "c:v", "mpeg2video");
4763         opt_audio_codec(o, "c:a", "ac3");
4764         parse_option(o, "f", "dvd", options);
4766         parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4767         parse_option(o, "r", frame_rates[norm], options);
4768         opt_default("g", norm == PAL ? "15" : "18");
4770         opt_default("b", "6000000");
4771         opt_default("maxrate", "9000000");
4772         opt_default("minrate", "0"); // 1500000;
4773         opt_default("bufsize", "1835008"); // 224*1024*8;
4775         opt_default("packetsize", "2048");  // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
4776         opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
4778         opt_default("b:a", "448000");
4779         parse_option(o, "ar", "48000", options);
/* DV / DV50: raw DV muxer; pixel format depends on variant and norm. */
4781     } else if (!strncmp(arg, "dv", 2)) {
4783         parse_option(o, "f", "dv", options);
4785         parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
4786         parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
4787                           norm == PAL ? "yuv420p" : "yuv411p", options);
4788         parse_option(o, "r", frame_rates[norm], options);
4790         parse_option(o, "ar", "48000", options);
4791         parse_option(o, "ac", "2", options);
4794         av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
4795         return AVERROR(EINVAL);
/* -vstats_file handler: replace the global vstats filename with a copy of
 * arg (freeing any previous value). */
4800 static int opt_vstats_file(const char *opt, const char *arg)
4802     av_free (vstats_filename);
4803     vstats_filename = av_strdup (arg);
/* "-vstats": enable video statistics logging with an auto-generated,
 * time-stamped filename (vstats_HHMMSS.log), then delegate the actual
 * setup to opt_vstats_file(). */
4807 static int opt_vstats(const char *opt, const char *arg)
4810 time_t today2 = time(NULL);
4811 struct tm *today = localtime(&today2); /* not thread-safe, but option parsing is single-threaded */
/* NOTE(review): `filename` is declared on a line elided from this excerpt
 * — presumably a local char buffer; confirm against the full source. */
4813 snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
4815 return opt_vstats_file(opt, filename);
/* "-vframes": compatibility alias forwarding to the generic per-stream
 * "frames:v" option. */
4818 static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
4820 return parse_option(o, "frames:v", arg, options);
/* "-aframes": compatibility alias forwarding to the generic per-stream
 * "frames:a" option. */
4823 static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
4825 return parse_option(o, "frames:a", arg, options);
/* "-dframes": compatibility alias forwarding to the generic per-stream
 * "frames:d" option. */
4828 static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
4830 return parse_option(o, "frames:d", arg, options);
/* "-vtag": compatibility alias forwarding to the generic per-stream
 * "tag:v" (fourcc/tag) option. */
4833 static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
4835 return parse_option(o, "tag:v", arg, options);
/* "-atag": compatibility alias forwarding to the generic per-stream
 * "tag:a" (fourcc/tag) option. */
4838 static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
4840 return parse_option(o, "tag:a", arg, options);
/* "-stag": compatibility alias forwarding to the generic per-stream
 * "tag:s" (fourcc/tag) option. */
4843 static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
4845 return parse_option(o, "tag:s", arg, options);
/* "-vf": compatibility alias forwarding to the generic per-stream
 * "filter:v" option. */
4848 static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
4850 return parse_option(o, "filter:v", arg, options);
/* "-vsync": select the video sync method by symbolic name ("cfr",
 * "vfr", "passthrough").  If no name matched, video_sync_method is
 * still VSYNC_AUTO, so arg is re-parsed as a bare number (legacy
 * numeric form) constrained to [VSYNC_AUTO, VSYNC_VFR]. */
4853 static int opt_vsync(const char *opt, const char *arg)
4855 if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
4856 else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
4857 else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;
4859 if (video_sync_method == VSYNC_AUTO)
4860 video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
/* "-deinterlace": deprecated option; emits a warning pointing users at
 * the yadif filter.  (Any further effect is on lines elided from this
 * excerpt.) */
4864 static int opt_deinterlace(const char *opt, const char *arg)
4866 av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
/* "-cpuflags": parse a CPU-flags specification and install it as the
 * global CPU flag mask, overriding runtime CPU feature detection. */
4871 static int opt_cpuflags(const char *opt, const char *arg)
4873 int flags = av_parse_cpu_flags(arg);
/* NOTE(review): a negative return from av_parse_cpu_flags is presumably
 * rejected on a line elided from this excerpt — confirm in full source. */
4878 av_set_cpu_flags_mask(flags);
/* Pre-scan argv for "-cpuflags" and apply it immediately, so the CPU
 * flag mask is in force before any other option triggers work that
 * depends on CPU feature detection. */
4882 static void parse_cpuflags(int argc, char **argv, const OptionDef *options)
4884 int idx = locate_option(argc, argv, options, "cpuflags");
4885 if (idx && argv[idx + 1])
4886 opt_cpuflags("cpuflags", argv[idx + 1]);
/* "-channel_layout[:spec]": resolve a named channel layout, pass its
 * numeric value through opt_default() under the original option name,
 * and additionally synthesize a matching "ac[:spec]" option so the
 * channel count agrees with the layout.
 * NOTE(review): the declarations of `layout`, `stream_str` and `ac_str`
 * are on lines elided from this excerpt, as are several if/cleanup
 * lines — consult the full source before modifying. */
4889 static int opt_channel_layout(OptionsContext *o, const char *opt, const char *arg)
4891 char layout_str[32];
4894 int ret, channels, ac_str_size;
4897 layout = av_get_channel_layout(arg);
4899 av_log(NULL, AV_LOG_ERROR, "Unknown channel layout: %s\n", arg);
4900 return AVERROR(EINVAL);
4902 snprintf(layout_str, sizeof(layout_str), "%"PRIu64, layout);
4903 ret = opt_default(opt, layout_str);
4907 /* set 'ac' option based on channel layout */
4908 channels = av_get_channel_layout_nb_channels(layout);
4909 snprintf(layout_str, sizeof(layout_str), "%d", channels);
/* keep any ":stream_specifier" suffix from the original option name */
4910 stream_str = strchr(opt, ':');
4911 ac_str_size = 3 + (stream_str ? strlen(stream_str) : 0);
4912 ac_str = av_mallocz(ac_str_size);
4914 return AVERROR(ENOMEM);
4915 av_strlcpy(ac_str, "ac", 3);
4917 av_strlcat(ac_str, stream_str, ac_str_size);
4918 ret = parse_option(o, ac_str, layout_str, options);
/* "-filter_complex": append a new FilterGraph entry holding the given
 * graph description string; the graph itself is configured later
 * during transcoding setup. */
4924 static int opt_filter_complex(const char *opt, const char *arg)
4926 filtergraphs = grow_array(filtergraphs, sizeof(*filtergraphs),
4927 &nb_filtergraphs, nb_filtergraphs + 1);
4928 if (!(filtergraphs[nb_filtergraphs - 1] = av_mallocz(sizeof(*filtergraphs[0]))))
4929 return AVERROR(ENOMEM);
4930 filtergraphs[nb_filtergraphs - 1]->index = nb_filtergraphs - 1;
4931 filtergraphs[nb_filtergraphs - 1]->graph_desc = arg;
/*
 * Main command-line option table.  OPT_FUNC2 entries dispatch to the
 * opt_* handlers above; OPT_OFFSET/OPT_SPEC entries store directly into
 * OptionsContext fields via OFFSET().
 *
 * Fix: the "copyts" and "copytb" entries had their argument pointers
 * mangled by an HTML-entity pass ("&copy" -> "©"); restored to
 * &copy_ts / &copy_tb so the table compiles again.
 */
4935 #define OFFSET(x) offsetof(OptionsContext, x)
4936 static const OptionDef options[] = {
4938 #include "cmdutils_common_opts.h"
4939 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
4940 { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
4941 { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
4942 { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4943 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
4944 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
4945 { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
4946 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
4947 "outfile[,metadata]:infile[,metadata]" },
4948 { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
4949 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
4950 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
4951 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
4952 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
4953 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
4954 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
4955 { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
4956 { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
4957 "add timings for benchmarking" },
4958 { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
4959 { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
4960 "dump each input packet" },
4961 { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
4962 "when dumping packets, also dump the payload" },
4963 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
4964 { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
4965 { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
4966 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
4967 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
4968 { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
4969 { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
4970 { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
4971 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
4972 { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
4973 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
4974 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
4975 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
4976 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4977 { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
4978 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
4979 { "filter_complex", HAS_ARG | OPT_EXPERT, {(void*)opt_filter_complex}, "create a complex filtergraph", "graph_description" },
4980 { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
4981 { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
4982 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
4983 { "cpuflags", HAS_ARG | OPT_EXPERT, {(void*)opt_cpuflags}, "set CPU flags mask", "mask" },
/* video options */
4986 { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
4987 { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
4988 { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
4989 { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
4990 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
4991 { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
4992 { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
4993 { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
4994 { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
4995 { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
4996 "use same quantizer as source (implies VBR)" },
4997 { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
4998 { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
4999 { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
5000 "this option is deprecated, use the yadif filter instead" },
5001 { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
5002 { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
5003 { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
5004 { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
5005 { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
5006 { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
5007 { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
5008 { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
5009 { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
5010 { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
5011 { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
5012 { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
/* audio options */
5015 { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
5016 { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
5017 { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
5018 { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
5019 { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
5020 { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
5021 { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
5022 { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
5023 { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
5024 { "channel_layout", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_channel_layout}, "set channel layout", "layout" },
5026 /* subtitle options */
5027 { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
5028 { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
5029 { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
/* grab options */
5032 { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
/* muxer options */
5035 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
5036 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
5038 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
5040 /* data codec support */
5041 { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
5043 { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
5047 int main(int argc, char **argv)
5049 OptionsContext o = { 0 };
5054 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5055 parse_loglevel(argc, argv, options);
5057 avcodec_register_all();
5059 avdevice_register_all();
5061 avfilter_register_all();
5063 avformat_network_init();
5067 parse_cpuflags(argc, argv, options);
5070 parse_options(&o, argc, argv, options, opt_output_file);
5072 if (nb_output_files <= 0 && nb_input_files == 0) {
5074 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5078 /* file converter / grab */
5079 if (nb_output_files <= 0) {
5080 fprintf(stderr, "At least one output file must be specified\n");
5084 if (nb_input_files == 0) {
5085 av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
5090 if (transcode() < 0)
5092 ti = getutime() - ti;
5094 int maxrss = getmaxrss() / 1024;
5095 printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);