2 * This file is part of Libav.
4 * Libav is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * Libav is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with Libav; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "libavutil/avassert.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/log.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/samplefmt.h"
/* Per-instance state shared by the trim (video) and atrim (audio) filters.
 * NOTE(review): this chunk is elided — fields referenced by the callbacks
 * below (duration, duration_tb, first_pts, next_pts, nb_frames, nb_samples,
 * eof, got_output) are declared in lines not visible here; confirm against
 * the full file. */
36 typedef struct TrimContext {
/* User options: start/end in seconds (DBL_MAX means "unset"). */
43     double start_time, end_time;
/* User options: first frame to pass / first frame to drop again (video). */
44     int64_t start_frame, end_frame;
46      * in the link timebase for video,
47      * in 1/samplerate for audio
/* Trim window expressed in timestamps; AV_NOPTS_VALUE means "unset". */
49     int64_t start_pts, end_pts;
/* Trim window expressed in audio sample counts (atrim only). */
50     int64_t start_sample, end_sample;
53      * number of video frames that arrived on this filter so far
57      * number of audio samples that arrived on this filter so far
61      * timestamp of the first frame in the output, in the timebase units
65      * duration in the timebase units
/* Filter init callback: mark the first output timestamp as unknown until the
 * first timestamped frame arrives.  (The return statement and closing brace
 * fall outside this visible chunk.) */
75 static int init(AVFilterContext *ctx)
77     TrimContext *s = ctx->priv;
79     s->first_pts = AV_NOPTS_VALUE;
/* Input-link configuration: convert the seconds-based start/end/duration
 * options into timestamp units — the link timebase for video, 1/sample_rate
 * for audio — merging them with any explicitly supplied *_pts values so the
 * widest requested window wins (earliest start, latest end). */
84 static int config_input(AVFilterLink *inlink)
86     AVFilterContext *ctx = inlink->dst;
87     TrimContext *s = ctx->priv;
88     AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
89                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };
/* DBL_MAX is the "unset" sentinel for the seconds-based options. */
91     if (s->start_time != DBL_MAX) {
/* NOTE(review): lrintf() rounds through float and can lose precision for
 * large timestamps; llrint() on the double would be safer — verify upstream. */
92         int64_t start_pts = lrintf(s->start_time / av_q2d(tb));
/* Keep whichever start is earlier when both start_time and start_pts set. */
93         if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
94             s->start_pts = start_pts;
96     if (s->end_time != DBL_MAX) {
97         int64_t end_pts = lrintf(s->end_time / av_q2d(tb));
/* Keep whichever end is later when both end_time and end_pts are set. */
98         if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
/* Maximum output duration, converted into the same timestamp units. */
102     s->duration_tb = lrintf(s->duration / av_q2d(tb));
/* Output request callback: keep pulling frames from the input until the
 * filter_frame callback lets one through (got_output is presumably set
 * there — the assignment is outside this visible chunk). */
107 static int request_frame(AVFilterLink *outlink)
109     AVFilterContext *ctx = outlink->src;
110     TrimContext *s = ctx->priv;
114     while (!s->got_output) {
118         ret = ff_request_frame(ctx->inputs[0]);
/* Offset of a TrimContext field, for the AVOption tables below. */
126 #define OFFSET(x) offsetof(TrimContext, x)
/* Options shared by trim and atrim.  Times are in seconds (DBL_MAX means
 * "unset"); *_pts values are in the input link's timebase (AV_NOPTS_VALUE
 * means "unset").  FLAGS is defined separately before each options table. */
127 #define COMMON_OPTS \
128     { "start", "Timestamp in seconds of the first frame that " \
129         "should be passed", OFFSET(start_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
130     { "end", "Timestamp in seconds of the first frame that " \
131         "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
132     { "start_pts", "Timestamp of the first frame that should be " \
133         " passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
134     { "end_pts", "Timestamp of the first frame that should be " \
135         "dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
136     { "duration", "Maximum duration of the output in seconds", OFFSET(duration), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
139 #if CONFIG_TRIM_FILTER
/* Video filter_frame callback: pass the frame downstream if it falls inside
 * the [start, end) window (by frame number, pts, or elapsed duration), drop
 * it otherwise.  Several lines (pts-less drop path, EOF/got_output updates,
 * closing braces) are elided from this chunk. */
140 static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
142     AVFilterContext *ctx = inlink->dst;
143     TrimContext *s = ctx->priv;
146     /* drop everything if EOF has already been returned */
148         av_frame_free(&frame);
/* Start gate: frame passes once either threshold has been reached. */
152     if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
154         if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
156         if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
157             frame->pts >= s->start_pts)
/* Remember the first output timestamp as the reference for "duration". */
163     if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
164         s->first_pts = frame->pts;
/* End gate: frame still passes while all configured limits permit it. */
166     if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
169         if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
171         if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
172             frame->pts < s->end_pts)
174         if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
175             frame->pts - s->first_pts < s->duration_tb)
/* Inside the window: forward the frame to the output. */
187     return ff_filter_frame(ctx->outputs[0], frame);
/* Outside the window: the frame is dropped. */
191     av_frame_free(&frame);
/* Video-specific option table: frame-number limits in addition to the
 * time-based COMMON_OPTS (the COMMON_OPTS line and the table terminator are
 * elided from this chunk). */
195 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM
196 static const AVOption trim_options[] = {
198     { "start_frame", "Number of the first frame that should be passed "
199         "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
200     { "end_frame", "Number of the first frame that should be dropped "
201         "again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
/* AVClass binding the trim option table for av_opt / logging. */
206 static const AVClass trim_class = {
207     .class_name = "trim",
208     .item_name  = av_default_item_name,
209     .option     = trim_options,
210     .version    = LIBAVUTIL_VERSION_INT,
/* Single video input pad; trimming happens in the filter_frame callback. */
213 static const AVFilterPad trim_inputs[] = {
216         .type         = AVMEDIA_TYPE_VIDEO,
217         .filter_frame = trim_filter_frame,
218         .config_props = config_input,
/* Single video output pad; request_frame keeps pulling until output is made. */
223 static const AVFilterPad trim_outputs[] = {
226         .type          = AVMEDIA_TYPE_VIDEO,
227         .request_frame = request_frame,
/* Filter definition for the video "trim" filter. */
232 AVFilter avfilter_vf_trim = {
234     .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
238     .priv_size   = sizeof(TrimContext),
239     .priv_class  = &trim_class,
241     .inputs      = trim_inputs,
242     .outputs     = trim_outputs,
244 #endif // CONFIG_TRIM_FILTER
246 #if CONFIG_ATRIM_FILTER
/* Audio filter_frame callback: like the video version, but the trim window
 * is resolved at sample granularity, so a frame straddling a boundary is
 * partially copied into a fresh buffer rather than passed or dropped whole.
 * Several lines (pts fallback to next_pts, drop/EOF branches, closing
 * braces) are elided from this chunk. */
247 static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
249     AVFilterContext *ctx = inlink->dst;
250     TrimContext *s = ctx->priv;
251     int64_t start_sample, end_sample = frame->nb_samples;
255     /* drop everything if EOF has already been returned */
257         av_frame_free(&frame);
/* Work in sample units: rescale the frame pts from the link timebase. */
261     if (frame->pts != AV_NOPTS_VALUE)
262         pts = av_rescale_q(frame->pts, inlink->time_base,
263                            (AVRational){ 1, inlink->sample_rate });
/* Predicted pts of the next frame, used when a frame lacks timestamps
 * (the fallback read of next_pts is in an elided line). */
266     s->next_pts = pts + frame->nb_samples;
268     /* check if at least a part of the frame is after the start time */
269     if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
/* No start limit configured: the whole frame is past the start. */
273         start_sample = frame->nb_samples;
275         if (s->start_sample >= 0 &&
276             s->nb_samples + frame->nb_samples > s->start_sample) {
/* Offset inside this frame where the sample-count start falls. */
278             start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
281         if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
282             pts + frame->nb_samples > s->start_pts) {
/* Offset inside this frame where the timestamp start falls. */
284             start_sample = FFMIN(start_sample, s->start_pts - pts);
/* Reference timestamp for the duration limit, in sample units. */
291     if (s->first_pts == AV_NOPTS_VALUE)
292         s->first_pts = pts + start_sample;
294     /* check if at least a part of the frame is before the end time */
295     if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
/* No end limit configured: keep the whole frame. */
296         end_sample = frame->nb_samples;
301         if (s->end_sample != INT64_MAX &&
302             s->nb_samples < s->end_sample) {
/* Offset inside this frame where the sample-count end falls. */
304             end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
307         if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
/* Offset inside this frame where the timestamp end falls. */
310             end_sample = FFMAX(end_sample, s->end_pts - pts);
313         if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
/* Offset inside this frame where the duration limit is reached. */
315             end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
324     s->nb_samples += frame->nb_samples;
/* Clamp the kept range to the frame and require it to be non-empty. */
325     start_sample   = FFMAX(0, start_sample);
326     end_sample     = FFMIN(frame->nb_samples, end_sample);
327     av_assert0(start_sample < end_sample);
/* Partial overlap: copy only the kept samples into a new output frame. */
330         AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
332             av_frame_free(&frame);
333             return AVERROR(ENOMEM);
336         av_frame_copy_props(out, frame);
337         av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
338                         out->nb_samples, av_get_channel_layout_nb_channels(frame->channel_layout),
/* Advance the output pts past the samples skipped at the frame start. */
340         if (out->pts != AV_NOPTS_VALUE)
341             out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
344         av_frame_free(&frame);
/* Only the tail is trimmed: shrink the frame in place and forward it. */
347         frame->nb_samples = end_sample;
350     return ff_filter_frame(ctx->outputs[0], frame);
/* Frame entirely outside the window: count its samples and drop it. */
353     s->nb_samples += frame->nb_samples;
354     av_frame_free(&frame);
/* Audio-specific option table: sample-count limits in addition to the
 * time-based COMMON_OPTS (the COMMON_OPTS line and the table terminator are
 * elided from this chunk). */
358 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM
359 static const AVOption atrim_options[] = {
361     { "start_sample", "Number of the first audio sample that should be "
362         "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
363     { "end_sample", "Number of the first audio sample that should be "
364         "dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
/* AVClass binding the atrim option table for av_opt / logging. */
369 static const AVClass atrim_class = {
370     .class_name = "atrim",
371     .item_name  = av_default_item_name,
372     .option     = atrim_options,
373     .version    = LIBAVUTIL_VERSION_INT,
/* Single audio input pad; trimming happens in the filter_frame callback. */
376 static const AVFilterPad atrim_inputs[] = {
379         .type         = AVMEDIA_TYPE_AUDIO,
380         .filter_frame = atrim_filter_frame,
381         .config_props = config_input,
/* Single audio output pad; shares request_frame with the video filter. */
386 static const AVFilterPad atrim_outputs[] = {
389         .type          = AVMEDIA_TYPE_AUDIO,
390         .request_frame = request_frame,
/* Filter definition for the audio "atrim" filter. */
395 AVFilter avfilter_af_atrim = {
397     .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
401     .priv_size   = sizeof(TrimContext),
402     .priv_class  = &atrim_class,
404     .inputs      = atrim_inputs,
405     .outputs     = atrim_outputs,
407 #endif // CONFIG_ATRIM_FILTER