2 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
27 #ifdef HAVE_FFMPEG_UNINSTALLED
30 #include <libavcodec/avcodec.h>
34 #include <gst/video/video.h>
36 #include "gstffmpeg.h"
37 #include "gstffmpegcodecmap.h"
38 #include "gstffmpegutils.h"
40 /* define to enable alternative buffer refcounting algorithm */
43 typedef struct _GstFFMpegDec GstFFMpegDec;
45 #define MAX_TS_MASK 0xff
47 /* for each incomming buffer we keep all timing info in a structure like this.
48 * We keep a circular array of these structures around to store the timing info.
49 * The index in the array is what we pass as opaque data (to pictures) and
50 * pts (to parsers) so that ffmpeg can remember them for us. */
54 GstClockTime timestamp;
55 GstClockTime duration;
63 /* We need to keep track of our pads, so we do so here. */
68 AVCodecContext *context;
76 gint clip_width, clip_height;
79 gint old_fps_n, old_fps_d;
82 enum PixelFormat pix_fmt;
91 gboolean waiting_for_key;
95 /* for tracking DTS/PTS */
96 gboolean has_b_frames;
97 gboolean reordered_in;
99 GstClockTime last_diff;
101 gboolean reordered_out;
102 GstClockTime last_out;
103 GstClockTime next_out;
106 gboolean turnoff_parser; /* used for turning off aac raw parsing
108 AVCodecParserContext *pctx;
113 GValue *par; /* pixel aspect ratio of incoming data */
114 gboolean current_dr; /* if direct rendering is enabled */
115 gboolean extra_ref; /* keep extra ref around in get/release */
117 /* some properties */
118 enum AVDiscard skip_frame;
120 gboolean direct_rendering;
126 /* QoS stuff *//* with LOCK */
128 GstClockTime earliest_time;
132 /* clipping segment */
135 gboolean is_realvideo;
137 GstTSInfo ts_info[MAX_TS_MASK + 1];
140 /* reverse playback queue */
143 /* Can downstream allocate 16bytes aligned data. */
144 gboolean can_allocate_aligned;
147 typedef struct _GstFFMpegDecClass GstFFMpegDecClass;
149 struct _GstFFMpegDecClass
151 GstElementClass parent_class;
154 GstPadTemplate *srctempl, *sinktempl;
/* Sentinel timestamp-info entry returned for out-of-range lookups; all
 * fields are -1 (i.e. invalid/none). Used via GST_TS_INFO_NONE by
 * gst_ts_info_get below. */
157 #define GST_TS_INFO_NONE &ts_info_none
158 static const GstTSInfo ts_info_none = { -1, -1, -1, -1 };
160 static const GstTSInfo *
161 gst_ts_info_store (GstFFMpegDec * dec, GstClockTime timestamp,
162 GstClockTime duration, gint64 offset)
164 gint idx = dec->ts_idx;
165 dec->ts_info[idx].idx = idx;
166 dec->ts_info[idx].timestamp = timestamp;
167 dec->ts_info[idx].duration = duration;
168 dec->ts_info[idx].offset = offset;
169 dec->ts_idx = (idx + 1) & MAX_TS_MASK;
171 return &dec->ts_info[idx];
174 static const GstTSInfo *
175 gst_ts_info_get (GstFFMpegDec * dec, gint idx)
177 if (G_UNLIKELY (idx < 0 || idx > MAX_TS_MASK))
178 return GST_TS_INFO_NONE;
180 return &dec->ts_info[idx];
183 #define GST_TYPE_FFMPEGDEC \
184 (gst_ffmpegdec_get_type())
185 #define GST_FFMPEGDEC(obj) \
186 (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegDec))
187 #define GST_FFMPEGDEC_CLASS(klass) \
188 (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegDecClass))
189 #define GST_IS_FFMPEGDEC(obj) \
190 (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
191 #define GST_IS_FFMPEGDEC_CLASS(klass) \
192 (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
194 #define DEFAULT_LOWRES 0
195 #define DEFAULT_SKIPFRAME 0
196 #define DEFAULT_DIRECT_RENDERING TRUE
197 #define DEFAULT_DO_PADDING TRUE
198 #define DEFAULT_DEBUG_MV FALSE
199 #define DEFAULT_CROP TRUE
200 #define DEFAULT_MAX_THREADS 0
207 PROP_DIRECT_RENDERING,
215 /* A number of function prototypes are given so we can refer to them later. */
216 static void gst_ffmpegdec_base_init (GstFFMpegDecClass * klass);
217 static void gst_ffmpegdec_class_init (GstFFMpegDecClass * klass);
218 static void gst_ffmpegdec_init (GstFFMpegDec * ffmpegdec);
219 static void gst_ffmpegdec_finalize (GObject * object);
221 static gboolean gst_ffmpegdec_query (GstPad * pad, GstQuery * query);
222 static gboolean gst_ffmpegdec_src_event (GstPad * pad, GstEvent * event);
224 static gboolean gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps);
225 static gboolean gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event);
226 static GstFlowReturn gst_ffmpegdec_chain (GstPad * pad, GstBuffer * buf);
228 static GstStateChangeReturn gst_ffmpegdec_change_state (GstElement * element,
229 GstStateChange transition);
231 static void gst_ffmpegdec_set_property (GObject * object,
232 guint prop_id, const GValue * value, GParamSpec * pspec);
233 static void gst_ffmpegdec_get_property (GObject * object,
234 guint prop_id, GValue * value, GParamSpec * pspec);
236 static gboolean gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec,
239 /* some sort of bufferpool handling, but different */
240 static int gst_ffmpegdec_get_buffer (AVCodecContext * context,
242 static void gst_ffmpegdec_release_buffer (AVCodecContext * context,
245 static void gst_ffmpegdec_drain (GstFFMpegDec * ffmpegdec);
247 #define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("ffdec-params")
249 static GstElementClass *parent_class = NULL;
251 #define GST_FFMPEGDEC_TYPE_LOWRES (gst_ffmpegdec_lowres_get_type())
/* Lazily registers and returns the GstFFMpegDecLowres enum GType used by
 * the "lowres" property (decode at full/half/quarter resolution).
 * NOTE(review): this excerpt elides several original lines (first enum
 * entry, array sentinel, braces); remaining code reproduced verbatim. */
253 gst_ffmpegdec_lowres_get_type (void)
255   static GType ffmpegdec_lowres_type = 0;
257   if (!ffmpegdec_lowres_type) {
258     static const GEnumValue ffmpegdec_lowres[] = {
260       {1, "1", "1/2-size"},
261       {2, "2", "1/4-size"},
265     ffmpegdec_lowres_type =
266         g_enum_register_static ("GstFFMpegDecLowres", ffmpegdec_lowres);
269   return ffmpegdec_lowres_type;
272 #define GST_FFMPEGDEC_TYPE_SKIPFRAME (gst_ffmpegdec_skipframe_get_type())
/* Lazily registers and returns the GstFFMpegDecSkipFrame enum GType used by
 * the "skip-frame" property; values map onto libav's AVDiscard levels.
 * NOTE(review): excerpt elides the array sentinel and braces; remaining
 * code reproduced verbatim. */
274 gst_ffmpegdec_skipframe_get_type (void)
276   static GType ffmpegdec_skipframe_type = 0;
278   if (!ffmpegdec_skipframe_type) {
279     static const GEnumValue ffmpegdec_skipframe[] = {
280       {0, "0", "Skip nothing"},
281       {1, "1", "Skip B-frames"},
282       {2, "2", "Skip IDCT/Dequantization"},
283       {5, "5", "Skip everything"},
287     ffmpegdec_skipframe_type =
288         g_enum_register_static ("GstFFMpegDecSkipFrame", ffmpegdec_skipframe);
291   return ffmpegdec_skipframe_type;
/* base_init: per-codec class setup.  Fetches the AVCodec stashed in the
 * class qdata (GST_FFDEC_PARAMS_QDATA), builds element metadata strings
 * from it, derives sink caps from the codec id and src caps from the media
 * type, and registers always-present sink/src pad templates on the class.
 * NOTE(review): excerpt elides several lines (e.g. the in_plugin
 * declaration, some error branches); code reproduced verbatim. */
295 gst_ffmpegdec_base_init (GstFFMpegDecClass * klass)
297   GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
298   GstPadTemplate *sinktempl, *srctempl;
299   GstCaps *sinkcaps, *srccaps;
301   gchar *longname, *classification, *description;
304       (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
305       GST_FFDEC_PARAMS_QDATA);
306   g_assert (in_plugin != NULL);
308   /* construct the element details struct */
309   longname = g_strdup_printf ("FFmpeg %s decoder", in_plugin->long_name);
310   classification = g_strdup_printf ("Codec/Decoder/%s",
311       (in_plugin->type == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio");
312   description = g_strdup_printf ("FFmpeg %s decoder", in_plugin->name);
313   gst_element_class_set_details_simple (element_class, longname, classification,
315       "Wim Taymans <wim.taymans@gmail.com>, "
316       "Ronald Bultje <rbultje@ronald.bitfreak.net>, "
317       "Edward Hervey <bilboed@bilboed.com>");
319   g_free (classification);
320   g_free (description);
323   sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, FALSE);
325     GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
326     sinkcaps = gst_caps_from_string ("unknown/unknown");
328   if (in_plugin->type == AVMEDIA_TYPE_VIDEO) {
329     srccaps = gst_caps_from_string ("video/x-raw-rgb; video/x-raw-yuv");
331     srccaps = gst_ffmpeg_codectype_to_audio_caps (NULL,
332         in_plugin->id, FALSE, in_plugin);
335     GST_DEBUG ("Couldn't get source caps for decoder '%s'", in_plugin->name);
336     srccaps = gst_caps_from_string ("unknown/unknown");
340   sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
341       GST_PAD_ALWAYS, sinkcaps);
342   srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
344   gst_element_class_add_pad_template (element_class, srctempl);
345   gst_element_class_add_pad_template (element_class, sinktempl);
347   klass->in_plugin = in_plugin;
348   klass->srctempl = srctempl;
349   klass->sinktempl = sinktempl;
/* class_init: wire GObject vfuncs (finalize, set/get_property) and the
 * state-change vfunc, then install the decoder properties.  The whole
 * property group is video-only; "max-threads" is additionally gated on the
 * codec advertising frame- or slice-threading capability.
 * NOTE(review): excerpt elides some lines (e.g. the local 'caps'
 * declaration, closing braces); code reproduced verbatim. */
353 gst_ffmpegdec_class_init (GstFFMpegDecClass * klass)
355   GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
356   GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
358   parent_class = g_type_class_peek_parent (klass);
360   gobject_class->finalize = gst_ffmpegdec_finalize;
362   gobject_class->set_property = gst_ffmpegdec_set_property;
363   gobject_class->get_property = gst_ffmpegdec_get_property;
365   if (klass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
368     g_object_class_install_property (gobject_class, PROP_SKIPFRAME,
369         g_param_spec_enum ("skip-frame", "Skip frames",
370             "Which types of frames to skip during decoding",
371             GST_FFMPEGDEC_TYPE_SKIPFRAME, 0,
372             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
373     g_object_class_install_property (gobject_class, PROP_LOWRES,
374         g_param_spec_enum ("lowres", "Low resolution",
375             "At which resolution to decode images", GST_FFMPEGDEC_TYPE_LOWRES,
376             0, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
377     g_object_class_install_property (gobject_class, PROP_DIRECT_RENDERING,
378         g_param_spec_boolean ("direct-rendering", "Direct Rendering",
379             "Enable direct rendering", DEFAULT_DIRECT_RENDERING,
380             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
381     g_object_class_install_property (gobject_class, PROP_DO_PADDING,
382         g_param_spec_boolean ("do-padding", "Do Padding",
383             "Add 0 padding before decoding data", DEFAULT_DO_PADDING,
384             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
385     g_object_class_install_property (gobject_class, PROP_DEBUG_MV,
386         g_param_spec_boolean ("debug-mv", "Debug motion vectors",
387             "Whether ffmpeg should print motion vectors on top of the image",
388             DEFAULT_DEBUG_MV, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
390     g_object_class_install_property (gobject_class, PROP_CROP,
391         g_param_spec_boolean ("crop", "Crop",
392             "Crop images to the display region",
393             DEFAULT_CROP, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
396     caps = klass->in_plugin->capabilities;
397     if (caps & (CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS)) {
398       g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_MAX_THREADS,
399           g_param_spec_int ("max-threads", "Maximum decode threads",
400               "Maximum number of worker threads to spawn. (0 = auto)",
401               0, G_MAXINT, DEFAULT_MAX_THREADS,
402               G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
406   gstelement_class->change_state = gst_ffmpegdec_change_state;
/* instance init: create sink/src pads from the class templates, install
 * pad callbacks, allocate the libav context and frame, and set every
 * instance field to its documented default.
 * NOTE(review): excerpt elides a few lines (blank lines/braces); code
 * reproduced verbatim. */
410 gst_ffmpegdec_init (GstFFMpegDec * ffmpegdec)
412   GstFFMpegDecClass *oclass;
414   oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
417   ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
418   gst_pad_set_setcaps_function (ffmpegdec->sinkpad,
419       GST_DEBUG_FUNCPTR (gst_ffmpegdec_setcaps));
420   gst_pad_set_event_function (ffmpegdec->sinkpad,
421       GST_DEBUG_FUNCPTR (gst_ffmpegdec_sink_event));
422   gst_pad_set_chain_function (ffmpegdec->sinkpad,
423       GST_DEBUG_FUNCPTR (gst_ffmpegdec_chain));
424   gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->sinkpad);
426   ffmpegdec->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
427   gst_pad_use_fixed_caps (ffmpegdec->srcpad);
428   gst_pad_set_event_function (ffmpegdec->srcpad,
429       GST_DEBUG_FUNCPTR (gst_ffmpegdec_src_event));
430   gst_pad_set_query_function (ffmpegdec->srcpad,
431       GST_DEBUG_FUNCPTR (gst_ffmpegdec_query));
432   gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->srcpad);
434   /* some ffmpeg data */
435   ffmpegdec->context = avcodec_alloc_context ();
436   ffmpegdec->picture = avcodec_alloc_frame ();
437   ffmpegdec->pctx = NULL;
438   ffmpegdec->pcache = NULL;
439   ffmpegdec->par = NULL;
440   ffmpegdec->opened = FALSE;
441   ffmpegdec->waiting_for_key = TRUE;
442   ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
443   ffmpegdec->direct_rendering = DEFAULT_DIRECT_RENDERING;
444   ffmpegdec->do_padding = DEFAULT_DO_PADDING;
445   ffmpegdec->debug_mv = DEFAULT_DEBUG_MV;
446   ffmpegdec->crop = DEFAULT_CROP;
447   ffmpegdec->max_threads = DEFAULT_MAX_THREADS;
449   ffmpegdec->format.video.par_n = -1;
450   ffmpegdec->format.video.fps_n = -1;
451   ffmpegdec->format.video.old_fps_n = -1;
452   gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
454   /* We initially assume downstream can allocate 16 bytes aligned buffers */
455   ffmpegdec->can_allocate_aligned = TRUE;
459 gst_ffmpegdec_finalize (GObject * object)
461 GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
463 if (ffmpegdec->context != NULL) {
464 av_free (ffmpegdec->context);
465 ffmpegdec->context = NULL;
468 if (ffmpegdec->picture != NULL) {
469 av_free (ffmpegdec->picture);
470 ffmpegdec->picture = NULL;
473 G_OBJECT_CLASS (parent_class)->finalize (object);
/* src pad query handler.  For LATENCY queries the peer (upstream) result is
 * extended with has_b_frames frames worth of extra latency, scaled by the
 * known framerate when available; all other queries use the default handler.
 * NOTE(review): excerpt elides several lines (the 'live' local, part of the
 * no-framerate fallback, break/return statements); code reproduced
 * verbatim. */
477 gst_ffmpegdec_query (GstPad * pad, GstQuery * query)
479   GstFFMpegDec *ffmpegdec;
480   gboolean res = FALSE;
482   ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
484   switch (GST_QUERY_TYPE (query)) {
485     case GST_QUERY_LATENCY:
487       GST_DEBUG_OBJECT (ffmpegdec, "latency query %d",
488           ffmpegdec->context->has_b_frames);
489       if ((res = gst_pad_peer_query (ffmpegdec->sinkpad, query))) {
490         if (ffmpegdec->context->has_b_frames) {
492           GstClockTime min_lat, max_lat, our_lat;
494           gst_query_parse_latency (query, &live, &min_lat, &max_lat);
495           if (ffmpegdec->format.video.fps_n > 0)
497                 gst_util_uint64_scale_int (ffmpegdec->context->has_b_frames *
498                 GST_SECOND, ffmpegdec->format.video.fps_d,
499                 ffmpegdec->format.video.fps_n);
502                 gst_util_uint64_scale_int (ffmpegdec->context->has_b_frames *
508           gst_query_set_latency (query, live, min_lat, max_lat);
514       res = gst_pad_query_default (pad, query);
518   gst_object_unref (ffmpegdec);
524 gst_ffmpegdec_reset_ts (GstFFMpegDec * ffmpegdec)
526 ffmpegdec->last_in = GST_CLOCK_TIME_NONE;
527 ffmpegdec->last_diff = GST_CLOCK_TIME_NONE;
528 ffmpegdec->last_frames = 0;
529 ffmpegdec->last_out = GST_CLOCK_TIME_NONE;
530 ffmpegdec->next_out = GST_CLOCK_TIME_NONE;
531 ffmpegdec->reordered_in = FALSE;
532 ffmpegdec->reordered_out = FALSE;
536 gst_ffmpegdec_update_qos (GstFFMpegDec * ffmpegdec, gdouble proportion,
537 GstClockTime timestamp)
539 GST_LOG_OBJECT (ffmpegdec, "update QOS: %f, %" GST_TIME_FORMAT,
540 proportion, GST_TIME_ARGS (timestamp));
542 GST_OBJECT_LOCK (ffmpegdec);
543 ffmpegdec->proportion = proportion;
544 ffmpegdec->earliest_time = timestamp;
545 GST_OBJECT_UNLOCK (ffmpegdec);
549 gst_ffmpegdec_reset_qos (GstFFMpegDec * ffmpegdec)
551 gst_ffmpegdec_update_qos (ffmpegdec, 0.5, GST_CLOCK_TIME_NONE);
552 ffmpegdec->processed = 0;
553 ffmpegdec->dropped = 0;
557 gst_ffmpegdec_read_qos (GstFFMpegDec * ffmpegdec, gdouble * proportion,
558 GstClockTime * timestamp)
560 GST_OBJECT_LOCK (ffmpegdec);
561 *proportion = ffmpegdec->proportion;
562 *timestamp = ffmpegdec->earliest_time;
563 GST_OBJECT_UNLOCK (ffmpegdec);
/* src pad event handler.  QOS events update our proportion/earliest-time
 * state and are then forwarded upstream; every other event is forwarded
 * upstream unchanged.
 * NOTE(review): excerpt elides the 'res' declaration, case labels and the
 * return; code reproduced verbatim. */
567 gst_ffmpegdec_src_event (GstPad * pad, GstEvent * event)
569   GstFFMpegDec *ffmpegdec;
572   ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
574   switch (GST_EVENT_TYPE (event)) {
578       GstClockTimeDiff diff;
579       GstClockTime timestamp;
581       gst_event_parse_qos (event, &proportion, &diff, &timestamp);
583       /* update our QoS values */
584       gst_ffmpegdec_update_qos (ffmpegdec, proportion, timestamp + diff);
586       /* forward upstream */
587       res = gst_pad_push_event (ffmpegdec->sinkpad, event);
591       /* forward upstream */
592       res = gst_pad_push_event (ffmpegdec->sinkpad, event);
596   gst_object_unref (ffmpegdec);
/* Close the libav codec and release all per-stream state: stored PAR,
 * palette control, extradata, parser context plus its cached input buffer,
 * and reset the video-format fields.  No-op when the codec is not opened.
 * NOTE(review): excerpt elides closing braces and the early return; code
 * reproduced verbatim. */
603 gst_ffmpegdec_close (GstFFMpegDec * ffmpegdec)
605   if (!ffmpegdec->opened)
608   GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");
610   if (ffmpegdec->par) {
611     g_free (ffmpegdec->par);
612     ffmpegdec->par = NULL;
615   if (ffmpegdec->context->priv_data)
616     gst_ffmpeg_avcodec_close (ffmpegdec->context);
617   ffmpegdec->opened = FALSE;
619   if (ffmpegdec->context->palctrl) {
620     av_free (ffmpegdec->context->palctrl);
621     ffmpegdec->context->palctrl = NULL;
624   if (ffmpegdec->context->extradata) {
625     av_free (ffmpegdec->context->extradata);
626     ffmpegdec->context->extradata = NULL;
629   if (ffmpegdec->pctx) {
630     if (ffmpegdec->pcache) {
631       gst_buffer_unref (ffmpegdec->pcache);
632       ffmpegdec->pcache = NULL;
634     av_parser_close (ffmpegdec->pctx);
635     ffmpegdec->pctx = NULL;
638   ffmpegdec->format.video.par_n = -1;
639   ffmpegdec->format.video.fps_n = -1;
640   ffmpegdec->format.video.old_fps_n = -1;
641   ffmpegdec->format.video.interlaced = FALSE;
/* Open the libav codec for this element's class.  Creates a parser where
 * useful (some codec ids are blacklisted; H264 gets a parser only when no
 * extradata is present, since extradata implies already-framed AVC), marks
 * RealVideo codecs, resets per-media-type format fields, timestamp tracking
 * and QoS state.  On open failure the error path closes and reports.
 * NOTE(review): excerpt elides many lines (case labels, braces, returns);
 * code reproduced verbatim. */
646 gst_ffmpegdec_open (GstFFMpegDec * ffmpegdec)
648   GstFFMpegDecClass *oclass;
650   oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
652   if (gst_ffmpeg_avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0)
655   ffmpegdec->opened = TRUE;
656   ffmpegdec->is_realvideo = FALSE;
658   GST_LOG_OBJECT (ffmpegdec, "Opened ffmpeg codec %s, id %d",
659       oclass->in_plugin->name, oclass->in_plugin->id);
661   /* open a parser if we can */
662   switch (oclass->in_plugin->id) {
666       GST_LOG_OBJECT (ffmpegdec, "not using parser, blacklisted codec");
667       ffmpegdec->pctx = NULL;
670       /* For H264, only use a parser if there is no context data, if there is,
671        * we're talking AVC */
672       if (ffmpegdec->context->extradata_size == 0) {
673         GST_LOG_OBJECT (ffmpegdec, "H264 with no extradata, creating parser");
674         ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
676         GST_LOG_OBJECT (ffmpegdec,
677             "H264 with extradata implies framed data - not using parser");
678         ffmpegdec->pctx = NULL;
685       ffmpegdec->is_realvideo = TRUE;
688       if (!ffmpegdec->turnoff_parser) {
689         ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
691           GST_LOG_OBJECT (ffmpegdec, "Using parser %p", ffmpegdec->pctx);
693           GST_LOG_OBJECT (ffmpegdec, "No parser for codec");
695         GST_LOG_OBJECT (ffmpegdec, "Parser deactivated for format");
700   switch (oclass->in_plugin->type) {
701     case AVMEDIA_TYPE_VIDEO:
702       ffmpegdec->format.video.width = 0;
703       ffmpegdec->format.video.height = 0;
704       ffmpegdec->format.video.clip_width = -1;
705       ffmpegdec->format.video.clip_height = -1;
706       ffmpegdec->format.video.pix_fmt = PIX_FMT_NB;
707       ffmpegdec->format.video.interlaced = FALSE;
709     case AVMEDIA_TYPE_AUDIO:
710       ffmpegdec->format.audio.samplerate = 0;
711       ffmpegdec->format.audio.channels = 0;
712       ffmpegdec->format.audio.depth = 0;
718   gst_ffmpegdec_reset_ts (ffmpegdec);
719   /* FIXME, reset_qos holds the LOCK */
720   ffmpegdec->proportion = 0.0;
721   ffmpegdec->earliest_time = -1;
728   gst_ffmpegdec_close (ffmpegdec);
729   GST_DEBUG_OBJECT (ffmpegdec, "ffdec_%s: Failed to open FFMPEG codec",
730       oclass->in_plugin->name);
/* sink setcaps: (re)configure and (re)open the codec for new input caps.
 * Drains and closes any previous session, installs get/release_buffer
 * callbacks for video, transfers size/extradata from caps into the
 * context, captures PAR and framerate, decides whether direct rendering is
 * usable (disabled for H264 and broken-stride VP/SVQ1 codecs), configures
 * parser usage for AAC/FLAC, threading, and finally reads/adjusts the
 * clipping region for lowres.  Error path frees the stored PAR.
 * NOTE(review): excerpt elides many lines (locals, else branches, braces,
 * returns); code reproduced verbatim. */
736 gst_ffmpegdec_setcaps (GstPad * pad, GstCaps * caps)
738   GstFFMpegDec *ffmpegdec;
739   GstFFMpegDecClass *oclass;
740   GstStructure *structure;
745   ffmpegdec = (GstFFMpegDec *) (gst_pad_get_parent (pad));
746   oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
748   GST_DEBUG_OBJECT (pad, "setcaps called");
750   GST_OBJECT_LOCK (ffmpegdec);
752   /* stupid check for VC1 */
753   if ((oclass->in_plugin->id == CODEC_ID_WMV3) ||
754       (oclass->in_plugin->id == CODEC_ID_VC1))
755     oclass->in_plugin->id = gst_ffmpeg_caps_to_codecid (caps, NULL);
757   /* close old session */
758   if (ffmpegdec->opened) {
759     GST_OBJECT_UNLOCK (ffmpegdec);
760     gst_ffmpegdec_drain (ffmpegdec);
761     GST_OBJECT_LOCK (ffmpegdec);
762     gst_ffmpegdec_close (ffmpegdec);
764     /* and reset the defaults that were set when a context is created */
765     avcodec_get_context_defaults (ffmpegdec->context);
768   /* set buffer functions */
769   if (oclass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
770     ffmpegdec->context->get_buffer = gst_ffmpegdec_get_buffer;
771     ffmpegdec->context->release_buffer = gst_ffmpegdec_release_buffer;
772     ffmpegdec->context->draw_horiz_band = NULL;
775   /* default is to let format decide if it needs a parser */
776   ffmpegdec->turnoff_parser = FALSE;
778   ffmpegdec->has_b_frames = FALSE;
780   GST_LOG_OBJECT (ffmpegdec, "size %dx%d", ffmpegdec->context->width,
781       ffmpegdec->context->height);
783   /* get size and so */
784   gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
785       oclass->in_plugin->type, caps, ffmpegdec->context);
787   GST_LOG_OBJECT (ffmpegdec, "size after %dx%d", ffmpegdec->context->width,
788       ffmpegdec->context->height);
790   if (!ffmpegdec->context->time_base.den || !ffmpegdec->context->time_base.num) {
791     GST_DEBUG_OBJECT (ffmpegdec, "forcing 25/1 framerate");
792     ffmpegdec->context->time_base.num = 1;
793     ffmpegdec->context->time_base.den = 25;
796   /* get pixel aspect ratio if it's set */
797   structure = gst_caps_get_structure (caps, 0);
799   par = gst_structure_get_value (structure, "pixel-aspect-ratio");
801     GST_DEBUG_OBJECT (ffmpegdec, "sink caps have pixel-aspect-ratio of %d:%d",
802         gst_value_get_fraction_numerator (par),
803         gst_value_get_fraction_denominator (par));
806     g_free (ffmpegdec->par);
807     ffmpegdec->par = g_new0 (GValue, 1);
808     gst_value_init_and_copy (ffmpegdec->par, par);
811   /* get the framerate from incoming caps. fps_n is set to -1 when
812    * there is no valid framerate */
813   fps = gst_structure_get_value (structure, "framerate");
814   if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
815     ffmpegdec->format.video.fps_n = gst_value_get_fraction_numerator (fps);
816     ffmpegdec->format.video.fps_d = gst_value_get_fraction_denominator (fps);
817     GST_DEBUG_OBJECT (ffmpegdec, "Using framerate %d/%d from incoming caps",
818         ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
820     ffmpegdec->format.video.fps_n = -1;
821     GST_DEBUG_OBJECT (ffmpegdec, "Using framerate from codec");
824   /* figure out if we can use direct rendering */
825   ffmpegdec->current_dr = FALSE;
826   ffmpegdec->extra_ref = FALSE;
827   if (ffmpegdec->direct_rendering) {
828     GST_DEBUG_OBJECT (ffmpegdec, "trying to enable direct rendering");
829     if (oclass->in_plugin->capabilities & CODEC_CAP_DR1) {
830       if (oclass->in_plugin->id == CODEC_ID_H264) {
831         GST_DEBUG_OBJECT (ffmpegdec, "disable direct rendering setup for H264");
832         /* does not work, many stuff reads outside of the planes */
833         ffmpegdec->current_dr = FALSE;
834         ffmpegdec->extra_ref = TRUE;
835       } else if ((oclass->in_plugin->id == CODEC_ID_SVQ1) ||
836           (oclass->in_plugin->id == CODEC_ID_VP5) ||
837           (oclass->in_plugin->id == CODEC_ID_VP6) ||
838           (oclass->in_plugin->id == CODEC_ID_VP6F) ||
839           (oclass->in_plugin->id == CODEC_ID_VP6A)) {
840         GST_DEBUG_OBJECT (ffmpegdec,
841             "disable direct rendering setup for broken stride support");
842         /* does not work, uses a incompatible stride. See #610613 */
843         ffmpegdec->current_dr = FALSE;
844         ffmpegdec->extra_ref = TRUE;
846         GST_DEBUG_OBJECT (ffmpegdec, "enabled direct rendering");
847         ffmpegdec->current_dr = TRUE;
850       GST_DEBUG_OBJECT (ffmpegdec, "direct rendering not supported");
853   if (ffmpegdec->current_dr) {
854     /* do *not* draw edges when in direct rendering, for some reason it draws
855      * outside of the memory. */
856     ffmpegdec->context->flags |= CODEC_FLAG_EMU_EDGE;
859   /* for AAC we only use av_parse if not on stream-format==raw or ==loas */
860   if (oclass->in_plugin->id == CODEC_ID_AAC
861       || oclass->in_plugin->id == CODEC_ID_AAC_LATM) {
862     const gchar *format = gst_structure_get_string (structure, "stream-format");
864     if (format == NULL || strcmp (format, "raw") == 0) {
865       ffmpegdec->turnoff_parser = TRUE;
869   /* for FLAC, don't parse if it's already parsed */
870   if (oclass->in_plugin->id == CODEC_ID_FLAC) {
871     if (gst_structure_has_field (structure, "streamheader"))
872       ffmpegdec->turnoff_parser = TRUE;
875   /* workaround encoder bugs */
876   ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
877   ffmpegdec->context->error_recognition = 1;
880   ffmpegdec->context->lowres = ffmpegdec->lowres;
881   ffmpegdec->context->skip_frame = ffmpegdec->skip_frame;
883   /* ffmpeg can draw motion vectors on top of the image (not every decoder
885   ffmpegdec->context->debug_mv = ffmpegdec->debug_mv;
887   if (ffmpegdec->max_threads == 0) {
888     if (!(oclass->in_plugin->capabilities & CODEC_CAP_AUTO_THREADS))
889       ffmpegdec->context->thread_count = gst_ffmpeg_auto_max_threads ();
891       ffmpegdec->context->thread_count = 0;
893     ffmpegdec->context->thread_count = ffmpegdec->max_threads;
895   ffmpegdec->context->thread_type = FF_THREAD_SLICE;
897   /* open codec - we don't select an output pix_fmt yet,
898    * simply because we don't know! We only get it
899    * during playback... */
900   if (!gst_ffmpegdec_open (ffmpegdec))
903   /* clipping region */
904   gst_structure_get_int (structure, "width",
905       &ffmpegdec->format.video.clip_width);
906   gst_structure_get_int (structure, "height",
907       &ffmpegdec->format.video.clip_height);
909   GST_DEBUG_OBJECT (pad, "clipping to %dx%d",
910       ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
912   /* take into account the lowres property */
913   if (ffmpegdec->format.video.clip_width != -1)
914     ffmpegdec->format.video.clip_width >>= ffmpegdec->lowres;
915   if (ffmpegdec->format.video.clip_height != -1)
916     ffmpegdec->format.video.clip_height >>= ffmpegdec->lowres;
918   GST_DEBUG_OBJECT (pad, "final clipping to %dx%d",
919       ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
922   GST_OBJECT_UNLOCK (ffmpegdec);
924   gst_object_unref (ffmpegdec);
931   GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
932   if (ffmpegdec->par) {
933     g_free (ffmpegdec->par);
934     ffmpegdec->par = NULL;
/* Allocate an output buffer for a width x height picture in the context's
 * pix_fmt.  Tries a downstream pad_alloc first (only when there is no
 * palette and downstream previously produced 16-byte-aligned buffers);
 * falls back to a locally memaligned buffer otherwise, remembering when
 * downstream alignment fails.  Renegotiates caps first when needed.
 * NOTE(review): excerpt elides locals, braces, returns and error labels;
 * code reproduced verbatim. */
942 alloc_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf,
943     gint width, gint height)
948   ret = GST_FLOW_ERROR;
951   GST_LOG_OBJECT (ffmpegdec, "alloc output buffer");
953   /* see if we need renegotiation */
954   if (G_UNLIKELY (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)))
955     goto negotiate_failed;
957   /* get the size of the gstreamer output buffer given a
958    * width/height/format */
959   fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
962   if (!ffmpegdec->context->palctrl && ffmpegdec->can_allocate_aligned) {
963     GST_LOG_OBJECT (ffmpegdec, "calling pad_alloc");
964     /* no pallete, we can use the buffer size to alloc */
965     ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
966         GST_BUFFER_OFFSET_NONE, fsize,
967         GST_PAD_CAPS (ffmpegdec->srcpad), outbuf);
968     if (G_UNLIKELY (ret != GST_FLOW_OK))
971     /* If buffer isn't 128-bit aligned, create a memaligned one ourselves */
972     if (((uintptr_t) GST_BUFFER_DATA (*outbuf)) % 16) {
973       GST_DEBUG_OBJECT (ffmpegdec,
974           "Downstream can't allocate aligned buffers.");
975       ffmpegdec->can_allocate_aligned = FALSE;
976       gst_buffer_unref (*outbuf);
977       *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
980     GST_LOG_OBJECT (ffmpegdec,
981         "not calling pad_alloc, we have a pallete or downstream can't give 16 byte aligned buffers.");
982     /* for paletted data we can't use pad_alloc_buffer(), because
983      * fsize contains the size of the palette, so the overall size
984      * is bigger than ffmpegcolorspace's unit size, which will
985      * prompt GstBaseTransform to complain endlessly ... */
986     *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
989   /* set caps, we do this here because the buffer is still writable here and we
990    * are sure to be negotiated */
991   gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
998   GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
999   return GST_FLOW_NOT_NEGOTIATED;
1003   GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed %d (%s)", ret,
1004       gst_flow_get_name (ret));
/* AVCodecContext.get_buffer override: when direct rendering is enabled,
 * back the decoder's picture with a GstBuffer allocated via
 * alloc_output_buffer (aligned dimensions, no clipping needed), storing the
 * buffer in picture->opaque so release_buffer can find it; otherwise fall
 * back to avcodec_default_get_buffer.  An extra ref is taken for reference
 * frames or when extra_ref is set.
 * NOTE(review): excerpt elides locals, braces, returns and some fallback
 * paths; code reproduced verbatim. */
1010 gst_ffmpegdec_get_buffer (AVCodecContext * context, AVFrame * picture)
1012   GstBuffer *buf = NULL;
1013   GstFFMpegDec *ffmpegdec;
1015   gint coded_width, coded_height;
1018   ffmpegdec = (GstFFMpegDec *) context->opaque;
1020   GST_DEBUG_OBJECT (ffmpegdec, "getting buffer");
1022   /* apply the last info we have seen to this picture, when we get the
1023    * picture back from ffmpeg we can use this to correctly timestamp the output
1025   picture->reordered_opaque = context->reordered_opaque;
1026   /* make sure we don't free the buffer when it's not ours */
1027   picture->opaque = NULL;
1029   /* take width and height before clipping */
1030   width = context->width;
1031   height = context->height;
1032   coded_width = context->coded_width;
1033   coded_height = context->coded_height;
1035   GST_LOG_OBJECT (ffmpegdec, "dimension %dx%d, coded %dx%d", width, height,
1036       coded_width, coded_height);
1037   if (!ffmpegdec->current_dr) {
1038     GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");
1039     res = avcodec_default_get_buffer (context, picture);
1041     GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", picture->linesize[0],
1042         picture->linesize[1], picture->linesize[2]);
1043     GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
1044         (guint) (picture->data[1] - picture->data[0]),
1045         (guint) (picture->data[2] - picture->data[0]));
1049   switch (context->codec_type) {
1050     case AVMEDIA_TYPE_VIDEO:
1051       /* some ffmpeg video plugins don't see the point in setting codec_type ... */
1052     case AVMEDIA_TYPE_UNKNOWN:
1055       gint clip_width, clip_height;
1057       /* take final clipped output size */
1058       if ((clip_width = ffmpegdec->format.video.clip_width) == -1)
1060       if ((clip_height = ffmpegdec->format.video.clip_height) == -1)
1061         clip_height = height;
1063       GST_LOG_OBJECT (ffmpegdec, "raw outsize %d/%d", width, height);
1065       /* this is the size ffmpeg needs for the buffer */
1066       avcodec_align_dimensions (context, &width, &height);
1068       GST_LOG_OBJECT (ffmpegdec, "aligned outsize %d/%d, clip %d/%d",
1069           width, height, clip_width, clip_height);
1071       if (width != clip_width || height != clip_height) {
1072         /* We can't alloc if we need to clip the output buffer later */
1073         GST_LOG_OBJECT (ffmpegdec, "we need clipping, fallback alloc");
1074         return avcodec_default_get_buffer (context, picture);
1077       /* alloc with aligned dimensions for ffmpeg */
1078       ret = alloc_output_buffer (ffmpegdec, &buf, width, height);
1079       if (G_UNLIKELY (ret != GST_FLOW_OK)) {
1080         /* alloc default buffer when we can't get one from downstream */
1081         GST_LOG_OBJECT (ffmpegdec, "alloc failed, fallback alloc");
1082         return avcodec_default_get_buffer (context, picture);
1085       /* copy the right pointers and strides in the picture object */
1086       gst_ffmpeg_avpicture_fill ((AVPicture *) picture,
1087           GST_BUFFER_DATA (buf), context->pix_fmt, width, height);
1090     case AVMEDIA_TYPE_AUDIO:
1092       GST_ERROR_OBJECT (ffmpegdec,
1093           "_get_buffer() should never get called for non-video buffers !");
1094       g_assert_not_reached ();
1098   /* tell ffmpeg we own this buffer, tranfer the ref we have on the buffer to
1099    * the opaque data. */
1100   picture->type = FF_BUFFER_TYPE_USER;
1101   picture->age = 256 * 256 * 256 * 64;
1102   picture->opaque = buf;
1105   if (picture->reference != 0 || ffmpegdec->extra_ref) {
1106     GST_DEBUG_OBJECT (ffmpegdec, "adding extra ref");
1107     gst_buffer_ref (buf);
1111   GST_LOG_OBJECT (ffmpegdec, "returned buffer %p", buf);
/* AVCodecContext.release_buffer override: counterpart of get_buffer.
 * Pictures without our opaque GstBuffer go to the default releaser;
 * otherwise unref the buffer (plus the extra ref added for reference
 * frames / extra_ref mode) and clear the picture's data pointers so
 * ffmpeg no longer references them.
 * NOTE(review): excerpt elides locals, an early return and braces; code
 * reproduced verbatim. */
1117 gst_ffmpegdec_release_buffer (AVCodecContext * context, AVFrame * picture)
1121   GstFFMpegDec *ffmpegdec;
1123   ffmpegdec = (GstFFMpegDec *) context->opaque;
1125   /* check if it was our buffer */
1126   if (picture->opaque == NULL) {
1127     GST_DEBUG_OBJECT (ffmpegdec, "default release buffer");
1128     avcodec_default_release_buffer (context, picture);
1132   /* we remove the opaque data now */
1133   buf = GST_BUFFER_CAST (picture->opaque);
1134   GST_DEBUG_OBJECT (ffmpegdec, "release buffer %p", buf);
1135   picture->opaque = NULL;
1138   if (picture->reference != 0 || ffmpegdec->extra_ref) {
1139     GST_DEBUG_OBJECT (ffmpegdec, "remove extra ref");
1140     gst_buffer_unref (buf);
1143   gst_buffer_unref (buf);
1146   /* zero out the reference in ffmpeg */
1147   for (i = 0; i < 4; i++) {
1148     picture->data[i] = NULL;
1149     picture->linesize[i] = 0;
/* gst_ffmpegdec_add_pixel_aspect_ratio:
 * Decide which pixel-aspect-ratio to advertise in the output caps structure
 * @s: the one the demuxer supplied (ffmpegdec->par) or the one the decoder
 * reported (context->sample_aspect_ratio).  Preference rules:
 *  - only one side set -> use it
 *  - one side is 1:1 and the other is not -> use the non-1:1 one
 *  - both non-1:1 -> trust the demuxer
 * NOTE(review): the use_demuxer_par:/use_decoder_par:/no_par target labels
 * are not visible in this excerpt; the gotos below jump to them.
 */
1154 gst_ffmpegdec_add_pixel_aspect_ratio (GstFFMpegDec * ffmpegdec,
1157   gboolean demuxer_par_set = FALSE;
1158   gboolean decoder_par_set = FALSE;
1159   gint demuxer_num = 1, demuxer_denom = 1;
1160   gint decoder_num = 1, decoder_denom = 1;
/* snapshot both PAR candidates under the object lock */
1162   GST_OBJECT_LOCK (ffmpegdec);
1164   if (ffmpegdec->par) {
1165     demuxer_num = gst_value_get_fraction_numerator (ffmpegdec->par);
1166     demuxer_denom = gst_value_get_fraction_denominator (ffmpegdec->par);
1167     demuxer_par_set = TRUE;
1168     GST_DEBUG_OBJECT (ffmpegdec, "Demuxer PAR: %d:%d", demuxer_num,
1172   if (ffmpegdec->context->sample_aspect_ratio.num &&
1173       ffmpegdec->context->sample_aspect_ratio.den) {
1174     decoder_num = ffmpegdec->context->sample_aspect_ratio.num;
1175     decoder_denom = ffmpegdec->context->sample_aspect_ratio.den;
1176     decoder_par_set = TRUE;
1177     GST_DEBUG_OBJECT (ffmpegdec, "Decoder PAR: %d:%d", decoder_num,
1181   GST_OBJECT_UNLOCK (ffmpegdec);
/* neither side knows: set nothing */
1183   if (!demuxer_par_set && !decoder_par_set)
1186   if (demuxer_par_set && !decoder_par_set)
1187     goto use_demuxer_par;
1189   if (decoder_par_set && !demuxer_par_set)
1190     goto use_decoder_par;
1192   /* Both the demuxer and the decoder provide a PAR. If one of
1193    * the two PARs is 1:1 and the other one is not, use the one
1194    * that is not 1:1. */
1195   if (demuxer_num == demuxer_denom && decoder_num != decoder_denom)
1196     goto use_decoder_par;
1198   if (decoder_num == decoder_denom && demuxer_num != demuxer_denom)
1199     goto use_demuxer_par;
1201   /* Both PARs are non-1:1, so use the PAR provided by the demuxer */
1202   goto use_demuxer_par;
/* use_decoder_par: label (not visible in this excerpt) */
1206     GST_DEBUG_OBJECT (ffmpegdec,
1207         "Setting decoder provided pixel-aspect-ratio of %u:%u", decoder_num,
1209     gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, decoder_num,
1210         decoder_denom, NULL);
/* use_demuxer_par: label (not visible in this excerpt) */
1216     GST_DEBUG_OBJECT (ffmpegdec,
1217         "Setting demuxer provided pixel-aspect-ratio of %u:%u", demuxer_num,
1219     gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, demuxer_num,
1220         demuxer_denom, NULL);
/* no_par: label (not visible in this excerpt) */
1225   GST_DEBUG_OBJECT (ffmpegdec,
1226       "Neither demuxer nor codec provide a pixel-aspect-ratio");
/* gst_ffmpegdec_negotiate:
 * (Re)configure the source pad caps from the current codec context.  When
 * @force is FALSE and none of the relevant video (size/fps/pixfmt/PAR) or
 * audio (rate/channels/depth) parameters changed, this is a no-op.
 *
 * Fix: the "Renegotiating video" debug line printed old_fps_n twice; the
 * second argument must be old_fps_d to match the "%d:%d" pair in the format
 * string.
 */
1232 gst_ffmpegdec_negotiate (GstFFMpegDec * ffmpegdec, gboolean force)
1234   GstFFMpegDecClass *oclass;
1237   oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
/* first check whether anything actually changed; bail out early if not */
1239   switch (oclass->in_plugin->type) {
1240     case AVMEDIA_TYPE_VIDEO:
1241       if (!force && ffmpegdec->format.video.width == ffmpegdec->context->width
1242           && ffmpegdec->format.video.height == ffmpegdec->context->height
1243           && ffmpegdec->format.video.fps_n == ffmpegdec->format.video.old_fps_n
1244           && ffmpegdec->format.video.fps_d == ffmpegdec->format.video.old_fps_d
1245           && ffmpegdec->format.video.pix_fmt == ffmpegdec->context->pix_fmt
1246           && ffmpegdec->format.video.par_n ==
1247           ffmpegdec->context->sample_aspect_ratio.num
1248           && ffmpegdec->format.video.par_d ==
1249           ffmpegdec->context->sample_aspect_ratio.den)
1251       GST_DEBUG_OBJECT (ffmpegdec,
1252           "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps to %dx%d@ %d:%d PAR %d/%d fps",
1253           ffmpegdec->format.video.width, ffmpegdec->format.video.height,
1254           ffmpegdec->format.video.par_n, ffmpegdec->format.video.par_d,
1255           ffmpegdec->format.video.old_fps_n, ffmpegdec->format.video.old_fps_d,
1256           ffmpegdec->context->width, ffmpegdec->context->height,
1257           ffmpegdec->context->sample_aspect_ratio.num,
1258           ffmpegdec->context->sample_aspect_ratio.den,
1259           ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
/* remember the context values we negotiated against for the next check */
1260       ffmpegdec->format.video.width = ffmpegdec->context->width;
1261       ffmpegdec->format.video.height = ffmpegdec->context->height;
1262       ffmpegdec->format.video.old_fps_n = ffmpegdec->format.video.fps_n;
1263       ffmpegdec->format.video.old_fps_d = ffmpegdec->format.video.fps_d;
1264       ffmpegdec->format.video.pix_fmt = ffmpegdec->context->pix_fmt;
1265       ffmpegdec->format.video.par_n =
1266           ffmpegdec->context->sample_aspect_ratio.num;
1267       ffmpegdec->format.video.par_d =
1268           ffmpegdec->context->sample_aspect_ratio.den;
1270     case AVMEDIA_TYPE_AUDIO:
1272       gint depth = av_smp_format_depth (ffmpegdec->context->sample_fmt);
1273       if (!force && ffmpegdec->format.audio.samplerate ==
1274           ffmpegdec->context->sample_rate &&
1275           ffmpegdec->format.audio.channels == ffmpegdec->context->channels &&
1276           ffmpegdec->format.audio.depth == depth)
1278       GST_DEBUG_OBJECT (ffmpegdec,
1279           "Renegotiating audio from %dHz@%dchannels (%d) to %dHz@%dchannels (%d)",
1280           ffmpegdec->format.audio.samplerate, ffmpegdec->format.audio.channels,
1281           ffmpegdec->format.audio.depth,
1282           ffmpegdec->context->sample_rate, ffmpegdec->context->channels, depth);
1283       ffmpegdec->format.audio.samplerate = ffmpegdec->context->sample_rate;
1284       ffmpegdec->format.audio.channels = ffmpegdec->context->channels;
1285       ffmpegdec->format.audio.depth = depth;
/* build new caps from the codec context */
1292   caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
1293       ffmpegdec->context, oclass->in_plugin->id, FALSE);
1298   switch (oclass->in_plugin->type) {
1299     case AVMEDIA_TYPE_VIDEO:
1302       gboolean interlaced;
1304       width = ffmpegdec->format.video.clip_width;
1305       height = ffmpegdec->format.video.clip_height;
1306       interlaced = ffmpegdec->format.video.interlaced;
1308       if (width != -1 && height != -1) {
1309         /* overwrite the output size with the dimension of the
1310          * clipping region but only if they are smaller. */
1311         if (width < ffmpegdec->context->width)
1312           gst_caps_set_simple (caps, "width", G_TYPE_INT, width, NULL);
1313         if (height < ffmpegdec->context->height)
1314           gst_caps_set_simple (caps, "height", G_TYPE_INT, height, NULL);
1316       gst_caps_set_simple (caps, "interlaced", G_TYPE_BOOLEAN, interlaced,
1319       /* If a demuxer provided a framerate then use it (#313970) */
1320       if (ffmpegdec->format.video.fps_n != -1) {
1321         gst_caps_set_simple (caps, "framerate",
1322             GST_TYPE_FRACTION, ffmpegdec->format.video.fps_n,
1323             ffmpegdec->format.video.fps_d, NULL);
1325       gst_ffmpegdec_add_pixel_aspect_ratio (ffmpegdec,
1326           gst_caps_get_structure (caps, 0));
1329     case AVMEDIA_TYPE_AUDIO:
1337   if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
1340   gst_caps_unref (caps);
/* error paths: no caps mapping found for this codec */
1347 #ifdef HAVE_FFMPEG_UNINSTALLED
1348     /* using internal ffmpeg snapshot */
1349     GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
1350         ("Could not find GStreamer caps mapping for FFmpeg codec '%s'.",
1351             oclass->in_plugin->name), (NULL));
1353     /* using external ffmpeg */
1354     GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
1355         ("Could not find GStreamer caps mapping for FFmpeg codec '%s', and "
1356             "you are using an external libavcodec. This is most likely due to "
1357             "a packaging problem and/or libavcodec having been upgraded to a "
1358             "version that is not compatible with this version of "
1359             "gstreamer-ffmpeg. Make sure your gstreamer-ffmpeg and libavcodec "
1360             "packages come from the same source/repository.",
1361             oclass->in_plugin->name), (NULL));
/* error path: downstream refused the caps */
1367     GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
1368         ("Could not set caps for ffmpeg decoder (%s), not fixed?",
1369             oclass->in_plugin->name));
1370     gst_caps_unref (caps);
1376 /* perform qos calculations before decoding the next frame.
1378  * Sets the skip_frame flag and if things are really bad, skips to the next
1381  * Returns TRUE if the frame should be decoded, FALSE if the frame can be dropped
/* mode_switch is set to TRUE when the ffmpeg skip_frame discard mode was
 * changed by this call, so the caller knows a decoder state change happened. */
1385 gst_ffmpegdec_do_qos (GstFFMpegDec * ffmpegdec, GstClockTime timestamp,
1386     gboolean * mode_switch)
1388   GstClockTimeDiff diff;
1390   GstClockTime qostime, earliest_time;
1391   gboolean res = TRUE;
1393   *mode_switch = FALSE;
1395   /* no timestamp, can't do QoS */
1396   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (timestamp)))
1399   /* get latest QoS observation values */
1400   gst_ffmpegdec_read_qos (ffmpegdec, &proportion, &earliest_time);
1402   /* skip qos if we have no observation (yet) */
1403   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (earliest_time))) {
1404     /* no skip_frame initially */
1405     ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
1409   /* qos is done on running time of the timestamp */
1410   qostime = gst_segment_to_running_time (&ffmpegdec->segment, GST_FORMAT_TIME,
1413   /* timestamp can be out of segment, then we don't do QoS */
1414   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (qostime)))
1417   /* see how our next timestamp relates to the latest qos timestamp. negative
1418    * values mean we are early, positive values mean we are too late. */
1419   diff = GST_CLOCK_DIFF (qostime, earliest_time);
1421   GST_DEBUG_OBJECT (ffmpegdec, "QOS: qostime %" GST_TIME_FORMAT
1422       ", earliest %" GST_TIME_FORMAT, GST_TIME_ARGS (qostime),
1423       GST_TIME_ARGS (earliest_time));
1425   /* if we using less than 40% of the available time, we can try to
1426    * speed up again when we were slow. */
1427   if (proportion < 0.4 && diff < 0) {
1431     /* we're too slow, try to speed up */
1432     if (ffmpegdec->waiting_for_key) {
1433       /* we were waiting for a keyframe, that's ok */
1436     /* switch to skip_frame mode */
/* NOTE(review): the bodies of the goto targets (normal_mode, skip_frame,
 * skipping, drop_qos) are only partially visible in this excerpt. */
1442   ffmpegdec->processed++;
/* normal mode: restore default decoding when we recovered */
1452   if (ffmpegdec->context->skip_frame != AVDISCARD_DEFAULT) {
1453     ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
1454     *mode_switch = TRUE;
1455     GST_DEBUG_OBJECT (ffmpegdec, "QOS: normal mode %g < 0.4", proportion);
1457   ffmpegdec->processed++;
/* hurry-up mode: tell ffmpeg to skip non-reference frames */
1462   if (ffmpegdec->context->skip_frame != AVDISCARD_NONREF) {
1463     ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
1464     *mode_switch = TRUE;
1465     GST_DEBUG_OBJECT (ffmpegdec,
1466         "QOS: hurry up, diff %" G_GINT64_FORMAT " >= 0", diff);
/* drop path: account the dropped frame and post a QoS message downstream */
1472     GstClockTime stream_time, jitter;
1473     GstMessage *qos_msg;
1475     ffmpegdec->dropped++;
1477         gst_segment_to_stream_time (&ffmpegdec->segment, GST_FORMAT_TIME,
1479     jitter = GST_CLOCK_DIFF (qostime, earliest_time);
1481         gst_message_new_qos (GST_OBJECT_CAST (ffmpegdec), FALSE, qostime,
1482         stream_time, timestamp, GST_CLOCK_TIME_NONE);
1483     gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
1484     gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
1485         ffmpegdec->processed, ffmpegdec->dropped);
1486     gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec), qos_msg);
1492 /* returns TRUE if buffer is within segment, else FALSE.
1493  * if Buffer is on segment border, its timestamp and duration will be clipped */
1495 clip_video_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
1496     GstClockTime in_dur)
1498   gboolean res = TRUE;
1499   gint64 cstart, cstop;
1502   GST_LOG_OBJECT (dec,
1503       "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
1504       GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur));
1506   /* can't clip without TIME segment */
1507   if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
1510   /* we need a start time */
1511   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
1514   /* generate valid stop, if duration unknown, we have unknown stop */
1516       GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE;
/* ask the segment to clip [in_ts, stop); res is FALSE if fully outside */
1520       gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &cstart,
1522   if (G_UNLIKELY (!res))
1525   /* we're pretty sure the duration of this buffer is not till the end of this
1526    * segment (which _clip will assume when the stop is -1) */
1527   if (stop == GST_CLOCK_TIME_NONE)
1528     cstop = GST_CLOCK_TIME_NONE;
1530   /* update timestamp and possibly duration if the clipped stop time is
1532   GST_BUFFER_TIMESTAMP (buf) = cstart;
1533   if (GST_CLOCK_TIME_IS_VALID (cstop))
1534     GST_BUFFER_DURATION (buf) = cstop - cstart;
1536   GST_LOG_OBJECT (dec,
1537       "clipped timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
1538       GST_TIME_ARGS (cstart), GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1541   GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
1546 /* figure out if the current picture is a keyframe, return TRUE if that is
1549 check_keyframe (GstFFMpegDec * ffmpegdec)
1551   GstFFMpegDecClass *oclass;
1552   gboolean is_itype = FALSE;
1553   gboolean is_reference = FALSE;
1554   gboolean iskeyframe;
1556   /* figure out if we are dealing with a keyframe */
1557   oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
1559   /* remember that we have B frames, we need this for the DTS -> PTS conversion
/* side effect: first B frame seen flips has_b_frames and posts a latency
 * message so the pipeline can recompute its latency */
1561   if (!ffmpegdec->has_b_frames && ffmpegdec->picture->pict_type == FF_B_TYPE) {
1562     GST_DEBUG_OBJECT (ffmpegdec, "we have B frames");
1563     ffmpegdec->has_b_frames = TRUE;
1564     /* Emit latency message to recalculate it */
1565     gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec),
1566         gst_message_new_latency (GST_OBJECT_CAST (ffmpegdec)));
1569   is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
1570   is_reference = (ffmpegdec->picture->reference == 1);
/* intra-only codecs (Indeo3, MSZH, ZLIB, VP3, HuffYUV) always produce
 * keyframes regardless of what the picture flags say */
1572   iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
1573       || (oclass->in_plugin->id == CODEC_ID_INDEO3)
1574       || (oclass->in_plugin->id == CODEC_ID_MSZH)
1575       || (oclass->in_plugin->id == CODEC_ID_ZLIB)
1576       || (oclass->in_plugin->id == CODEC_ID_VP3)
1577       || (oclass->in_plugin->id == CODEC_ID_HUFFYUV);
1579   GST_LOG_OBJECT (ffmpegdec,
1580       "current picture: type: %d, is_keyframe:%d, is_itype:%d, is_reference:%d",
1581       ffmpegdec->picture->pict_type, iskeyframe, is_itype, is_reference);
1586 /* get an outbuf buffer with the current picture */
/* Fast path: if _get_buffer() already decoded straight into one of our
 * downstream buffers (picture->opaque set), just take an extra ref on it.
 * Slow path: allocate a fresh output buffer of the clipped size and copy
 * the decoded picture into it. */
1587 static GstFlowReturn
1588 get_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf)
1595   if (ffmpegdec->picture->opaque != NULL) {
1596     /* we allocated a picture already for ffmpeg to decode into, let's pick it
1597      * up and use it now. */
1598     *outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
1599     GST_LOG_OBJECT (ffmpegdec, "using opaque buffer %p", *outbuf);
1601     gst_buffer_ref (*outbuf);
1604     AVPicture pic, *outpic;
1607     GST_LOG_OBJECT (ffmpegdec, "get output buffer");
1609     /* figure out size of output buffer, this is the clipped output size because
1610      * we will copy the picture into it but only when the clipping region is
1611      * smaller than the actual picture size. */
1612     if ((width = ffmpegdec->format.video.clip_width) == -1)
1613       width = ffmpegdec->context->width;
1614     else if (width > ffmpegdec->context->width)
1615       width = ffmpegdec->context->width;
1617     if ((height = ffmpegdec->format.video.clip_height) == -1)
1618       height = ffmpegdec->context->height;
1619     else if (height > ffmpegdec->context->height)
1620       height = ffmpegdec->context->height;
1622     GST_LOG_OBJECT (ffmpegdec, "clip width %d/height %d", width, height);
1624     ret = alloc_output_buffer (ffmpegdec, outbuf, width, height);
1625     if (G_UNLIKELY (ret != GST_FLOW_OK))
1628     /* original ffmpeg code does not handle odd sizes correctly.
1629      * This patched up version does */
1630     gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (*outbuf),
1631         ffmpegdec->context->pix_fmt, width, height);
1633     outpic = (AVPicture *) ffmpegdec->picture;
1635     GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", outpic->linesize[0],
1636         outpic->linesize[1], outpic->linesize[2]);
1637     GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
1638         (guint) (outpic->data[1] - outpic->data[0]),
1639         (guint) (outpic->data[2] - outpic->data[0]));
/* copy (and crop) the decoded planes into our freshly allocated buffer */
1641     av_picture_copy (&pic, outpic, ffmpegdec->context->pix_fmt, width, height);
/* invalidate the stored timing index; it was consumed for this picture */
1643   ffmpegdec->picture->reordered_opaque = -1;
/* alloc_failed error path */
1650     GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed");
1656 clear_queued (GstFFMpegDec * ffmpegdec)
1658 g_list_foreach (ffmpegdec->queued, (GFunc) gst_mini_object_unref, NULL);
1659 g_list_free (ffmpegdec->queued);
1660 ffmpegdec->queued = NULL;
1663 static GstFlowReturn
1664 flush_queued (GstFFMpegDec * ffmpegdec)
1666 GstFlowReturn res = GST_FLOW_OK;
1668 while (ffmpegdec->queued) {
1669 GstBuffer *buf = GST_BUFFER_CAST (ffmpegdec->queued->data);
1671 GST_LOG_OBJECT (ffmpegdec, "pushing buffer %p, offset %"
1672 G_GUINT64_FORMAT ", timestamp %"
1673 GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, buf,
1674 GST_BUFFER_OFFSET (buf),
1675 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1676 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1678 /* iterate ouput queue an push downstream */
1679 res = gst_pad_push (ffmpegdec->srcpad, buf);
1682 g_list_delete_link (ffmpegdec->queued, ffmpegdec->queued);
1688 gst_avpacket_init (AVPacket * packet, guint8 * data, guint size)
1690 memset (packet, 0, sizeof (AVPacket));
1691 packet->data = data;
1692 packet->size = size;
1695 /* gst_ffmpegdec_[video|audio]_frame:
1697 * data: pointer to the data to decode
1698 * size: size of data in bytes
1699 * in_timestamp: incoming timestamp.
1700 * in_duration: incoming duration.
1701 * in_offset: incoming offset (frame number).
1702 * outbuf: outgoing buffer. Different from NULL ONLY if it contains decoded data.
1705 * Returns: number of bytes used in decoding. The check for successful decode is
1706 * outbuf being non-NULL.
/* gst_ffmpegdec_video_frame:
 * Decode one video frame from @data/@size with timing info @dec_info,
 * returning the decoded buffer in @outbuf (non-NULL only on success) and
 * the flow result in @ret.  Returns the number of input bytes consumed.
 * Also performs QoS-driven frame skipping, DTS->PTS reordering detection,
 * and output timestamp/offset/duration selection.
 * NOTE(review): this excerpt is missing many structural lines (braces,
 * declarations, goto labels); comments below mark where labels are implied.
 */
1709 gst_ffmpegdec_video_frame (GstFFMpegDec * ffmpegdec,
1710     guint8 * data, guint size,
1711     const GstTSInfo * dec_info, GstBuffer ** outbuf, GstFlowReturn * ret)
1715   gboolean iskeyframe;
1716   gboolean mode_switch;
1718   gint skip_frame = AVDISCARD_DEFAULT;
1719   GstClockTime out_timestamp, out_duration, out_pts;
1721   const GstTSInfo *out_info;
/* make our element reachable from the ffmpeg callbacks */
1727   ffmpegdec->context->opaque = ffmpegdec;
1729   /* in case we skip frames */
1730   ffmpegdec->picture->pict_type = -1;
1732   /* run QoS code, we don't stop decoding the frame when we are late because
1733    * else we might skip a reference frame */
1734   decode = gst_ffmpegdec_do_qos (ffmpegdec, dec_info->timestamp, &mode_switch);
1736   if (ffmpegdec->is_realvideo && data != NULL) {
1740     /* setup the slice table for realvideo */
1741     if (ffmpegdec->context->slice_offset == NULL)
1742       ffmpegdec->context->slice_offset = g_malloc (sizeof (guint32) * 1000);
1744     slice_count = (*data++) + 1;
1745     ffmpegdec->context->slice_count = slice_count;
1747     for (i = 0; i < slice_count; i++) {
1749       ffmpegdec->context->slice_offset[i] = GST_READ_UINT32_LE (data);
1755     /* no decoding needed, save previous skip_frame value and brutally skip
1756      * decoding everything */
1757     skip_frame = ffmpegdec->context->skip_frame;
1758     ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
1761   /* save reference to the timing info */
1762   ffmpegdec->context->reordered_opaque = (gint64) dec_info->idx;
1763   ffmpegdec->picture->reordered_opaque = (gint64) dec_info->idx;
1765   GST_DEBUG_OBJECT (ffmpegdec, "stored opaque values idx %d", dec_info->idx);
1767   /* now decode the frame */
1768   gst_avpacket_init (&packet, data, size);
1769   len = avcodec_decode_video2 (ffmpegdec->context,
1770       ffmpegdec->picture, &have_data, &packet);
1772   /* restore previous state */
1774     ffmpegdec->context->skip_frame = skip_frame;
1776   GST_DEBUG_OBJECT (ffmpegdec, "after decode: len %d, have_data %d",
1779   /* when we are in skip_frame mode, don't complain when ffmpeg returned
1780    * no data because we told it to skip stuff. */
1781   if (len < 0 && (mode_switch || ffmpegdec->context->skip_frame))
1784   if (len > 0 && have_data <= 0 && (mode_switch
1785           || ffmpegdec->context->skip_frame)) {
1786     /* we consumed some bytes but nothing decoded and we are skipping frames,
1787      * disable the interpolation of DTS timestamps */
1788     ffmpegdec->last_out = -1;
1791   /* no data, we're done */
1792   if (len < 0 || have_data <= 0)
1795   /* get the output picture timing info again */
1796   out_info = gst_ts_info_get (ffmpegdec, ffmpegdec->picture->reordered_opaque);
1797   out_pts = out_info->timestamp;
1798   out_duration = out_info->duration;
1799   out_offset = out_info->offset;
1801   GST_DEBUG_OBJECT (ffmpegdec,
1802       "pts %" G_GUINT64_FORMAT " duration %" G_GUINT64_FORMAT " offset %"
1803       G_GINT64_FORMAT, out_pts, out_duration, out_offset);
1804   GST_DEBUG_OBJECT (ffmpegdec, "picture: pts %" G_GUINT64_FORMAT,
1805       (guint64) ffmpegdec->picture->pts);
1806   GST_DEBUG_OBJECT (ffmpegdec, "picture: num %d",
1807       ffmpegdec->picture->coded_picture_number);
1808   GST_DEBUG_OBJECT (ffmpegdec, "picture: ref %d",
1809       ffmpegdec->picture->reference);
1810   GST_DEBUG_OBJECT (ffmpegdec, "picture: display %d",
1811       ffmpegdec->picture->display_picture_number);
1812   GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque %p",
1813       ffmpegdec->picture->opaque);
1814   GST_DEBUG_OBJECT (ffmpegdec, "picture: reordered opaque %" G_GUINT64_FORMAT,
1815       (guint64) ffmpegdec->picture->reordered_opaque);
1816   GST_DEBUG_OBJECT (ffmpegdec, "repeat_pict:%d",
1817       ffmpegdec->picture->repeat_pict);
1818   GST_DEBUG_OBJECT (ffmpegdec, "interlaced_frame:%d",
1819       ffmpegdec->picture->interlaced_frame);
/* renegotiate downstream caps when the interlacing of the stream changed */
1821   if (G_UNLIKELY (ffmpegdec->picture->interlaced_frame !=
1822           ffmpegdec->format.video.interlaced)) {
1823     GST_WARNING ("Change in interlacing ! picture:%d, recorded:%d",
1824         ffmpegdec->picture->interlaced_frame,
1825         ffmpegdec->format.video.interlaced);
1826     ffmpegdec->format.video.interlaced = ffmpegdec->picture->interlaced_frame;
1827     gst_ffmpegdec_negotiate (ffmpegdec, TRUE);
1831   /* Whether a frame is interlaced or not is unknown at the time of
1832      buffer allocation, so caps on the buffer in opaque will have
1833      the previous frame's interlaced flag set. So if interlacedness
1834      has changed since allocation, we update the buffer (if any)
1835      caps now with the correct interlaced flag. */
1836   if (ffmpegdec->picture->opaque != NULL) {
1837     GstBuffer *buffer = ffmpegdec->picture->opaque;
1838     if (GST_BUFFER_CAPS (buffer) && GST_PAD_CAPS (ffmpegdec->srcpad)) {
1839       GstStructure *s = gst_caps_get_structure (GST_BUFFER_CAPS (buffer), 0);
1840       gboolean interlaced;
1841       gboolean found = gst_structure_get_boolean (s, "interlaced", &interlaced);
1842       if (!found || (! !interlaced != ! !ffmpegdec->format.video.interlaced)) {
1843         GST_DEBUG_OBJECT (ffmpegdec,
1844             "Buffer interlacing does not match pad, updating");
1845         buffer = gst_buffer_make_metadata_writable (buffer);
1846         gst_buffer_set_caps (buffer, GST_PAD_CAPS (ffmpegdec->srcpad));
1847         ffmpegdec->picture->opaque = buffer;
1852   /* check if we are dealing with a keyframe here, this will also check if we
1853    * are dealing with B frames. */
1854   iskeyframe = check_keyframe (ffmpegdec);
1856   /* check that the timestamps go upwards */
1857   if (ffmpegdec->last_out != -1 && ffmpegdec->last_out > out_pts) {
1858     /* timestamps go backwards, this means frames were reordered and we must
1859      * be dealing with DTS as the buffer timestamps */
1860     if (!ffmpegdec->reordered_out) {
1861       GST_DEBUG_OBJECT (ffmpegdec, "detected reordered out timestamps");
1862       ffmpegdec->reordered_out = TRUE;
1864     if (ffmpegdec->reordered_in) {
1865       /* we reset the input reordering here because we want to recover from an
1866        * occasionally wrong reordered input timestamp */
1867       GST_DEBUG_OBJECT (ffmpegdec, "assuming DTS input timestamps");
1868       ffmpegdec->reordered_in = FALSE;
1872   if (out_pts == 0 && out_pts == ffmpegdec->last_out) {
1873     GST_LOG_OBJECT (ffmpegdec, "ffmpeg returns 0 timestamps, ignoring");
1874     /* some codecs only output 0 timestamps, when that happens, make us select an
1875      * output timestamp based on the input timestamp. We do this by making the
1876      * ffmpeg timestamp and the interpolated next timestamp invalid. */
1878     ffmpegdec->next_out = -1;
1880     ffmpegdec->last_out = out_pts;
1882   /* we assume DTS as input timestamps unless we see reordered input
1884   if (!ffmpegdec->reordered_in && ffmpegdec->reordered_out) {
1885     /* PTS and DTS are the same for keyframes */
1886     if (!iskeyframe && ffmpegdec->next_out != -1) {
1887       /* interpolate all timestamps except for keyframes, FIXME, this is
1888        * wrong when QoS is active. */
1889       GST_DEBUG_OBJECT (ffmpegdec, "interpolate timestamps");
1895   /* when we're waiting for a keyframe, see if we have one or drop the current
1897   if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
1898     if (G_LIKELY (!iskeyframe))
1899       goto drop_non_keyframe;
1901     /* we have a keyframe, we can stop waiting for one */
1902     ffmpegdec->waiting_for_key = FALSE;
1905   /* get a handle to the output buffer */
1906   *ret = get_output_buffer (ffmpegdec, outbuf);
1907   if (G_UNLIKELY (*ret != GST_FLOW_OK))
/* output timestamp selection: */
1913    * 1) Copy picture timestamp if valid
1914    * 2) else interpolate from previous output timestamp
1915    * 3) else copy input timestamp
1918   if (out_pts != -1) {
1919     /* Get (interpolated) timestamp from FFMPEG */
1920     out_timestamp = (GstClockTime) out_pts;
1921     GST_LOG_OBJECT (ffmpegdec, "using timestamp %" GST_TIME_FORMAT
1922         " returned by ffmpeg", GST_TIME_ARGS (out_timestamp));
1924   if (!GST_CLOCK_TIME_IS_VALID (out_timestamp) && ffmpegdec->next_out != -1) {
1925     out_timestamp = ffmpegdec->next_out;
1926     GST_LOG_OBJECT (ffmpegdec, "using next timestamp %" GST_TIME_FORMAT,
1927         GST_TIME_ARGS (out_timestamp));
1929   if (!GST_CLOCK_TIME_IS_VALID (out_timestamp)) {
1930     out_timestamp = dec_info->timestamp;
1931     GST_LOG_OBJECT (ffmpegdec, "using in timestamp %" GST_TIME_FORMAT,
1932         GST_TIME_ARGS (out_timestamp));
1934   GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
/* output offset selection: */
1938    * 0) Use stored input offset (from opaque)
1939    * 1) Use value converted from timestamp if valid
1940    * 2) Use input offset if valid
1942   if (out_offset != GST_BUFFER_OFFSET_NONE) {
1943     /* out_offset already contains the offset from ts_info */
1944     GST_LOG_OBJECT (ffmpegdec, "Using offset returned by ffmpeg");
1945   } else if (out_timestamp != GST_CLOCK_TIME_NONE) {
1946     GstFormat out_fmt = GST_FORMAT_DEFAULT;
1947     GST_LOG_OBJECT (ffmpegdec, "Using offset converted from timestamp");
1948     /* FIXME, we should really remove this as it's not nice at all to do
1949      * upstream queries for each frame to get the frame offset. We also can't
1950      * really remove this because it is the only way of setting frame offsets
1951      * on outgoing buffers. We should have metadata so that the upstream peer
1952      * can set a frame number on the encoded data. */
1953     gst_pad_query_peer_convert (ffmpegdec->sinkpad,
1954         GST_FORMAT_TIME, out_timestamp, &out_fmt, &out_offset);
1955   } else if (dec_info->offset != GST_BUFFER_OFFSET_NONE) {
1956     /* FIXME, the input offset is input media specific and might not
1957      * be the same for the output media. (byte offset as input, frame number
1958      * as output, for example) */
1959     GST_LOG_OBJECT (ffmpegdec, "using in_offset %" G_GINT64_FORMAT,
1961     out_offset = dec_info->offset;
1963     GST_LOG_OBJECT (ffmpegdec, "no valid offset found");
1964     out_offset = GST_BUFFER_OFFSET_NONE;
1966   GST_BUFFER_OFFSET (*outbuf) = out_offset;
/* output duration selection: */
1971    * 1) Use reordered input duration if valid
1972    * 2) Else use input duration
1973    * 3) else use input framerate
1974    * 4) else use ffmpeg framerate
1976   if (GST_CLOCK_TIME_IS_VALID (out_duration)) {
1977     /* We have a valid (reordered) duration */
1978     GST_LOG_OBJECT (ffmpegdec, "Using duration returned by ffmpeg");
1979   } else if (GST_CLOCK_TIME_IS_VALID (dec_info->duration)) {
1980     GST_LOG_OBJECT (ffmpegdec, "using in_duration");
1981     out_duration = dec_info->duration;
1982   } else if (GST_CLOCK_TIME_IS_VALID (ffmpegdec->last_diff)) {
1983     GST_LOG_OBJECT (ffmpegdec, "using last-diff");
1984     out_duration = ffmpegdec->last_diff;
1986     /* if we have an input framerate, use that */
1987     if (ffmpegdec->format.video.fps_n != -1 &&
1988         (ffmpegdec->format.video.fps_n != 1000 &&
1989             ffmpegdec->format.video.fps_d != 1)) {
1990       GST_LOG_OBJECT (ffmpegdec, "using input framerate for duration");
1991       out_duration = gst_util_uint64_scale_int (GST_SECOND,
1992           ffmpegdec->format.video.fps_d, ffmpegdec->format.video.fps_n);
1994       /* don't try to use the decoder's framerate when it seems a bit abnormal,
1995        * which we assume when den >= 1000... */
1996       if (ffmpegdec->context->time_base.num != 0 &&
1997           (ffmpegdec->context->time_base.den > 0 &&
1998               ffmpegdec->context->time_base.den < 1000)) {
1999         GST_LOG_OBJECT (ffmpegdec, "using decoder's framerate for duration");
2000         out_duration = gst_util_uint64_scale_int (GST_SECOND,
2001             ffmpegdec->context->time_base.num *
2002             ffmpegdec->context->ticks_per_frame,
2003             ffmpegdec->context->time_base.den);
2005         GST_LOG_OBJECT (ffmpegdec, "no valid duration found");
2010   /* Take repeat_pict into account */
2011   if (GST_CLOCK_TIME_IS_VALID (out_duration)) {
2012     out_duration += out_duration * ffmpegdec->picture->repeat_pict / 2;
2014   GST_BUFFER_DURATION (*outbuf) = out_duration;
/* remember where the next frame should start for interpolation */
2016   if (out_timestamp != -1 && out_duration != -1 && out_duration != 0)
2017     ffmpegdec->next_out = out_timestamp + out_duration;
2019     ffmpegdec->next_out = -1;
2021   /* palette is not part of raw video frame in gst and the size
2022    * of the outgoing buffer needs to be adjusted accordingly */
2023   if (ffmpegdec->context->palctrl != NULL)
2024     GST_BUFFER_SIZE (*outbuf) -= AVPALETTE_SIZE;
2026   /* now see if we need to clip the buffer against the segment boundaries. */
2027   if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, out_timestamp,
2031   /* mark as keyframe or delta unit */
2033     GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
2035   if (ffmpegdec->picture->top_field_first)
2036     GST_BUFFER_FLAG_SET (*outbuf, GST_VIDEO_BUFFER_TFF);
/* beach: common exit point (label not visible in this excerpt) */
2040   GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
2041       *ret, *outbuf, len);
/* drop_non_keyframe error path */
2047   GST_WARNING_OBJECT (ffmpegdec, "Dropping non-keyframe (seek/init)");
/* no_output error path */
2052   GST_DEBUG_OBJECT (ffmpegdec, "no output buffer");
/* clipped error path: buffer fell entirely outside the segment */
2058   GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
2059   gst_buffer_unref (*outbuf);
2065 /* returns TRUE if buffer is within segment, else FALSE.
2066  * if Buffer is on segment border, its timestamp and duration will be clipped */
/* Unlike the video variant, partial overlaps also trim the buffer's DATA
 * pointer and SIZE so the samples outside the segment are cut off. */
2068 clip_audio_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
2069     GstClockTime in_dur)
2072   gint64 diff, ctime, cstop;
2073   gboolean res = TRUE;
2075   GST_LOG_OBJECT (dec,
2076       "timestamp:%" GST_TIME_FORMAT ", duration:%" GST_TIME_FORMAT
2077       ", size %u", GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
2078       GST_BUFFER_SIZE (buf));
2080   /* can't clip without TIME segment */
2081   if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
2084   /* we need a start time */
2085   if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
2088   /* trust duration */
2089   stop = in_ts + in_dur;
2091   res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &ctime,
2093   if (G_UNLIKELY (!res))
2094     goto out_of_segment;
2096   /* see if some clipping happened */
2097   if (G_UNLIKELY ((diff = ctime - in_ts) > 0)) {
2098     /* bring clipped time to bytes */
/* bytes = seconds * samplerate * bytes-per-sample-frame (depth * channels) */
2100         gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
2101         GST_SECOND) * (dec->format.audio.depth * dec->format.audio.channels);
2103     GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
2104         G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);
2106     GST_BUFFER_SIZE (buf) -= diff;
2107     GST_BUFFER_DATA (buf) += diff;
2109   if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
2110     /* bring clipped time to bytes */
2112         gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
2113         GST_SECOND) * (dec->format.audio.depth * dec->format.audio.channels);
2115     GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
2116         G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);
2118     GST_BUFFER_SIZE (buf) -= diff;
2120   GST_BUFFER_TIMESTAMP (buf) = ctime;
2121   GST_BUFFER_DURATION (buf) = cstop - ctime;
2124   GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
/* out_of_segment error path */
2130   GST_LOG_OBJECT (dec, "out of segment");
/* gst_ffmpegdec_audio_frame:
 * Decodes one chunk of compressed audio into a freshly allocated output
 * buffer, negotiates srcpad caps, stamps timestamp/duration/offset on the
 * result and clips it against the configured segment.
 *
 * NOTE(review): several lines of this function (declarations, braces,
 * goto targets) are missing from this extract; comments only describe
 * what the visible code demonstrably does. */
2136 gst_ffmpegdec_audio_frame (GstFFMpegDec * ffmpegdec,
2137 AVCodec * in_plugin, guint8 * data, guint size,
2138 const GstTSInfo * dec_info, GstBuffer ** outbuf, GstFlowReturn * ret)
/* have_data starts at the maximum decode size; avcodec_decode_audio3()
 * overwrites it with the number of bytes actually produced. */
2141 gint have_data = AVCODEC_MAX_AUDIO_FRAME_SIZE;
2142 GstClockTime out_timestamp, out_duration;
2146 GST_DEBUG_OBJECT (ffmpegdec,
2147 "size:%d, offset:%" G_GINT64_FORMAT ", ts:%" GST_TIME_FORMAT ", dur:%"
2148 GST_TIME_FORMAT ", ffmpegdec->next_out:%" GST_TIME_FORMAT, size,
2149 dec_info->offset, GST_TIME_ARGS (dec_info->timestamp),
2150 GST_TIME_ARGS (dec_info->duration), GST_TIME_ARGS (ffmpegdec->next_out));
/* allocate a worst-case sized, alignment-friendly output buffer up front */
2153 new_aligned_buffer (AVCODEC_MAX_AUDIO_FRAME_SIZE,
2154 GST_PAD_CAPS (ffmpegdec->srcpad));
2156 gst_avpacket_init (&packet, data, size);
2157 len = avcodec_decode_audio3 (ffmpegdec->context,
2158 (int16_t *) GST_BUFFER_DATA (*outbuf), &have_data, &packet);
2159 GST_DEBUG_OBJECT (ffmpegdec,
2160 "Decode audio: len=%d, have_data=%d", len, have_data);
/* only produce an output buffer when the decoder consumed data AND
 * emitted decoded samples */
2162 if (len >= 0 && have_data > 0) {
2163 GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
2164 if (!gst_ffmpegdec_negotiate (ffmpegdec, FALSE)) {
2165 gst_buffer_unref (*outbuf);
/* shrink the buffer to the number of bytes actually decoded */
2172 GST_BUFFER_SIZE (*outbuf) = have_data;
2177 * 1) Copy input timestamp if valid
2178 * 2) else interpolate from previous input timestamp
2180 /* always take timestamps from the input buffer if any */
2181 if (GST_CLOCK_TIME_IS_VALID (dec_info->timestamp)) {
2182 out_timestamp = dec_info->timestamp;
2184 out_timestamp = ffmpegdec->next_out;
2190 * 1) calculate based on number of samples
/* duration = bytes / (bytes-per-sample-frame * samplerate) scaled to time */
2192 out_duration = gst_util_uint64_scale (have_data, GST_SECOND,
2193 ffmpegdec->format.audio.depth * ffmpegdec->format.audio.channels *
2194 ffmpegdec->format.audio.samplerate);
2200 out_offset = dec_info->offset;
2202 GST_DEBUG_OBJECT (ffmpegdec,
2203 "Buffer created. Size:%d , timestamp:%" GST_TIME_FORMAT " , duration:%"
2204 GST_TIME_FORMAT, have_data,
2205 GST_TIME_ARGS (out_timestamp), GST_TIME_ARGS (out_duration));
2207 GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
2208 GST_BUFFER_DURATION (*outbuf) = out_duration;
2209 GST_BUFFER_OFFSET (*outbuf) = out_offset;
2210 gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
2212 /* the next timestamp we'll use when interpolating */
2213 if (GST_CLOCK_TIME_IS_VALID (out_timestamp))
2214 ffmpegdec->next_out = out_timestamp + out_duration;
2216 /* now see if we need to clip the buffer against the segment boundaries. */
2217 if (G_UNLIKELY (!clip_audio_buffer (ffmpegdec, *outbuf, out_timestamp,
2222 gst_buffer_unref (*outbuf);
2226 /* If we don't error out after the first failed read with the AAC decoder,
2227 * we must *not* carry on pushing data, else we'll cause segfaults... */
2228 if (len == -1 && (in_plugin->id == CODEC_ID_AAC
2229 || in_plugin->id == CODEC_ID_AAC_LATM)) {
2230 GST_ELEMENT_ERROR (ffmpegdec, STREAM, DECODE, (NULL),
2231 ("Decoding of AAC stream by FFMPEG failed."));
2232 *ret = GST_FLOW_ERROR;
2236 GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
2237 *ret, *outbuf, len);
/* clipped: the whole buffer fell outside the segment; drop it */
2243 GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
2244 gst_buffer_unref (*outbuf);
2250 /* gst_ffmpegdec_frame:
2252 * data: pointer to the data to decode
2253 * size: size of data in bytes
2254 * got_data: 0 if no data was decoded, != 0 otherwise.
2255 * dec_info: timestamp, duration and offset info for the data
2256 * (see GstTSInfo)
2257 * ret: GstFlowReturn to return in the chain function
2259 * Decodes the given frame and pushes it downstream.
2261 * Returns: Number of bytes used in decoding, -1 on error/failure.
/* Dispatches one encoded frame to the video or audio decode helper and,
 * when a decoded buffer results, pushes it downstream (or queues it for
 * reverse playback).
 * NOTE(review): lines are missing from this extract (case terminators,
 * labels, returns); comments describe only the visible code. */
2265 gst_ffmpegdec_frame (GstFFMpegDec * ffmpegdec,
2266 guint8 * data, guint size, gint * got_data, const GstTSInfo * dec_info,
2267 GstFlowReturn * ret)
2269 GstFFMpegDecClass *oclass;
2270 GstBuffer *outbuf = NULL;
2271 gint have_data = 0, len = 0;
/* bail out early when the codec was never opened */
2273 if (G_UNLIKELY (ffmpegdec->context->codec == NULL))
2276 GST_LOG_OBJECT (ffmpegdec, "data:%p, size:%d, id:%d", data, size,
2280 ffmpegdec->context->frame_number++;
2282 oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
/* route to the media-type specific decode helper */
2284 switch (oclass->in_plugin->type) {
2285 case AVMEDIA_TYPE_VIDEO:
2287 gst_ffmpegdec_video_frame (ffmpegdec, data, size, dec_info, &outbuf,
2290 case AVMEDIA_TYPE_AUDIO:
2292 gst_ffmpegdec_audio_frame (ffmpegdec, oclass->in_plugin, data, size,
2293 dec_info, &outbuf, ret);
2295 /* if we did not get an output buffer and we have a pending discont, don't
2296 * clear the input timestamps, we will put them on the next buffer because
2297 * else we might create the first buffer with a very big timestamp gap. */
2298 if (outbuf == NULL && ffmpegdec->discont) {
2299 GST_DEBUG_OBJECT (ffmpegdec, "no buffer but keeping timestamp");
2300 ffmpegdec->clear_ts = FALSE;
2304 GST_ERROR_OBJECT (ffmpegdec, "Asked to decode non-audio/video frame !");
2305 g_assert_not_reached ();
/* a negative result from either helper is a decode error, not fatal here */
2312 if (len < 0 || have_data < 0) {
2313 GST_WARNING_OBJECT (ffmpegdec,
2314 "ffdec_%s: decoding error (len: %d, have_data: %d)",
2315 oclass->in_plugin->name, len, have_data);
2318 } else if (len == 0 && have_data == 0) {
2322 /* this is where I lost my last clue on ffmpeg... */
2327 GST_LOG_OBJECT (ffmpegdec,
2328 "Decoded data, now pushing buffer %p with offset %" G_GINT64_FORMAT
2329 ", timestamp %" GST_TIME_FORMAT " and duration %" GST_TIME_FORMAT,
2330 outbuf, GST_BUFFER_OFFSET (outbuf),
2331 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
2332 GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));
2334 /* mark pending discont */
2335 if (ffmpegdec->discont) {
2336 GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
2337 ffmpegdec->discont = FALSE;
/* forward playback pushes immediately; reverse playback queues */
2340 if (ffmpegdec->segment.rate > 0.0) {
2342 *ret = gst_pad_push (ffmpegdec->srcpad, outbuf);
2344 /* reverse playback, queue frame till later when we get a discont. */
2345 GST_DEBUG_OBJECT (ffmpegdec, "queued frame");
2346 ffmpegdec->queued = g_list_prepend (ffmpegdec->queued, outbuf);
2350 GST_DEBUG_OBJECT (ffmpegdec, "We didn't get a decoded buffer");
/* ERROR path: no codec context was set up */
2359 GST_ERROR_OBJECT (ffmpegdec, "no codec context");
/* Drains any frames the codec still has buffered (codecs with the DELAY
 * capability) by feeding it empty input, then flushes queued reverse-playback
 * frames when playing backwards. */
2365 gst_ffmpegdec_drain (GstFFMpegDec * ffmpegdec)
2367 GstFFMpegDecClass *oclass;
2369 oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2371 if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
2372 gint have_data, len, try = 0;
2374 GST_LOG_OBJECT (ffmpegdec,
2375 "codec has delay capabilities, calling until ffmpeg has drained everything");
/* feed NULL/0 input until the decoder stops producing output,
 * capped at 10 attempts to avoid spinning forever */
2381 gst_ffmpegdec_frame (ffmpegdec, NULL, 0, &have_data, &ts_info_none,
2383 if (len < 0 || have_data == 0)
2385 } while (try++ < 10);
2387 if (ffmpegdec->segment.rate < 0.0) {
2388 /* if we have some queued frames for reverse playback, flush them now */
2389 flush_queued (ffmpegdec);
/* Resets the parser state (pctx) and drops any cached, unconsumed input
 * (pcache). Called on flush/discont so stale parse state and timestamps
 * cannot leak into the next stretch of data. */
2394 gst_ffmpegdec_flush_pcache (GstFFMpegDec * ffmpegdec)
2396 if (ffmpegdec->pctx) {
2399 guint8 bdata[FF_INPUT_BUFFER_PADDING_SIZE];
2401 bsize = FF_INPUT_BUFFER_PADDING_SIZE;
2402 memset (bdata, 0, bsize);
2404 /* parse some dummy data to work around some ffmpeg weirdness where it keeps
2405 * the previous pts around */
2406 av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
2407 &data, &size, bdata, bsize, -1, -1, -1);
/* invalidate the parser's remembered timestamps */
2408 ffmpegdec->pctx->pts = -1;
2409 ffmpegdec->pctx->dts = -1;
2412 if (ffmpegdec->pcache) {
2413 gst_buffer_unref (ffmpegdec->pcache);
2414 ffmpegdec->pcache = NULL;
/* Sink-pad event handler: drains on EOS-like events, resets all decoder
 * state on FLUSH_STOP, and converts/stores NEWSEGMENT info (BYTES segments
 * are converted to TIME using the context bitrate).
 * NOTE(review): lines are missing from this extract (case labels, breaks,
 * error-label declarations); comments describe only the visible code. */
2419 gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event)
2421 GstFFMpegDec *ffmpegdec;
2422 gboolean ret = FALSE;
2424 ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
2426 GST_DEBUG_OBJECT (ffmpegdec, "Handling %s event",
2427 GST_EVENT_TYPE_NAME (event));
2429 switch (GST_EVENT_TYPE (event)) {
/* push out everything the codec still holds */
2432 gst_ffmpegdec_drain (ffmpegdec);
2435 case GST_EVENT_FLUSH_STOP:
2437 if (ffmpegdec->opened) {
2438 avcodec_flush_buffers (ffmpegdec->context);
/* full state reset: timestamps, QoS, parse cache, keyframe wait,
 * segment and reverse-playback queue */
2440 gst_ffmpegdec_reset_ts (ffmpegdec);
2441 gst_ffmpegdec_reset_qos (ffmpegdec);
2442 gst_ffmpegdec_flush_pcache (ffmpegdec);
2443 ffmpegdec->waiting_for_key = TRUE;
2444 gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
2445 clear_queued (ffmpegdec);
2448 case GST_EVENT_NEWSEGMENT:
2452 gint64 start, stop, time;
2453 gdouble rate, arate;
2455 gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
2456 &start, &stop, &time);
2459 case GST_FORMAT_TIME:
2460 /* fine, our native segment format */
2462 case GST_FORMAT_BYTES:
2466 bit_rate = ffmpegdec->context->bit_rate;
2468 /* convert to time or fail */
2472 GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);
2474 /* convert values to TIME */
2476 start = gst_util_uint64_scale_int (start, GST_SECOND, bit_rate);
2478 stop = gst_util_uint64_scale_int (stop, GST_SECOND, bit_rate);
2480 time = gst_util_uint64_scale_int (time, GST_SECOND, bit_rate);
2482 /* unref old event */
2483 gst_event_unref (event);
2485 /* create new converted time segment */
2486 fmt = GST_FORMAT_TIME;
2487 /* FIXME, bitrate is not good enough too find a good stop, let's
2488 * hope start and time were 0... meh. */
2490 event = gst_event_new_new_segment (update, rate, fmt,
2495 /* invalid format */
2496 goto invalid_format;
2499 /* drain pending frames before trying to use the new segment, queued
2500 * buffers belonged to the previous segment. */
2501 if (ffmpegdec->context->codec)
2502 gst_ffmpegdec_drain (ffmpegdec);
2504 GST_DEBUG_OBJECT (ffmpegdec,
2505 "NEWSEGMENT in time start %" GST_TIME_FORMAT " -- stop %"
2506 GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
2508 /* and store the values */
2509 gst_segment_set_newsegment_full (&ffmpegdec->segment, update,
2510 rate, arate, fmt, start, stop, time);
2517 /* and push segment downstream */
2518 ret = gst_pad_push_event (ffmpegdec->srcpad, event);
2521 gst_object_unref (ffmpegdec);
/* ERROR paths: BYTES segment without a bitrate, or an unsupported format;
 * the event is dropped in both cases */
2528 GST_WARNING_OBJECT (ffmpegdec, "no bitrate to convert BYTES to TIME");
2529 gst_event_unref (event);
2534 GST_WARNING_OBJECT (ffmpegdec, "unknown format received in NEWSEGMENT");
2535 gst_event_unref (event);
/* Sink-pad chain function: the main per-buffer entry point. Handles
 * discontinuities, keyframe waiting, joining with the parse cache,
 * timestamp bookkeeping, optional input padding, parser-driven frame
 * splitting and the decode loop; keeps unconsumed parser data for the
 * next buffer.
 * NOTE(review): many lines are missing from this extract (declarations,
 * braces, goto labels, returns); comments describe only the visible code. */
2540 static GstFlowReturn
2541 gst_ffmpegdec_chain (GstPad * pad, GstBuffer * inbuf)
2543 GstFFMpegDec *ffmpegdec;
2544 GstFFMpegDecClass *oclass;
2545 guint8 *data, *bdata;
2546 gint size, bsize, len, have_data;
2547 GstFlowReturn ret = GST_FLOW_OK;
2548 GstClockTime in_timestamp;
2549 GstClockTime in_duration;
2552 const GstTSInfo *in_info;
2553 const GstTSInfo *dec_info;
2555 ffmpegdec = (GstFFMpegDec *) (GST_PAD_PARENT (pad));
/* refuse data until caps have been set and the codec opened */
2557 if (G_UNLIKELY (!ffmpegdec->opened))
2558 goto not_negotiated;
2560 discont = GST_BUFFER_IS_DISCONT (inbuf);
2562 /* The discont flags marks a buffer that is not continuous with the previous
2563 * buffer. This means we need to clear whatever data we currently have. We
2564 * currently also wait for a new keyframe, which might be suboptimal in the
2565 * case of a network error, better show the errors than to drop all data.. */
2566 if (G_UNLIKELY (discont)) {
2567 GST_DEBUG_OBJECT (ffmpegdec, "received DISCONT");
2568 /* drain what we have queued */
2569 gst_ffmpegdec_drain (ffmpegdec);
2570 gst_ffmpegdec_flush_pcache (ffmpegdec);
2571 avcodec_flush_buffers (ffmpegdec->context);
2572 ffmpegdec->discont = TRUE;
2573 gst_ffmpegdec_reset_ts (ffmpegdec);
2575 /* by default we clear the input timestamp after decoding each frame so that
2576 * interpolation can work. */
2577 ffmpegdec->clear_ts = TRUE;
2579 oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2581 /* do early keyframe check pretty bad to rely on the keyframe flag in the
2582 * source for this as it might not even be parsed (UDP/file/..). */
2583 if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
2584 GST_DEBUG_OBJECT (ffmpegdec, "waiting for keyframe");
/* delta units are only skipped for video; audio always passes through */
2585 if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DELTA_UNIT) &&
2586 oclass->in_plugin->type != AVMEDIA_TYPE_AUDIO)
2589 GST_DEBUG_OBJECT (ffmpegdec, "got keyframe");
2590 ffmpegdec->waiting_for_key = FALSE;
2592 /* parse cache joining. If there is cached data */
2593 if (ffmpegdec->pcache) {
2594 /* join with previous data */
2595 GST_LOG_OBJECT (ffmpegdec, "join parse cache");
2596 inbuf = gst_buffer_join (ffmpegdec->pcache, inbuf);
2597 /* no more cached data, we assume we can consume the complete cache */
2598 ffmpegdec->pcache = NULL;
2601 in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
2602 in_duration = GST_BUFFER_DURATION (inbuf);
2603 in_offset = GST_BUFFER_OFFSET (inbuf);
2605 /* get handle to timestamp info, we can pass this around to ffmpeg */
2606 in_info = gst_ts_info_store (ffmpegdec, in_timestamp, in_duration, in_offset);
2608 if (in_timestamp != -1) {
2609 /* check for increasing timestamps if they are jumping backwards, we
2610 * probably are dealing with PTS as timestamps */
2611 if (!ffmpegdec->reordered_in && ffmpegdec->last_in != -1) {
2612 if (in_timestamp < ffmpegdec->last_in) {
2613 GST_LOG_OBJECT (ffmpegdec, "detected reordered input timestamps");
2614 ffmpegdec->reordered_in = TRUE;
2615 ffmpegdec->last_diff = GST_CLOCK_TIME_NONE;
2616 } else if (in_timestamp > ffmpegdec->last_in) {
2618 /* keep track of timestamp diff to estimate duration */
2619 diff = in_timestamp - ffmpegdec->last_in;
2620 /* need to scale with amount of frames in the interval */
2621 if (ffmpegdec->last_frames)
2622 diff /= ffmpegdec->last_frames;
2624 GST_LOG_OBJECT (ffmpegdec, "estimated duration %" GST_TIME_FORMAT " %u",
2625 GST_TIME_ARGS (diff), ffmpegdec->last_frames);
2627 ffmpegdec->last_diff = diff;
2630 ffmpegdec->last_in = in_timestamp;
2631 ffmpegdec->last_frames = 0;
2634 GST_LOG_OBJECT (ffmpegdec,
2635 "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
2636 GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
2637 GST_BUFFER_SIZE (inbuf), GST_BUFFER_OFFSET (inbuf),
2638 GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration), in_info->idx);
2640 /* workarounds, functions write to buffers:
2641 * libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
2642 * libavcodec/svq3.c:svq3_decode_slice_header too.
2643 * ffmpeg devs know about it and will fix it (they said). */
2644 if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
2645 oclass->in_plugin->id == CODEC_ID_SVQ3) {
2646 inbuf = gst_buffer_make_writable (inbuf);
2649 bdata = GST_BUFFER_DATA (inbuf);
2650 bsize = GST_BUFFER_SIZE (inbuf);
/* optionally copy the input into a zero-padded scratch buffer, since
 * libav expects FF_INPUT_BUFFER_PADDING_SIZE extra readable bytes */
2652 if (ffmpegdec->do_padding) {
2654 if (ffmpegdec->padded_size < bsize + FF_INPUT_BUFFER_PADDING_SIZE) {
2655 ffmpegdec->padded_size = bsize + FF_INPUT_BUFFER_PADDING_SIZE;
2656 ffmpegdec->padded = g_realloc (ffmpegdec->padded, ffmpegdec->padded_size);
2657 GST_LOG_OBJECT (ffmpegdec, "resized padding buffer to %d",
2658 ffmpegdec->padded_size);
2660 memcpy (ffmpegdec->padded, bdata, bsize);
2661 memset (ffmpegdec->padded + bsize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2663 bdata = ffmpegdec->padded;
/* main decode loop: consume bdata/bsize one parsed frame at a time */
2667 guint8 tmp_padding[FF_INPUT_BUFFER_PADDING_SIZE];
2669 /* parse, if at all possible */
2670 if (ffmpegdec->pctx) {
2673 GST_LOG_OBJECT (ffmpegdec,
2674 "Calling av_parser_parse2 with offset %" G_GINT64_FORMAT ", ts:%"
2675 GST_TIME_FORMAT " size %d", in_offset, GST_TIME_ARGS (in_timestamp),
2678 /* feed the parser. We pass the timestamp info so that we can recover all
2679 * info again later */
2680 res = av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
2681 &data, &size, bdata, bsize, in_info->idx, in_info->idx, in_offset);
2683 GST_LOG_OBJECT (ffmpegdec,
2684 "parser returned res %d and size %d, id %" G_GINT64_FORMAT, res, size,
2685 ffmpegdec->pctx->pts);
2687 /* store pts for decoding */
2688 if (ffmpegdec->pctx->pts != AV_NOPTS_VALUE && ffmpegdec->pctx->pts != -1)
2689 dec_info = gst_ts_info_get (ffmpegdec, ffmpegdec->pctx->pts);
2691 /* ffmpeg sometimes loses track after a flush, help it by feeding a
2692 * valid start time */
2693 ffmpegdec->pctx->pts = in_info->idx;
2694 ffmpegdec->pctx->dts = in_info->idx;
2698 GST_LOG_OBJECT (ffmpegdec, "consuming %d bytes. id %d", size,
2702 /* there is output, set pointers for next round. */
2706 /* Parser did not consume any data, make sure we don't clear the
2707 * timestamp for the next round */
2708 ffmpegdec->clear_ts = FALSE;
2711 /* if there is no output, we must break and wait for more data. also the
2712 * timestamp in the context is not updated. */
2726 if (ffmpegdec->do_padding) {
2727 /* add temporary padding */
2728 memcpy (tmp_padding, data + size, FF_INPUT_BUFFER_PADDING_SIZE);
2729 memset (data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2732 /* decode a frame of audio/video now */
2734 gst_ffmpegdec_frame (ffmpegdec, data, size, &have_data, dec_info, &ret);
/* restore the bytes we temporarily zeroed for padding */
2736 if (ffmpegdec->do_padding) {
2737 memcpy (data + size, tmp_padding, FF_INPUT_BUFFER_PADDING_SIZE);
2740 if (ret != GST_FLOW_OK) {
2741 GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
2742 gst_flow_get_name (ret));
2743 /* bad flow return, make sure we discard all data and exit */
2747 if (!ffmpegdec->pctx) {
2748 if (len == 0 && !have_data) {
2749 /* nothing was decoded, this could be because no data was available or
2750 * because we were skipping frames.
2751 * If we have no context we must exit and wait for more data, we keep the
2753 GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");
2755 } else if (len < 0) {
2756 /* a decoding error happened, we must break and try again with next data. */
2757 GST_LOG_OBJECT (ffmpegdec, "Decoding error, breaking");
2761 /* prepare for the next round, for codecs with a context we did this
2762 * already when using the parser. */
2767 /* nothing was decoded, this could be because no data was available or
2768 * because we were skipping frames. Since we have a parser we can
2769 * continue with the next frame */
2770 GST_LOG_OBJECT (ffmpegdec,
2771 "Decoding didn't return any data, trying next");
2772 } else if (len < 0) {
2773 /* we have a context that will bring us to the next frame */
2774 GST_LOG_OBJECT (ffmpegdec, "Decoding error, trying next");
2778 /* make sure we don't use the same old timestamp for the next frame and let
2779 * the interpolation take care of it. */
2780 if (ffmpegdec->clear_ts) {
2781 in_timestamp = GST_CLOCK_TIME_NONE;
2782 in_duration = GST_CLOCK_TIME_NONE;
2783 in_offset = GST_BUFFER_OFFSET_NONE;
2784 in_info = GST_TS_INFO_NONE;
2786 ffmpegdec->clear_ts = TRUE;
2788 ffmpegdec->last_frames++;
2790 GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0). bsize:%d , bdata:%p",
2792 } while (bsize > 0);
2794 /* keep left-over */
/* when a parser is active, stash the unconsumed tail for the next chain
 * call; without a parser leftover bytes are simply dropped */
2795 if (ffmpegdec->pctx && bsize > 0) {
2796 in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
2797 in_offset = GST_BUFFER_OFFSET (inbuf);
2799 GST_LOG_OBJECT (ffmpegdec,
2800 "Keeping %d bytes of data with offset %" G_GINT64_FORMAT ", timestamp %"
2801 GST_TIME_FORMAT, bsize, in_offset, GST_TIME_ARGS (in_timestamp));
2803 ffmpegdec->pcache = gst_buffer_create_sub (inbuf,
2804 GST_BUFFER_SIZE (inbuf) - bsize, bsize);
2805 /* we keep timestamp, even though all we really know is that the correct
2806 * timestamp is not below the one from inbuf */
2807 GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = in_timestamp;
2808 GST_BUFFER_OFFSET (ffmpegdec->pcache) = in_offset;
2809 } else if (bsize > 0) {
2810 GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);
2812 gst_buffer_unref (inbuf);
/* ERROR paths */
2819 oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2820 GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
2821 ("ffdec_%s: input format was not set before data start",
2822 oclass->in_plugin->name));
2823 gst_buffer_unref (inbuf);
2824 return GST_FLOW_NOT_NEGOTIATED;
2828 GST_DEBUG_OBJECT (ffmpegdec, "skipping non keyframe");
2829 gst_buffer_unref (inbuf);
/* Element state-change handler: chains up first, then on PAUSED->READY
 * closes the codec (under the object lock), drops queued frames and frees
 * the padding scratch buffer. */
2834 static GstStateChangeReturn
2835 gst_ffmpegdec_change_state (GstElement * element, GstStateChange transition)
2837 GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) element;
2838 GstStateChangeReturn ret;
2840 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2842 switch (transition) {
2843 case GST_STATE_CHANGE_PAUSED_TO_READY:
2844 GST_OBJECT_LOCK (ffmpegdec);
2845 gst_ffmpegdec_close (ffmpegdec);
2846 GST_OBJECT_UNLOCK (ffmpegdec);
2847 clear_queued (ffmpegdec);
/* release the input-padding scratch buffer; it is reallocated on demand */
2848 g_free (ffmpegdec->padded);
2849 ffmpegdec->padded = NULL;
2850 ffmpegdec->padded_size = 0;
2851 ffmpegdec->can_allocate_aligned = TRUE;
/* GObject property setter. Some properties are mirrored straight into the
 * libav codec context (lowres, skip-frame, debug-mv); the rest only update
 * the element's own fields. */
2861 gst_ffmpegdec_set_property (GObject * object,
2862 guint prop_id, const GValue * value, GParamSpec * pspec)
2864 GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
2868 ffmpegdec->lowres = ffmpegdec->context->lowres = g_value_get_enum (value);
2870 case PROP_SKIPFRAME:
2871 ffmpegdec->skip_frame = ffmpegdec->context->skip_frame =
2872 g_value_get_enum (value);
2874 case PROP_DIRECT_RENDERING:
2875 ffmpegdec->direct_rendering = g_value_get_boolean (value);
2877 case PROP_DO_PADDING:
2878 ffmpegdec->do_padding = g_value_get_boolean (value);
2881 ffmpegdec->debug_mv = ffmpegdec->context->debug_mv =
2882 g_value_get_boolean (value);
2885 ffmpegdec->crop = g_value_get_boolean (value);
2887 case PROP_MAX_THREADS:
2888 ffmpegdec->max_threads = g_value_get_int (value);
2891 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* GObject property getter. Note the asymmetry with the setter: lowres,
 * skip-frame and debug-mv are read back from the codec context, the others
 * from the element's own fields. */
2897 gst_ffmpegdec_get_property (GObject * object,
2898 guint prop_id, GValue * value, GParamSpec * pspec)
2900 GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) object;
2904 g_value_set_enum (value, ffmpegdec->context->lowres);
2906 case PROP_SKIPFRAME:
2907 g_value_set_enum (value, ffmpegdec->context->skip_frame);
2909 case PROP_DIRECT_RENDERING:
2910 g_value_set_boolean (value, ffmpegdec->direct_rendering);
2912 case PROP_DO_PADDING:
2913 g_value_set_boolean (value, ffmpegdec->do_padding);
2916 g_value_set_boolean (value, ffmpegdec->context->debug_mv);
2919 g_value_set_boolean (value, ffmpegdec->crop);
2921 case PROP_MAX_THREADS:
2922 g_value_set_int (value, ffmpegdec->max_threads);
2925 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2931 gst_ffmpegdec_register (GstPlugin * plugin)
2933 GTypeInfo typeinfo = {
2934 sizeof (GstFFMpegDecClass),
2935 (GBaseInitFunc) gst_ffmpegdec_base_init,
2937 (GClassInitFunc) gst_ffmpegdec_class_init,
2940 sizeof (GstFFMpegDec),
2942 (GInstanceInitFunc) gst_ffmpegdec_init,
2948 in_plugin = av_codec_next (NULL);
2950 GST_LOG ("Registering decoders");
2957 if (!in_plugin->decode) {
2961 /* no quasi-codecs, please */
2962 if (in_plugin->id == CODEC_ID_RAWVIDEO ||
2963 in_plugin->id == CODEC_ID_V210 ||
2964 in_plugin->id == CODEC_ID_V210X ||
2965 in_plugin->id == CODEC_ID_R210 ||
2966 (in_plugin->id >= CODEC_ID_PCM_S16LE &&
2967 in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
2971 /* No decoders depending on external libraries (we don't build them, but
2972 * people who build against an external ffmpeg might have them.
2973 * We have native gstreamer plugins for all of those libraries anyway. */
2974 if (!strncmp (in_plugin->name, "lib", 3)) {
2976 ("Not using external library decoder %s. Use the gstreamer-native ones instead.",
2981 /* No vdpau plugins until we can figure out how to properly use them
2982 * outside of ffmpeg. */
2983 if (g_str_has_suffix (in_plugin->name, "_vdpau")) {
2985 ("Ignoring VDPAU decoder %s. We can't handle this outside of ffmpeg",
2990 if (g_str_has_suffix (in_plugin->name, "_xvmc")) {
2992 ("Ignoring XVMC decoder %s. We can't handle this outside of ffmpeg",
2997 GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
2999 /* no codecs for which we're GUARANTEED to have better alternatives */
3000 /* MPEG1VIDEO : the mpeg2video decoder is preferred */
3001 /* MP1 : Use MP3 for decoding */
3002 /* MP2 : Use MP3 for decoding */
3003 /* Theora: Use libtheora based theoradec */
3004 if (!strcmp (in_plugin->name, "gif") ||
3005 !strcmp (in_plugin->name, "vorbis") ||
3006 !strcmp (in_plugin->name, "theora") ||
3007 !strcmp (in_plugin->name, "mpeg1video") ||
3008 !strcmp (in_plugin->name, "wavpack") ||
3009 !strcmp (in_plugin->name, "mp1") ||
3010 !strcmp (in_plugin->name, "mp2") ||
3011 !strcmp (in_plugin->name, "libfaad") ||
3012 !strcmp (in_plugin->name, "mpeg4aac") ||
3013 !strcmp (in_plugin->name, "ass") ||
3014 !strcmp (in_plugin->name, "srt") ||
3015 !strcmp (in_plugin->name, "pgssub") ||
3016 !strcmp (in_plugin->name, "dvdsub") ||
3017 !strcmp (in_plugin->name, "dvbsub")) {
3018 GST_LOG ("Ignoring decoder %s", in_plugin->name);
3022 /* construct the type */
3023 plugin_name = g_strdup ((gchar *) in_plugin->name);
3024 g_strdelimit (plugin_name, NULL, '_');
3025 type_name = g_strdup_printf ("ffdec_%s", plugin_name);
3026 g_free (plugin_name);
3028 type = g_type_from_name (type_name);
3031 /* create the gtype now */
3032 type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
3033 g_type_set_qdata (type, GST_FFDEC_PARAMS_QDATA, (gpointer) in_plugin);
3036 /* (Ronald) MPEG-4 gets a higher priority because it has been well-
3037 * tested and by far outperforms divxdec/xviddec - so we prefer it.
3038 * msmpeg4v3 same, as it outperforms divxdec for divx3 playback.
3039 * VC1/WMV3 are not working and thus unpreferred for now. */
3040 switch (in_plugin->id) {
3041 case CODEC_ID_MPEG4:
3042 case CODEC_ID_MSMPEG4V3:
3044 case CODEC_ID_RA_144:
3045 case CODEC_ID_RA_288:
3051 rank = GST_RANK_PRIMARY;
3053 /* DVVIDEO: we have a good dv decoder, fast on both ppc as well as x86.
3054 * They say libdv's quality is better though. leave as secondary.
3055 * note: if you change this, see the code in gstdv.c in good/ext/dv.
3057 * SIPR: decoder should have a higher rank than realaudiodec.
3059 case CODEC_ID_DVVIDEO:
3061 rank = GST_RANK_SECONDARY;
3064 rank = GST_RANK_NONE;
3067 rank = GST_RANK_MARGINAL;
3070 if (!gst_element_register (plugin, type_name, rank, type)) {
3071 g_warning ("Failed to register %s", type_name);
3079 in_plugin = av_codec_next (in_plugin);
3082 GST_LOG ("Finished Registering decoders");