2 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 02111-1307, USA.
27 #ifdef HAVE_FFMPEG_UNINSTALLED
30 #include <libavcodec/avcodec.h>
34 #include <gst/video/video.h>
36 #include "gstffmpeg.h"
37 #include "gstffmpegcodecmap.h"
38 #include "gstffmpegutils.h"
40 /* define to enable alternative buffer refcounting algorithm */
43 typedef struct _GstFFMpegVidDec GstFFMpegVidDec;
45 #define MAX_TS_MASK 0xff
47 /* for each incomming buffer we keep all timing info in a structure like this.
48 * We keep a circular array of these structures around to store the timing info.
49 * The index in the array is what we pass as opaque data (to pictures) and
50 * pts (to parsers) so that ffmpeg can remember them for us. */
54 GstClockTime timestamp;
55 GstClockTime duration;
/* Instance structure for the ffmpeg video decoder element.
 * NOTE(review): several members referenced by the code below (srcpad,
 * sinkpad, picture, opened, ts_idx, segment, proportion, ...) live on
 * lines not visible in this chunk of the file. */
59 struct _GstFFMpegVidDec
63   /* We need to keep track of our pads, so we do so here. */
68   AVCodecContext *context;
  /* clipping (display) region from caps; -1 means "no clipping known" */
76   gint clip_width, clip_height;
  /* framerate seen on the previous caps, used to detect renegotiation */
79   gint old_fps_n, old_fps_d;
82   enum PixelFormat pix_fmt;
  /* TRUE until the first keyframe has been decoded */
85   gboolean waiting_for_key;
89   /* for tracking DTS/PTS */
90   gboolean has_b_frames;
91   gboolean reordered_in;
93   GstClockTime last_diff;
95   gboolean reordered_out;
96   GstClockTime last_out;
97   GstClockTime next_out;
100   gboolean turnoff_parser;      /* used for turning off aac raw parsing
102   AVCodecParserContext *pctx;
107   GValue *par;                  /* pixel aspect ratio of incoming data */
108   gboolean current_dr;          /* if direct rendering is enabled */
109   gboolean extra_ref;           /* keep extra ref around in get/release */
111   /* some properties */
112   enum AVDiscard skip_frame;
114   gboolean direct_rendering;
120   /* QoS stuff *//* with LOCK */
122   GstClockTime earliest_time;
126   /* clipping segment */
129   gboolean is_realvideo;
  /* circular array of timing info; the index is the opaque id handed to
   * ffmpeg so timestamps can be recovered on output (see gst_ts_info_store) */
131   GstTSInfo ts_info[MAX_TS_MASK + 1];
134   /* reverse playback queue */
137   /* Can downstream allocate 16bytes aligned data. */
138   gboolean can_allocate_aligned;
/* Class structure: one subclass is registered per ffmpeg video codec,
 * so codec-specific data (pad templates, AVCodec) hangs off the class.
 * NOTE(review): the AVCodec *in_plugin member used by base_init() is on
 * a line not visible in this chunk. */
141 typedef struct _GstFFMpegVidDecClass GstFFMpegVidDecClass;
143 struct _GstFFMpegVidDecClass
145   GstElementClass parent_class;
  /* pad templates constructed in base_init() from the codec's caps */
148   GstPadTemplate *srctempl, *sinktempl;
/* Sentinel timing-info entry (all fields -1) returned for indices that
 * fall outside the circular ts_info array. */
151 #define GST_TS_INFO_NONE &ts_info_none
152 static const GstTSInfo ts_info_none = { -1, -1, -1, -1 };
154 static const GstTSInfo *
155 gst_ts_info_store (GstFFMpegVidDec * dec, GstClockTime timestamp,
156 GstClockTime duration, gint64 offset)
158 gint idx = dec->ts_idx;
159 dec->ts_info[idx].idx = idx;
160 dec->ts_info[idx].timestamp = timestamp;
161 dec->ts_info[idx].duration = duration;
162 dec->ts_info[idx].offset = offset;
163 dec->ts_idx = (idx + 1) & MAX_TS_MASK;
165 return &dec->ts_info[idx];
168 static const GstTSInfo *
169 gst_ts_info_get (GstFFMpegVidDec * dec, gint idx)
171 if (G_UNLIKELY (idx < 0 || idx > MAX_TS_MASK))
172 return GST_TS_INFO_NONE;
174 return &dec->ts_info[idx];
177 #define GST_TYPE_FFMPEGDEC \
178 (gst_ffmpegviddec_get_type())
179 #define GST_FFMPEGDEC(obj) \
180 (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_FFMPEGDEC,GstFFMpegVidDec))
181 #define GST_FFMPEGVIDDEC_CLASS(klass) \
182 (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_FFMPEGDEC,GstFFMpegVidDecClass))
183 #define GST_IS_FFMPEGDEC(obj) \
184 (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_FFMPEGDEC))
185 #define GST_IS_FFMPEGVIDDEC_CLASS(klass) \
186 (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_FFMPEGDEC))
188 #define DEFAULT_LOWRES 0
189 #define DEFAULT_SKIPFRAME 0
190 #define DEFAULT_DIRECT_RENDERING TRUE
191 #define DEFAULT_DO_PADDING TRUE
192 #define DEFAULT_DEBUG_MV FALSE
193 #define DEFAULT_CROP TRUE
194 #define DEFAULT_MAX_THREADS 0
201 PROP_DIRECT_RENDERING,
209 /* A number of function prototypes are given so we can refer to them later. */
210 static void gst_ffmpegviddec_base_init (GstFFMpegVidDecClass * klass);
211 static void gst_ffmpegviddec_class_init (GstFFMpegVidDecClass * klass);
212 static void gst_ffmpegviddec_init (GstFFMpegVidDec * ffmpegdec);
213 static void gst_ffmpegviddec_finalize (GObject * object);
215 static gboolean gst_ffmpegviddec_query (GstPad * pad, GstQuery * query);
216 static gboolean gst_ffmpegviddec_src_event (GstPad * pad, GstEvent * event);
218 static gboolean gst_ffmpegviddec_setcaps (GstPad * pad, GstCaps * caps);
219 static gboolean gst_ffmpegviddec_sink_event (GstPad * pad, GstEvent * event);
220 static GstFlowReturn gst_ffmpegviddec_chain (GstPad * pad, GstBuffer * buf);
222 static GstStateChangeReturn gst_ffmpegviddec_change_state (GstElement * element,
223 GstStateChange transition);
225 static void gst_ffmpegviddec_set_property (GObject * object,
226 guint prop_id, const GValue * value, GParamSpec * pspec);
227 static void gst_ffmpegviddec_get_property (GObject * object,
228 guint prop_id, GValue * value, GParamSpec * pspec);
230 static gboolean gst_ffmpegviddec_negotiate (GstFFMpegVidDec * ffmpegdec,
233 /* some sort of bufferpool handling, but different */
234 static int gst_ffmpegviddec_get_buffer (AVCodecContext * context,
236 static void gst_ffmpegviddec_release_buffer (AVCodecContext * context,
239 static void gst_ffmpegviddec_drain (GstFFMpegVidDec * ffmpegdec);
241 #define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("ffdec-params")
243 static GstElementClass *parent_class = NULL;
245 #define GST_FFMPEGVIDDEC_TYPE_LOWRES (gst_ffmpegviddec_lowres_get_type())
/* Lazily register the GEnum type backing the "lowres" property.
 * NOTE(review): the first enum entry ("full") and the NULL terminator of
 * the value table are on lines not visible in this chunk. */
247 gst_ffmpegviddec_lowres_get_type (void)
249   static GType ffmpegdec_lowres_type = 0;
251   if (!ffmpegdec_lowres_type) {
252     static const GEnumValue ffmpegdec_lowres[] = {
254       {1, "1", "1/2-size"},
255       {2, "2", "1/4-size"},
259     ffmpegdec_lowres_type =
260         g_enum_register_static ("GstFFMpegVidDecLowres", ffmpegdec_lowres);
263   return ffmpegdec_lowres_type;
266 #define GST_FFMPEGVIDDEC_TYPE_SKIPFRAME (gst_ffmpegviddec_skipframe_get_type())
/* Lazily register the GEnum type backing the "skip-frame" property; the
 * numeric values mirror ffmpeg's AVDiscard levels.
 * NOTE(review): the NULL terminator of the value table is on a line not
 * visible in this chunk. */
268 gst_ffmpegviddec_skipframe_get_type (void)
270   static GType ffmpegdec_skipframe_type = 0;
272   if (!ffmpegdec_skipframe_type) {
273     static const GEnumValue ffmpegdec_skipframe[] = {
274       {0, "0", "Skip nothing"},
275       {1, "1", "Skip B-frames"},
276       {2, "2", "Skip IDCT/Dequantization"},
277       {5, "5", "Skip everything"},
281     ffmpegdec_skipframe_type =
282         g_enum_register_static ("GstFFMpegVidDecSkipFrame",
283         ffmpegdec_skipframe);
286   return ffmpegdec_skipframe_type;
/* GObject base_init: fill in the element details and build the pad
 * templates for this particular codec. The AVCodec was attached to the
 * registered GType as qdata when the type was created. */
290 gst_ffmpegviddec_base_init (GstFFMpegVidDecClass * klass)
292   GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
293   GstPadTemplate *sinktempl, *srctempl;
294   GstCaps *sinkcaps, *srccaps;
296   gchar *longname, *description;
  /* recover the AVCodec this subclass was registered for */
299       (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
300       GST_FFDEC_PARAMS_QDATA);
301   g_assert (in_plugin != NULL);
303   /* construct the element details struct */
304   longname = g_strdup_printf ("FFmpeg %s decoder", in_plugin->long_name);
305   description = g_strdup_printf ("FFmpeg %s decoder", in_plugin->name);
306   gst_element_class_set_details_simple (element_class, longname,
307       "Codec/Decoder/Video", description,
308       "Wim Taymans <wim.taymans@gmail.com>, "
309       "Ronald Bultje <rbultje@ronald.bitfreak.net>, "
310       "Edward Hervey <bilboed@bilboed.com>");
312   g_free (description);
  /* sink caps derive from the codec id; fall back to a dummy caps string
   * so registration still succeeds for unmapped codecs */
315   sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, FALSE);
317     GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
318     sinkcaps = gst_caps_from_string ("unknown/unknown");
320   srccaps = gst_caps_from_string ("video/x-raw-rgb; video/x-raw-yuv");
323   sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
324       GST_PAD_ALWAYS, sinkcaps);
325   srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);
327   gst_element_class_add_pad_template (element_class, srctempl);
328   gst_element_class_add_pad_template (element_class, sinktempl);
  /* remember codec and templates for instance init */
330   klass->in_plugin = in_plugin;
331   klass->srctempl = srctempl;
332   klass->sinktempl = sinktempl;
/* GObject class_init: hook up vmethods and install the decoder
 * properties (skip-frame, lowres, direct-rendering, do-padding,
 * debug-mv, crop, and max-threads for threaded codecs). */
336 gst_ffmpegviddec_class_init (GstFFMpegVidDecClass * klass)
338   GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
339   GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
341   parent_class = g_type_class_peek_parent (klass);
343   gobject_class->finalize = gst_ffmpegviddec_finalize;
345   gobject_class->set_property = gst_ffmpegviddec_set_property;
346   gobject_class->get_property = gst_ffmpegviddec_get_property;
348   if (klass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
351     g_object_class_install_property (gobject_class, PROP_SKIPFRAME,
352         g_param_spec_enum ("skip-frame", "Skip frames",
353             "Which types of frames to skip during decoding",
354             GST_FFMPEGVIDDEC_TYPE_SKIPFRAME, 0,
355             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
356     g_object_class_install_property (gobject_class, PROP_LOWRES,
357         g_param_spec_enum ("lowres", "Low resolution",
358             "At which resolution to decode images",
359             GST_FFMPEGVIDDEC_TYPE_LOWRES, 0,
360             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
361     g_object_class_install_property (gobject_class, PROP_DIRECT_RENDERING,
362         g_param_spec_boolean ("direct-rendering", "Direct Rendering",
363             "Enable direct rendering", DEFAULT_DIRECT_RENDERING,
364             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
365     g_object_class_install_property (gobject_class, PROP_DO_PADDING,
366         g_param_spec_boolean ("do-padding", "Do Padding",
367             "Add 0 padding before decoding data", DEFAULT_DO_PADDING,
368             G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
369     g_object_class_install_property (gobject_class, PROP_DEBUG_MV,
370         g_param_spec_boolean ("debug-mv", "Debug motion vectors",
371             "Whether ffmpeg should print motion vectors on top of the image",
372             DEFAULT_DEBUG_MV, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
374     g_object_class_install_property (gobject_class, PROP_CROP,
375         g_param_spec_boolean ("crop", "Crop",
376             "Crop images to the display region",
377             DEFAULT_CROP, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  /* only expose max-threads when the codec can actually use threads */
380     caps = klass->in_plugin->capabilities;
381     if (caps & (CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS)) {
382       g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_MAX_THREADS,
383           g_param_spec_int ("max-threads", "Maximum decode threads",
384               "Maximum number of worker threads to spawn. (0 = auto)",
385               0, G_MAXINT, DEFAULT_MAX_THREADS,
386               G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
390   gstelement_class->change_state = gst_ffmpegviddec_change_state;
/* Instance init: create the pads from the class templates, allocate the
 * libav context/frame, and set every field to its default. */
394 gst_ffmpegviddec_init (GstFFMpegVidDec * ffmpegdec)
396   GstFFMpegVidDecClass *oclass;
398   oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
  /* sink pad: receives encoded data */
401   ffmpegdec->sinkpad = gst_pad_new_from_template (oclass->sinktempl, "sink");
402   gst_pad_set_setcaps_function (ffmpegdec->sinkpad,
403       GST_DEBUG_FUNCPTR (gst_ffmpegviddec_setcaps));
404   gst_pad_set_event_function (ffmpegdec->sinkpad,
405       GST_DEBUG_FUNCPTR (gst_ffmpegviddec_sink_event));
406   gst_pad_set_chain_function (ffmpegdec->sinkpad,
407       GST_DEBUG_FUNCPTR (gst_ffmpegviddec_chain));
408   gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->sinkpad);
  /* src pad: pushes raw decoded video */
410   ffmpegdec->srcpad = gst_pad_new_from_template (oclass->srctempl, "src");
411   gst_pad_use_fixed_caps (ffmpegdec->srcpad);
412   gst_pad_set_event_function (ffmpegdec->srcpad,
413       GST_DEBUG_FUNCPTR (gst_ffmpegviddec_src_event));
414   gst_pad_set_query_function (ffmpegdec->srcpad,
415       GST_DEBUG_FUNCPTR (gst_ffmpegviddec_query));
416   gst_element_add_pad (GST_ELEMENT (ffmpegdec), ffmpegdec->srcpad);
418   /* some ffmpeg data */
419   ffmpegdec->context = avcodec_alloc_context ();
420   ffmpegdec->picture = avcodec_alloc_frame ();
421   ffmpegdec->pctx = NULL;
422   ffmpegdec->pcache = NULL;
423   ffmpegdec->par = NULL;
424   ffmpegdec->opened = FALSE;
425   ffmpegdec->waiting_for_key = TRUE;
426   ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
427   ffmpegdec->direct_rendering = DEFAULT_DIRECT_RENDERING;
428   ffmpegdec->do_padding = DEFAULT_DO_PADDING;
429   ffmpegdec->debug_mv = DEFAULT_DEBUG_MV;
430   ffmpegdec->crop = DEFAULT_CROP;
431   ffmpegdec->max_threads = DEFAULT_MAX_THREADS;
  /* -1 means "not known yet" for par/fps */
433   ffmpegdec->format.video.par_n = -1;
434   ffmpegdec->format.video.fps_n = -1;
435   ffmpegdec->format.video.old_fps_n = -1;
436   gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
438   /* We initially assume downstream can allocate 16 bytes aligned buffers */
439   ffmpegdec->can_allocate_aligned = TRUE;
443 gst_ffmpegviddec_finalize (GObject * object)
445 GstFFMpegVidDec *ffmpegdec = (GstFFMpegVidDec *) object;
447 if (ffmpegdec->context != NULL) {
448 av_free (ffmpegdec->context);
449 ffmpegdec->context = NULL;
452 if (ffmpegdec->picture != NULL) {
453 av_free (ffmpegdec->picture);
454 ffmpegdec->picture = NULL;
457 G_OBJECT_CLASS (parent_class)->finalize (object);
/* Src pad query handler. For LATENCY we forward the query upstream and,
 * when the codec reorders frames (has_b_frames), add the reordering
 * delay (frames / framerate) to the reported latency. Other queries go
 * to the default handler. */
461 gst_ffmpegviddec_query (GstPad * pad, GstQuery * query)
463   GstFFMpegVidDec *ffmpegdec;
464   gboolean res = FALSE;
466   ffmpegdec = (GstFFMpegVidDec *) gst_pad_get_parent (pad);
468   switch (GST_QUERY_TYPE (query)) {
469     case GST_QUERY_LATENCY:
471       GST_DEBUG_OBJECT (ffmpegdec, "latency query %d",
472           ffmpegdec->context->has_b_frames);
473       if ((res = gst_pad_peer_query (ffmpegdec->sinkpad, query))) {
474         if (ffmpegdec->context->has_b_frames) {
476           GstClockTime min_lat, max_lat, our_lat;
478           gst_query_parse_latency (query, &live, &min_lat, &max_lat);
  /* our latency: has_b_frames frames at the negotiated framerate */
479           if (ffmpegdec->format.video.fps_n > 0)
481                 gst_util_uint64_scale_int (ffmpegdec->context->has_b_frames *
482                 GST_SECOND, ffmpegdec->format.video.fps_d,
483                 ffmpegdec->format.video.fps_n);
486                 gst_util_uint64_scale_int (ffmpegdec->context->has_b_frames *
492           gst_query_set_latency (query, live, min_lat, max_lat);
498       res = gst_pad_query_default (pad, query);
502   gst_object_unref (ffmpegdec);
508 gst_ffmpegviddec_reset_ts (GstFFMpegVidDec * ffmpegdec)
510 ffmpegdec->last_in = GST_CLOCK_TIME_NONE;
511 ffmpegdec->last_diff = GST_CLOCK_TIME_NONE;
512 ffmpegdec->last_frames = 0;
513 ffmpegdec->last_out = GST_CLOCK_TIME_NONE;
514 ffmpegdec->next_out = GST_CLOCK_TIME_NONE;
515 ffmpegdec->reordered_in = FALSE;
516 ffmpegdec->reordered_out = FALSE;
520 gst_ffmpegviddec_update_qos (GstFFMpegVidDec * ffmpegdec, gdouble proportion,
521 GstClockTime timestamp)
523 GST_LOG_OBJECT (ffmpegdec, "update QOS: %f, %" GST_TIME_FORMAT,
524 proportion, GST_TIME_ARGS (timestamp));
526 GST_OBJECT_LOCK (ffmpegdec);
527 ffmpegdec->proportion = proportion;
528 ffmpegdec->earliest_time = timestamp;
529 GST_OBJECT_UNLOCK (ffmpegdec);
533 gst_ffmpegviddec_reset_qos (GstFFMpegVidDec * ffmpegdec)
535 gst_ffmpegviddec_update_qos (ffmpegdec, 0.5, GST_CLOCK_TIME_NONE);
536 ffmpegdec->processed = 0;
537 ffmpegdec->dropped = 0;
541 gst_ffmpegviddec_read_qos (GstFFMpegVidDec * ffmpegdec, gdouble * proportion,
542 GstClockTime * timestamp)
544 GST_OBJECT_LOCK (ffmpegdec);
545 *proportion = ffmpegdec->proportion;
546 *timestamp = ffmpegdec->earliest_time;
547 GST_OBJECT_UNLOCK (ffmpegdec);
/* Src pad event handler. QOS events update our proportion/earliest-time
 * state before being forwarded upstream; everything else is forwarded
 * unchanged. NOTE(review): the case labels and default branch are on
 * lines not visible in this chunk. */
551 gst_ffmpegviddec_src_event (GstPad * pad, GstEvent * event)
553   GstFFMpegVidDec *ffmpegdec;
556   ffmpegdec = (GstFFMpegVidDec *) gst_pad_get_parent (pad);
558   switch (GST_EVENT_TYPE (event)) {
562       GstClockTimeDiff diff;
563       GstClockTime timestamp;
565       gst_event_parse_qos (event, &proportion, &diff, &timestamp);
567       /* update our QoS values */
568       gst_ffmpegviddec_update_qos (ffmpegdec, proportion, timestamp + diff);
570       /* forward upstream */
571       res = gst_pad_push_event (ffmpegdec->sinkpad, event);
575       /* forward upstream */
576       res = gst_pad_push_event (ffmpegdec->sinkpad, event);
580   gst_object_unref (ffmpegdec);
/* Close the ffmpeg codec and free everything attached to the context
 * (par, palette, extradata, parser and its cache). Safe to call when
 * not opened; resets the cached video format to "unknown". */
587 gst_ffmpegviddec_close (GstFFMpegVidDec * ffmpegdec)
589   if (!ffmpegdec->opened)
592   GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");
594   if (ffmpegdec->par) {
595     g_free (ffmpegdec->par);
596     ffmpegdec->par = NULL;
  /* priv_data is only set when the codec was actually opened */
599   if (ffmpegdec->context->priv_data)
600     gst_ffmpeg_avcodec_close (ffmpegdec->context);
601   ffmpegdec->opened = FALSE;
603   if (ffmpegdec->context->palctrl) {
604     av_free (ffmpegdec->context->palctrl);
605     ffmpegdec->context->palctrl = NULL;
608   if (ffmpegdec->context->extradata) {
609     av_free (ffmpegdec->context->extradata);
610     ffmpegdec->context->extradata = NULL;
  /* drop the parser together with any cached, unparsed input */
613   if (ffmpegdec->pctx) {
614     if (ffmpegdec->pcache) {
615       gst_buffer_unref (ffmpegdec->pcache);
616       ffmpegdec->pcache = NULL;
618     av_parser_close (ffmpegdec->pctx);
619     ffmpegdec->pctx = NULL;
622   ffmpegdec->format.video.par_n = -1;
623   ffmpegdec->format.video.fps_n = -1;
624   ffmpegdec->format.video.old_fps_n = -1;
625   ffmpegdec->format.video.interlaced = FALSE;
/* Open the ffmpeg codec, optionally create a parser for it (with
 * per-codec exceptions), and reset the cached format and timestamp
 * state. Returns FALSE and closes again when avcodec_open fails.
 * NOTE(review): some case labels and the error-path labels are on lines
 * not visible in this chunk. */
630 gst_ffmpegviddec_open (GstFFMpegVidDec * ffmpegdec)
632   GstFFMpegVidDecClass *oclass;
634   oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
636   if (gst_ffmpeg_avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0)
639   ffmpegdec->opened = TRUE;
640   ffmpegdec->is_realvideo = FALSE;
642   GST_LOG_OBJECT (ffmpegdec, "Opened ffmpeg codec %s, id %d",
643       oclass->in_plugin->name, oclass->in_plugin->id);
645   /* open a parser if we can */
646   switch (oclass->in_plugin->id) {
650       GST_LOG_OBJECT (ffmpegdec, "not using parser, blacklisted codec");
651       ffmpegdec->pctx = NULL;
654       /* For H264, only use a parser if there is no context data, if there is,
655        * we're talking AVC */
656       if (ffmpegdec->context->extradata_size == 0) {
657         GST_LOG_OBJECT (ffmpegdec, "H264 with no extradata, creating parser");
658         ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
660         GST_LOG_OBJECT (ffmpegdec,
661             "H264 with extradata implies framed data - not using parser");
662         ffmpegdec->pctx = NULL;
669       ffmpegdec->is_realvideo = TRUE;
  /* default case: honour the turnoff_parser flag set in setcaps */
672       if (!ffmpegdec->turnoff_parser) {
673         ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
675           GST_LOG_OBJECT (ffmpegdec, "Using parser %p", ffmpegdec->pctx);
677           GST_LOG_OBJECT (ffmpegdec, "No parser for codec");
679         GST_LOG_OBJECT (ffmpegdec, "Parser deactivated for format");
  /* forget any previously negotiated video format */
684   ffmpegdec->format.video.width = 0;
685   ffmpegdec->format.video.height = 0;
686   ffmpegdec->format.video.clip_width = -1;
687   ffmpegdec->format.video.clip_height = -1;
688   ffmpegdec->format.video.pix_fmt = PIX_FMT_NB;
689   ffmpegdec->format.video.interlaced = FALSE;
691   gst_ffmpegviddec_reset_ts (ffmpegdec);
692   /* FIXME, reset_qos holds the LOCK */
693   ffmpegdec->proportion = 0.0;
694   ffmpegdec->earliest_time = -1;
  /* ERROR path: could not open the codec */
701     gst_ffmpegviddec_close (ffmpegdec);
702     GST_DEBUG_OBJECT (ffmpegdec, "ffdec_%s: Failed to open FFMPEG codec",
703         oclass->in_plugin->name);
/* Sink pad setcaps: (re)configure the codec context from the incoming
 * caps and open the codec. Drains and closes any previous session,
 * decides on direct rendering and parser usage per codec, and stores
 * par/framerate/clipping info for later negotiation.
 * NOTE(review): several lines (locals, some else/brace lines, the final
 * return) are not visible in this chunk. */
709 gst_ffmpegviddec_setcaps (GstPad * pad, GstCaps * caps)
711   GstFFMpegVidDec *ffmpegdec;
712   GstFFMpegVidDecClass *oclass;
713   GstStructure *structure;
718   ffmpegdec = (GstFFMpegVidDec *) (gst_pad_get_parent (pad));
719   oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
721   GST_DEBUG_OBJECT (pad, "setcaps called");
723   GST_OBJECT_LOCK (ffmpegdec);
725   /* stupid check for VC1 */
726   if ((oclass->in_plugin->id == CODEC_ID_WMV3) ||
727       (oclass->in_plugin->id == CODEC_ID_VC1))
728     oclass->in_plugin->id = gst_ffmpeg_caps_to_codecid (caps, NULL);
730   /* close old session */
731   if (ffmpegdec->opened) {
  /* drain without the lock: draining pushes buffers downstream */
732     GST_OBJECT_UNLOCK (ffmpegdec);
733     gst_ffmpegviddec_drain (ffmpegdec);
734     GST_OBJECT_LOCK (ffmpegdec);
735     gst_ffmpegviddec_close (ffmpegdec);
737     /* and reset the defaults that were set when a context is created */
738     avcodec_get_context_defaults (ffmpegdec->context);
741   /* set buffer functions */
742   if (oclass->in_plugin->type == AVMEDIA_TYPE_VIDEO) {
743     ffmpegdec->context->get_buffer = gst_ffmpegviddec_get_buffer;
744     ffmpegdec->context->release_buffer = gst_ffmpegviddec_release_buffer;
745     ffmpegdec->context->draw_horiz_band = NULL;
748   /* default is to let format decide if it needs a parser */
749   ffmpegdec->turnoff_parser = FALSE;
751   ffmpegdec->has_b_frames = FALSE;
753   GST_LOG_OBJECT (ffmpegdec, "size %dx%d", ffmpegdec->context->width,
754       ffmpegdec->context->height);
756   /* get size and so */
757   gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
758       oclass->in_plugin->type, caps, ffmpegdec->context);
760   GST_LOG_OBJECT (ffmpegdec, "size after %dx%d", ffmpegdec->context->width,
761       ffmpegdec->context->height);
  /* caps gave no framerate: pick an arbitrary but valid time base */
763   if (!ffmpegdec->context->time_base.den || !ffmpegdec->context->time_base.num) {
764     GST_DEBUG_OBJECT (ffmpegdec, "forcing 25/1 framerate");
765     ffmpegdec->context->time_base.num = 1;
766     ffmpegdec->context->time_base.den = 25;
769   /* get pixel aspect ratio if it's set */
770   structure = gst_caps_get_structure (caps, 0);
772   par = gst_structure_get_value (structure, "pixel-aspect-ratio");
774     GST_DEBUG_OBJECT (ffmpegdec, "sink caps have pixel-aspect-ratio of %d:%d",
775         gst_value_get_fraction_numerator (par),
776         gst_value_get_fraction_denominator (par));
  /* keep a copy of the demuxer-provided PAR for negotiation */
779     g_free (ffmpegdec->par);
780     ffmpegdec->par = g_new0 (GValue, 1);
781     gst_value_init_and_copy (ffmpegdec->par, par);
784   /* get the framerate from incoming caps. fps_n is set to -1 when
785    * there is no valid framerate */
786   fps = gst_structure_get_value (structure, "framerate");
787   if (fps != NULL && GST_VALUE_HOLDS_FRACTION (fps)) {
788     ffmpegdec->format.video.fps_n = gst_value_get_fraction_numerator (fps);
789     ffmpegdec->format.video.fps_d = gst_value_get_fraction_denominator (fps);
790     GST_DEBUG_OBJECT (ffmpegdec, "Using framerate %d/%d from incoming caps",
791         ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
793     ffmpegdec->format.video.fps_n = -1;
794     GST_DEBUG_OBJECT (ffmpegdec, "Using framerate from codec");
797   /* figure out if we can use direct rendering */
798   ffmpegdec->current_dr = FALSE;
799   ffmpegdec->extra_ref = FALSE;
800   if (ffmpegdec->direct_rendering) {
801     GST_DEBUG_OBJECT (ffmpegdec, "trying to enable direct rendering");
802     if (oclass->in_plugin->capabilities & CODEC_CAP_DR1) {
803       if (oclass->in_plugin->id == CODEC_ID_H264) {
804         GST_DEBUG_OBJECT (ffmpegdec, "disable direct rendering setup for H264");
805         /* does not work, many stuff reads outside of the planes */
806         ffmpegdec->current_dr = FALSE;
807         ffmpegdec->extra_ref = TRUE;
808       } else if ((oclass->in_plugin->id == CODEC_ID_SVQ1) ||
809           (oclass->in_plugin->id == CODEC_ID_VP5) ||
810           (oclass->in_plugin->id == CODEC_ID_VP6) ||
811           (oclass->in_plugin->id == CODEC_ID_VP6F) ||
812           (oclass->in_plugin->id == CODEC_ID_VP6A)) {
813         GST_DEBUG_OBJECT (ffmpegdec,
814             "disable direct rendering setup for broken stride support");
815         /* does not work, uses a incompatible stride. See #610613 */
816         ffmpegdec->current_dr = FALSE;
817         ffmpegdec->extra_ref = TRUE;
819         GST_DEBUG_OBJECT (ffmpegdec, "enabled direct rendering");
820         ffmpegdec->current_dr = TRUE;
823       GST_DEBUG_OBJECT (ffmpegdec, "direct rendering not supported");
826   if (ffmpegdec->current_dr) {
827     /* do *not* draw edges when in direct rendering, for some reason it draws
828      * outside of the memory. */
829     ffmpegdec->context->flags |= CODEC_FLAG_EMU_EDGE;
832   /* for AAC we only use av_parse if not on stream-format==raw or ==loas */
833   if (oclass->in_plugin->id == CODEC_ID_AAC
834       || oclass->in_plugin->id == CODEC_ID_AAC_LATM) {
835     const gchar *format = gst_structure_get_string (structure, "stream-format");
837     if (format == NULL || strcmp (format, "raw") == 0) {
838       ffmpegdec->turnoff_parser = TRUE;
842   /* for FLAC, don't parse if it's already parsed */
843   if (oclass->in_plugin->id == CODEC_ID_FLAC) {
844     if (gst_structure_has_field (structure, "streamheader"))
845       ffmpegdec->turnoff_parser = TRUE;
848   /* workaround encoder bugs */
849   ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
850   ffmpegdec->context->error_recognition = 1;
  /* apply the lowres/skip-frame properties to the context */
853   ffmpegdec->context->lowres = ffmpegdec->lowres;
854   ffmpegdec->context->skip_frame = ffmpegdec->skip_frame;
856   /* ffmpeg can draw motion vectors on top of the image (not every decoder
858   ffmpegdec->context->debug_mv = ffmpegdec->debug_mv;
  /* thread count: 0 = let ffmpeg decide, otherwise our computed/user max */
860   if (ffmpegdec->max_threads == 0) {
861     if (!(oclass->in_plugin->capabilities & CODEC_CAP_AUTO_THREADS))
862       ffmpegdec->context->thread_count = gst_ffmpeg_auto_max_threads ();
864       ffmpegdec->context->thread_count = 0;
866     ffmpegdec->context->thread_count = ffmpegdec->max_threads;
868   ffmpegdec->context->thread_type = FF_THREAD_SLICE;
870   /* open codec - we don't select an output pix_fmt yet,
871    * simply because we don't know! We only get it
872    * during playback... */
873   if (!gst_ffmpegviddec_open (ffmpegdec))
876   /* clipping region */
877   gst_structure_get_int (structure, "width",
878       &ffmpegdec->format.video.clip_width);
879   gst_structure_get_int (structure, "height",
880       &ffmpegdec->format.video.clip_height);
882   GST_DEBUG_OBJECT (pad, "clipping to %dx%d",
883       ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
885   /* take into account the lowres property */
886   if (ffmpegdec->format.video.clip_width != -1)
887     ffmpegdec->format.video.clip_width >>= ffmpegdec->lowres;
888   if (ffmpegdec->format.video.clip_height != -1)
889     ffmpegdec->format.video.clip_height >>= ffmpegdec->lowres;
891   GST_DEBUG_OBJECT (pad, "final clipping to %dx%d",
892       ffmpegdec->format.video.clip_width, ffmpegdec->format.video.clip_height);
895   GST_OBJECT_UNLOCK (ffmpegdec);
897   gst_object_unref (ffmpegdec);
  /* ERROR path: codec failed to open, drop the saved PAR */
904     GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
905     if (ffmpegdec->par) {
906       g_free (ffmpegdec->par);
907       ffmpegdec->par = NULL;
/* Allocate an output buffer for a decoded frame. Renegotiates first if
 * needed, then either pad_allocs from downstream (preferred, enables
 * zero-copy) or falls back to our own 16-byte-aligned buffer when
 * downstream buffers are unaligned or palette data is present.
 * NOTE(review): some locals and the success return are on lines not
 * visible in this chunk. */
915 alloc_output_buffer (GstFFMpegVidDec * ffmpegdec, GstBuffer ** outbuf,
916     gint width, gint height)
921   ret = GST_FLOW_ERROR;
924   GST_LOG_OBJECT (ffmpegdec, "alloc output buffer");
926   /* see if we need renegotiation */
927   if (G_UNLIKELY (!gst_ffmpegviddec_negotiate (ffmpegdec, FALSE)))
928     goto negotiate_failed;
930   /* get the size of the gstreamer output buffer given a
931    * width/height/format */
932   fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
935   if (!ffmpegdec->context->palctrl && ffmpegdec->can_allocate_aligned) {
936     GST_LOG_OBJECT (ffmpegdec, "calling pad_alloc");
937     /* no pallete, we can use the buffer size to alloc */
938     ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
939         GST_BUFFER_OFFSET_NONE, fsize,
940         GST_PAD_CAPS (ffmpegdec->srcpad), outbuf);
941     if (G_UNLIKELY (ret != GST_FLOW_OK))
944     /* If buffer isn't 128-bit aligned, create a memaligned one ourselves */
945     if (((uintptr_t) GST_BUFFER_DATA (*outbuf)) % 16) {
946       GST_DEBUG_OBJECT (ffmpegdec,
947           "Downstream can't allocate aligned buffers.");
  /* remember the failure so we never try pad_alloc again */
948       ffmpegdec->can_allocate_aligned = FALSE;
949       gst_buffer_unref (*outbuf);
950       *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
953     GST_LOG_OBJECT (ffmpegdec,
954         "not calling pad_alloc, we have a pallete or downstream can't give 16 byte aligned buffers.");
955     /* for paletted data we can't use pad_alloc_buffer(), because
956      * fsize contains the size of the palette, so the overall size
957      * is bigger than ffmpegcolorspace's unit size, which will
958      * prompt GstBaseTransform to complain endlessly ... */
959     *outbuf = new_aligned_buffer (fsize, GST_PAD_CAPS (ffmpegdec->srcpad));
962   /* set caps, we do this here because the buffer is still writable here and we
963    * are sure to be negotiated */
964   gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));
  /* ERROR paths */
971     GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
972     return GST_FLOW_NOT_NEGOTIATED;
976     GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed %d (%s)", ret,
977         gst_flow_get_name (ret));
/* AVCodecContext.get_buffer callback: hand ffmpeg a GstBuffer to decode
 * into (direct rendering). Falls back to avcodec_default_get_buffer
 * when DR is off, when output must be clipped, or when downstream
 * allocation fails. A GstBuffer we own is marked by setting
 * picture->opaque; see _release_buffer for the matching teardown. */
983 gst_ffmpegviddec_get_buffer (AVCodecContext * context, AVFrame * picture)
985   GstBuffer *buf = NULL;
986   GstFFMpegVidDec *ffmpegdec;
988   gint coded_width, coded_height;
991   gint clip_width, clip_height;
994   ffmpegdec = (GstFFMpegVidDec *) context->opaque;
996   GST_DEBUG_OBJECT (ffmpegdec, "getting buffer");
998   /* apply the last info we have seen to this picture, when we get the
999    * picture back from ffmpeg we can use this to correctly timestamp the output
1001   picture->reordered_opaque = context->reordered_opaque;
1002   /* make sure we don't free the buffer when it's not ours */
1003   picture->opaque = NULL;
1005   /* take width and height before clipping */
1006   width = context->width;
1007   height = context->height;
1008   coded_width = context->coded_width;
1009   coded_height = context->coded_height;
1011   GST_LOG_OBJECT (ffmpegdec, "dimension %dx%d, coded %dx%d", width, height,
1012       coded_width, coded_height);
1013   if (!ffmpegdec->current_dr) {
1014     GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");
1015     res = avcodec_default_get_buffer (context, picture);
1017     GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", picture->linesize[0],
1018         picture->linesize[1], picture->linesize[2]);
1019     GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
1020         (guint) (picture->data[1] - picture->data[0]),
1021         (guint) (picture->data[2] - picture->data[0]));
1025   /* take final clipped output size */
1026   if ((clip_width = ffmpegdec->format.video.clip_width) == -1)
1028   if ((clip_height = ffmpegdec->format.video.clip_height) == -1)
1029     clip_height = height;
1031   GST_LOG_OBJECT (ffmpegdec, "raw outsize %d/%d", width, height);
1033   /* this is the size ffmpeg needs for the buffer */
1034   avcodec_align_dimensions (context, &width, &height);
1036   GST_LOG_OBJECT (ffmpegdec, "aligned outsize %d/%d, clip %d/%d",
1037       width, height, clip_width, clip_height);
1039   if (width != clip_width || height != clip_height) {
1040     /* We can't alloc if we need to clip the output buffer later */
1041     GST_LOG_OBJECT (ffmpegdec, "we need clipping, fallback alloc");
1042     return avcodec_default_get_buffer (context, picture);
1045   /* alloc with aligned dimensions for ffmpeg */
1046   ret = alloc_output_buffer (ffmpegdec, &buf, width, height);
1047   if (G_UNLIKELY (ret != GST_FLOW_OK)) {
1048     /* alloc default buffer when we can't get one from downstream */
1049     GST_LOG_OBJECT (ffmpegdec, "alloc failed, fallback alloc");
1050     return avcodec_default_get_buffer (context, picture);
1053   /* copy the right pointers and strides in the picture object */
1054   gst_ffmpeg_avpicture_fill ((AVPicture *) picture,
1055       GST_BUFFER_DATA (buf), context->pix_fmt, width, height);
1057   /* tell ffmpeg we own this buffer, tranfer the ref we have on the buffer to
1058    * the opaque data. */
1059   picture->type = FF_BUFFER_TYPE_USER;
1060   picture->age = 256 * 256 * 256 * 64;
1061   picture->opaque = buf;
  /* reference frames (and the H264/VPx workaround) keep an extra ref
   * that is dropped again in _release_buffer */
1064   if (picture->reference != 0 || ffmpegdec->extra_ref) {
1065     GST_DEBUG_OBJECT (ffmpegdec, "adding extra ref");
1066     gst_buffer_ref (buf);
1070   GST_LOG_OBJECT (ffmpegdec, "returned buffer %p", buf);
/* AVCodecContext.release_buffer callback: undo what _get_buffer did.
 * Buffers we did not provide (opaque == NULL) go to the default
 * releaser; ours get their ref(s) dropped and the plane pointers
 * cleared so ffmpeg forgets about them. */
1076 gst_ffmpegviddec_release_buffer (AVCodecContext * context, AVFrame * picture)
1080   GstFFMpegVidDec *ffmpegdec;
1082   ffmpegdec = (GstFFMpegVidDec *) context->opaque;
1084   /* check if it was our buffer */
1085   if (picture->opaque == NULL) {
1086     GST_DEBUG_OBJECT (ffmpegdec, "default release buffer");
1087     avcodec_default_release_buffer (context, picture);
1091   /* we remove the opaque data now */
1092   buf = GST_BUFFER_CAST (picture->opaque);
1093   GST_DEBUG_OBJECT (ffmpegdec, "release buffer %p", buf);
1094   picture->opaque = NULL;
  /* drop the extra ref taken in _get_buffer for reference frames */
1097   if (picture->reference != 0 || ffmpegdec->extra_ref) {
1098     GST_DEBUG_OBJECT (ffmpegdec, "remove extra ref");
1099     gst_buffer_unref (buf);
1102   gst_buffer_unref (buf);
1105   /* zero out the reference in ffmpeg */
1106   for (i = 0; i < 4; i++) {
1107     picture->data[i] = NULL;
1108     picture->linesize[i] = 0;
/* Decide which pixel-aspect-ratio to put on the output caps when both
 * the demuxer (caps) and the decoder (context) may provide one:
 * prefer the only one set; if both are set, prefer the non-1:1 one,
 * and the demuxer's when both are non-1:1. */
1113 gst_ffmpegviddec_add_pixel_aspect_ratio (GstFFMpegVidDec * ffmpegdec,
1116   gboolean demuxer_par_set = FALSE;
1117   gboolean decoder_par_set = FALSE;
1118   gint demuxer_num = 1, demuxer_denom = 1;
1119   gint decoder_num = 1, decoder_denom = 1;
1121   GST_OBJECT_LOCK (ffmpegdec);
  /* PAR stored from the sink caps in setcaps() */
1123   if (ffmpegdec->par) {
1124     demuxer_num = gst_value_get_fraction_numerator (ffmpegdec->par);
1125     demuxer_denom = gst_value_get_fraction_denominator (ffmpegdec->par);
1126     demuxer_par_set = TRUE;
1127     GST_DEBUG_OBJECT (ffmpegdec, "Demuxer PAR: %d:%d", demuxer_num,
  /* PAR reported by the codec itself */
1131   if (ffmpegdec->context->sample_aspect_ratio.num &&
1132       ffmpegdec->context->sample_aspect_ratio.den) {
1133     decoder_num = ffmpegdec->context->sample_aspect_ratio.num;
1134     decoder_denom = ffmpegdec->context->sample_aspect_ratio.den;
1135     decoder_par_set = TRUE;
1136     GST_DEBUG_OBJECT (ffmpegdec, "Decoder PAR: %d:%d", decoder_num,
1140   GST_OBJECT_UNLOCK (ffmpegdec);
1142   if (!demuxer_par_set && !decoder_par_set)
1145   if (demuxer_par_set && !decoder_par_set)
1146     goto use_demuxer_par;
1148   if (decoder_par_set && !demuxer_par_set)
1149     goto use_decoder_par;
1151   /* Both the demuxer and the decoder provide a PAR. If one of
1152    * the two PARs is 1:1 and the other one is not, use the one
1153    * that is not 1:1. */
1154   if (demuxer_num == demuxer_denom && decoder_num != decoder_denom)
1155     goto use_decoder_par;
1157   if (decoder_num == decoder_denom && demuxer_num != demuxer_denom)
1158     goto use_demuxer_par;
1160   /* Both PARs are non-1:1, so use the PAR provided by the demuxer */
1161   goto use_demuxer_par;
1165     GST_DEBUG_OBJECT (ffmpegdec,
1166         "Setting decoder provided pixel-aspect-ratio of %u:%u", decoder_num,
1168     gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, decoder_num,
1169         decoder_denom, NULL);
1175     GST_DEBUG_OBJECT (ffmpegdec,
1176         "Setting demuxer provided pixel-aspect-ratio of %u:%u", demuxer_num,
1178     gst_structure_set (s, "pixel-aspect-ratio", GST_TYPE_FRACTION, demuxer_num,
1179         demuxer_denom, NULL);
1184     GST_DEBUG_OBJECT (ffmpegdec,
1185         "Neither demuxer nor codec provide a pixel-aspect-ratio");
/* (Re)negotiate the source pad caps from the current AVCodecContext state.
 * Does nothing when nothing relevant (size, fps, pix_fmt, PAR) changed and
 * 'force' is FALSE. Caches the context values into format.video so the next
 * call can detect changes. Error paths report NEGOTIATION element errors. */
1191 gst_ffmpegviddec_negotiate (GstFFMpegVidDec * ffmpegdec, gboolean force)
1193 GstFFMpegVidDecClass *oclass;
1196 gboolean interlaced;
1198 oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
/* early-out when nothing we export in the caps has changed */
1200 if (!force && ffmpegdec->format.video.width == ffmpegdec->context->width
1201 && ffmpegdec->format.video.height == ffmpegdec->context->height
1202 && ffmpegdec->format.video.fps_n == ffmpegdec->format.video.old_fps_n
1203 && ffmpegdec->format.video.fps_d == ffmpegdec->format.video.old_fps_d
1204 && ffmpegdec->format.video.pix_fmt == ffmpegdec->context->pix_fmt
1205 && ffmpegdec->format.video.par_n ==
1206 ffmpegdec->context->sample_aspect_ratio.num
1207 && ffmpegdec->format.video.par_d ==
1208 ffmpegdec->context->sample_aspect_ratio.den)
1210 GST_DEBUG_OBJECT (ffmpegdec,
1211 "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps to %dx%d@ %d:%d PAR %d/%d fps",
1212 ffmpegdec->format.video.width, ffmpegdec->format.video.height,
1213 ffmpegdec->format.video.par_n, ffmpegdec->format.video.par_d,
/* BUGFIX: second argument printed old_fps_n twice; the format string expects
 * the old fps numerator/denominator pair */
1214 ffmpegdec->format.video.old_fps_n, ffmpegdec->format.video.old_fps_d,
1215 ffmpegdec->context->width, ffmpegdec->context->height,
1216 ffmpegdec->context->sample_aspect_ratio.num,
1217 ffmpegdec->context->sample_aspect_ratio.den,
1218 ffmpegdec->format.video.fps_n, ffmpegdec->format.video.fps_d);
/* remember the context values we negotiated for the next change check */
1219 ffmpegdec->format.video.width = ffmpegdec->context->width;
1220 ffmpegdec->format.video.height = ffmpegdec->context->height;
1221 ffmpegdec->format.video.old_fps_n = ffmpegdec->format.video.fps_n;
1222 ffmpegdec->format.video.old_fps_d = ffmpegdec->format.video.fps_d;
1223 ffmpegdec->format.video.pix_fmt = ffmpegdec->context->pix_fmt;
1224 ffmpegdec->format.video.par_n = ffmpegdec->context->sample_aspect_ratio.num;
1225 ffmpegdec->format.video.par_d = ffmpegdec->context->sample_aspect_ratio.den;
1227 caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
1228 ffmpegdec->context, oclass->in_plugin->id, FALSE);
1233 width = ffmpegdec->format.video.clip_width;
1234 height = ffmpegdec->format.video.clip_height;
1235 interlaced = ffmpegdec->format.video.interlaced;
1237 if (width != -1 && height != -1) {
1238 /* overwrite the output size with the dimension of the
1239 * clipping region but only if they are smaller. */
1240 if (width < ffmpegdec->context->width)
1241 gst_caps_set_simple (caps, "width", G_TYPE_INT, width, NULL);
1242 if (height < ffmpegdec->context->height)
1243 gst_caps_set_simple (caps, "height", G_TYPE_INT, height, NULL);
1245 gst_caps_set_simple (caps, "interlaced", G_TYPE_BOOLEAN, interlaced, NULL);
1247 /* If a demuxer provided a framerate then use it (#313970) */
1248 if (ffmpegdec->format.video.fps_n != -1) {
1249 gst_caps_set_simple (caps, "framerate",
1250 GST_TYPE_FRACTION, ffmpegdec->format.video.fps_n,
1251 ffmpegdec->format.video.fps_d, NULL);
1253 gst_ffmpegviddec_add_pixel_aspect_ratio (ffmpegdec,
1254 gst_caps_get_structure (caps, 0));
1256 if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))
1259 gst_caps_unref (caps);
/* error label: no caps mapping found for the codec */
1266 #ifdef HAVE_FFMPEG_UNINSTALLED
1267 /* using internal ffmpeg snapshot */
1268 GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
1269 ("Could not find GStreamer caps mapping for FFmpeg codec '%s'.",
1270 oclass->in_plugin->name), (NULL));
1272 /* using external ffmpeg */
1273 GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION,
1274 ("Could not find GStreamer caps mapping for FFmpeg codec '%s', and "
1275 "you are using an external libavcodec. This is most likely due to "
1276 "a packaging problem and/or libavcodec having been upgraded to a "
1277 "version that is not compatible with this version of "
1278 "gstreamer-ffmpeg. Make sure your gstreamer-ffmpeg and libavcodec "
1279 "packages come from the same source/repository.",
1280 oclass->in_plugin->name), (NULL));
/* error label: downstream refused the caps */
1286 GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
1287 ("Could not set caps for ffmpeg decoder (%s), not fixed?",
1288 oclass->in_plugin->name));
1289 gst_caps_unref (caps);
1295 /* perform qos calculations before decoding the next frame.
1297 * Sets the skip_frame flag and if things are really bad, skips to the next
1300 * Returns TRUE if the frame should be decoded, FALSE if the frame can be dropped
/* Run QoS bookkeeping before decoding the frame with the given timestamp.
 * Updates context->skip_frame (AVDISCARD_DEFAULT vs AVDISCARD_NONREF) based
 * on the latest QoS observation, sets *mode_switch when the skip mode
 * changed, and posts a QoS message when the frame is dropped. */
1304 gst_ffmpegviddec_do_qos (GstFFMpegVidDec * ffmpegdec, GstClockTime timestamp,
1305 gboolean * mode_switch)
1307 GstClockTimeDiff diff;
1309 GstClockTime qostime, earliest_time;
1310 gboolean res = TRUE;
1312 *mode_switch = FALSE;
1314 /* no timestamp, can't do QoS */
1315 if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (timestamp)))
1318 /* get latest QoS observation values */
1319 gst_ffmpegviddec_read_qos (ffmpegdec, &proportion, &earliest_time);
1321 /* skip qos if we have no observation (yet) */
1322 if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (earliest_time))) {
1323 /* no skip_frame initially */
1324 ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
1328 /* qos is done on running time of the timestamp */
1329 qostime = gst_segment_to_running_time (&ffmpegdec->segment, GST_FORMAT_TIME,
1332 /* timestamp can be out of segment, then we don't do QoS */
1333 if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (qostime)))
1336 /* see how our next timestamp relates to the latest qos timestamp. negative
1337 * values mean we are early, positive values mean we are too late. */
1338 diff = GST_CLOCK_DIFF (qostime, earliest_time);
1340 GST_DEBUG_OBJECT (ffmpegdec, "QOS: qostime %" GST_TIME_FORMAT
1341 ", earliest %" GST_TIME_FORMAT, GST_TIME_ARGS (qostime),
1342 GST_TIME_ARGS (earliest_time));
1344 /* if we using less than 40% of the available time, we can try to
1345 * speed up again when we were slow. */
1346 if (proportion < 0.4 && diff < 0) {
1350 /* we're too slow, try to speed up */
1351 if (ffmpegdec->waiting_for_key) {
1352 /* we were waiting for a keyframe, that's ok */
1355 /* switch to skip_frame mode */
/* count this frame as processed (not dropped) */
1361 ffmpegdec->processed++;
/* back to normal decoding: we have time to spare again */
1371 if (ffmpegdec->context->skip_frame != AVDISCARD_DEFAULT) {
1372 ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
1373 *mode_switch = TRUE;
1374 GST_DEBUG_OBJECT (ffmpegdec, "QOS: normal mode %g < 0.4", proportion);
1376 ffmpegdec->processed++;
/* we are late: skip decoding of non-reference frames */
1381 if (ffmpegdec->context->skip_frame != AVDISCARD_NONREF) {
1382 ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
1383 *mode_switch = TRUE;
1384 GST_DEBUG_OBJECT (ffmpegdec,
1385 "QOS: hurry up, diff %" G_GINT64_FORMAT " >= 0", diff);
/* drop path: account for the drop and post a QoS message downstream apps
 * can observe */
1391 GstClockTime stream_time, jitter;
1392 GstMessage *qos_msg;
1394 ffmpegdec->dropped++;
1396 gst_segment_to_stream_time (&ffmpegdec->segment, GST_FORMAT_TIME,
1398 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
1400 gst_message_new_qos (GST_OBJECT_CAST (ffmpegdec), FALSE, qostime,
1401 stream_time, timestamp, GST_CLOCK_TIME_NONE);
1402 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
1403 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
1404 ffmpegdec->processed, ffmpegdec->dropped);
1405 gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec), qos_msg);
1411 /* returns TRUE if buffer is within segment, else FALSE.
1412 * if Buffer is on segment border, it's timestamp and duration will be clipped */
/* Clip 'buf' (with the given timestamp/duration) against dec->segment.
 * Returns TRUE when the buffer lies (partly) inside the segment; on a
 * partial overlap the buffer's TIMESTAMP/DURATION are rewritten to the
 * clipped values. Returns FALSE when the buffer should be dropped. */
1414 clip_video_buffer (GstFFMpegVidDec * dec, GstBuffer * buf, GstClockTime in_ts,
1415 GstClockTime in_dur)
1417 gboolean res = TRUE;
1418 gint64 cstart, cstop;
1421 GST_LOG_OBJECT (dec,
1422 "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
1423 GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur));
1425 /* can't clip without TIME segment */
1426 if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))
1429 /* we need a start time */
1430 if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))
1433 /* generate valid stop, if duration unknown, we have unknown stop */
1435 GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE;
1439 gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &cstart,
1441 if (G_UNLIKELY (!res))
1444 /* we're pretty sure the duration of this buffer is not till the end of this
1445 * segment (which _clip will assume when the stop is -1) */
1446 if (stop == GST_CLOCK_TIME_NONE)
1447 cstop = GST_CLOCK_TIME_NONE;
1449 /* update timestamp and possibly duration if the clipped stop time is
1451 GST_BUFFER_TIMESTAMP (buf) = cstart;
1452 if (GST_CLOCK_TIME_IS_VALID (cstop))
1453 GST_BUFFER_DURATION (buf) = cstop - cstart;
1455 GST_LOG_OBJECT (dec,
1456 "clipped timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
1457 GST_TIME_ARGS (cstart), GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
/* exit label: res == FALSE means the caller must drop the buffer */
1460 GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
1465 /* figure out if the current picture is a keyframe, return TRUE if that is
/* Decide whether the current decoded picture should be treated as a
 * keyframe. Also latches has_b_frames the first time a B frame is seen
 * (needed for the DTS -> PTS handling) and posts a latency message then.
 * Some intra-only codecs (Indeo3, MSZH, ZLIB, VP3, HuffYUV) are always
 * treated as keyframe streams. */
1468 check_keyframe (GstFFMpegVidDec * ffmpegdec)
1470 GstFFMpegVidDecClass *oclass;
1471 gboolean is_itype = FALSE;
1472 gboolean is_reference = FALSE;
1473 gboolean iskeyframe;
1475 /* figure out if we are dealing with a keyframe */
1476 oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
1478 /* remember that we have B frames, we need this for the DTS -> PTS conversion
1480 if (!ffmpegdec->has_b_frames && ffmpegdec->picture->pict_type == FF_B_TYPE) {
1481 GST_DEBUG_OBJECT (ffmpegdec, "we have B frames");
1482 ffmpegdec->has_b_frames = TRUE;
1483 /* Emit latency message to recalculate it */
1484 gst_element_post_message (GST_ELEMENT_CAST (ffmpegdec),
1485 gst_message_new_latency (GST_OBJECT_CAST (ffmpegdec)));
1488 is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
1489 is_reference = (ffmpegdec->picture->reference == 1);
1491 iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
1492 || (oclass->in_plugin->id == CODEC_ID_INDEO3)
1493 || (oclass->in_plugin->id == CODEC_ID_MSZH)
1494 || (oclass->in_plugin->id == CODEC_ID_ZLIB)
1495 || (oclass->in_plugin->id == CODEC_ID_VP3)
1496 || (oclass->in_plugin->id == CODEC_ID_HUFFYUV)
;
1498 GST_LOG_OBJECT (ffmpegdec,
1499 "current picture: type: %d, is_keyframe:%d, is_itype:%d, is_reference:%d",
1500 ffmpegdec->picture->pict_type, iskeyframe, is_itype, is_reference);
1505 /* get an outbuf buffer with the current picture */
/* Obtain a GstBuffer holding the current decoded picture.
 * Fast path: when we allocated the decode buffer ourselves (direct
 * rendering) the picture's opaque pointer already is the GstBuffer; just
 * take an extra ref. Slow path: allocate an output buffer of the (possibly
 * clipped) size and copy the picture into it. */
1506 static GstFlowReturn
1507 get_output_buffer (GstFFMpegVidDec * ffmpegdec, GstBuffer ** outbuf)
1514 if (ffmpegdec->picture->opaque != NULL) {
1515 /* we allocated a picture already for ffmpeg to decode into, let's pick it
1516 * up and use it now. */
1517 *outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
1518 GST_LOG_OBJECT (ffmpegdec, "using opaque buffer %p", *outbuf);
/* caller gets its own reference; ffmpeg keeps the one in opaque */
1520 gst_buffer_ref (*outbuf);
1523 AVPicture pic, *outpic;
1526 GST_LOG_OBJECT (ffmpegdec, "get output buffer");
1528 /* figure out size of output buffer, this is the clipped output size because
1529 * we will copy the picture into it but only when the clipping region is
1530 * smaller than the actual picture size. */
1531 if ((width = ffmpegdec->format.video.clip_width) == -1)
1532 width = ffmpegdec->context->width;
1533 else if (width > ffmpegdec->context->width)
1534 width = ffmpegdec->context->width;
1536 if ((height = ffmpegdec->format.video.clip_height) == -1)
1537 height = ffmpegdec->context->height;
1538 else if (height > ffmpegdec->context->height)
1539 height = ffmpegdec->context->height;
1541 GST_LOG_OBJECT (ffmpegdec, "clip width %d/height %d", width, height);
1543 ret = alloc_output_buffer (ffmpegdec, outbuf, width, height);
1544 if (G_UNLIKELY (ret != GST_FLOW_OK))
1547 /* original ffmpeg code does not handle odd sizes correctly.
1548 * This patched up version does */
1549 gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (*outbuf),
1550 ffmpegdec->context->pix_fmt, width, height);
1552 outpic = (AVPicture *) ffmpegdec->picture;
1554 GST_LOG_OBJECT (ffmpegdec, "linsize %d %d %d", outpic->linesize[0],
1555 outpic->linesize[1], outpic->linesize[2]);
1556 GST_LOG_OBJECT (ffmpegdec, "data %u %u %u", 0,
1557 (guint) (outpic->data[1] - outpic->data[0]),
1558 (guint) (outpic->data[2] - outpic->data[0]));
/* copy the decoded picture planes into our freshly allocated buffer */
1560 av_picture_copy (&pic, outpic, ffmpegdec->context->pix_fmt, width, height);
/* invalidate the stored timing index, it has been consumed now */
1562 ffmpegdec->picture->reordered_opaque = -1;
/* error label: downstream buffer allocation failed */
1569 GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed");
/* Drop all buffers queued for reverse playback without pushing them. */
1575 clear_queued (GstFFMpegVidDec * ffmpegdec)
1577 g_list_foreach (ffmpegdec->queued, (GFunc) gst_mini_object_unref, NULL);
1578 g_list_free (ffmpegdec->queued);
1579 ffmpegdec->queued = NULL;
/* Push all buffers queued during reverse playback downstream, oldest link
 * first, emptying the queue. Returns the flow result of the pushes. Pushing
 * transfers ownership of each buffer to the peer pad. */
1582 static GstFlowReturn
1583 flush_queued (GstFFMpegVidDec * ffmpegdec)
1585 GstFlowReturn res = GST_FLOW_OK;
1587 while (ffmpegdec->queued) {
1588 GstBuffer *buf = GST_BUFFER_CAST (ffmpegdec->queued->data);
1590 GST_LOG_OBJECT (ffmpegdec, "pushing buffer %p, offset %"
1591 G_GUINT64_FORMAT ", timestamp %"
1592 GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT, buf,
1593 GST_BUFFER_OFFSET (buf),
1594 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
1595 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
1597 /* iterate output queue and push downstream */
1598 res = gst_pad_push (ffmpegdec->srcpad, buf);
/* unlink the head element we just pushed */
1601 g_list_delete_link (ffmpegdec->queued, ffmpegdec->queued);
/* Initialize an AVPacket wrapping the given data/size; all other packet
 * fields are zeroed. The packet does not own the data. */
1607 gst_avpacket_init (AVPacket * packet, guint8 * data, guint size)
1609 memset (packet, 0, sizeof (AVPacket));
1610 packet->data = data;
1611 packet->size = size;
1614 /* gst_ffmpegviddec_[video|audio]_frame:
1616 * data: pointer to the data to decode
1617 * size: size of data in bytes
1618 * in_timestamp: incoming timestamp.
1619 * in_duration: incoming duration.
1620 * in_offset: incoming offset (frame number).
1621 * outbuf: outgoing buffer. Different from NULL ONLY if it contains decoded data.
1624 * Returns: number of bytes used in decoding. The check for successful decode is
1625 * outbuf being non-NULL.
/* Decode one video frame with avcodec_decode_video2() and, on success, fill
 * *outbuf with a buffer carrying the decoded picture plus computed
 * timestamp, offset and duration metadata. *outbuf stays NULL when nothing
 * was decoded. Returns the number of input bytes consumed; *ret carries the
 * GstFlowReturn for the caller's chain function. */
1628 gst_ffmpegviddec_video_frame (GstFFMpegVidDec * ffmpegdec,
1629 guint8 * data, guint size,
1630 const GstTSInfo * dec_info, GstBuffer ** outbuf, GstFlowReturn * ret)
1634 gboolean iskeyframe;
1635 gboolean mode_switch;
1637 gint skip_frame = AVDISCARD_DEFAULT;
1638 GstClockTime out_timestamp, out_duration, out_pts;
1640 const GstTSInfo *out_info;
/* make the decoder context point back at us for the buffer callbacks */
1646 ffmpegdec->context->opaque = ffmpegdec;
1648 /* in case we skip frames */
1649 ffmpegdec->picture->pict_type = -1;
1651 /* run QoS code, we don't stop decoding the frame when we are late because
1652 * else we might skip a reference frame */
1654 gst_ffmpegviddec_do_qos (ffmpegdec, dec_info->timestamp, &mode_switch);
1656 if (ffmpegdec->is_realvideo && data != NULL) {
1660 /* setup the slice table for realvideo */
1661 if (ffmpegdec->context->slice_offset == NULL)
1662 ffmpegdec->context->slice_offset = g_malloc (sizeof (guint32) * 1000);
/* first input byte is (slice count - 1), followed by LE 32-bit offsets */
1664 slice_count = (*data++) + 1;
1665 ffmpegdec->context->slice_count = slice_count;
1667 for (i = 0; i < slice_count; i++) {
1669 ffmpegdec->context->slice_offset[i] = GST_READ_UINT32_LE (data);
1675 /* no decoding needed, save previous skip_frame value and brutally skip
1676 * decoding everything */
1677 skip_frame = ffmpegdec->context->skip_frame;
1678 ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
1681 /* save reference to the timing info */
1682 ffmpegdec->context->reordered_opaque = (gint64) dec_info->idx;
1683 ffmpegdec->picture->reordered_opaque = (gint64) dec_info->idx;
1685 GST_DEBUG_OBJECT (ffmpegdec, "stored opaque values idx %d", dec_info->idx);
1687 /* now decode the frame */
1688 gst_avpacket_init (&packet, data, size);
1689 len = avcodec_decode_video2 (ffmpegdec->context,
1690 ffmpegdec->picture, &have_data, &packet);
1692 /* restore previous state */
1694 ffmpegdec->context->skip_frame = skip_frame;
1696 GST_DEBUG_OBJECT (ffmpegdec, "after decode: len %d, have_data %d",
1699 /* when we are in skip_frame mode, don't complain when ffmpeg returned
1700 * no data because we told it to skip stuff. */
1701 if (len < 0 && (mode_switch || ffmpegdec->context->skip_frame))
1704 if (len > 0 && have_data <= 0 && (mode_switch
1705 || ffmpegdec->context->skip_frame)) {
1706 /* we consumed some bytes but nothing decoded and we are skipping frames,
1707 * disable the interpolation of DTS timestamps */
1708 ffmpegdec->last_out = -1;
1711 /* no data, we're done */
1712 if (len < 0 || have_data <= 0)
1715 /* get the output picture timing info again */
1716 out_info = gst_ts_info_get (ffmpegdec, ffmpegdec->picture->reordered_opaque);
1717 out_pts = out_info->timestamp;
1718 out_duration = out_info->duration;
1719 out_offset = out_info->offset;
1721 GST_DEBUG_OBJECT (ffmpegdec,
1722 "pts %" G_GUINT64_FORMAT " duration %" G_GUINT64_FORMAT " offset %"
1723 G_GINT64_FORMAT, out_pts, out_duration, out_offset);
1724 GST_DEBUG_OBJECT (ffmpegdec, "picture: pts %" G_GUINT64_FORMAT,
1725 (guint64) ffmpegdec->picture->pts);
1726 GST_DEBUG_OBJECT (ffmpegdec, "picture: num %d",
1727 ffmpegdec->picture->coded_picture_number);
1728 GST_DEBUG_OBJECT (ffmpegdec, "picture: ref %d",
1729 ffmpegdec->picture->reference);
1730 GST_DEBUG_OBJECT (ffmpegdec, "picture: display %d",
1731 ffmpegdec->picture->display_picture_number);
1732 GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque %p",
1733 ffmpegdec->picture->opaque);
1734 GST_DEBUG_OBJECT (ffmpegdec, "picture: reordered opaque %" G_GUINT64_FORMAT,
1735 (guint64) ffmpegdec->picture->reordered_opaque);
1736 GST_DEBUG_OBJECT (ffmpegdec, "repeat_pict:%d",
1737 ffmpegdec->picture->repeat_pict);
1738 GST_DEBUG_OBJECT (ffmpegdec, "interlaced_frame:%d",
1739 ffmpegdec->picture->interlaced_frame);
/* renegotiate when the stream's interlacedness flips mid-stream */
1741 if (G_UNLIKELY (ffmpegdec->picture->interlaced_frame !=
1742 ffmpegdec->format.video.interlaced)) {
1743 GST_WARNING ("Change in interlacing ! picture:%d, recorded:%d",
1744 ffmpegdec->picture->interlaced_frame,
1745 ffmpegdec->format.video.interlaced);
1746 ffmpegdec->format.video.interlaced = ffmpegdec->picture->interlaced_frame;
1747 gst_ffmpegviddec_negotiate (ffmpegdec, TRUE);
1751 /* Whether a frame is interlaced or not is unknown at the time of
1752 buffer allocation, so caps on the buffer in opaque will have
1753 the previous frame's interlaced flag set. So if interlacedness
1754 has changed since allocation, we update the buffer (if any)
1755 caps now with the correct interlaced flag. */
1756 if (ffmpegdec->picture->opaque != NULL) {
1757 GstBuffer *buffer = ffmpegdec->picture->opaque;
1758 if (GST_BUFFER_CAPS (buffer) && GST_PAD_CAPS (ffmpegdec->srcpad)) {
1759 GstStructure *s = gst_caps_get_structure (GST_BUFFER_CAPS (buffer), 0);
1760 gboolean interlaced;
1761 gboolean found = gst_structure_get_boolean (s, "interlaced", &interlaced);
1762 if (!found || (! !interlaced != ! !ffmpegdec->format.video.interlaced)) {
1763 GST_DEBUG_OBJECT (ffmpegdec,
1764 "Buffer interlacing does not match pad, updating");
1765 buffer = gst_buffer_make_metadata_writable (buffer);
1766 gst_buffer_set_caps (buffer, GST_PAD_CAPS (ffmpegdec->srcpad));
1767 ffmpegdec->picture->opaque = buffer;
1772 /* check if we are dealing with a keyframe here, this will also check if we
1773 * are dealing with B frames. */
1774 iskeyframe = check_keyframe (ffmpegdec);
1776 /* check that the timestamps go upwards */
1777 if (ffmpegdec->last_out != -1 && ffmpegdec->last_out > out_pts) {
1778 /* timestamps go backwards, this means frames were reordered and we must
1779 * be dealing with DTS as the buffer timestamps */
1780 if (!ffmpegdec->reordered_out) {
1781 GST_DEBUG_OBJECT (ffmpegdec, "detected reordered out timestamps");
1782 ffmpegdec->reordered_out = TRUE;
1784 if (ffmpegdec->reordered_in) {
1785 /* we reset the input reordering here because we want to recover from an
1786 * occasionally wrong reordered input timestamp */
1787 GST_DEBUG_OBJECT (ffmpegdec, "assuming DTS input timestamps");
1788 ffmpegdec->reordered_in = FALSE;
1792 if (out_pts == 0 && out_pts == ffmpegdec->last_out) {
1793 GST_LOG_OBJECT (ffmpegdec, "ffmpeg returns 0 timestamps, ignoring");
1794 /* some codecs only output 0 timestamps, when that happens, make us select an
1795 * output timestamp based on the input timestamp. We do this by making the
1796 * ffmpeg timestamp and the interpolated next timestamp invalid. */
1798 ffmpegdec->next_out = -1;
1800 ffmpegdec->last_out = out_pts;
1802 /* we assume DTS as input timestamps unless we see reordered input
1804 if (!ffmpegdec->reordered_in && ffmpegdec->reordered_out) {
1805 /* PTS and DTS are the same for keyframes */
1806 if (!iskeyframe && ffmpegdec->next_out != -1) {
1807 /* interpolate all timestamps except for keyframes, FIXME, this is
1808 * wrong when QoS is active. */
1809 GST_DEBUG_OBJECT (ffmpegdec, "interpolate timestamps");
1815 /* when we're waiting for a keyframe, see if we have one or drop the current
1817 if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
1818 if (G_LIKELY (!iskeyframe))
1819 goto drop_non_keyframe;
1821 /* we have a keyframe, we can stop waiting for one */
1822 ffmpegdec->waiting_for_key = FALSE;
1825 /* get a handle to the output buffer */
1826 *ret = get_output_buffer (ffmpegdec, outbuf);
1827 if (G_UNLIKELY (*ret != GST_FLOW_OK))
/* timestamp selection, in priority order: */
1833 * 1) Copy picture timestamp if valid
1834 * 2) else interpolate from previous output timestamp
1835 * 3) else copy input timestamp
1838 if (out_pts != -1) {
1839 /* Get (interpolated) timestamp from FFMPEG */
1840 out_timestamp = (GstClockTime) out_pts;
1841 GST_LOG_OBJECT (ffmpegdec, "using timestamp %" GST_TIME_FORMAT
1842 " returned by ffmpeg", GST_TIME_ARGS (out_timestamp));
1844 if (!GST_CLOCK_TIME_IS_VALID (out_timestamp) && ffmpegdec->next_out != -1) {
1845 out_timestamp = ffmpegdec->next_out;
1846 GST_LOG_OBJECT (ffmpegdec, "using next timestamp %" GST_TIME_FORMAT,
1847 GST_TIME_ARGS (out_timestamp));
1849 if (!GST_CLOCK_TIME_IS_VALID (out_timestamp)) {
1850 out_timestamp = dec_info->timestamp;
1851 GST_LOG_OBJECT (ffmpegdec, "using in timestamp %" GST_TIME_FORMAT,
1852 GST_TIME_ARGS (out_timestamp));
1854 GST_BUFFER_TIMESTAMP (*outbuf) = out_timestamp;
/* offset selection, in priority order: */
1858 * 0) Use stored input offset (from opaque)
1859 * 1) Use value converted from timestamp if valid
1860 * 2) Use input offset if valid
1862 if (out_offset != GST_BUFFER_OFFSET_NONE) {
1863 /* out_offset already contains the offset from ts_info */
1864 GST_LOG_OBJECT (ffmpegdec, "Using offset returned by ffmpeg");
1865 } else if (out_timestamp != GST_CLOCK_TIME_NONE) {
1866 GstFormat out_fmt = GST_FORMAT_DEFAULT;
1867 GST_LOG_OBJECT (ffmpegdec, "Using offset converted from timestamp");
1868 /* FIXME, we should really remove this as it's not nice at all to do
1869 * upstream queries for each frame to get the frame offset. We also can't
1870 * really remove this because it is the only way of setting frame offsets
1871 * on outgoing buffers. We should have metadata so that the upstream peer
1872 * can set a frame number on the encoded data. */
1873 gst_pad_query_peer_convert (ffmpegdec->sinkpad,
1874 GST_FORMAT_TIME, out_timestamp, &out_fmt, &out_offset);
1875 } else if (dec_info->offset != GST_BUFFER_OFFSET_NONE) {
1876 /* FIXME, the input offset is input media specific and might not
1877 * be the same for the output media. (byte offset as input, frame number
1878 * as output, for example) */
1879 GST_LOG_OBJECT (ffmpegdec, "using in_offset %" G_GINT64_FORMAT,
1881 out_offset = dec_info->offset;
1883 GST_LOG_OBJECT (ffmpegdec, "no valid offset found");
1884 out_offset = GST_BUFFER_OFFSET_NONE;
1886 GST_BUFFER_OFFSET (*outbuf) = out_offset;
/* duration selection, in priority order: */
1891 * 1) Use reordered input duration if valid
1892 * 2) Else use input duration
1893 * 3) else use input framerate
1894 * 4) else use ffmpeg framerate
1896 if (GST_CLOCK_TIME_IS_VALID (out_duration)) {
1897 /* We have a valid (reordered) duration */
1898 GST_LOG_OBJECT (ffmpegdec, "Using duration returned by ffmpeg");
1899 } else if (GST_CLOCK_TIME_IS_VALID (dec_info->duration)) {
1900 GST_LOG_OBJECT (ffmpegdec, "using in_duration");
1901 out_duration = dec_info->duration;
1902 } else if (GST_CLOCK_TIME_IS_VALID (ffmpegdec->last_diff)) {
1903 GST_LOG_OBJECT (ffmpegdec, "using last-diff");
1904 out_duration = ffmpegdec->last_diff;
1906 /* if we have an input framerate, use that */
1907 if (ffmpegdec->format.video.fps_n != -1 &&
1908 (ffmpegdec->format.video.fps_n != 1000 &&
1909 ffmpegdec->format.video.fps_d != 1)) {
1910 GST_LOG_OBJECT (ffmpegdec, "using input framerate for duration");
1911 out_duration = gst_util_uint64_scale_int (GST_SECOND,
1912 ffmpegdec->format.video.fps_d, ffmpegdec->format.video.fps_n);
1914 /* don't try to use the decoder's framerate when it seems a bit abnormal,
1915 * which we assume when den >= 1000... */
1916 if (ffmpegdec->context->time_base.num != 0 &&
1917 (ffmpegdec->context->time_base.den > 0 &&
1918 ffmpegdec->context->time_base.den < 1000)) {
1919 GST_LOG_OBJECT (ffmpegdec, "using decoder's framerate for duration");
1920 out_duration = gst_util_uint64_scale_int (GST_SECOND,
1921 ffmpegdec->context->time_base.num *
1922 ffmpegdec->context->ticks_per_frame,
1923 ffmpegdec->context->time_base.den);
1925 GST_LOG_OBJECT (ffmpegdec, "no valid duration found");
1930 /* Take repeat_pict into account */
1931 if (GST_CLOCK_TIME_IS_VALID (out_duration)) {
1932 out_duration += out_duration * ffmpegdec->picture->repeat_pict / 2;
1934 GST_BUFFER_DURATION (*outbuf) = out_duration;
/* remember where the next frame should start, for DTS interpolation */
1936 if (out_timestamp != -1 && out_duration != -1 && out_duration != 0)
1937 ffmpegdec->next_out = out_timestamp + out_duration;
1939 ffmpegdec->next_out = -1;
1941 /* palette is not part of raw video frame in gst and the size
1942 * of the outgoing buffer needs to be adjusted accordingly */
1943 if (ffmpegdec->context->palctrl != NULL)
1944 GST_BUFFER_SIZE (*outbuf) -= AVPALETTE_SIZE;
1946 /* now see if we need to clip the buffer against the segment boundaries. */
1947 if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, out_timestamp,
1951 /* mark as keyframe or delta unit */
1953 GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
1955 if (ffmpegdec->picture->top_field_first)
1956 GST_BUFFER_FLAG_SET (*outbuf, GST_VIDEO_BUFFER_TFF);
/* common exit */
1960 GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
1961 *ret, *outbuf, len);
/* label: still waiting for a keyframe, frame discarded */
1967 GST_WARNING_OBJECT (ffmpegdec, "Dropping non-keyframe (seek/init)");
/* label: get_output_buffer failed */
1972 GST_DEBUG_OBJECT (ffmpegdec, "no output buffer");
/* label: buffer fell completely outside the segment */
1978 GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
1979 gst_buffer_unref (*outbuf);
1986 /* gst_ffmpegviddec_frame:
1988 * data: pointer to the data to decode
1989 * size: size of data in bytes
1990 * got_data: 0 if no data was decoded, != 0 otherwise.
1991 * in_time: timestamp of data
1992 * in_duration: duration of data
1993 * ret: GstFlowReturn to return in the chain function
1995 * Decode the given frame and pushes it downstream.
1997 * Returns: Number of bytes used in decoding, -1 on error/failure.
/* Decode one frame via gst_ffmpegviddec_video_frame() and push (or, for
 * reverse playback, queue) the resulting buffer. *got_data reports whether
 * anything was decoded; *ret carries the flow return for the chain
 * function. Returns bytes consumed, or -1 when no codec context is set. */
2001 gst_ffmpegviddec_frame (GstFFMpegVidDec * ffmpegdec,
2002 guint8 * data, guint size, gint * got_data, const GstTSInfo * dec_info,
2003 GstFlowReturn * ret)
2005 GstFFMpegVidDecClass *oclass;
2006 GstBuffer *outbuf = NULL;
2007 gint have_data = 0, len = 0;
2009 if (G_UNLIKELY (ffmpegdec->context->codec == NULL))
2012 GST_LOG_OBJECT (ffmpegdec, "data:%p, size:%d, id:%d", data, size,
2016 ffmpegdec->context->frame_number++;
2018 oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2021 gst_ffmpegviddec_video_frame (ffmpegdec, data, size, dec_info,
/* decoding errors are logged but not fatal to the stream */
2027 if (len < 0 || have_data < 0) {
2028 GST_WARNING_OBJECT (ffmpegdec,
2029 "ffdec_%s: decoding error (len: %d, have_data: %d)",
2030 oclass->in_plugin->name, len, have_data);
2033 } else if (len == 0 && have_data == 0) {
2037 /* this is where I lost my last clue on ffmpeg... */
2042 GST_LOG_OBJECT (ffmpegdec,
2043 "Decoded data, now pushing buffer %p with offset %" G_GINT64_FORMAT
2044 ", timestamp %" GST_TIME_FORMAT " and duration %" GST_TIME_FORMAT,
2045 outbuf, GST_BUFFER_OFFSET (outbuf),
2046 GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
2047 GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));
2049 /* mark pending discont */
2050 if (ffmpegdec->discont) {
2051 GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
2052 ffmpegdec->discont = FALSE;
/* forward playback pushes immediately; reverse playback queues */
2055 if (ffmpegdec->segment.rate > 0.0) {
2057 *ret = gst_pad_push (ffmpegdec->srcpad, outbuf);
2059 /* reverse playback, queue frame till later when we get a discont. */
2060 GST_DEBUG_OBJECT (ffmpegdec, "queued frame");
2061 ffmpegdec->queued = g_list_prepend (ffmpegdec->queued, outbuf);
2065 GST_DEBUG_OBJECT (ffmpegdec, "We didn't get a decoded buffer");
/* error label: decoder was never opened with a codec */
2074 GST_ERROR_OBJECT (ffmpegdec, "no codec context");
/* Drain the decoder: for codecs with CODEC_CAP_DELAY, feed empty packets
 * (bounded to 10 attempts) until no more delayed frames come out; then, for
 * reverse playback, flush any queued buffers downstream. */
2080 gst_ffmpegviddec_drain (GstFFMpegVidDec * ffmpegdec)
2082 GstFFMpegVidDecClass *oclass;
2084 oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2086 if (oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
2087 gint have_data, len, try = 0;
2089 GST_LOG_OBJECT (ffmpegdec,
2090 "codec has delay capabilities, calling until ffmpeg has drained everything");
/* NULL data / zero size asks the codec for its buffered frames */
2096 gst_ffmpegviddec_frame (ffmpegdec, NULL, 0, &have_data, &ts_info_none,
2098 if (len < 0 || have_data == 0)
2100 } while (try++ < 10);
2102 if (ffmpegdec->segment.rate < 0.0) {
2103 /* if we have some queued frames for reverse playback, flush them now */
2104 flush_queued (ffmpegdec);
/* Reset the parser state and drop the cached partial-input buffer, so a
 * flush/seek starts with clean parser timestamps. */
2109 gst_ffmpegviddec_flush_pcache (GstFFMpegVidDec * ffmpegdec)
2111 if (ffmpegdec->pctx) {
2114 guint8 bdata[FF_INPUT_BUFFER_PADDING_SIZE];
2116 bsize = FF_INPUT_BUFFER_PADDING_SIZE;
2117 memset (bdata, 0, bsize);
2119 /* parse some dummy data to work around some ffmpeg weirdness where it keeps
2120 * the previous pts around */
2121 av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
2122 &data, &size, bdata, bsize, -1, -1, -1);
2123 ffmpegdec->pctx->pts = -1;
2124 ffmpegdec->pctx->dts = -1;
2127 if (ffmpegdec->pcache) {
2128 gst_buffer_unref (ffmpegdec->pcache);
2129 ffmpegdec->pcache = NULL;
/* Sink pad event handler. EOS-like events drain the decoder; FLUSH_STOP
 * resets decoder/timestamp/QoS state; NEWSEGMENT is converted to TIME
 * (from BYTES via the bitrate) if needed and stored, then every event is
 * forwarded downstream. */
2134 gst_ffmpegviddec_sink_event (GstPad * pad, GstEvent * event)
2136 GstFFMpegVidDec *ffmpegdec;
2137 gboolean ret = FALSE;
2139 ffmpegdec = (GstFFMpegVidDec *) gst_pad_get_parent (pad);
2141 GST_DEBUG_OBJECT (ffmpegdec, "Handling %s event",
2142 GST_EVENT_TYPE_NAME (event));
2144 switch (GST_EVENT_TYPE (event)) {
/* push out any delayed frames before the stream ends */
2147 gst_ffmpegviddec_drain (ffmpegdec);
2150 case GST_EVENT_FLUSH_STOP:
2152 if (ffmpegdec->opened) {
2153 avcodec_flush_buffers (ffmpegdec->context);
/* discard all decode state; wait for a fresh keyframe after the flush */
2155 gst_ffmpegviddec_reset_ts (ffmpegdec);
2156 gst_ffmpegviddec_reset_qos (ffmpegdec);
2157 gst_ffmpegviddec_flush_pcache (ffmpegdec);
2158 ffmpegdec->waiting_for_key = TRUE;
2159 gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
2160 clear_queued (ffmpegdec);
2163 case GST_EVENT_NEWSEGMENT:
2167 gint64 start, stop, time;
2168 gdouble rate, arate;
2170 gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
2171 &start, &stop, &time);
2174 case GST_FORMAT_TIME:
2175 /* fine, our native segment format */
2177 case GST_FORMAT_BYTES:
/* a BYTES segment can only be converted through the stream bitrate */
2181 bit_rate = ffmpegdec->context->bit_rate;
2183 /* convert to time or fail */
2187 GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);
2189 /* convert values to TIME */
2191 start = gst_util_uint64_scale_int (start, GST_SECOND, bit_rate);
2193 stop = gst_util_uint64_scale_int (stop, GST_SECOND, bit_rate);
2195 time = gst_util_uint64_scale_int (time, GST_SECOND, bit_rate);
2197 /* unref old event */
2198 gst_event_unref (event);
2200 /* create new converted time segment */
2201 fmt = GST_FORMAT_TIME;
2202 /* FIXME, bitrate is not good enough to find a good stop, let's
2203 * hope start and time were 0... meh. */
2205 event = gst_event_new_new_segment (update, rate, fmt,
2210 /* invalid format */
2211 goto invalid_format;
2214 /* drain pending frames before trying to use the new segment, queued
2215 * buffers belonged to the previous segment. */
2216 if (ffmpegdec->context->codec)
2217 gst_ffmpegviddec_drain (ffmpegdec);
2219 GST_DEBUG_OBJECT (ffmpegdec,
2220 "NEWSEGMENT in time start %" GST_TIME_FORMAT " -- stop %"
2221 GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));
2223 /* and store the values */
2224 gst_segment_set_newsegment_full (&ffmpegdec->segment, update,
2225 rate, arate, fmt, start, stop, time);
2232 /* and push segment downstream */
2233 ret = gst_pad_push_event (ffmpegdec->srcpad, event);
2236 gst_object_unref (ffmpegdec);
/* error label: BYTES segment but no bitrate available for conversion */
2243 GST_WARNING_OBJECT (ffmpegdec, "no bitrate to convert BYTES to TIME");
2244 gst_event_unref (event);
/* error label: segment format we cannot handle */
2249 GST_WARNING_OBJECT (ffmpegdec, "unknown format received in NEWSEGMENT");
2250 gst_event_unref (event);
/* Sink-pad chain function: joins incoming data with any cached left-over,
 * optionally runs it through the ffmpeg parser, and feeds complete chunks to
 * gst_ffmpegviddec_frame() in a loop until the input is consumed.  Also does
 * timestamp bookkeeping (reordered-input detection, duration estimation) and
 * keyframe gating after DISCONT.
 * NOTE(review): this extract is gapped — parts of the loop body, braces and
 * some statements are not visible here. */
2255 static GstFlowReturn
2256 gst_ffmpegviddec_chain (GstPad * pad, GstBuffer * inbuf)
2258 GstFFMpegVidDec *ffmpegdec;
2259 GstFFMpegVidDecClass *oclass;
2260 guint8 *data, *bdata;
2261 gint size, bsize, len, have_data;
2262 GstFlowReturn ret = GST_FLOW_OK;
2263 GstClockTime in_timestamp;
2264 GstClockTime in_duration;
2267 const GstTSInfo *in_info;
2268 const GstTSInfo *dec_info;
2270 ffmpegdec = (GstFFMpegVidDec *) (GST_PAD_PARENT (pad));
/* caps must have been negotiated (codec opened) before any data arrives */
2272 if (G_UNLIKELY (!ffmpegdec->opened))
2273 goto not_negotiated;
2275 discont = GST_BUFFER_IS_DISCONT (inbuf);
2277 /* The discont flags marks a buffer that is not continuous with the previous
2278 * buffer. This means we need to clear whatever data we currently have. We
2279 * currently also wait for a new keyframe, which might be suboptimal in the
2280 * case of a network error, better show the errors than to drop all data.. */
2281 if (G_UNLIKELY (discont)) {
2282 GST_DEBUG_OBJECT (ffmpegdec, "received DISCONT");
2283 /* drain what we have queued */
2284 gst_ffmpegviddec_drain (ffmpegdec);
2285 gst_ffmpegviddec_flush_pcache (ffmpegdec);
2286 avcodec_flush_buffers (ffmpegdec->context);
2287 ffmpegdec->discont = TRUE;
2288 gst_ffmpegviddec_reset_ts (ffmpegdec);
2290 /* by default we clear the input timestamp after decoding each frame so that
2291 * interpollation can work. */
2292 ffmpegdec->clear_ts = TRUE;
2294 oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2296 /* do early keyframe check pretty bad to rely on the keyframe flag in the
2297 * source for this as it might not even be parsed (UDP/file/..). */
2298 if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
2299 GST_DEBUG_OBJECT (ffmpegdec, "waiting for keyframe");
2300 if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DELTA_UNIT) &&
2301 oclass->in_plugin->type != AVMEDIA_TYPE_AUDIO)
2304 GST_DEBUG_OBJECT (ffmpegdec, "got keyframe");
2305 ffmpegdec->waiting_for_key = FALSE;
2307 /* parse cache joining. If there is cached data */
2308 if (ffmpegdec->pcache) {
2309 /* join with previous data */
2310 GST_LOG_OBJECT (ffmpegdec, "join parse cache");
2311 inbuf = gst_buffer_join (ffmpegdec->pcache, inbuf);
2312 /* no more cached data, we assume we can consume the complete cache */
2313 ffmpegdec->pcache = NULL;
2316 in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
2317 in_duration = GST_BUFFER_DURATION (inbuf);
2318 in_offset = GST_BUFFER_OFFSET (inbuf);
2320 /* get handle to timestamp info, we can pass this around to ffmpeg */
2321 in_info = gst_ts_info_store (ffmpegdec, in_timestamp, in_duration, in_offset);
2323 if (in_timestamp != -1) {
2324 /* check for increasing timestamps if they are jumping backwards, we
2325 * probably are dealing with PTS as timestamps */
2326 if (!ffmpegdec->reordered_in && ffmpegdec->last_in != -1) {
2327 if (in_timestamp < ffmpegdec->last_in) {
2328 GST_LOG_OBJECT (ffmpegdec, "detected reordered input timestamps");
2329 ffmpegdec->reordered_in = TRUE;
2330 ffmpegdec->last_diff = GST_CLOCK_TIME_NONE;
2331 } else if (in_timestamp > ffmpegdec->last_in) {
2333 /* keep track of timestamp diff to estimate duration */
2334 diff = in_timestamp - ffmpegdec->last_in;
2335 /* need to scale with amount of frames in the interval */
2336 if (ffmpegdec->last_frames)
2337 diff /= ffmpegdec->last_frames;
2339 GST_LOG_OBJECT (ffmpegdec, "estimated duration %" GST_TIME_FORMAT " %u",
2340 GST_TIME_ARGS (diff), ffmpegdec->last_frames);
2342 ffmpegdec->last_diff = diff;
2345 ffmpegdec->last_in = in_timestamp;
2346 ffmpegdec->last_frames = 0;
2349 GST_LOG_OBJECT (ffmpegdec,
2350 "Received new data of size %u, offset:%" G_GUINT64_FORMAT ", ts:%"
2351 GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", info %d",
2352 GST_BUFFER_SIZE (inbuf), GST_BUFFER_OFFSET (inbuf),
2353 GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration), in_info->idx);
2355 /* workarounds, functions write to buffers:
2356 * libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
2357 * libavcodec/svq3.c:svq3_decode_slice_header too.
2358 * ffmpeg devs know about it and will fix it (they said). */
2359 if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
2360 oclass->in_plugin->id == CODEC_ID_SVQ3) {
2361 inbuf = gst_buffer_make_writable (inbuf);
2364 bdata = GST_BUFFER_DATA (inbuf);
2365 bsize = GST_BUFFER_SIZE (inbuf);
/* when padding is enabled, copy the input into a private buffer that has
 * FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes appended, as libavcodec wants */
2367 if (ffmpegdec->do_padding) {
2369 if (ffmpegdec->padded_size < bsize + FF_INPUT_BUFFER_PADDING_SIZE) {
2370 ffmpegdec->padded_size = bsize + FF_INPUT_BUFFER_PADDING_SIZE;
2371 ffmpegdec->padded = g_realloc (ffmpegdec->padded, ffmpegdec->padded_size);
2372 GST_LOG_OBJECT (ffmpegdec, "resized padding buffer to %d",
2373 ffmpegdec->padded_size);
2375 memcpy (ffmpegdec->padded, bdata, bsize);
2376 memset (ffmpegdec->padded + bsize, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2378 bdata = ffmpegdec->padded;
/* main consume loop: parse (optional) + decode until bsize is exhausted */
2382 guint8 tmp_padding[FF_INPUT_BUFFER_PADDING_SIZE];
2384 /* parse, if at all possible */
2385 if (ffmpegdec->pctx) {
2388 GST_LOG_OBJECT (ffmpegdec,
2389 "Calling av_parser_parse2 with offset %" G_GINT64_FORMAT ", ts:%"
2390 GST_TIME_FORMAT " size %d", in_offset, GST_TIME_ARGS (in_timestamp),
2393 /* feed the parser. We pass the timestamp info so that we can recover all
2394 * info again later */
2395 res = av_parser_parse2 (ffmpegdec->pctx, ffmpegdec->context,
2396 &data, &size, bdata, bsize, in_info->idx, in_info->idx, in_offset);
2398 GST_LOG_OBJECT (ffmpegdec,
2399 "parser returned res %d and size %d, id %" G_GINT64_FORMAT, res, size,
2400 ffmpegdec->pctx->pts);
2402 /* store pts for decoding */
2403 if (ffmpegdec->pctx->pts != AV_NOPTS_VALUE && ffmpegdec->pctx->pts != -1)
2404 dec_info = gst_ts_info_get (ffmpegdec, ffmpegdec->pctx->pts);
2406 /* ffmpeg sometimes loses track after a flush, help it by feeding a
2407 * valid start time */
2408 ffmpegdec->pctx->pts = in_info->idx;
2409 ffmpegdec->pctx->dts = in_info->idx;
2413 GST_LOG_OBJECT (ffmpegdec, "consuming %d bytes. id %d", size,
2417 /* there is output, set pointers for next round. */
2421 /* Parser did not consume any data, make sure we don't clear the
2422 * timestamp for the next round */
2423 ffmpegdec->clear_ts = FALSE;
2426 /* if there is no output, we must break and wait for more data. also the
2427 * timestamp in the context is not updated. */
/* temporarily zero the bytes right after the chunk so the decoder sees
 * padding; the originals are saved in tmp_padding and restored below */
2441 if (ffmpegdec->do_padding) {
2442 /* add temporary padding */
2443 memcpy (tmp_padding, data + size, FF_INPUT_BUFFER_PADDING_SIZE);
2444 memset (data + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2447 /* decode a frame of audio/video now */
2449 gst_ffmpegviddec_frame (ffmpegdec, data, size, &have_data, dec_info,
2452 if (ffmpegdec->do_padding) {
2453 memcpy (data + size, tmp_padding, FF_INPUT_BUFFER_PADDING_SIZE);
2456 if (ret != GST_FLOW_OK) {
2457 GST_LOG_OBJECT (ffmpegdec, "breaking because of flow ret %s",
2458 gst_flow_get_name (ret));
2459 /* bad flow retun, make sure we discard all data and exit */
2463 if (!ffmpegdec->pctx) {
2464 if (len == 0 && !have_data) {
2465 /* nothing was decoded, this could be because no data was available or
2466 * because we were skipping frames.
2467 * If we have no context we must exit and wait for more data, we keep the
2469 GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");
2471 } else if (len < 0) {
2472 /* a decoding error happened, we must break and try again with next data. */
2473 GST_LOG_OBJECT (ffmpegdec, "Decoding error, breaking");
2477 /* prepare for the next round, for codecs with a context we did this
2478 * already when using the parser. */
2483 /* nothing was decoded, this could be because no data was available or
2484 * because we were skipping frames. Since we have a parser we can
2485 * continue with the next frame */
2486 GST_LOG_OBJECT (ffmpegdec,
2487 "Decoding didn't return any data, trying next");
2488 } else if (len < 0) {
2489 /* we have a context that will bring us to the next frame */
2490 GST_LOG_OBJECT (ffmpegdec, "Decoding error, trying next");
2494 /* make sure we don't use the same old timestamp for the next frame and let
2495 * the interpollation take care of it. */
2496 if (ffmpegdec->clear_ts) {
2497 in_timestamp = GST_CLOCK_TIME_NONE;
2498 in_duration = GST_CLOCK_TIME_NONE;
2499 in_offset = GST_BUFFER_OFFSET_NONE;
2500 in_info = GST_TS_INFO_NONE;
2502 ffmpegdec->clear_ts = TRUE;
2504 ffmpegdec->last_frames++;
2506 GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0). bsize:%d , bdata:%p",
2508 } while (bsize > 0);
2510 /* keep left-over */
2511 if (ffmpegdec->pctx && bsize > 0) {
2512 in_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
2513 in_offset = GST_BUFFER_OFFSET (inbuf);
2515 GST_LOG_OBJECT (ffmpegdec,
2516 "Keeping %d bytes of data with offset %" G_GINT64_FORMAT ", timestamp %"
2517 GST_TIME_FORMAT, bsize, in_offset, GST_TIME_ARGS (in_timestamp));
2519 ffmpegdec->pcache = gst_buffer_create_sub (inbuf,
2520 GST_BUFFER_SIZE (inbuf) - bsize, bsize);
2521 /* we keep timestamp, even though all we really know is that the correct
2522 * timestamp is not below the one from inbuf */
2523 GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = in_timestamp;
2524 GST_BUFFER_OFFSET (ffmpegdec->pcache) = in_offset;
2525 } else if (bsize > 0) {
2526 GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);
2528 gst_buffer_unref (inbuf);
/* ERROR paths below — their goto labels are not visible in this extract */
/* not_negotiated: data arrived before caps were set / codec opened */
2535 oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
2536 GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
2537 ("ffdec_%s: input format was not set before data start",
2538 oclass->in_plugin->name));
2539 gst_buffer_unref (inbuf);
2540 return GST_FLOW_NOT_NEGOTIATED;
/* skip-keyframe path: drop delta units while waiting for a keyframe */
2544 GST_DEBUG_OBJECT (ffmpegdec, "skipping non keyframe");
2545 gst_buffer_unref (inbuf);
/* GstElement state-change handler: chains up to the parent class first, then
 * on PAUSED->READY closes the codec (under the object lock) and frees the
 * queued-frame list and the input padding buffer.
 * NOTE(review): the tail of this function (default case / return) is not
 * visible in this extract. */
2550 static GstStateChangeReturn
2551 gst_ffmpegviddec_change_state (GstElement * element, GstStateChange transition)
2553 GstFFMpegVidDec *ffmpegdec = (GstFFMpegVidDec *) element;
2554 GstStateChangeReturn ret;
2556 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2558 switch (transition) {
2559 case GST_STATE_CHANGE_PAUSED_TO_READY:
/* close the codec while holding the object lock */
2560 GST_OBJECT_LOCK (ffmpegdec);
2561 gst_ffmpegviddec_close (ffmpegdec);
2562 GST_OBJECT_UNLOCK (ffmpegdec);
2563 clear_queued (ffmpegdec);
/* release the lazily-grown padding buffer used by the chain function */
2564 g_free (ffmpegdec->padded);
2565 ffmpegdec->padded = NULL;
2566 ffmpegdec->padded_size = 0;
2567 ffmpegdec->can_allocate_aligned = TRUE;
/* GObject property setter.  Several properties are mirrored straight into
 * the AVCodecContext (lowres, skip_frame, debug_mv) in addition to the
 * element's own field.
 * NOTE(review): some case labels/breaks are not visible in this extract. */
2577 gst_ffmpegviddec_set_property (GObject * object,
2578 guint prop_id, const GValue * value, GParamSpec * pspec)
2580 GstFFMpegVidDec *ffmpegdec = (GstFFMpegVidDec *) object;
/* case label not visible here; presumably PROP_LOWRES — TODO confirm */
2584 ffmpegdec->lowres = ffmpegdec->context->lowres = g_value_get_enum (value);
2586 case PROP_SKIPFRAME:
2587 ffmpegdec->skip_frame = ffmpegdec->context->skip_frame =
2588 g_value_get_enum (value);
2590 case PROP_DIRECT_RENDERING:
2591 ffmpegdec->direct_rendering = g_value_get_boolean (value);
2593 case PROP_DO_PADDING:
2594 ffmpegdec->do_padding = g_value_get_boolean (value);
/* case label not visible here; presumably PROP_DEBUG_MV — TODO confirm */
2597 ffmpegdec->debug_mv = ffmpegdec->context->debug_mv =
2598 g_value_get_boolean (value);
/* case label not visible here; presumably PROP_CROP — TODO confirm */
2601 ffmpegdec->crop = g_value_get_boolean (value);
2603 case PROP_MAX_THREADS:
2604 ffmpegdec->max_threads = g_value_get_int (value);
2607 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
/* GObject property getter — mirror of set_property above; lowres,
 * skip_frame and debug_mv are read back from the AVCodecContext.
 * NOTE(review): some case labels/breaks are not visible in this extract. */
2613 gst_ffmpegviddec_get_property (GObject * object,
2614 guint prop_id, GValue * value, GParamSpec * pspec)
2616 GstFFMpegVidDec *ffmpegdec = (GstFFMpegVidDec *) object;
/* case label not visible here; presumably PROP_LOWRES — TODO confirm */
2620 g_value_set_enum (value, ffmpegdec->context->lowres);
2622 case PROP_SKIPFRAME:
2623 g_value_set_enum (value, ffmpegdec->context->skip_frame);
2625 case PROP_DIRECT_RENDERING:
2626 g_value_set_boolean (value, ffmpegdec->direct_rendering);
2628 case PROP_DO_PADDING:
2629 g_value_set_boolean (value, ffmpegdec->do_padding);
/* case label not visible here; presumably PROP_DEBUG_MV — TODO confirm */
2632 g_value_set_boolean (value, ffmpegdec->context->debug_mv);
/* case label not visible here; presumably PROP_CROP — TODO confirm */
2635 g_value_set_boolean (value, ffmpegdec->crop);
2637 case PROP_MAX_THREADS:
2638 g_value_set_int (value, ffmpegdec->max_threads);
2641 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
2647 gst_ffmpegviddec_register (GstPlugin * plugin)
2649 GTypeInfo typeinfo = {
2650 sizeof (GstFFMpegVidDecClass),
2651 (GBaseInitFunc) gst_ffmpegviddec_base_init,
2653 (GClassInitFunc) gst_ffmpegviddec_class_init,
2656 sizeof (GstFFMpegVidDec),
2658 (GInstanceInitFunc) gst_ffmpegviddec_init,
2664 in_plugin = av_codec_next (NULL);
2666 GST_LOG ("Registering decoders");
2673 if (!in_plugin->decode) {
2677 /* no quasi-codecs, please */
2678 if (in_plugin->id == CODEC_ID_RAWVIDEO ||
2679 in_plugin->id == CODEC_ID_V210 ||
2680 in_plugin->id == CODEC_ID_V210X ||
2681 in_plugin->id == CODEC_ID_R210 ||
2682 (in_plugin->id >= CODEC_ID_PCM_S16LE &&
2683 in_plugin->id <= CODEC_ID_PCM_BLURAY)) {
2687 /* No decoders depending on external libraries (we don't build them, but
2688 * people who build against an external ffmpeg might have them.
2689 * We have native gstreamer plugins for all of those libraries anyway. */
2690 if (!strncmp (in_plugin->name, "lib", 3)) {
2692 ("Not using external library decoder %s. Use the gstreamer-native ones instead.",
2697 /* No vdpau plugins until we can figure out how to properly use them
2698 * outside of ffmpeg. */
2699 if (g_str_has_suffix (in_plugin->name, "_vdpau")) {
2701 ("Ignoring VDPAU decoder %s. We can't handle this outside of ffmpeg",
2706 if (g_str_has_suffix (in_plugin->name, "_xvmc")) {
2708 ("Ignoring XVMC decoder %s. We can't handle this outside of ffmpeg",
2713 GST_DEBUG ("Trying plugin %s [%s]", in_plugin->name, in_plugin->long_name);
2715 /* no codecs for which we're GUARANTEED to have better alternatives */
2716 /* MPEG1VIDEO : the mpeg2video decoder is preferred */
2717 /* MP1 : Use MP3 for decoding */
2718 /* MP2 : Use MP3 for decoding */
2719 /* Theora: Use libtheora based theoradec */
2720 if (!strcmp (in_plugin->name, "gif") ||
2721 !strcmp (in_plugin->name, "theora") ||
2722 !strcmp (in_plugin->name, "mpeg1video") ||
2723 !strcmp (in_plugin->name, "ass") ||
2724 !strcmp (in_plugin->name, "srt") ||
2725 !strcmp (in_plugin->name, "pgssub") ||
2726 !strcmp (in_plugin->name, "dvdsub") ||
2727 !strcmp (in_plugin->name, "dvbsub")) {
2728 GST_LOG ("Ignoring decoder %s", in_plugin->name);
2732 /* construct the type */
2733 plugin_name = g_strdup ((gchar *) in_plugin->name);
2734 g_strdelimit (plugin_name, NULL, '_');
2735 type_name = g_strdup_printf ("ffdec_%s", plugin_name);
2736 g_free (plugin_name);
2738 type = g_type_from_name (type_name);
2741 /* create the gtype now */
2742 type = g_type_register_static (GST_TYPE_ELEMENT, type_name, &typeinfo, 0);
2743 g_type_set_qdata (type, GST_FFDEC_PARAMS_QDATA, (gpointer) in_plugin);
2746 /* (Ronald) MPEG-4 gets a higher priority because it has been well-
2747 * tested and by far outperforms divxdec/xviddec - so we prefer it.
2748 * msmpeg4v3 same, as it outperforms divxdec for divx3 playback.
2749 * VC1/WMV3 are not working and thus unpreferred for now. */
2750 switch (in_plugin->id) {
2751 case CODEC_ID_MPEG4:
2752 case CODEC_ID_MSMPEG4V3:
2758 rank = GST_RANK_PRIMARY;
2760 /* DVVIDEO: we have a good dv decoder, fast on both ppc as well as x86.
2761 * They say libdv's quality is better though. leave as secondary.
2762 * note: if you change this, see the code in gstdv.c in good/ext/dv.
2764 case CODEC_ID_DVVIDEO:
2765 rank = GST_RANK_SECONDARY;
2768 rank = GST_RANK_MARGINAL;
2771 if (!gst_element_register (plugin, type_name, rank, type)) {
2772 g_warning ("Failed to register %s", type_name);
2780 in_plugin = av_codec_next (in_plugin);
2783 GST_LOG ("Finished Registering decoders");