2 * Copyright (C) 2008 David Schleef <ds@schleef.org>
3 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
4 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
5 * Contact: Stefan Kost <stefan.kost@nokia.com>
6 * Copyright (C) 2012 Collabora Ltd.
7 * Author : Edward Hervey <edward@collabora.com>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:gstvideodecoder
27 * @title: GstVideoDecoder
28 * @short_description: Base class for video decoders
30 * This base class is for video decoders turning encoded data into raw video
33 * The GstVideoDecoder base class and derived subclasses should cooperate as
38 * * Initially, GstVideoDecoder calls @start when the decoder element
39 * is activated, which allows the subclass to perform any global setup.
41 * * GstVideoDecoder calls @set_format to inform the subclass of caps
42 * describing input video data that it is about to receive, including
43 * possibly configuration data.
44 * While unlikely, it might be called more than once, if changing input
45 * parameters require reconfiguration.
47 * * Incoming data buffers are processed as needed, described in Data
50 * * GstVideoDecoder calls @stop at end of all processing.
54 * * The base class gathers input data, and optionally allows subclass
55 * to parse this into subsequently manageable chunks, typically
56 * corresponding to and referred to as 'frames'.
58 * * Each input frame is provided in turn to the subclass' @handle_frame
60 * The ownership of the frame is given to the @handle_frame callback.
62 * * If codec processing results in decoded data, the subclass should call
63 * @gst_video_decoder_finish_frame to have decoded data pushed
64 * downstream. Otherwise, the subclass must call
65 * @gst_video_decoder_drop_frame, to allow the base class to do timestamp
66 * and offset tracking, and possibly to requeue the frame for a later
67 * attempt in the case of reverse playback.
71 * * The GstVideoDecoder class calls @stop to inform the subclass that data
72 * parsing will be stopped.
78 * * When the pipeline is seeked or otherwise flushed, the subclass is
79 * informed via a call to its @reset callback, with the hard parameter
80 * set to true. This indicates the subclass should drop any internal data
81 * queues and timestamps and prepare for a fresh set of buffers to arrive
82 * for parsing and decoding.
86 * * At end-of-stream, the subclass @parse function may be called some final
87 * times with the at_eos parameter set to true, indicating that the element
88 * should not expect any more data to be arriving, and it should parse any
89 * remaining frames and call gst_video_decoder_have_frame() if possible.
91 * The subclass is responsible for providing pad template caps for
92 * source and sink pads. The pads need to be named "sink" and "src". It also
93 * needs to provide information about the output caps, when they are known.
94 * This may be when the base class calls the subclass' @set_format function,
95 * though it might be during decoding, before calling
96 * @gst_video_decoder_finish_frame. This is done via
97 * @gst_video_decoder_set_output_state
99 * The subclass is also responsible for providing (presentation) timestamps
100 * (likely based on corresponding input ones). If that is not applicable
101 * or possible, the base class provides limited framerate based interpolation.
103 * Similarly, the base class provides some limited (legacy) seeking support
104 * if specifically requested by the subclass, as full-fledged support
105 * should rather be left to upstream demuxer, parser or alike. This simple
106 * approach caters for seeking and duration reporting using estimated input
107 * bitrates. To enable it, a subclass should call
108 * @gst_video_decoder_set_estimate_rate to enable handling of incoming
111 * The base class provides some support for reverse playback, in particular
112 * in case incoming data is not packetized or upstream does not provide
113 * fragments on keyframe boundaries. However, the subclass should then be
114 * prepared for the parsing and frame processing stage to occur separately
115 * (in normal forward processing, the latter immediately follows the former).
116 * The subclass also needs to ensure the parsing stage properly marks
117 * keyframes, unless it knows the upstream elements will do so properly for
120 * The bare minimum that a functional subclass needs to implement is:
122 * * Provide pad templates
123 * * Inform the base class of output caps via
124 * @gst_video_decoder_set_output_state
126 * * Parse input data, if it is not considered packetized from upstream
127 * Data will be provided to @parse which should invoke
128 * @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
129 * separate the data belonging to each video frame.
131 * * Accept data in @handle_frame and provide decoded results to
132 * @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
141 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
142 * features, like applying QoS on input (as opposed to after the frame is
144 * * Add a flag/boolean for decoders that require keyframes, so the base
145 * class can automatically discard non-keyframes before one has arrived
146 * * Detect reordered frame/timestamps and fix the pts/dts
147 * * Support for GstIndex (or shall we not care ?)
148 * * Calculate actual latency based on input/output timestamp/frame_number
149 * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
150 * * Emit latency message when it changes
154 /* Implementation notes:
155 * The Video Decoder base class operates in 2 primary processing modes, depending
156 * on whether forward or reverse playback is requested.
159 * * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
160 * handle_frame() -> push downstream
162 * Reverse playback is more complicated, since it involves gathering incoming
163 * data regions as we loop backwards through the upstream data. The processing
164 * concept (using incoming buffers as containing one frame each to simplify
167 * Upstream data we want to play:
168 * Buffer encoded order: 1 2 3 4 5 6 7 8 9 EOS
170 * Groupings: AAAAAAA BBBBBBB CCCCCCC
173 * Buffer reception order: 7 8 9 4 5 6 1 2 3 EOS
175 * Discont flag: D D D
177 * - Each Discont marks a discont in the decoding order.
178 * - The keyframes mark where we can start decoding.
180 * Initially, we prepend incoming buffers to the gather queue. Whenever the
181 * discont flag is set on an incoming buffer, the gather queue is flushed out
182 * before the new buffer is collected.
184 * The above data will be accumulated in the gather queue like this:
186 * gather queue: 9 8 7
189 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
193 * take head of queue and prepend to parse queue (this reverses the
194 * sequence, so parse queue is 7 -> 8 -> 9)
196 * Next, we process the parse queue, which now contains all un-parsed packets
197 * (including any leftover ones from the previous decode section)
199 * for each buffer now in the parse queue:
200 * Call the subclass parse function, prepending each resulting frame to
201 * the parse_gather queue. Buffers which precede the first one that
202 * produces a parsed frame are retained in the parse queue for
203 * re-processing on the next cycle of parsing.
205 * The parse_gather queue now contains frame objects ready for decoding,
207 * parse_gather: 9 -> 8 -> 7
209 * while (parse_gather)
210 * Take the head of the queue and prepend it to the decode queue
211 * If the frame was a keyframe, process the decode queue
212 * decode is now 7-8-9
214 * Processing the decode queue results in frames with attached output buffers
215 * stored in the 'output_queue' ready for outputting in reverse order.
217 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
218 * gather queue. We get the following situation:
221 * decode queue: 7 8 9
223 * After we received 5 (Keyframe) and 6:
225 * gather queue: 6 5 4
226 * decode queue: 7 8 9
228 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
230 * Copy head of the gather queue (6) to decode queue:
233 * decode queue: 6 7 8 9
235 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
236 * can start decoding.
239 * decode queue: 5 6 7 8 9
241 * Decode frames in decode queue, store raw decoded data in output queue, we
242 * can take the head of the decode queue and prepend the decoded result in the
247 * output queue: 9 8 7 6 5
249 * Now output all the frames in the output queue, picking a frame from the
252 * Copy head of the gather queue (4) to decode queue, we flushed the gather
253 * queue and can now store input buffer in the gather queue:
258 * When we receive EOS, the queue looks like:
260 * gather queue: 3 2 1
263 * Fill decode queue, first keyframe we copy is 2:
266 * decode queue: 2 3 4
272 * output queue: 4 3 2
274 * Leftover buffer 1 cannot be decoded and must be discarded.
277 #include "gstvideodecoder.h"
278 #include "gstvideoutils.h"
279 #include "gstvideoutilsprivate.h"
281 #include <gst/video/video.h>
282 #include <gst/video/video-event.h>
283 #include <gst/video/gstvideopool.h>
284 #include <gst/video/gstvideometa.h>
/* Debug category used by all GST_DEBUG/LOG statements in this file. */
287 GST_DEBUG_CATEGORY (videodecoder_debug);
288 #define GST_CAT_DEFAULT videodecoder_debug
/* Default values for the installed GObject properties (see class_init). */
291 #define DEFAULT_QOS TRUE
292 #define DEFAULT_MAX_ERRORS GST_VIDEO_DECODER_MAX_ERRORS
293 #define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
294 #define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE
296 /* Used for request_sync_point_frame_number. These are out of range for the
297  * frame numbers and can be given special meaning */
/* NOTE: the original expansion was `G_MAXUINT + 1` — unparenthesized, and
 * evaluated in `unsigned int` arithmetic, so it wrapped around to 0, which
 * is a perfectly valid frame number rather than an out-of-range sentinel.
 * Cast to guint64 first so the addition happens in 64-bit arithmetic and
 * yields 2^32, and parenthesize the whole expansion so the macro can be
 * used safely inside larger expressions. */
298 #define REQUEST_SYNC_POINT_PENDING ((guint64) G_MAXUINT + 1)
299 #define REQUEST_SYNC_POINT_UNSET G_MAXUINT64
306 PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
307 PROP_DISCARD_CORRUPTED_FRAMES
310 struct _GstVideoDecoderPrivate
312 /* FIXME introduce a context ? */
315 GstAllocator *allocator;
316 GstAllocationParams params;
320 GstAdapter *input_adapter;
321 /* assembles current frame */
322 GstAdapter *output_adapter;
324 /* Whether we attempt to convert newsegment from bytes to
325 * time using a bitrate estimation */
326 gboolean do_estimate_rate;
328 /* Whether input is considered packetized or not */
334 gboolean had_output_data;
335 gboolean had_input_data;
337 gboolean needs_format;
338 /* input_segment are output_segment identical */
339 gboolean in_out_segment_sync;
341 /* TRUE if we have an active set of instant rate flags */
342 gboolean decode_flags_override;
343 GstSegmentFlags decode_flags;
345 /* ... being tracked here;
346 * only available during parsing */
347 GstVideoCodecFrame *current_frame;
348 /* events that should apply to the current frame */
349 /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
350 GList *current_frame_events;
351 /* events that should be pushed before the next frame */
352 /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
353 GList *pending_events;
355 /* relative offset of input data */
356 guint64 input_offset;
357 /* relative offset of frame */
358 guint64 frame_offset;
359 /* tracking ts and offsets */
362 /* last outgoing ts */
363 GstClockTime last_timestamp_out;
364 /* incoming pts - dts */
365 GstClockTime pts_delta;
366 gboolean reordered_output;
368 /* FIXME: Consider using a GQueue or other better fitting data structure */
369 /* reverse playback */
374 /* collected parsed frames */
376 /* frames to be handled == decoded */
378 /* collected output - of buffer objects, not frames */
379 GList *output_queued;
382 /* base_picture_number is the picture number of the reference picture */
383 guint64 base_picture_number;
384 /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
385 GstClockTime base_timestamp;
388 GstClockTime min_force_key_unit_interval;
389 gboolean discard_corrupted_frames;
391 /* Key unit related state */
392 gboolean needs_sync_point;
393 GstVideoDecoderRequestSyncPointFlags request_sync_point_flags;
394 guint64 request_sync_point_frame_number;
395 GstClockTime last_force_key_unit_time;
396 /* -1 if we saw no sync point yet */
397 guint64 distance_from_sync;
399 guint32 system_frame_number;
400 guint32 decode_frame_number;
402 GQueue frames; /* Protected with OBJECT_LOCK */
403 GstVideoCodecState *input_state;
404 GstVideoCodecState *output_state; /* OBJECT_LOCK and STREAM_LOCK */
405 gboolean output_state_changed;
409 gdouble proportion; /* OBJECT_LOCK */
410 GstClockTime earliest_time; /* OBJECT_LOCK */
411 GstClockTime qos_frame_duration; /* OBJECT_LOCK */
413 /* qos messages: frames dropped/processed */
417 /* Outgoing byte size ? */
424 /* upstream stream tags (global tags are passed through as-is) */
425 GstTagList *upstream_tags;
429 GstTagMergeMode tags_merge_mode;
431 gboolean tags_changed;
434 gboolean use_default_pad_acceptcaps;
436 #ifndef GST_DISABLE_DEBUG
437 /* Diagnostic time for reporting the time
438 * from flush to first output */
439 GstClockTime last_reset_time;
/* Parent class pointer, filled in by class_init via g_type_class_peek_parent(). */
443 static GstElementClass *parent_class = NULL;
/* Offset of the instance-private struct; adjusted in class_init. */
444 static gint private_offset = 0;
446 /* cached quark to avoid contention on the global quark table lock */
447 #define META_TAG_VIDEO meta_tag_video_quark
448 static GQuark meta_tag_video_quark;
450 static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
451 static void gst_video_decoder_init (GstVideoDecoder * dec,
452 GstVideoDecoderClass * klass);
454 static void gst_video_decoder_finalize (GObject * object);
455 static void gst_video_decoder_get_property (GObject * object, guint property_id,
456 GValue * value, GParamSpec * pspec);
457 static void gst_video_decoder_set_property (GObject * object, guint property_id,
458 const GValue * value, GParamSpec * pspec);
460 static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
462 static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
464 static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
466 static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
468 static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
470 static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
471 element, GstStateChange transition);
472 static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
474 static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
475 gboolean flush_hard);
477 static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
478 GstVideoCodecFrame * frame);
480 static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
482 static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
483 decoder, GstVideoCodecFrame * frame);
484 static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
486 static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
487 decoder, GstBuffer * buf);
488 static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
491 static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
493 static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
495 static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
497 static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
498 decoder, GstQuery * query);
499 static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
500 decoder, GstQuery * query);
501 static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
502 static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
503 gboolean at_eos, gboolean new_buffer);
504 static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
506 static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
508 static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
511 static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
512 decoder, GstVideoCodecFrame * frame, GstMeta * meta);
514 /* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
515 * method to get to the padtemplates */
/* Thread-safe, one-shot registration of the abstract GstVideoDecoder GType.
 * NOTE(review): this excerpt is missing several lines of the definition
 * (return-type line, braces, remaining GTypeInfo fields) — do not edit the
 * logic from this view alone. */
517 gst_video_decoder_get_type (void)
519 static gsize type = 0;
/* g_once_init_enter/leave guarantee registration happens exactly once
 * even when multiple threads race here. */
521 if (g_once_init_enter (&type)) {
523 static const GTypeInfo info = {
524 sizeof (GstVideoDecoderClass),
527 (GClassInitFunc) gst_video_decoder_class_init,
530 sizeof (GstVideoDecoder),
532 (GInstanceInitFunc) gst_video_decoder_init,
535 _type = g_type_register_static (GST_TYPE_ELEMENT,
536 "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);
/* Reserve per-instance private storage; the offset is recorded in
 * private_offset and later adjusted in class_init. */
539 g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));
541 g_once_init_leave (&type, _type);
/* Returns the private data area of @self, located private_offset bytes from
 * the instance pointer (offset reserved in gst_video_decoder_get_type()). */
546 static inline GstVideoDecoderPrivate *
547 gst_video_decoder_get_instance_private (GstVideoDecoder * self)
549 return (G_STRUCT_MEMBER_P (self, private_offset));
/* Class initializer: wires up GObject vfuncs, default event/query/allocation
 * handlers, and installs the four public properties (qos, max-errors,
 * min-force-key-unit-interval, discard-corrupted-frames). */
553 gst_video_decoder_class_init (GstVideoDecoderClass * klass)
555 GObjectClass *gobject_class;
556 GstElementClass *gstelement_class;
558 gobject_class = G_OBJECT_CLASS (klass);
559 gstelement_class = GST_ELEMENT_CLASS (klass);
561 GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
562 "Base Video Decoder");
564 parent_class = g_type_class_peek_parent (klass);
/* Convert the private-storage offset reserved in get_type() into a
 * negative offset usable from instance pointers. */
566 if (private_offset != 0)
567 g_type_class_adjust_private_offset (klass, &private_offset);
569 gobject_class->finalize = gst_video_decoder_finalize;
570 gobject_class->get_property = gst_video_decoder_get_property;
571 gobject_class->set_property = gst_video_decoder_set_property;
573 gstelement_class->change_state =
574 GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);
/* Default implementations; subclasses may override any of these. */
576 klass->sink_event = gst_video_decoder_sink_event_default;
577 klass->src_event = gst_video_decoder_src_event_default;
578 klass->decide_allocation = gst_video_decoder_decide_allocation_default;
579 klass->propose_allocation = gst_video_decoder_propose_allocation_default;
580 klass->negotiate = gst_video_decoder_negotiate_default;
581 klass->sink_query = gst_video_decoder_sink_query_default;
582 klass->src_query = gst_video_decoder_src_query_default;
583 klass->transform_meta = gst_video_decoder_transform_meta_default;
586 * GstVideoDecoder:qos:
588 * If set to %TRUE the decoder will handle QoS events received
589 * from downstream elements.
590 * This includes dropping output frames which are detected as late
591 * using the metrics reported by those events.
595 g_object_class_install_property (gobject_class, PROP_QOS,
596 g_param_spec_boolean ("qos", "Quality of Service",
597 "Handle Quality-of-Service events from downstream",
598 DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
601 * GstVideoDecoder:max-errors:
603 * Maximum number of tolerated consecutive decode errors. See
604 * gst_video_decoder_set_max_errors() for more details.
608 g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
609 g_param_spec_int ("max-errors", "Max errors",
610 "Max consecutive decoder errors before returning flow error",
611 -1, G_MAXINT, DEFAULT_MAX_ERRORS,
612 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
615 * GstVideoDecoder:min-force-key-unit-interval:
617 * Minimum interval between force-key-unit events sent upstream by the
618 * decoder. Setting this to 0 will cause every event to be handled, setting
619 * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
621 * See gst_video_event_new_upstream_force_key_unit() for more details about
622 * force-key-unit events.
626 g_object_class_install_property (gobject_class,
627 PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
628 g_param_spec_uint64 ("min-force-key-unit-interval",
629 "Minimum Force Keyunit Interval",
630 "Minimum interval between force-keyunit requests in nanoseconds", 0,
631 G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
632 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
635 * GstVideoDecoder:discard-corrupted-frames:
637 * If set to %TRUE the decoder will discard frames that are marked as
638 * corrupted instead of outputting them.
642 g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
643 g_param_spec_boolean ("discard-corrupted-frames",
644 "Discard Corrupted Frames",
645 "Discard frames marked as corrupted instead of outputting them",
646 DEFAULT_DISCARD_CORRUPTED_FRAMES,
647 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
/* Intern the video meta tag once to avoid quark-table lock contention. */
649 meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
/* Instance initializer: creates sink/src pads from the subclass' pad
 * templates (subclass MUST provide "sink" and "src" templates), installs
 * the chain/event/query functions, and initializes all private state. */
653 gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
655 GstPadTemplate *pad_template;
658 GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");
660 decoder->priv = gst_video_decoder_get_instance_private (decoder);
/* Sink pad: fails loudly (g_return_if_fail) if the subclass did not
 * register a "sink" pad template. */
663 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
664 g_return_if_fail (pad_template != NULL);
666 decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");
668 gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
669 gst_pad_set_event_function (pad,
670 GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
671 gst_pad_set_query_function (pad,
672 GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
673 gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);
/* Source pad: same requirement for a "src" template. */
676 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
677 g_return_if_fail (pad_template != NULL);
679 decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
681 gst_pad_set_event_function (pad,
682 GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
683 gst_pad_set_query_function (pad,
684 GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
685 gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);
687 gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
688 gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);
690 g_rec_mutex_init (&decoder->stream_lock);
/* input_adapter accumulates undecoded input; output_adapter assembles
 * the current frame when the subclass parses in pieces. */
692 decoder->priv->input_adapter = gst_adapter_new ();
693 decoder->priv->output_adapter = gst_adapter_new ();
694 decoder->priv->packetized = TRUE;
695 decoder->priv->needs_format = FALSE;
697 g_queue_init (&decoder->priv->frames);
698 g_queue_init (&decoder->priv->timestamps);
701 decoder->priv->do_qos = DEFAULT_QOS;
702 decoder->priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;
704 decoder->priv->min_latency = 0;
705 decoder->priv->max_latency = 0;
/* Full hard reset to put the remaining private state in a known state. */
707 gst_video_decoder_reset (decoder, TRUE, TRUE);
/* Creates a new input GstVideoCodecState (ref_count 1) from @caps.
 * Takes a ref on @caps and duplicates any "codec_data" buffer found in the
 * caps structure. Returns NULL (via the error path, partially elided in
 * this excerpt) if the caps cannot be parsed into a GstVideoInfo. */
710 static GstVideoCodecState *
711 _new_input_state (GstCaps * caps)
713 GstVideoCodecState *state;
714 GstStructure *structure;
715 const GValue *codec_data;
717 state = g_slice_new0 (GstVideoCodecState);
718 state->ref_count = 1;
719 gst_video_info_init (&state->info);
720 if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
722 state->caps = gst_caps_ref (caps);
724 structure = gst_caps_get_structure (caps, 0);
/* Optional out-of-band configuration (e.g. SPS/PPS) carried in caps. */
726 codec_data = gst_structure_get_value (structure, "codec_data");
727 if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
728 state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));
/* Error path: release the half-built state. */
734 g_slice_free (GstVideoCodecState, state);
/* Creates a new output GstVideoCodecState for @fmt/@width/@height.
 * When @reference is non-NULL, carries over PAR, framerate, colorimetry,
 * chroma siting, flags, field order, view count and multiview settings
 * from the reference (input) state; @copy_interlace_mode additionally
 * controls whether the interlace mode is inherited. */
739 static GstVideoCodecState *
740 _new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
741 guint width, guint height, GstVideoCodecState * reference,
742 gboolean copy_interlace_mode)
744 GstVideoCodecState *state;
746 state = g_slice_new0 (GstVideoCodecState);
747 state->ref_count = 1;
748 gst_video_info_init (&state->info);
/* Bail out if the format/size combination is invalid. */
749 if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
751 g_slice_free (GstVideoCodecState, state);
756 GstVideoInfo *tgt, *ref;
759 ref = &reference->info;
761 /* Copy over extra fields from reference state */
762 if (copy_interlace_mode)
763 tgt->interlace_mode = ref->interlace_mode;
764 tgt->flags = ref->flags;
765 tgt->chroma_site = ref->chroma_site;
766 tgt->colorimetry = ref->colorimetry;
767 GST_DEBUG ("reference par %d/%d fps %d/%d",
768 ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
769 tgt->par_n = ref->par_n;
770 tgt->par_d = ref->par_d;
771 tgt->fps_n = ref->fps_n;
772 tgt->fps_d = ref->fps_d;
773 tgt->views = ref->views;
775 GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);
/* Only propagate multiview settings when the reference actually has them. */
777 if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
778 GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
779 GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
780 GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
782 /* Default to MONO, overridden as needed by sub-classes */
783 GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
784 GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
788 GST_DEBUG ("reference par %d/%d fps %d/%d",
789 state->info.par_n, state->info.par_d,
790 state->info.fps_n, state->info.fps_d);
/* Handles new input caps on the sink pad (called under no lock; takes the
 * stream lock itself). Short-circuits when caps are unchanged, otherwise
 * builds a new input state and offers it to the subclass' @set_format.
 * On acceptance the previous input state is unreffed and replaced. */
796 gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
798 GstVideoDecoderClass *decoder_class;
799 GstVideoCodecState *state;
802 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
804 GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);
806 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* Fast path: identical caps need no subclass reconfiguration. */
808 if (decoder->priv->input_state) {
809 GST_DEBUG_OBJECT (decoder,
810 "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
811 decoder->priv->input_state->caps, caps);
812 if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
813 goto caps_not_changed;
816 state = _new_input_state (caps);
818 if (G_UNLIKELY (state == NULL))
/* @set_format is optional; absence means unconditional acceptance. */
821 if (decoder_class->set_format)
822 ret = decoder_class->set_format (decoder, state);
827 if (decoder->priv->input_state)
828 gst_video_codec_state_unref (decoder->priv->input_state);
829 decoder->priv->input_state = state;
831 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* caps_not_changed: */
837 GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
838 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* parse failure: */
845 GST_WARNING_OBJECT (decoder, "Failed to parse caps");
846 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* subclass rejected the new format: */
852 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
853 GST_WARNING_OBJECT (decoder, "Subclass refused caps");
854 gst_video_codec_state_unref (state);
/* GObject finalize: releases everything the instance owns — stream lock,
 * adapters, codec states, buffer pool and allocator — then chains up. */
860 gst_video_decoder_finalize (GObject * object)
862 GstVideoDecoder *decoder;
864 decoder = GST_VIDEO_DECODER (object);
866 GST_DEBUG_OBJECT (object, "finalize");
868 g_rec_mutex_clear (&decoder->stream_lock);
870 if (decoder->priv->input_adapter) {
871 g_object_unref (decoder->priv->input_adapter);
872 decoder->priv->input_adapter = NULL;
874 if (decoder->priv->output_adapter) {
875 g_object_unref (decoder->priv->output_adapter);
876 decoder->priv->output_adapter = NULL;
879 if (decoder->priv->input_state)
880 gst_video_codec_state_unref (decoder->priv->input_state);
881 if (decoder->priv->output_state)
882 gst_video_codec_state_unref (decoder->priv->output_state);
884 if (decoder->priv->pool) {
885 gst_object_unref (decoder->priv->pool);
886 decoder->priv->pool = NULL;
889 if (decoder->priv->allocator) {
890 gst_object_unref (decoder->priv->allocator);
891 decoder->priv->allocator = NULL;
/* Chain up so GstElement/GObject cleanup also runs. */
894 G_OBJECT_CLASS (parent_class)->finalize (object);
/* Standard GObject property getter for the four installed properties.
 * (case labels / break statements are elided in this excerpt.) */
898 gst_video_decoder_get_property (GObject * object, guint property_id,
899 GValue * value, GParamSpec * pspec)
901 GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
902 GstVideoDecoderPrivate *priv = dec->priv;
904 switch (property_id) {
906 g_value_set_boolean (value, priv->do_qos);
908 case PROP_MAX_ERRORS:
909 g_value_set_int (value, gst_video_decoder_get_max_errors (dec));
911 case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
912 g_value_set_uint64 (value, priv->min_force_key_unit_interval);
914 case PROP_DISCARD_CORRUPTED_FRAMES:
915 g_value_set_boolean (value, priv->discard_corrupted_frames);
918 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
/* Standard GObject property setter, mirroring get_property above.
 * max-errors goes through the public setter for consistent semantics. */
924 gst_video_decoder_set_property (GObject * object, guint property_id,
925 const GValue * value, GParamSpec * pspec)
927 GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
928 GstVideoDecoderPrivate *priv = dec->priv;
930 switch (property_id) {
932 priv->do_qos = g_value_get_boolean (value);
934 case PROP_MAX_ERRORS:
935 gst_video_decoder_set_max_errors (dec, g_value_get_int (value));
937 case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
938 priv->min_force_key_unit_interval = g_value_get_uint64 (value);
940 case PROP_DISCARD_CORRUPTED_FRAMES:
941 priv->discard_corrupted_frames = g_value_get_boolean (value);
944 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
949 /* hard == FLUSH, otherwise discont */
/* Flushes the decoder: notifies the subclass via its (deprecated) @reset
 * vfunc when present, then resets base-class bookkeeping. A hard flush
 * corresponds to a FLUSH event; a soft one to a discontinuity. */
951 gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
953 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
954 GstFlowReturn ret = GST_FLOW_OK;
956 GST_LOG_OBJECT (dec, "flush hard %d", hard);
958 /* Inform subclass */
960 GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
961 klass->reset (dec, hard);
967 /* and get (re)set for the sequel */
968 gst_video_decoder_reset (dec, FALSE, hard);
/* Builds a TAG event from the upstream stream tags merged with the
 * decoder's own tags (using the configured merge mode). Returns NULL
 * when the merge produces nothing or only an empty list — callers must
 * handle a NULL return. */
974 gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
976 GstTagList *merged_tags;
978 GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
979 GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->tags);
980 GST_LOG_OBJECT (dec, "mode : %d", dec->priv->tags_merge_mode);
983 gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
984 dec->priv->tags_merge_mode);
986 GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);
988 if (merged_tags == NULL)
/* An empty merged list is as useless as none: drop it. */
991 if (gst_tag_list_is_empty (merged_tags)) {
992 gst_tag_list_unref (merged_tags);
/* gst_event_new_tag() takes ownership of merged_tags. */
996 return gst_event_new_tag (merged_tags);
/* Pushes @event on the source pad. SEGMENT events are intercepted first:
 * the output segment is updated, in/out segment sync is recomputed, and
 * outgoing-timestamp / QoS tracking is reset for the new segment.
 * Takes ownership of @event; returns the result of gst_pad_push_event(). */
1000 gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
1002 switch (GST_EVENT_TYPE (event)) {
1003 case GST_EVENT_SEGMENT:
1007 gst_event_copy_segment (event, &segment);
1009 GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
/* Only TIME segments are tracked; others pass through untouched. */
1011 if (segment.format != GST_FORMAT_TIME) {
1012 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
1016 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1017 decoder->output_segment = segment;
1018 decoder->priv->in_out_segment_sync =
1019 gst_segment_is_equal (&decoder->input_segment, &segment);
1020 decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
1021 decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
1022 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1029 GST_DEBUG_OBJECT (decoder, "pushing event %s",
1030 gst_event_type_get_name (GST_EVENT_TYPE (event)));
1032 return gst_pad_push_event (decoder->srcpad, event);
/* Repeatedly hands the input adapter to the subclass' @parse vfunc until
 * the data is consumed. @at_eos is forwarded to @parse; @new_buffer forces
 * at least one iteration even with an empty adapter. Guards against a
 * misbehaving subclass that returns GST_FLOW_OK without consuming anything
 * by erroring out after two inactive iterations. */
1035 static GstFlowReturn
1036 gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
1037 gboolean new_buffer)
1039 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
1040 GstVideoDecoderPrivate *priv = dec->priv;
1041 GstFlowReturn ret = GST_FLOW_OK;
1042 gsize was_available, available;
1045 available = gst_adapter_available (priv->input_adapter);
1047 while (available || new_buffer) {
1049 /* current frame may have been parsed and handled,
1050 * so we need to set up a new one when asking subclass to parse */
1051 if (priv->current_frame == NULL)
1052 priv->current_frame = gst_video_decoder_new_frame (dec);
1054 was_available = available;
1055 ret = decoder_class->parse (dec, priv->current_frame,
1056 priv->input_adapter, at_eos);
1057 if (ret != GST_FLOW_OK)
1060 /* if the subclass returned success (GST_FLOW_OK), it is expected
1061 * to have collected and submitted a frame, i.e. it should have
1062 * called gst_video_decoder_have_frame(), or at least consumed a
1063 * few bytes through gst_video_decoder_add_to_frame().
1065 * Otherwise, this is an implementation bug, and we error out
1066 * after 2 failed attempts */
1067 available = gst_adapter_available (priv->input_adapter);
1068 if (!priv->current_frame || available != was_available)
1070 else if (++inactive == 2)
1071 goto error_inactive;
/* error_inactive: subclass made no progress twice in a row. */
1079 GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
1080 return GST_FLOW_ERROR;
/* Drain all pending data out of the decoder.
 * Forward playback (rate > 0): for unpacketized input, first let the
 * subclass parse whatever is left in the adapter (at_eos=TRUE), then
 * call ->finish() (when @at_eos) or ->drain() to flush decoded frames.
 * Reverse playback: delegate to gst_video_decoder_flush_parse().
 * Must be called with the stream lock taken (see comment below). */
1084 /* This function has to be called with the stream lock taken. */
1085 static GstFlowReturn
1086 gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
1088 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
1089 GstVideoDecoderPrivate *priv = dec->priv;
1090 GstFlowReturn ret = GST_FLOW_OK;
1092 if (dec->input_segment.rate > 0.0) {
1093 /* Forward mode, if unpacketized, give the child class
1094 * a final chance to flush out packets */
1095 if (!priv->packetized) {
1096 ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
1100 if (decoder_class->finish)
1101 ret = decoder_class->finish (dec);
1103 if (decoder_class->drain) {
1104 ret = decoder_class->drain (dec);
1106 GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
1110 /* Reverse playback mode */
1111 ret = gst_video_decoder_flush_parse (dec, TRUE);
/* Dispose of a list of pending events on flush: sticky events (except
 * EOS and SEGMENT) are re-stored on @pad so they survive the flush;
 * everything is then unreffed and the list freed.
 * Returns the replacement list (presumably NULL after the free —
 * the return statement is outside this view). */
1118 _flush_events (GstPad * pad, GList * events)
1122 for (tmp = events; tmp; tmp = tmp->next) {
1123 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
1124 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
1125 GST_EVENT_IS_STICKY (tmp->data)) {
1126 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
1128 gst_event_unref (tmp->data);
1130 g_list_free (events);
/* Pick sensible default output caps when a GAP event arrives before any
 * real output was negotiated: intersect the src template with downstream
 * caps, seed width/height from the sink caps when available, fixate the
 * remainder towards I420 1280x720, and install the result as the output
 * state.  Must be called holding the stream lock (see below). */
1135 /* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
1137 gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
1139 GstCaps *caps, *templcaps;
1140 GstVideoCodecState *state;
1144 GstStructure *structure;
1146 templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
1147 caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
1149 gst_caps_unref (templcaps);
/* bail out when downstream gives us nothing usable */
1154 if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
1157 GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);
1159 /* before fixating, try to use whatever upstream provided */
1160 caps = gst_caps_make_writable (caps);
1161 caps_size = gst_caps_get_size (caps);
1162 if (decoder->priv->input_state && decoder->priv->input_state->caps) {
1163 GstCaps *sinkcaps = decoder->priv->input_state->caps;
1164 GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
1167 if (gst_structure_get_int (structure, "width", &width)) {
1168 for (i = 0; i < caps_size; i++) {
1169 gst_structure_set (gst_caps_get_structure (caps, i), "width",
1170 G_TYPE_INT, width, NULL);
1174 if (gst_structure_get_int (structure, "height", &height)) {
1175 for (i = 0; i < caps_size; i++) {
1176 gst_structure_set (gst_caps_get_structure (caps, i), "height",
1177 G_TYPE_INT, height, NULL);
/* fixate any remaining free fields towards an arbitrary default */
1182 for (i = 0; i < caps_size; i++) {
1183 structure = gst_caps_get_structure (caps, i);
1184 /* Random I420 1280x720 for fixation */
1185 if (gst_structure_has_field (structure, "format"))
1186 gst_structure_fixate_field_string (structure, "format", "I420");
1188 gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);
1190 if (gst_structure_has_field (structure, "width"))
1191 gst_structure_fixate_field_nearest_int (structure, "width", 1280);
1193 gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);
1195 if (gst_structure_has_field (structure, "height"))
1196 gst_structure_fixate_field_nearest_int (structure, "height", 720);
1198 gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
1200 caps = gst_caps_fixate (caps);
1202 if (!caps || !gst_video_info_from_caps (&info, caps))
1205 GST_INFO_OBJECT (decoder,
1206 "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
/* install the chosen format as output state; the returned state ref is
 * dropped right away since priv->output_state keeps its own */
1208 gst_video_decoder_set_output_state (decoder, info.finfo->format,
1209 info.width, info.height, decoder->priv->input_state);
1210 gst_video_codec_state_unref (state);
1211 gst_caps_unref (caps);
/* error path: release caps before returning failure */
1218 gst_caps_unref (caps);
/* Default handler for events arriving on the sink pad.
 * General pattern: events that terminate or partition the stream
 * (STREAM_START, SEGMENT_DONE, EOS, GAP, still-frame, FLUSH_STOP) first
 * drain the decoder under the stream lock, then set forward_immediate so
 * the event is pushed downstream right away; all other serialized events
 * are queued on current_frame_events to be sent in order with frames.
 * Returns TRUE when the event was handled successfully. */
1224 gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
1227 GstVideoDecoderPrivate *priv;
1228 gboolean ret = FALSE;
1229 gboolean forward_immediate = FALSE;
1231 priv = decoder->priv;
1233 switch (GST_EVENT_TYPE (event)) {
1234 case GST_EVENT_STREAM_START:
1236 GstFlowReturn flow_ret = GST_FLOW_OK;
1238 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1239 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1240 ret = (flow_ret == GST_FLOW_OK);
1242 GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
1243 /* Flush upstream tags after a STREAM_START */
1244 if (priv->upstream_tags) {
1245 gst_tag_list_unref (priv->upstream_tags);
1246 priv->upstream_tags = NULL;
1247 priv->tags_changed = TRUE;
1249 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1251 /* Forward STREAM_START immediately. Everything is drained after
1252 * the STREAM_START event and we can forward this event immediately
1253 * now without having buffers out of order.
1255 forward_immediate = TRUE;
1258 case GST_EVENT_CAPS:
1262 gst_event_parse_caps (event, &caps);
1263 ret = gst_video_decoder_setcaps (decoder, caps);
/* CAPS is consumed here, not forwarded; setcaps negotiates instead */
1264 gst_event_unref (event);
1268 case GST_EVENT_SEGMENT_DONE:
1270 GstFlowReturn flow_ret = GST_FLOW_OK;
1272 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1273 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1274 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1275 ret = (flow_ret == GST_FLOW_OK);
1277 /* Forward SEGMENT_DONE immediately. This is required
1278 * because no buffer or serialized event might come
1279 * after SEGMENT_DONE and nothing could trigger another
1280 * _finish_frame() call.
1282 * The subclass can override this behaviour by overriding
1283 * the ::sink_event() vfunc and not chaining up to the
1284 * parent class' ::sink_event() until a later time.
1286 forward_immediate = TRUE;
/* EOS case (case label outside this view): drain with at_eos=TRUE */
1291 GstFlowReturn flow_ret = GST_FLOW_OK;
1293 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1294 flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
1295 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1296 ret = (flow_ret == GST_FLOW_OK);
1298 /* Error out even if EOS was ok when we had input, but no output */
1299 if (ret && priv->had_input_data && !priv->had_output_data) {
1300 GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
1301 ("No valid frames decoded before end of stream"),
1302 ("no valid frames found"));
1305 /* Forward EOS immediately. This is required because no
1306 * buffer or serialized event will come after EOS and
1307 * nothing could trigger another _finish_frame() call.
1309 * The subclass can override this behaviour by overriding
1310 * the ::sink_event() vfunc and not chaining up to the
1311 * parent class' ::sink_event() until a later time.
1313 forward_immediate = TRUE;
/* GAP case (case label outside this view) */
1318 GstFlowReturn flow_ret = GST_FLOW_OK;
1319 gboolean needs_reconfigure = FALSE;
1321 GList *frame_events;
1323 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1324 if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
1325 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1326 ret = (flow_ret == GST_FLOW_OK);
1328 /* Ensure we have caps before forwarding the event */
1329 if (!decoder->priv->output_state) {
1330 if (!gst_video_decoder_negotiate_default_caps (decoder)) {
1331 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1332 GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
1333 ("Decoder output not negotiated before GAP event."));
1334 forward_immediate = TRUE;
1337 needs_reconfigure = TRUE;
1340 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
1341 || needs_reconfigure;
1342 if (decoder->priv->output_state_changed || needs_reconfigure) {
1343 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
1344 GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
1345 gst_pad_mark_reconfigure (decoder->srcpad);
/* flush out all pending serialized events before the GAP itself */
1349 GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
1351 events = decoder->priv->pending_events;
1352 frame_events = decoder->priv->current_frame_events;
1353 decoder->priv->pending_events = NULL;
1354 decoder->priv->current_frame_events = NULL;
1356 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1358 gst_video_decoder_push_event_list (decoder, events);
1359 gst_video_decoder_push_event_list (decoder, frame_events);
1361 /* Forward GAP immediately. Everything is drained after
1362 * the GAP event and we can forward this event immediately
1363 * now without having buffers out of order.
1365 forward_immediate = TRUE;
1368 case GST_EVENT_CUSTOM_DOWNSTREAM:
1371 GstFlowReturn flow_ret = GST_FLOW_OK;
1373 if (gst_video_event_parse_still_frame (event, &in_still)) {
/* only the still-frame-start branch is visible here */
1375 GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
1376 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1377 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1378 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1379 ret = (flow_ret == GST_FLOW_OK);
1381 /* Forward STILL_FRAME immediately. Everything is drained after
1382 * the STILL_FRAME event and we can forward this event immediately
1383 * now without having buffers out of order.
1385 forward_immediate = TRUE;
1389 case GST_EVENT_SEGMENT:
1393 gst_event_copy_segment (event, &segment);
1395 if (segment.format == GST_FORMAT_TIME) {
1396 GST_DEBUG_OBJECT (decoder,
1397 "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1401 GST_DEBUG_OBJECT (decoder,
1402 "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1404 /* handle newsegment as a result from our legacy simple seeking */
1405 /* note that initial 0 should convert to 0 in any case */
1406 if (priv->do_estimate_rate &&
1407 gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1408 segment.start, GST_FORMAT_TIME, &start)) {
1409 /* best attempt convert */
1410 /* as these are only estimates, stop is kept open-ended to avoid
1411 * premature cutting */
1412 GST_DEBUG_OBJECT (decoder,
1413 "converted to TIME start %" GST_TIME_FORMAT,
1414 GST_TIME_ARGS (start));
1415 segment.start = start;
1416 segment.stop = GST_CLOCK_TIME_NONE;
1417 segment.time = start;
/* replace the BYTES segment event by the converted TIME one */
1419 gst_event_unref (event);
1420 event = gst_event_new_segment (&segment);
1422 goto newseg_wrong_format;
1426 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1428 /* Update the decode flags in the segment if we have an instant-rate
1429 * override active */
1430 GST_OBJECT_LOCK (decoder);
1431 if (!priv->decode_flags_override)
1432 priv->decode_flags = segment.flags;
1434 segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1435 segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1438 priv->base_timestamp = GST_CLOCK_TIME_NONE;
1439 priv->base_picture_number = 0;
1441 decoder->input_segment = segment;
1442 decoder->priv->in_out_segment_sync = FALSE;
1444 GST_OBJECT_UNLOCK (decoder);
1445 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1449 case GST_EVENT_INSTANT_RATE_CHANGE:
1451 GstSegmentFlags flags;
1454 gst_event_parse_instant_rate_change (event, NULL, &flags);
1456 GST_OBJECT_LOCK (decoder);
1457 priv->decode_flags_override = TRUE;
1458 priv->decode_flags = flags;
1460 /* Update the input segment flags */
1461 seg = &decoder->input_segment;
1462 seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1463 seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1464 GST_OBJECT_UNLOCK (decoder);
1467 case GST_EVENT_FLUSH_STOP:
1471 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* preserve sticky events attached to queued frames across the flush */
1472 for (l = priv->frames.head; l; l = l->next) {
1473 GstVideoCodecFrame *frame = l->data;
1475 frame->events = _flush_events (decoder->srcpad, frame->events);
1477 priv->current_frame_events = _flush_events (decoder->srcpad,
1478 decoder->priv->current_frame_events);
1480 /* well, this is kind of worse than a DISCONT */
1481 gst_video_decoder_flush (decoder, TRUE);
1482 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1483 /* Forward FLUSH_STOP immediately. This is required because it is
1484 * expected to be forwarded immediately and no buffers are queued
1487 forward_immediate = TRUE;
/* TAG case (case label outside this view) */
1494 gst_event_parse_tag (event, &tags);
1496 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1497 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1498 if (priv->upstream_tags != tags) {
1499 if (priv->upstream_tags)
1500 gst_tag_list_unref (priv->upstream_tags);
1501 priv->upstream_tags = gst_tag_list_ref (tags);
1502 GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
/* replace the incoming TAG event by a merged upstream+decoder one */
1504 gst_event_unref (event);
1505 event = gst_video_decoder_create_merged_tags_event (decoder);
1506 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1516 /* Forward non-serialized events immediately, and all other
1517 * events which can be forwarded immediately without potentially
1518 * causing the event to go out of order with other events and
1519 * buffers as decided above.
1522 if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1523 ret = gst_video_decoder_push_event (decoder, event);
1525 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1526 decoder->priv->current_frame_events =
1527 g_list_prepend (decoder->priv->current_frame_events, event);
1528 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1535 newseg_wrong_format:
1537 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
1538 gst_event_unref (event);
/* Sink pad event function: logs the event and dispatches it to the
 * subclass' ->sink_event() vfunc (which chains up to the default
 * handler above unless overridden). */
1545 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1548 GstVideoDecoder *decoder;
1549 GstVideoDecoderClass *decoder_class;
1550 gboolean ret = FALSE;
1552 decoder = GST_VIDEO_DECODER (parent);
1553 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1555 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1556 GST_EVENT_TYPE_NAME (event));
1558 if (decoder_class->sink_event)
1559 ret = decoder_class->sink_event (decoder, event);
1564 /* perform upstream byte <-> time conversion (duration, seeking)
1565 * if subclass allows and if enough data for moderately decent conversion */
/* Returns TRUE when rate estimation is enabled and we have produced
 * output (> 0 bytes) over more than one second of stream time, i.e.
 * the bytes/time ratio is meaningful enough to convert with. */
1566 static inline gboolean
1567 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1571 GST_OBJECT_LOCK (dec);
1572 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1573 && (dec->priv->time > GST_SECOND);
1574 GST_OBJECT_UNLOCK (dec);
/* Fallback TIME seek handling when upstream could not seek in TIME:
 * only plain, flushing, forward (rate handling is outside this view),
 * open-ended SET seeks are supported.  The requested start time is
 * converted to a byte offset via the sink pad and re-issued upstream
 * as a BYTES seek carrying the original seqnum. */
1580 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1584 GstSeekType start_type, end_type;
1586 gint64 start, start_time, end_time;
1587 GstSegment seek_segment;
1590 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1591 &start_time, &end_type, &end_time);
1593 /* we'll handle plain open-ended flushing seeks with the simple approach */
1595 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1599 if (start_type != GST_SEEK_TYPE_SET) {
1600 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1604 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1605 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1606 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1610 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1611 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
/* apply the seek to a scratch copy of the output segment to resolve
 * the absolute target position */
1615 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1616 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1617 start_time, end_type, end_time, NULL);
1618 start_time = seek_segment.position;
1620 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1621 GST_FORMAT_BYTES, &start)) {
1622 GST_DEBUG_OBJECT (dec, "conversion failed");
/* keep the original seqnum so the seek can be correlated downstream */
1626 seqnum = gst_event_get_seqnum (event);
1627 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1628 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1629 gst_event_set_seqnum (event, seqnum);
1631 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1632 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1634 return gst_pad_push_event (dec->sinkpad, event);
/* Default handler for events on the source pad.
 * SEEK: try upstream as-is first; failing that, help a TIME seek via
 * byte conversion (gst_video_decoder_do_seek) or convert a non-TIME
 * seek to TIME before retrying upstream.
 * QOS: record proportion/earliest_time for QoS-based frame dropping,
 * then forward upstream.  Everything else is forwarded upstream. */
1638 gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
1641 GstVideoDecoderPrivate *priv;
1642 gboolean res = FALSE;
1644 priv = decoder->priv;
1646 GST_DEBUG_OBJECT (decoder,
1647 "received event %d, %s", GST_EVENT_TYPE (event),
1648 GST_EVENT_TYPE_NAME (event));
1650 switch (GST_EVENT_TYPE (event)) {
1651 case GST_EVENT_SEEK:
1656 GstSeekType start_type, stop_type;
1658 gint64 tstart, tstop;
1661 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1663 seqnum = gst_event_get_seqnum (event);
1665 /* upstream gets a chance first */
1666 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1669 /* if upstream fails for a time seek, maybe we can help if allowed */
1670 if (format == GST_FORMAT_TIME) {
1671 if (gst_video_decoder_do_byte (decoder))
1672 res = gst_video_decoder_do_seek (decoder, event);
1676 /* ... though a non-time seek can be aided as well */
1677 /* First bring the requested format to time */
1679 gst_pad_query_convert (decoder->srcpad, format, start,
1680 GST_FORMAT_TIME, &tstart)))
1683 gst_pad_query_convert (decoder->srcpad, format, stop,
1684 GST_FORMAT_TIME, &tstop)))
1687 /* then seek with time on the peer */
1688 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1689 flags, start_type, tstart, stop_type, tstop);
1690 gst_event_set_seqnum (event, seqnum);
1692 res = gst_pad_push_event (decoder->sinkpad, event);
/* QOS case (case label outside this view) */
1699 GstClockTimeDiff diff;
1700 GstClockTime timestamp;
1702 gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1704 GST_OBJECT_LOCK (decoder);
1705 priv->proportion = proportion;
1706 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1707 if (G_UNLIKELY (diff > 0)) {
/* running late: skip ahead by twice the lateness plus one frame */
1708 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1710 priv->earliest_time = timestamp + diff;
1713 priv->earliest_time = GST_CLOCK_TIME_NONE;
1715 GST_OBJECT_UNLOCK (decoder);
1717 GST_DEBUG_OBJECT (decoder,
1718 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1719 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1721 res = gst_pad_push_event (decoder->sinkpad, event);
1725 res = gst_pad_push_event (decoder->sinkpad, event);
/* convert_error: a gst_pad_query_convert above failed */
1732 GST_DEBUG_OBJECT (decoder, "could not convert format");
/* Source pad event function: logs the event and dispatches it to the
 * subclass' ->src_event() vfunc (default implementation above). */
1737 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1739 GstVideoDecoder *decoder;
1740 GstVideoDecoderClass *decoder_class;
1741 gboolean ret = FALSE;
1743 decoder = GST_VIDEO_DECODER (parent);
1744 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1746 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1747 GST_EVENT_TYPE_NAME (event));
1749 if (decoder_class->src_event)
1750 ret = decoder_class->src_event (decoder, event);
/* Default handler for queries on the source pad.
 * POSITION: prefer upstream's answer; otherwise derive from the last
 *   output timestamp mapped to stream time (BYTES queries are refused).
 * DURATION: prefer default/upstream; otherwise estimate TIME duration
 *   from upstream BYTES size when byte<->time conversion is allowed.
 * CONVERT: raw-video conversion based on the output state.
 * LATENCY: peer latency plus the decoder's own min/max latency.
 * Anything else falls through to gst_pad_query_default(). */
1756 gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
1758 GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
1759 gboolean res = TRUE;
1761 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1763 switch (GST_QUERY_TYPE (query)) {
1764 case GST_QUERY_POSITION:
1769 /* upstream gets a chance first */
1770 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1771 GST_LOG_OBJECT (dec, "returning peer response");
1775 /* Refuse BYTES format queries. If it made sense to
1776 * answer them, upstream would have already */
1777 gst_query_parse_position (query, &format, NULL);
1779 if (format == GST_FORMAT_BYTES) {
1780 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1784 /* we start from the last seen time */
1785 time = dec->priv->last_timestamp_out;
1786 /* correct for the segment values */
1787 time = gst_segment_to_stream_time (&dec->output_segment,
1788 GST_FORMAT_TIME, time);
1790 GST_LOG_OBJECT (dec,
1791 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
1793 /* and convert to the final format */
1794 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
1798 gst_query_set_position (query, format, value);
1800 GST_LOG_OBJECT (dec,
1801 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
1805 case GST_QUERY_DURATION:
1809 /* upstream in any case */
1810 if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
1813 gst_query_parse_duration (query, &format, NULL);
1814 /* try answering TIME by converting from BYTE if subclass allows */
1815 if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
1818 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
1820 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
1821 if (gst_pad_query_convert (dec->sinkpad,
1822 GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
1823 gst_query_set_duration (query, GST_FORMAT_TIME, value);
1830 case GST_QUERY_CONVERT:
1832 GstFormat src_fmt, dest_fmt;
1833 gint64 src_val, dest_val;
1835 GST_DEBUG_OBJECT (dec, "convert query");
1837 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1838 GST_OBJECT_LOCK (dec);
/* conversion needs a negotiated output state to know the frame size */
1839 if (dec->priv->output_state != NULL)
1840 res = __gst_video_rawvideo_convert (dec->priv->output_state,
1841 src_fmt, src_val, &dest_fmt, &dest_val);
1844 GST_OBJECT_UNLOCK (dec);
1847 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1850 case GST_QUERY_LATENCY:
1853 GstClockTime min_latency, max_latency;
1855 res = gst_pad_peer_query (dec->sinkpad, query);
1857 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
1858 GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
1859 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
1860 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
1862 GST_OBJECT_LOCK (dec);
1863 min_latency += dec->priv->min_latency;
/* NONE on either side means unbounded max latency */
1864 if (max_latency == GST_CLOCK_TIME_NONE
1865 || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
1866 max_latency = GST_CLOCK_TIME_NONE;
1868 max_latency += dec->priv->max_latency;
1869 GST_OBJECT_UNLOCK (dec);
1871 gst_query_set_latency (query, live, min_latency, max_latency);
1876 res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
/* error label for failed queries */
1881 GST_ERROR_OBJECT (dec, "query failed");
/* Source pad query function: logs the query and dispatches it to the
 * subclass' ->src_query() vfunc (default implementation above). */
1886 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
1888 GstVideoDecoder *decoder;
1889 GstVideoDecoderClass *decoder_class;
1890 gboolean ret = FALSE;
1892 decoder = GST_VIDEO_DECODER (parent);
1893 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1895 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1896 GST_QUERY_TYPE_NAME (query));
1898 if (decoder_class->src_query)
1899 ret = decoder_class->src_query (decoder, query);
1905 * gst_video_decoder_proxy_getcaps:
1906 * @decoder: a #GstVideoDecoder
1907 * @caps: (allow-none): initial caps
1908 * @filter: (allow-none): filter caps
1910 * Returns caps that express @caps (or sink template caps if @caps == NULL)
1911 * restricted to resolution/format/... combinations supported by downstream
1914 * Returns: (transfer full): a #GstCaps owned by caller
/* Thin wrapper around the shared video-element caps-proxy helper,
 * passing this decoder's sink and src pads. */
1919 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
1922 return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
1923 GST_VIDEO_DECODER_SINK_PAD (decoder),
1924 GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
/* Compute sink pad caps: use the subclass' ->getcaps() when provided,
 * otherwise proxy downstream restrictions via
 * gst_video_decoder_proxy_getcaps().  Returns caps owned by caller. */
1928 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
1930 GstVideoDecoderClass *klass;
1933 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1936 caps = klass->getcaps (decoder, filter);
1938 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
1940 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
/* Default handler for queries on the sink pad.
 * CONVERT: encoded byte<->time conversion from the output byte/time
 *   statistics.  ALLOCATION: delegated to ->propose_allocation().
 * CAPS: answered via gst_video_decoder_sink_getcaps().
 * ACCEPT_CAPS: either the default pad behaviour, or a subset check
 *   against the template caps followed by an intersection check against
 *   the currently allowed caps.  Rest goes to gst_pad_query_default(). */
1946 gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
1949 GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
1950 GstVideoDecoderPrivate *priv;
1951 gboolean res = FALSE;
1953 priv = decoder->priv;
1955 GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
1957 switch (GST_QUERY_TYPE (query)) {
1958 case GST_QUERY_CONVERT:
1960 GstFormat src_fmt, dest_fmt;
1961 gint64 src_val, dest_val;
1963 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1964 GST_OBJECT_LOCK (decoder);
/* conversion rate derives from accumulated bytes_out / elapsed time */
1966 __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
1967 src_fmt, src_val, &dest_fmt, &dest_val);
1968 GST_OBJECT_UNLOCK (decoder);
1971 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1974 case GST_QUERY_ALLOCATION:{
1975 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1977 if (klass->propose_allocation)
1978 res = klass->propose_allocation (decoder, query);
1981 case GST_QUERY_CAPS:{
1982 GstCaps *filter, *caps;
1984 gst_query_parse_caps (query, &filter);
1985 caps = gst_video_decoder_sink_getcaps (decoder, filter);
1986 gst_query_set_caps_result (query, caps);
1987 gst_caps_unref (caps);
1991 case GST_QUERY_ACCEPT_CAPS:{
1992 if (decoder->priv->use_default_pad_acceptcaps) {
1994 gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
1995 GST_OBJECT_CAST (decoder), query);
1998 GstCaps *allowed_caps;
1999 GstCaps *template_caps;
2002 gst_query_parse_accept_caps (query, &caps);
/* fast path: caps within the template are accepted outright */
2004 template_caps = gst_pad_get_pad_template_caps (pad);
2005 accept = gst_caps_is_subset (caps, template_caps);
2006 gst_caps_unref (template_caps);
2010 gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);
2012 accept = gst_caps_can_intersect (caps, allowed_caps);
2014 gst_caps_unref (allowed_caps);
2017 gst_query_set_accept_caps_result (query, accept);
2023 res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
/* error label for failed queries */
2030 GST_DEBUG_OBJECT (decoder, "query failed");
/* Sink pad query function: logs the query and dispatches it to the
 * subclass' ->sink_query() vfunc (default implementation above). */
2036 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2039 GstVideoDecoder *decoder;
2040 GstVideoDecoderClass *decoder_class;
2041 gboolean ret = FALSE;
2043 decoder = GST_VIDEO_DECODER (parent);
2044 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2046 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2047 GST_QUERY_TYPE_NAME (query));
2049 if (decoder_class->sink_query)
2050 ret = decoder_class->sink_query (decoder, query);
/* Per-input-buffer timing record, queued so PTS/DTS/duration/flags can
 * later be matched to parsed frames by byte offset (see
 * gst_video_decoder_get_buffer_info_at_offset). */
2055 typedef struct _Timestamp Timestamp;
2061 GstClockTime duration;
/* Release one Timestamp record (GSlice-allocated). */
2066 timestamp_free (Timestamp * ts)
2068 g_slice_free (Timestamp, ts);
/* Record the timing metadata (PTS, DTS, duration, flags) of an incoming
 * buffer together with the current input byte offset, so it can later be
 * re-attached to the frame parsed from that offset.  Buffers carrying no
 * distinguishing info at all are skipped to save memory. */
2072 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
2075 GstVideoDecoderPrivate *priv = decoder->priv;
2078 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2079 !GST_BUFFER_DTS_IS_VALID (buffer) &&
2080 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2081 GST_BUFFER_FLAGS (buffer) == 0) {
2082 /* Save memory - don't bother storing info
2083 * for buffers with no distinguishing info */
2087 ts = g_slice_new (Timestamp);
2089 GST_LOG_OBJECT (decoder,
2090 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2091 " (offset:%" G_GUINT64_FORMAT ")",
2092 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2093 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2095 ts->offset = priv->input_offset;
2096 ts->pts = GST_BUFFER_PTS (buffer);
2097 ts->dts = GST_BUFFER_DTS (buffer);
2098 ts->duration = GST_BUFFER_DURATION (buffer);
2099 ts->flags = GST_BUFFER_FLAGS (buffer);
2101 g_queue_push_tail (&priv->timestamps, ts);
/* Retrieve the buffer timing info recorded at or before byte @offset.
 * Outputs default to GST_CLOCK_TIME_NONE.  Walks the timestamp queue,
 * consuming (and freeing) every entry whose offset is <= @offset; the
 * last such entry visible in this loop supplies the returned values. */
2105 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
2106 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2107 GstClockTime * duration, guint * flags)
2109 #ifndef GST_DISABLE_GST_DEBUG
2110 guint64 got_offset = 0;
2115 *pts = GST_CLOCK_TIME_NONE;
2116 *dts = GST_CLOCK_TIME_NONE;
2117 *duration = GST_CLOCK_TIME_NONE;
2120 g = decoder->priv->timestamps.head;
2123 if (ts->offset <= offset) {
2124 GList *next = g->next;
2125 #ifndef GST_DISABLE_GST_DEBUG
2126 got_offset = ts->offset;
2130 *duration = ts->duration;
/* entry consumed: remove it from the queue and free it */
2132 g_queue_delete_link (&decoder->priv->timestamps, g);
2134 timestamp_free (ts);
2140 GST_LOG_OBJECT (decoder,
2141 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2142 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2143 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
/* Compatibility shim: g_queue_clear_full() only exists since GLib 2.60,
 * so provide an equivalent that pops and frees each element. */
2146 #if !GLIB_CHECK_VERSION(2, 60, 0)
2147 #define g_queue_clear_full queue_clear_full
2149 queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2153 while ((data = g_queue_pop_head (queue)) != NULL)
/* Drop everything queued for (reverse-playback) processing: pending
 * output buffers, the gather/decode/parse/parse_gather lists, and the
 * frame queue.  Buffers and events are unreffed with the appropriate
 * destroy function for each list's element type. */
2159 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
2161 GstVideoDecoderPrivate *priv = dec->priv;
2163 g_list_free_full (priv->output_queued,
2164 (GDestroyNotify) gst_mini_object_unref);
2165 priv->output_queued = NULL;
2167 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2168 priv->gather = NULL;
2169 g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
2170 priv->decode = NULL;
2171 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2173 g_list_free_full (priv->parse_gather,
2174 (GDestroyNotify) gst_video_codec_frame_unref);
2175 priv->parse_gather = NULL;
2176 g_queue_clear_full (&priv->frames,
2177 (GDestroyNotify) gst_video_codec_frame_unref);
/* Reset decoder state.  @flush_hard additionally clears segments and
 * queued data; @full (used on stop) also drops the negotiated input and
 * output states, tags, counters, and the allocated buffer pool and
 * allocator.  Adapter contents, timestamp queue, and timestamp tracking
 * are reset unconditionally.  Takes the stream lock for the duration;
 * fields shared with the streaming thread are updated under the object
 * lock as well. */
2181 gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
2182 gboolean flush_hard)
2184 GstVideoDecoderPrivate *priv = decoder->priv;
2186 GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2188 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2190 if (full || flush_hard) {
2191 gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2192 gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2193 gst_video_decoder_clear_queues (decoder);
2194 decoder->priv->in_out_segment_sync = TRUE;
2196 if (priv->current_frame) {
2197 gst_video_codec_frame_unref (priv->current_frame);
2198 priv->current_frame = NULL;
2201 g_list_free_full (priv->current_frame_events,
2202 (GDestroyNotify) gst_event_unref);
2203 priv->current_frame_events = NULL;
2204 g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2205 priv->pending_events = NULL;
2207 priv->error_count = 0;
2208 priv->had_output_data = FALSE;
2209 priv->had_input_data = FALSE;
/* QoS state is read from the streaming thread: object lock needed */
2211 GST_OBJECT_LOCK (decoder);
2212 priv->earliest_time = GST_CLOCK_TIME_NONE;
2213 priv->proportion = 0.5;
2214 priv->decode_flags_override = FALSE;
2216 priv->request_sync_point_flags = 0;
2217 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2218 priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2219 GST_OBJECT_UNLOCK (decoder);
2220 priv->distance_from_sync = -1;
/* full reset (stop): drop negotiated states and per-stream resources */
2224 if (priv->input_state)
2225 gst_video_codec_state_unref (priv->input_state);
2226 priv->input_state = NULL;
2227 GST_OBJECT_LOCK (decoder);
2228 if (priv->output_state)
2229 gst_video_codec_state_unref (priv->output_state);
2230 priv->output_state = NULL;
2232 priv->qos_frame_duration = 0;
2233 GST_OBJECT_UNLOCK (decoder);
2236 gst_tag_list_unref (priv->tags);
2238 priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2239 if (priv->upstream_tags) {
2240 gst_tag_list_unref (priv->upstream_tags);
2241 priv->upstream_tags = NULL;
2243 priv->tags_changed = FALSE;
2244 priv->reordered_output = FALSE;
2247 priv->processed = 0;
2249 priv->decode_frame_number = 0;
2250 priv->base_picture_number = 0;
2253 GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2255 gst_buffer_pool_set_active (priv->pool, FALSE);
2256 gst_object_unref (priv->pool);
2260 if (priv->allocator) {
2261 gst_object_unref (priv->allocator);
2262 priv->allocator = NULL;
/* unconditional part: clear adapters and timestamp bookkeeping */
2266 priv->discont = TRUE;
2268 priv->base_timestamp = GST_CLOCK_TIME_NONE;
2269 priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2270 priv->pts_delta = GST_CLOCK_TIME_NONE;
2272 priv->input_offset = 0;
2273 priv->frame_offset = 0;
2274 gst_adapter_clear (priv->input_adapter);
2275 gst_adapter_clear (priv->output_adapter);
2276 g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2278 GST_OBJECT_LOCK (decoder);
2279 priv->bytes_out = 0;
2281 GST_OBJECT_UNLOCK (decoder);
2283 #ifndef GST_DISABLE_DEBUG
2284 priv->last_reset_time = gst_util_get_timestamp ();
2287 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* Feed one input buffer through the forward-rate path.
 * Takes ownership of @buf.  In packetized mode the buffer directly becomes
 * the current frame's input buffer; otherwise it is accumulated in the
 * input adapter and handed to the subclass parser via
 * gst_video_decoder_parse_available().  @at_eos is forwarded to the parser
 * so it can flush a final partial frame.
 * NOTE(review): this is an elided excerpt; some original lines are missing. */
2290 static GstFlowReturn
2291 gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
2292 GstBuffer * buf, gboolean at_eos)
2294 GstVideoDecoderPrivate *priv;
2295 GstVideoDecoderClass *klass;
2296 GstFlowReturn ret = GST_FLOW_OK;
2298 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2299 priv = decoder->priv;
/* non-packetized input is only usable if the subclass implements ->parse */
2301 g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2303 /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
2304 * and this function would only be called to get everything collected GOP
2305 * by GOP in the parse_gather list */
2306 if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2307 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2308 ret = gst_video_decoder_drain_out (decoder, FALSE);
/* lazily create the frame that collects events/metadata for this data */
2310 if (priv->current_frame == NULL)
2311 priv->current_frame = gst_video_decoder_new_frame (decoder);
2313 if (!priv->packetized)
2314 gst_video_decoder_add_buffer_info (decoder, buf);
2316 priv->input_offset += gst_buffer_get_size (buf);
2318 if (priv->packetized) {
2319 gboolean was_keyframe = FALSE;
/* a missing DELTA_UNIT flag marks a keyframe in packetized streams */
2320 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2321 was_keyframe = TRUE;
2322 GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2323 GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
2326 priv->current_frame->input_buffer = buf;
/* reverse rate: only gather frames here; decoding happens in flush_parse() */
2328 if (decoder->input_segment.rate < 0.0) {
2329 priv->parse_gather =
2330 g_list_prepend (priv->parse_gather, priv->current_frame);
2332 ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
2334 priv->current_frame = NULL;
2335 /* If in trick mode and it was a keyframe, drain decoder to avoid extra
2336 * latency. Only do this for forwards playback as reverse playback handles
2337 * draining on keyframes in flush_parse(), and would otherwise call back
2338 * from drain_out() to here causing an infinite loop.
2339 * Also this function is only called for reverse playback to gather frames
2340 * GOP by GOP, and does not do any actual decoding. That would be done by
2342 if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2343 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2344 ret = gst_video_decoder_drain_out (decoder, FALSE);
/* non-packetized: accumulate and let the subclass parser find frame edges */
2346 gst_adapter_push (priv->input_adapter, buf);
2348 ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
/* NEED_DATA is not an error: the parser simply wants more input */
2351 if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
/* Decode every frame queued on priv->decode (reverse-playback helper).
 * Each frame is unlinked from the list and run through
 * gst_video_decoder_decode_frame(); iteration stops at the first non-OK
 * flow return. */
2357 static GstFlowReturn
2358 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2360 GstVideoDecoderPrivate *priv = dec->priv;
2361 GstFlowReturn res = GST_FLOW_OK;
2364 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2366 walk = priv->decode;
2369 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2371 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2372 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2373 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2374 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
/* unlink the node before decoding so the list stays consistent */
2378 priv->decode = g_list_delete_link (priv->decode, walk);
2380 /* decode buffer, resulting data prepended to queue */
2381 res = gst_video_decoder_decode_frame (dec, frame);
/* bail out on the first error / flushing return */
2382 if (res != GST_FLOW_OK)
2391 /* gst_video_decoder_flush_parse is called from the
2392 * chain_reverse() function when a buffer containing
2393 * a DISCONT is received - indicating that reverse playback
2394 * looped back to the next data block, and therefore
2395 * all available data should be fed through the
2396 * decoder and frames gathered for reversed output
/* Reverse-playback worker.  Re-parses the gathered input (reversed back to
 * stream order) via chain_forward(), pulls SEGMENT events out of the
 * resulting frames to keep the output segment in sync, then walks the
 * parsed frames keyframe-by-keyframe: decoding each closed group via
 * flush_decode(), draining the subclass, and pushing the queued output
 * buffers downstream in reverse order.
 * NOTE(review): this is an elided excerpt; loop headers and some lines of
 * the original function are missing. */
2398 static GstFlowReturn
2399 gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
2401 GstVideoDecoderPrivate *priv = dec->priv;
2402 GstFlowReturn res = GST_FLOW_OK;
2404 GstVideoDecoderClass *decoder_class;
2406 decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
2408 GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2410 /* Reverse the gather list, and prepend it to the parse list,
2411 * then flush to parse whatever we can */
2412 priv->gather = g_list_reverse (priv->gather);
2413 priv->parse = g_list_concat (priv->gather, priv->parse);
2414 priv->gather = NULL;
2416 /* clear buffer and decoder state */
2417 gst_video_decoder_flush (dec, FALSE);
2421 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2422 GList *next = walk->next;
2424 GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2425 ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2426 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2427 GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2429 /* parse buffer, resulting frames prepended to parse_gather queue */
2430 gst_buffer_ref (buf);
2431 res = gst_video_decoder_chain_forward (dec, buf, at_eos);
2433 /* if we generated output, we can discard the buffer, else we
2434 * keep it in the queue */
2435 if (priv->parse_gather) {
2436 GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2437 priv->parse = g_list_delete_link (priv->parse, walk);
2438 gst_buffer_unref (buf);
2440 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2445 walk = priv->parse_gather;
2447 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2450 /* this is reverse playback, check if we need to apply some segment
2451 * to the output before decoding, as during decoding the segment.rate
2452 * must be used to determine if a buffer should be pushed or added to
2453 * the output list for reverse pushing.
2455 * The new segment is not immediately pushed here because we must
2456 * wait for negotiation to happen before it can be pushed to avoid
2457 * pushing a segment before caps event. Negotiation only happens
2458 * when finish_frame is called.
/* iterate a copy-safe way: advance walk2 before the event may be unlinked */
2460 for (walk2 = frame->events; walk2;) {
2462 GstEvent *event = walk2->data;
2464 walk2 = g_list_next (walk2);
2465 if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2467 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2470 GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2471 frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2472 gst_event_copy_segment (event, &segment);
/* only TIME-format segments can drive the output segment */
2473 if (segment.format == GST_FORMAT_TIME) {
2474 dec->output_segment = segment;
2475 dec->priv->in_out_segment_sync =
2476 gst_segment_is_equal (&dec->input_segment, &segment);
/* move the event from the frame onto the decoder's pending list */
2479 dec->priv->pending_events =
2480 g_list_append (dec->priv->pending_events, event);
2481 frame->events = g_list_delete_link (frame->events, cur);
2488 /* now we can process frames. Start by moving each frame from the parse_gather
2489 * to the decode list, reverse the order as we go, and stopping when/if we
2490 * copy a keyframe. */
2491 GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2492 walk = priv->parse_gather;
2494 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2496 /* remove from the gather list */
2497 priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2499 /* move it to the front of the decode queue */
2500 priv->decode = g_list_concat (walk, priv->decode);
2502 /* if we copied a keyframe, flush and decode the decode queue */
2503 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2504 GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2505 ", DTS %" GST_TIME_FORMAT, frame,
2506 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2507 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2508 res = gst_video_decoder_flush_decode (dec);
2509 if (res != GST_FLOW_OK)
2512 /* We need to tell the subclass to drain now.
2513 * We prefer the drain vfunc, but for backward-compat
2514 * we use a finish() vfunc if drain isn't implemented */
2515 if (decoder_class->drain) {
2516 GST_DEBUG_OBJECT (dec, "Draining");
2517 res = decoder_class->drain (dec);
2518 } else if (decoder_class->finish) {
2519 GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2520 "Calling finish() for backwards-compat");
2521 res = decoder_class->finish (dec);
2524 if (res != GST_FLOW_OK)
2527 /* now send queued data downstream */
2528 walk = priv->output_queued;
2530 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
/* pop from the head; the list node's buffer ref is consumed below */
2532 priv->output_queued =
2533 g_list_delete_link (priv->output_queued, priv->output_queued);
2535 if (G_LIKELY (res == GST_FLOW_OK)) {
2536 /* avoid stray DISCONT from forward processing,
2537 * which have no meaning in reverse pushing */
2538 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2540 /* Last chance to calculate a timestamp as we loop backwards
2541 * through the list */
2542 if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2543 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2544 else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2545 GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
2546 GST_BUFFER_TIMESTAMP (buf) =
2547 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2548 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2549 GST_LOG_OBJECT (dec,
2550 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2551 GST_TIME_ARGS (priv->last_timestamp_out));
/* clip_and_push_buf() takes ownership of buf */
2554 res = gst_video_decoder_clip_and_push_buf (dec, buf);
2556 gst_buffer_unref (buf);
2559 walk = priv->output_queued;
2562 /* clear buffer and decoder state again
2563 * before moving to the previous keyframe */
2564 gst_video_decoder_flush (dec, FALSE);
2567 walk = priv->parse_gather;
/* Sink-pad handling of one buffer in reverse playback.
 * A DISCONT buffer (or @buf == NULL, used for draining) marks the start of
 * the next backwards data block, so everything gathered so far is parsed
 * and decoded via flush_parse().  Takes ownership of @buf, which is then
 * prepended to the gather queue (so the queue holds buffers in reversed
 * stream order). */
2574 static GstFlowReturn
2575 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2577 GstVideoDecoderPrivate *priv = dec->priv;
2578 GstFlowReturn result = GST_FLOW_OK;
2580 /* if we have a discont, move buffers to the decode list */
2581 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2582 GST_DEBUG_OBJECT (dec, "received discont");
2584 /* parse and decode stuff in the gather and parse queues */
2585 result = gst_video_decoder_flush_parse (dec, FALSE);
2588 if (G_LIKELY (buf)) {
2589 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2590 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2591 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2592 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2593 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2594 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2596 /* add buffer to gather queue */
2597 priv->gather = g_list_prepend (priv->gather, buf);
/* GstPad chain function for the sink pad: entry point for all input data.
 * Dispatches to chain_forward() or chain_reverse() based on the input
 * segment rate.  If no segment was received yet, a default TIME segment
 * starting at 0 is synthesized so raw-stream sources keep working.
 * Errors out with NOT_NEGOTIATED if the subclass requires caps
 * (needs_format) and none were set. */
2603 static GstFlowReturn
2604 gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2606 GstVideoDecoder *decoder;
2607 GstFlowReturn ret = GST_FLOW_OK;
2609 decoder = GST_VIDEO_DECODER (parent);
2611 if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2612 goto not_negotiated;
2614 GST_LOG_OBJECT (decoder,
2615 "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2616 GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2617 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2618 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2619 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2620 gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2622 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2625 * requiring the pad to be negotiated makes it impossible to use
2626 * oggdemux or filesrc ! decoder */
2628 if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2630 GstSegment *segment = &decoder->input_segment;
2632 GST_WARNING_OBJECT (decoder,
2633 "Received buffer without a new-segment. "
2634 "Assuming timestamps start from 0.");
2636 gst_segment_init (segment, GST_FORMAT_TIME);
/* queue the synthesized segment event with the next frame's events */
2638 event = gst_event_new_segment (segment);
2640 decoder->priv->current_frame_events =
2641 g_list_prepend (decoder->priv->current_frame_events, event);
2644 decoder->priv->had_input_data = TRUE;
/* positive rate -> forward path; otherwise gather for reverse playback */
2646 if (decoder->input_segment.rate > 0.0)
2647 ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
2649 ret = gst_video_decoder_chain_reverse (decoder, buf);
2651 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* error path: buf must still be consumed before returning */
2657 GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2658 ("decoder not initialized"));
2659 gst_buffer_unref (buf);
2660 return GST_FLOW_NOT_NEGOTIATED;
/* GstElement state-change handler.  Upward transitions invoke the optional
 * subclass open()/start() vfuncs before chaining to the parent class;
 * downward transitions invoke stop()/close() afterwards.  The decoder state
 * is fully reset on READY->PAUSED and PAUSED->READY.  Each vfunc failure
 * posts an element error and returns GST_STATE_CHANGE_FAILURE.
 * NOTE(review): elided excerpt; goto labels for the error paths are among
 * the missing lines. */
2664 static GstStateChangeReturn
2665 gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
2667 GstVideoDecoder *decoder;
2668 GstVideoDecoderClass *decoder_class;
2669 GstStateChangeReturn ret;
2671 decoder = GST_VIDEO_DECODER (element);
2672 decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);
2674 switch (transition) {
2675 case GST_STATE_CHANGE_NULL_TO_READY:
2676 /* open device/library if needed */
2677 if (decoder_class->open && !decoder_class->open (decoder))
2680 case GST_STATE_CHANGE_READY_TO_PAUSED:
/* full reset (hard + flush) under the stream lock before starting */
2681 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2682 gst_video_decoder_reset (decoder, TRUE, TRUE);
2683 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2685 /* Initialize device/library if needed */
2686 if (decoder_class->start && !decoder_class->start (decoder))
2693 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2695 switch (transition) {
2696 case GST_STATE_CHANGE_PAUSED_TO_READY:{
2697 gboolean stopped = TRUE;
2699 if (decoder_class->stop)
2700 stopped = decoder_class->stop (decoder);
2702 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2703 gst_video_decoder_reset (decoder, TRUE, TRUE);
2704 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2711 case GST_STATE_CHANGE_READY_TO_NULL:
2712 /* close device/library if needed */
2713 if (decoder_class->close && !decoder_class->close (decoder))
/* error labels: post an element error and fail the state change */
2725 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2726 ("Failed to open decoder"));
2727 return GST_STATE_CHANGE_FAILURE;
2732 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2733 ("Failed to start decoder"));
2734 return GST_STATE_CHANGE_FAILURE;
2739 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2740 ("Failed to stop decoder"));
2741 return GST_STATE_CHANGE_FAILURE;
2746 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2747 ("Failed to close decoder"));
2748 return GST_STATE_CHANGE_FAILURE;
/* Allocate and initialize a new GstVideoCodecFrame (caller owns the ref).
 * Frame numbering and the hand-over of the pending per-frame event list are
 * done under the stream lock; timestamps start out invalid and are filled
 * in later by the timestamp machinery. */
2752 static GstVideoCodecFrame *
2753 gst_video_decoder_new_frame (GstVideoDecoder * decoder)
2755 GstVideoDecoderPrivate *priv = decoder->priv;
2756 GstVideoCodecFrame *frame;
2758 frame = g_slice_new0 (GstVideoCodecFrame);
2760 frame->ref_count = 1;
2762 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* assign and advance the monotonically increasing frame counters */
2763 frame->system_frame_number = priv->system_frame_number;
2764 priv->system_frame_number++;
2765 frame->decode_frame_number = priv->decode_frame_number;
2766 priv->decode_frame_number++;
2768 frame->dts = GST_CLOCK_TIME_NONE;
2769 frame->pts = GST_CLOCK_TIME_NONE;
2770 frame->duration = GST_CLOCK_TIME_NONE;
/* the new frame takes ownership of all events gathered since the last one */
2771 frame->events = priv->current_frame_events;
2772 priv->current_frame_events = NULL;
2774 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2776 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
2777 frame, frame->system_frame_number);
/* Push @events downstream in their original (oldest-first) order and free
 * the list.  The list is stored newest-first, hence the backwards walk. */
2783 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
2787 /* events are stored in reverse order */
2788 for (l = g_list_last (events); l; l = g_list_previous (l)) {
2789 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
2790 gst_video_decoder_push_event (decoder, l->data);
/* only the list cells are freed; event ownership passed to push_event */
2792 g_list_free (events);
/* Common bookkeeping before a frame is finished or dropped (@dropping):
 * flushes events that belong to this or earlier frames, then runs the
 * PTS/DTS guessing machinery so the outgoing frame has the best possible
 * timestamp and duration.  Called with the stream lock held.
 * NOTE(review): elided excerpt; several lines of the original are missing. */
2796 gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
2797 decoder, GstVideoCodecFrame * frame, gboolean dropping)
2799 GstVideoDecoderPrivate *priv = decoder->priv;
2800 GList *l, *events = NULL;
2803 #ifndef GST_DISABLE_GST_DEBUG
2804 GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
2805 priv->frames.length,
2806 gst_adapter_available (priv->input_adapter),
2807 gst_adapter_available (priv->output_adapter));
2810 sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
2812 GST_LOG_OBJECT (decoder,
2813 "finish frame %p (#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
2815 frame, frame->system_frame_number,
2816 sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
2818 /* Push all pending events that arrived before this frame */
2819 for (l = priv->frames.head; l; l = l->next) {
2820 GstVideoCodecFrame *tmp = l->data;
2823 events = g_list_concat (tmp->events, events);
2831 if (dropping || !decoder->priv->output_state) {
2832 /* Push before the next frame that is not dropped */
2833 decoder->priv->pending_events =
2834 g_list_concat (events, decoder->priv->pending_events);
2836 gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
2837 decoder->priv->pending_events = NULL;
2839 gst_video_decoder_push_event_list (decoder, events);
2842 /* Check if the data should not be displayed. For example altref/invisible
2843 * frame in vp8. In this case we should not update the timestamps. */
2844 if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
2847 /* If the frame is meant to be output but we don't have an output_buffer
2848 * we have a problem :) */
2849 if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
2850 goto no_output_buffer;
/* a new valid PTS (re)establishes the timestamp base */
2852 if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
2853 if (frame->pts != priv->base_timestamp) {
2854 GST_DEBUG_OBJECT (decoder,
2855 "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
2856 GST_TIME_ARGS (frame->pts),
2857 GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
2858 decoder->output_segment.start)));
2859 priv->base_timestamp = frame->pts;
2860 priv->base_picture_number = frame->decode_frame_number;
2864 if (frame->duration == GST_CLOCK_TIME_NONE) {
2865 frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
2866 GST_LOG_OBJECT (decoder,
2867 "Guessing duration %" GST_TIME_FORMAT " for frame...",
2868 GST_TIME_ARGS (frame->duration));
2871 /* PTS is expected monotone ascending,
2872 * so a good guess is lowest unsent DTS */
2874 GstClockTime min_ts = GST_CLOCK_TIME_NONE;
2875 GstVideoCodecFrame *oframe = NULL;
2876 gboolean seen_none = FALSE;
2878 /* some maintenance regardless */
/* first pass: find the minimum pending DTS (abidata.ABI.ts) */
2879 for (l = priv->frames.head; l; l = l->next) {
2880 GstVideoCodecFrame *tmp = l->data;
2882 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
2887 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
2888 min_ts = tmp->abidata.ABI.ts;
2892 /* save a ts if needed */
2893 if (oframe && oframe != frame) {
2894 oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
2897 /* and set if needed;
2898 * valid delta means we have reasonable DTS input */
2899 /* also, if we ended up reordered, means this approach is conflicting
2900 * with some sparse existing PTS, and so it does not work out */
2901 if (!priv->reordered_output &&
2902 !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
2903 GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
2904 frame->pts = min_ts + priv->pts_delta;
2905 GST_DEBUG_OBJECT (decoder,
2906 "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
2907 GST_TIME_ARGS (frame->pts));
2910 /* some more maintenance, ts2 holds PTS */
/* second pass: same scan over the pending PTS values (abidata.ABI.ts2) */
2911 min_ts = GST_CLOCK_TIME_NONE;
2913 for (l = priv->frames.head; l; l = l->next) {
2914 GstVideoCodecFrame *tmp = l->data;
2916 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
2921 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
2922 min_ts = tmp->abidata.ABI.ts2;
2926 /* save a ts if needed */
2927 if (oframe && oframe != frame) {
2928 oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
2931 /* if we detected reordered output, then PTS are void,
2932 * however those were obtained; bogus input, subclass etc */
2933 if (priv->reordered_output && !seen_none) {
2934 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
2935 frame->pts = GST_CLOCK_TIME_NONE;
2938 if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
2939 frame->pts = min_ts;
2940 GST_DEBUG_OBJECT (decoder,
2941 "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
2942 GST_TIME_ARGS (frame->pts));
2947 if (frame->pts == GST_CLOCK_TIME_NONE) {
2948 /* Last ditch timestamp guess: Just add the duration to the previous
2949 * frame. If it's the first frame, just use the segment start. */
2950 if (frame->duration != GST_CLOCK_TIME_NONE) {
2951 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
2952 frame->pts = priv->last_timestamp_out + frame->duration;
2953 else if (frame->dts != GST_CLOCK_TIME_NONE) {
2954 frame->pts = frame->dts;
2955 GST_LOG_OBJECT (decoder,
2956 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
2957 GST_TIME_ARGS (frame->pts));
2958 } else if (decoder->output_segment.rate > 0.0)
2959 frame->pts = decoder->output_segment.start;
2960 GST_INFO_OBJECT (decoder,
2961 "Guessing PTS=%" GST_TIME_FORMAT " for frame... DTS=%"
2962 GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
2963 GST_TIME_ARGS (frame->dts));
2964 } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
/* keyframe with DTS but no duration: DTS is the best PTS estimate */
2965 frame->pts = frame->dts;
2966 GST_LOG_OBJECT (decoder,
2967 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
2968 GST_TIME_ARGS (frame->pts));
/* going backwards means reordering happened; clamp to keep output sane */
2972 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
2973 if (frame->pts < priv->last_timestamp_out) {
2974 GST_WARNING_OBJECT (decoder,
2975 "decreasing timestamp (%" GST_TIME_FORMAT " < %"
2976 GST_TIME_FORMAT ")",
2977 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
2978 priv->reordered_output = TRUE;
2979 /* make it a bit less weird downstream */
2980 frame->pts = priv->last_timestamp_out;
2984 if (GST_CLOCK_TIME_IS_VALID (frame->pts))
2985 priv->last_timestamp_out = frame->pts;
/* no_output_buffer error label */
2992 GST_ERROR_OBJECT (decoder, "No buffer to output !");
2997 * gst_video_decoder_release_frame:
2998 * @dec: a #GstVideoDecoder
2999 * @frame: (transfer full): the #GstVideoCodecFrame to release
3001 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
3002 * without any processing other than removing it from list of pending frames,
3003 * after which it is considered finished and released.
/* Public API (gtk-doc above): remove @frame from the pending-frames queue
 * without processing it.  Takes ownership of @frame.  Any events still
 * attached to the frame are preserved on the decoder's pending list so
 * they are not lost. */
3008 gst_video_decoder_release_frame (GstVideoDecoder * dec,
3009 GstVideoCodecFrame * frame)
3013 /* unref once from the list */
3014 GST_VIDEO_DECODER_STREAM_LOCK (dec);
3015 link = g_queue_find (&dec->priv->frames, frame);
3017 gst_video_codec_frame_unref (frame);
3018 g_queue_delete_link (&dec->priv->frames, link);
/* keep unsent serialized events alive; they'll go out with a later frame */
3020 if (frame->events) {
3021 dec->priv->pending_events =
3022 g_list_concat (frame->events, dec->priv->pending_events);
3023 frame->events = NULL;
3025 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3027 /* unref because this function takes ownership */
3028 gst_video_codec_frame_unref (frame);
3031 /* called with STREAM_LOCK */
/* Account one dropped frame and post a QoS message on the bus with the
 * drop's stream/running time, jitter against the earliest acceptable time,
 * the current QoS proportion, and processed/dropped counters.
 * NOTE(review): elided excerpt — the assignments of stream_time, proportion
 * and qos_msg appear on lines not shown here. */
3033 gst_video_decoder_post_qos_drop (GstVideoDecoder * dec, GstClockTime timestamp)
3035 GstClockTime stream_time, jitter, earliest_time, qostime;
3036 GstSegment *segment;
3037 GstMessage *qos_msg;
3039 dec->priv->dropped++;
3041 /* post QoS message */
/* snapshot the QoS state shared with the src-pad event handler */
3042 GST_OBJECT_LOCK (dec);
3043 proportion = dec->priv->proportion;
3044 earliest_time = dec->priv->earliest_time;
3045 GST_OBJECT_UNLOCK (dec);
/* fall back to the input segment if output was never configured */
3047 segment = &dec->output_segment;
3048 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
3049 segment = &dec->input_segment;
3051 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
3052 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
3053 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
3055 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
3056 timestamp, GST_CLOCK_TIME_NONE);
3057 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
3058 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
3059 dec->priv->processed, dec->priv->dropped);
3060 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
3064 * gst_video_decoder_drop_frame:
3065 * @dec: a #GstVideoDecoder
3066 * @frame: (transfer full): the #GstVideoCodecFrame to drop
3068 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
3069 * case and posts a QoS message with the frame's details on the bus.
3070 * In any case, the frame is considered finished and released.
3072 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
/* Public API (gtk-doc above): drop @frame, posting a QoS drop message.
 * Takes ownership of @frame; the frame is fully released on return. */
3075 gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
3077 GST_LOG_OBJECT (dec, "drop frame %p", frame);
3079 GST_VIDEO_DECODER_STREAM_LOCK (dec);
/* run event/timestamp bookkeeping with dropping=TRUE */
3081 gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);
3083 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
3084 GST_TIME_ARGS (frame->pts));
3086 gst_video_decoder_post_qos_drop (dec, frame->pts);
3088 /* now free the frame */
3089 gst_video_decoder_release_frame (dec, frame);
3091 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
/* Default transform_meta implementation: a meta is copied to the output
 * buffer only if every tag on its API type is one of the video-safe tags
 * listed below (untagged metas are handled on elided lines). */
3097 gst_video_decoder_transform_meta_default (GstVideoDecoder *
3098 decoder, GstVideoCodecFrame * frame, GstMeta * meta)
3100 const GstMetaInfo *info = meta->info;
3101 const gchar *const *tags;
3102 const gchar *const supported_tags[] = {
3103 GST_META_TAG_VIDEO_STR,
3104 GST_META_TAG_VIDEO_ORIENTATION_STR,
3105 GST_META_TAG_VIDEO_SIZE_STR,
3109 tags = gst_meta_api_type_get_tags (info->api);
/* any tag outside the allow-list makes the meta non-copyable */
3115 if (!g_strv_contains (supported_tags, *tags))
3125 GstVideoDecoder *decoder;
3126 GstVideoCodecFrame *frame;
/* gst_buffer_foreach_meta callback used by finish_frame(): decides per-meta
 * whether to copy it from the input buffer onto frame->output_buffer.
 * Memory-specific metas are never offered to the subclass; otherwise the
 * subclass transform_meta vfunc decides, and accepted metas are copied via
 * the meta's transform_func.
 * Fix: the last line's "&copy_data" had been mangled into the mojibake
 * "©_data" (an "&copy" HTML-entity corruption), which does not compile;
 * restored the address-of the local GstMetaTransformCopy. */
3130 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
3132 CopyMetaData *data = user_data;
3133 GstVideoDecoder *decoder = data->decoder;
3134 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3135 GstVideoCodecFrame *frame = data->frame;
3136 const GstMetaInfo *info = (*meta)->info;
3137 gboolean do_copy = FALSE;
3139 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
3140 /* never call the transform_meta with memory specific metadata */
3141 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
3142 g_type_name (info->api));
3144 } else if (klass->transform_meta) {
3145 do_copy = klass->transform_meta (decoder, frame, *meta);
3146 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
3147 g_type_name (info->api), do_copy);
3150 /* we only copy metadata when the subclass implemented a transform_meta
3151 * function and when it returns %TRUE */
3152 if (do_copy && info->transform_func) {
3153 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
3154 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
3155 /* simply copy then */
3156 info->transform_func (frame->output_buffer, *meta, inbuf,
3157 _gst_meta_transform_copy, &copy_data);
3163 * gst_video_decoder_finish_frame:
3164 * @decoder: a #GstVideoDecoder
3165 * @frame: (transfer full): a decoded #GstVideoCodecFrame
3167 * @frame should have a valid decoded data buffer, whose metadata fields
3168 * are then appropriately set according to frame data and pushed downstream.
3169 * If no output data is provided, @frame is considered skipped.
3170 * In any case, the frame is considered finished and released.
3172 * After calling this function the output buffer of the frame is to be
3173 * considered read-only. This function will also change the metadata
3176 * Returns: a #GstFlowReturn resulting from sending data downstream
/* Public API (gtk-doc above): finish @frame — negotiate output caps if
 * needed, run the timestamp machinery, push pending tags, apply corruption
 * marking/discarding, copy permitted metas from the input buffer, stamp
 * the output buffer, and push (or queue, for reverse playback) downstream.
 * Takes ownership of @frame.  Called with the stream lock taken here.
 * Fix: corrected the debug-message typo "because not output was produced"
 * -> "because no output was produced"; no other byte changed.
 * NOTE(review): elided excerpt; some original lines are missing. */
3179 gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
3180 GstVideoCodecFrame * frame)
3182 GstFlowReturn ret = GST_FLOW_OK;
3183 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3184 GstVideoDecoderPrivate *priv = decoder->priv;
3185 GstBuffer *output_buffer;
3186 gboolean needs_reconfigure = FALSE;
3188 GST_LOG_OBJECT (decoder, "finish frame %p", frame);
3190 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* (re)negotiate if the output state changed or downstream asked for it */
3192 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3193 if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
3194 && needs_reconfigure))) {
3195 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
3196 gst_pad_mark_reconfigure (decoder->srcpad);
3197 if (GST_PAD_IS_FLUSHING (decoder->srcpad))
3198 ret = GST_FLOW_FLUSHING;
3200 ret = GST_FLOW_NOT_NEGOTIATED;
3205 gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
/* flush any tag update downstream before the buffer */
3208 if (priv->tags_changed) {
3209 GstEvent *tags_event;
3211 tags_event = gst_video_decoder_create_merged_tags_event (decoder);
3213 if (tags_event != NULL)
3214 gst_video_decoder_push_event (decoder, tags_event);
3216 priv->tags_changed = FALSE;
3219 /* no buffer data means this frame is skipped */
3220 if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
3221 GST_DEBUG_OBJECT (decoder,
3222 "skipping frame %" GST_TIME_FORMAT " because no output was produced",
3223 GST_TIME_ARGS (frame->pts));
3227 /* Mark output as corrupted if the subclass requested so and we're either
3228 * still before the sync point after the request, or we don't even know the
3229 * frame number of the sync point yet (it is 0) */
3230 GST_OBJECT_LOCK (decoder);
3231 if (frame->system_frame_number <= priv->request_sync_point_frame_number
3232 && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
3233 if (priv->request_sync_point_flags &
3234 GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
3235 GST_DEBUG_OBJECT (decoder,
3236 "marking frame %" GST_TIME_FORMAT
3237 " as corrupted because it is still before the sync point",
3238 GST_TIME_ARGS (frame->pts));
3239 GST_VIDEO_CODEC_FRAME_FLAG_SET (frame,
3240 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3243 /* Reset to -1 to mark it as unset now that we've reached the frame */
3244 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
3246 GST_OBJECT_UNLOCK (decoder);
/* optionally drop frames flagged corrupted (by us or the subclass) */
3248 if (priv->discard_corrupted_frames
3249 && (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3250 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
3251 || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
3252 GST_BUFFER_FLAG_CORRUPTED))) {
3253 GST_DEBUG_OBJECT (decoder,
3254 "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
3255 GST_TIME_ARGS (frame->pts));
3259 /* We need a writable buffer for the metadata changes below */
3260 output_buffer = frame->output_buffer =
3261 gst_buffer_make_writable (frame->output_buffer);
3263 GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
3265 GST_BUFFER_PTS (output_buffer) = frame->pts;
3266 GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
3267 GST_BUFFER_DURATION (output_buffer) = frame->duration;
3269 GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
3270 GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
3272 if (priv->discont) {
3273 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
3276 if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3277 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
3278 GST_DEBUG_OBJECT (decoder,
3279 "marking frame %" GST_TIME_FORMAT " as corrupted",
3280 GST_TIME_ARGS (frame->pts));
3281 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
3284 if (decoder_class->transform_meta) {
3285 if (G_LIKELY (frame->input_buffer)) {
3288 data.decoder = decoder;
3290 gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
3292 GST_WARNING_OBJECT (decoder,
3293 "Can't copy metadata because input frame disappeared");
3297 /* Get an additional ref to the buffer, which is going to be pushed
3298 * downstream, the original ref is owned by the frame
3300 output_buffer = gst_buffer_ref (output_buffer);
3302 /* Release frame so the buffer is writable when we push it downstream
3303 * if possible, i.e. if the subclass does not hold additional references
3306 gst_video_decoder_release_frame (decoder, frame);
/* reverse playback (without keyframe trickmode) queues for later pushing */
3309 if (decoder->output_segment.rate < 0.0
3310 && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
3311 GST_LOG_OBJECT (decoder, "queued frame");
3312 priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
3314 ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
/* common exit: frame is always released before unlocking */
3319 gst_video_decoder_release_frame (decoder, frame);
3320 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* With stream lock, takes the frame reference.
 *
 * Clips @buf against the output segment, applies QoS-based dropping,
 * updates bitrate bookkeeping and pushes the buffer downstream.
 * The stream lock is temporarily released around gst_pad_push() so
 * upstream is not blocked while downstream processes the buffer. */
static GstFlowReturn
gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
{
  GstFlowReturn ret = GST_FLOW_OK;
  GstVideoDecoderPrivate *priv = decoder->priv;
  guint64 start, stop;
  guint64 cstart, cstop;
  GstSegment *segment;
  GstClockTime duration;

  /* Check for clipping */
  start = GST_BUFFER_PTS (buf);
  duration = GST_BUFFER_DURATION (buf);

  /* store that we have valid decoded data */
  priv->had_output_data = TRUE;

  stop = GST_CLOCK_TIME_NONE;

  if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
    stop = start + duration;
  } else if (GST_CLOCK_TIME_IS_VALID (start)
      && !GST_CLOCK_TIME_IS_VALID (duration)) {
    /* If we don't clip away buffers that far before the segment we
     * can cause the pipeline to lockup. This can happen if audio is
     * properly clipped, and thus the audio sink does not preroll yet
     * but the video sink prerolls because we already outputted a
     * buffer here... and then queues run full.
     *
     * In the worst case we will clip one buffer too many here now if no
     * framerate is given, no buffer duration is given and the actual
     * framerate is lower than 25fps */
    stop = start + 40 * GST_MSECOND;
  }

  segment = &decoder->output_segment;
  if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
    /* buffer (partially) inside segment: adjust timestamp/duration to
     * the clipped values */
    GST_BUFFER_PTS (buf) = cstart;

    if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
      GST_BUFFER_DURATION (buf) = cstop - cstart;

    GST_LOG_OBJECT (decoder,
        "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
        GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (cstart),
        GST_TIME_ARGS (cstop),
        GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
        GST_TIME_ARGS (segment->time));
  } else {
    GST_LOG_OBJECT (decoder,
        "dropping buffer outside segment: %" GST_TIME_FORMAT
        " %" GST_TIME_FORMAT
        " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
        " time %" GST_TIME_FORMAT,
        GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
        GST_TIME_ARGS (segment->start),
        GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
    /* only check and return EOS if upstream still
     * in the same segment and interested as such */
    if (decoder->priv->in_out_segment_sync) {
      if (segment->rate >= 0) {
        if (GST_BUFFER_PTS (buf) >= segment->stop)
          ret = GST_FLOW_EOS;
      } else if (GST_BUFFER_PTS (buf) < segment->start) {
        ret = GST_FLOW_EOS;
      }
    }
    gst_buffer_unref (buf);
    goto done;
  }

  /* Is buffer too late (QoS) ? */
  if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
      && GST_CLOCK_TIME_IS_VALID (cstart)) {
    GstClockTime deadline =
        gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
    if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
      GST_WARNING_OBJECT (decoder,
          "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
          GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
          GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
          GST_TIME_ARGS (priv->earliest_time));
      gst_video_decoder_post_qos_drop (decoder, cstart);
      gst_buffer_unref (buf);
      /* next pushed buffer must signal the discontinuity */
      priv->discont = TRUE;
      goto done;
    }
  }

  /* Set DISCONT flag here ! */

  if (priv->discont) {
    GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* update rate estimate */
  GST_OBJECT_LOCK (decoder);
  priv->bytes_out += gst_buffer_get_size (buf);
  if (GST_CLOCK_TIME_IS_VALID (duration)) {
    priv->time += duration;
  } else {
    /* FIXME : Use difference between current and previous outgoing
     * timestamp, and relate to difference between current and previous
     * bytes */
    /* better none than nothing valid */
    priv->time = GST_CLOCK_TIME_NONE;
  }
  GST_OBJECT_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
      "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
      gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* we got data, so note things are looking up again, reduce
   * the error count, if there is one */
  if (G_UNLIKELY (priv->error_count))
    priv->error_count = 0;

#ifndef GST_DISABLE_DEBUG
  if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
    GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;

    /* First buffer since reset, report how long we took */
    GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
        " to produce", GST_TIME_ARGS (elapsed));
    priv->last_reset_time = GST_CLOCK_TIME_NONE;
  }
#endif

  /* release STREAM_LOCK not to block upstream
   * while pushing buffer downstream */
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  ret = gst_pad_push (decoder->srcpad, buf);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

done:
  return ret;
}
/**
 * gst_video_decoder_add_to_frame:
 * @decoder: a #GstVideoDecoder
 * @n_bytes: the number of bytes to add
 *
 * Removes next @n_bytes of input data and adds it to currently parsed frame.
 */
void
gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buf;

  GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);

  /* nothing to transfer for empty (or bogus negative) requests */
  if (n_bytes <= 0)
    return;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (gst_adapter_available (priv->output_adapter) == 0) {
    /* starting a new frame: record its byte offset in the input stream.
     * input_offset is the end offset of all data received so far, so
     * subtracting what still sits in the input adapter gives the start
     * of the data being moved now. */
    priv->frame_offset =
        priv->input_offset - gst_adapter_available (priv->input_adapter);
  }

  /* move the bytes from the input adapter into the output adapter,
   * which accumulates the current frame */
  buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);

  gst_adapter_push (priv->output_adapter, buf);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
3500 * gst_video_decoder_get_pending_frame_size:
3501 * @decoder: a #GstVideoDecoder
3503 * Returns the number of bytes previously added to the current frame
3504 * by calling gst_video_decoder_add_to_frame().
3506 * Returns: The number of bytes pending for the current frame
3511 gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
3513 GstVideoDecoderPrivate *priv = decoder->priv;
3516 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3517 ret = gst_adapter_available (priv->output_adapter);
3518 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3520 GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT "bytes",
/* Computes the nominal duration of one frame from the output state's
 * framerate (GST_SECOND * fps_d / fps_n). Returns GST_CLOCK_TIME_NONE
 * when no output state exists yet or the framerate is unknown/invalid.
 * NOTE: @frame itself is currently not consulted (see FIXME below). */
static GstClockTime
gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoCodecState *state = decoder->priv->output_state;

  /* it's possible that we don't have a state yet when we are dropping the
   * initial buffers */
  if (state == NULL)
    return GST_CLOCK_TIME_NONE;

  if (state->info.fps_d == 0 || state->info.fps_n == 0) {
    return GST_CLOCK_TIME_NONE;
  }

  /* FIXME: For interlaced frames this needs to take into account
   * the number of valid fields in the frame
   */

  return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
      state->info.fps_n);
}
/**
 * gst_video_decoder_have_frame:
 * @decoder: a #GstVideoDecoder
 *
 * Gathers all data collected for currently parsed frame, gathers corresponding
 * metadata and passes it along for further processing, i.e. @handle_frame.
 *
 * Returns: a #GstFlowReturn
 */
GstFlowReturn
gst_video_decoder_have_frame (GstVideoDecoder * decoder)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstBuffer *buffer;
  int n_available;
  GstClockTime pts, dts, duration;
  guint flags;
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
      priv->frame_offset);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  /* drain everything accumulated via gst_video_decoder_add_to_frame();
   * an empty buffer is still produced so the frame always has an
   * input_buffer to carry timestamps/flags */
  n_available = gst_adapter_available (priv->output_adapter);
  if (n_available) {
    buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
  } else {
    buffer = gst_buffer_new_and_alloc (0);
  }

  priv->current_frame->input_buffer = buffer;

  /* recover the timestamps/flags of the input buffer that contained the
   * start of this frame */
  gst_video_decoder_get_buffer_info_at_offset (decoder,
      priv->frame_offset, &pts, &dts, &duration, &flags);

  GST_BUFFER_PTS (buffer) = pts;
  GST_BUFFER_DTS (buffer) = dts;
  GST_BUFFER_DURATION (buffer) = duration;
  GST_BUFFER_FLAGS (buffer) = flags;

  GST_LOG_OBJECT (decoder, "collected frame size %d, "
      "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
      GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
      GST_TIME_ARGS (duration));

  if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
    GST_DEBUG_OBJECT (decoder, "Marking as sync point");
    GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
  }

  if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
    GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
    GST_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
        GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
  }

  /* In reverse playback, just capture and queue frames for later processing */
  if (decoder->input_segment.rate < 0.0) {
    priv->parse_gather =
        g_list_prepend (priv->parse_gather, priv->current_frame);
  } else {
    /* Otherwise, decode the frame, which gives away our ref */
    ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
  }
  /* Current frame is gone now, either way */
  priv->current_frame = NULL;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}
/* Pass the frame in priv->current_frame through the
 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
 * or dropping by passing to gvd_drop_frame() */
static GstFlowReturn
gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoDecoderClass *decoder_class;
  GstFlowReturn ret = GST_FLOW_OK;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  /* FIXME : This should only have to be checked once (either the subclass has an
   * implementation, or it doesn't) */
  g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);

  /* seed the frame's timing from its gathered input buffer */
  frame->pts = GST_BUFFER_PTS (frame->input_buffer);
  frame->dts = GST_BUFFER_DTS (frame->input_buffer);
  frame->duration = GST_BUFFER_DURATION (frame->input_buffer);

  /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
   * durations. */
  /* FIXME upstream can be quite wrong about the keyframe aspect,
   * so we could be going off here as well,
   * maybe let subclass decide if it really is/was a keyframe */
  if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
    priv->distance_from_sync = 0;

    GST_OBJECT_LOCK (decoder);
    /* a pending request for a sync point is satisfied by this frame */
    priv->request_sync_point_flags &=
        ~GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
    if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
      priv->request_sync_point_frame_number = frame->system_frame_number;
    GST_OBJECT_UNLOCK (decoder);

    if (GST_CLOCK_TIME_IS_VALID (frame->pts)
        && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
      /* just in case they are not equal as might ideally be,
       * e.g. quicktime has a (positive) delta approach */
      priv->pts_delta = frame->pts - frame->dts;
      GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
          (gint) (priv->pts_delta / GST_MSECOND));
    }
  } else {
    GST_OBJECT_LOCK (decoder);
    if ((priv->needs_sync_point && priv->distance_from_sync == -1)
        || (priv->request_sync_point_flags &
            GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
      GST_WARNING_OBJECT (decoder,
          "Subclass requires a sync point but we didn't receive one yet, discarding input");
      GST_OBJECT_UNLOCK (decoder);
      /* drop this non-keyframe input entirely; not an error */
      gst_video_decoder_release_frame (decoder, frame);
      return GST_FLOW_OK;
    }
    GST_OBJECT_UNLOCK (decoder);

    priv->distance_from_sync++;
  }

  frame->distance_from_sync = priv->distance_from_sync;

  /* stash original timestamps for later reconstruction (ABI fields) */
  frame->abidata.ABI.ts = frame->dts;
  frame->abidata.ABI.ts2 = frame->pts;

  GST_LOG_OBJECT (decoder, "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT
      ", dist %d", GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
      frame->distance_from_sync);

  /* the pending-frames queue keeps its own reference */
  g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));

  if (priv->frames.length > 10) {
    GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
        "possible internal leaking?", priv->frames.length);
  }

  /* running-time deadline used later for QoS decisions */
  frame->deadline =
      gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
      frame->pts);

  /* do something with frame */
  ret = decoder_class->handle_frame (decoder, frame);
  if (ret != GST_FLOW_OK)
    GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));

  /* the frame has either been added to parse_gather or sent to
     handle frame so there is no need to unref it */
  return ret;
}
3714 * gst_video_decoder_get_output_state:
3715 * @decoder: a #GstVideoDecoder
3717 * Get the #GstVideoCodecState currently describing the output stream.
3719 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
3721 GstVideoCodecState *
3722 gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
3724 GstVideoCodecState *state = NULL;
3726 GST_OBJECT_LOCK (decoder);
3727 if (decoder->priv->output_state)
3728 state = gst_video_codec_state_ref (decoder->priv->output_state);
3729 GST_OBJECT_UNLOCK (decoder);
/* Shared implementation behind gst_video_decoder_set_output_state() and
 * gst_video_decoder_set_interlaced_output_state(): builds a new output
 * state, installs it as priv->output_state and refreshes the cached QoS
 * frame duration. Returns (transfer full) the new state, or NULL when
 * _new_output_state() fails. */
static GstVideoCodecState *
_set_interlaced_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
    guint height, GstVideoCodecState * reference, gboolean copy_interlace_mode)
{
  GstVideoDecoderPrivate *priv = decoder->priv;
  GstVideoCodecState *state;

  /* callers that ask to copy the interlace mode from @reference must pass
   * PROGRESSIVE as the (ignored) explicit mode */
  g_assert ((copy_interlace_mode
          && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
      || !copy_interlace_mode);

  GST_DEBUG_OBJECT (decoder,
      "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
      width, height, gst_video_interlace_mode_to_string (interlace_mode),
      reference);

  /* Create the new output state */
  state =
      _new_output_state (fmt, interlace_mode, width, height, reference,
      copy_interlace_mode);
  if (state == NULL)
    return NULL;

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  GST_OBJECT_LOCK (decoder);
  /* Replace existing output state by new one */
  if (priv->output_state)
    gst_video_codec_state_unref (priv->output_state);
  priv->output_state = gst_video_codec_state_ref (state);

  /* cache one frame's duration for QoS computations */
  if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
    priv->qos_frame_duration =
        gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
        priv->output_state->info.fps_n);
  } else {
    priv->qos_frame_duration = 0;
  }
  priv->output_state_changed = TRUE;
  GST_OBJECT_UNLOCK (decoder);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return state;
}
/**
 * gst_video_decoder_set_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
 * as the output state for the decoder.
 * Any previously set output state on @decoder will be replaced by the newly
 * created one.
 *
 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
 * reference.
 *
 * If the subclass wishes to override some fields from the output state (like
 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
 *
 * The new output state will only take effect (set on pads and buffers) starting
 * from the next call to #gst_video_decoder_finish_frame().
 *
 * Returns: (transfer full): the newly configured output state.
 */
GstVideoCodecState *
gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, guint width, guint height,
    GstVideoCodecState * reference)
{
  /* delegate to the shared helper; interlace mode is copied from
   * @reference (or defaults to progressive) */
  return _set_interlaced_output_state (decoder, fmt,
      GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
}
/**
 * gst_video_decoder_set_interlaced_output_state:
 * @decoder: a #GstVideoDecoder
 * @fmt: a #GstVideoFormat
 * @width: The width in pixels
 * @height: The height in pixels
 * @interlace_mode: A #GstVideoInterlaceMode
 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
 *
 * Same as #gst_video_decoder_set_output_state() but also allows you to also set
 * the interlacing mode.
 *
 * Returns: (transfer full): the newly configured output state.
 *
 * Since: 1.16
 */
GstVideoCodecState *
gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
    GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
    guint height, GstVideoCodecState * reference)
{
  /* delegate to the shared helper with an explicit interlace mode
   * (not copied from @reference) */
  return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
      height, reference, FALSE);
}
3842 * gst_video_decoder_get_oldest_frame:
3843 * @decoder: a #GstVideoDecoder
3845 * Get the oldest pending unfinished #GstVideoCodecFrame
3847 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
3849 GstVideoCodecFrame *
3850 gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
3852 GstVideoCodecFrame *frame = NULL;
3854 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3855 if (decoder->priv->frames.head)
3856 frame = gst_video_codec_frame_ref (decoder->priv->frames.head->data);
3857 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3859 return (GstVideoCodecFrame *) frame;
/**
 * gst_video_decoder_get_frame:
 * @decoder: a #GstVideoDecoder
 * @frame_number: system_frame_number of a frame
 *
 * Get a pending unfinished #GstVideoCodecFrame
 *
 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
 */
GstVideoCodecFrame *
gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
{
  GList *g;
  GstVideoCodecFrame *frame = NULL;

  GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);

  /* linear scan of the pending-frames queue; a match returns a new
   * reference owned by the caller */
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  for (g = decoder->priv->frames.head; g; g = g->next) {
    GstVideoCodecFrame *tmp = g->data;

    if (tmp->system_frame_number == frame_number) {
      frame = gst_video_codec_frame_ref (tmp);
      break;
    }
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frame;
}
/**
 * gst_video_decoder_get_frames:
 * @decoder: a #GstVideoDecoder
 *
 * Get all pending unfinished #GstVideoCodecFrame
 *
 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
 */
GList *
gst_video_decoder_get_frames (GstVideoDecoder * decoder)
{
  GList *frames;

  /* deep-copy the queue, taking a reference on every frame so the
   * returned list is safe to use after the lock is dropped */
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  frames =
      g_list_copy_deep (decoder->priv->frames.head,
      (GCopyFunc) gst_video_codec_frame_ref, NULL);
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return frames;
}
/* Default decide_allocation implementation: reuse the downstream-proposed
 * allocator and pool when present, otherwise create a GstVideoBufferPool,
 * then configure it for the negotiated caps/size and write the result back
 * into @query. Returns FALSE (with an element error posted) if no working
 * pool configuration can be established. */
static gboolean
gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  GstCaps *outcaps = NULL;
  GstBufferPool *pool = NULL;
  guint size, min, max;
  GstAllocator *allocator = NULL;
  GstAllocationParams params;
  GstStructure *config;
  gboolean update_pool, update_allocator;
  GstVideoInfo vinfo;

  gst_query_parse_allocation (query, &outcaps, NULL);
  gst_video_info_init (&vinfo);
  if (outcaps)
    gst_video_info_from_caps (&vinfo, outcaps);

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    /* try the allocator */
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  if (gst_query_get_n_allocation_pools (query) > 0) {
    gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
    /* never use a buffer size smaller than what the video frame needs */
    size = MAX (size, vinfo.size);
    update_pool = TRUE;
  } else {
    pool = NULL;
    size = vinfo.size;
    min = max = 0;

    update_pool = FALSE;
  }

  if (pool == NULL) {
    /* no pool, we can make our own */
    GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
    pool = gst_video_buffer_pool_new ();
  }

  /* now configure */
  config = gst_buffer_pool_get_config (pool);
  gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
  gst_buffer_pool_config_set_allocator (config, allocator, &params);

  GST_DEBUG_OBJECT (decoder,
      "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
      pool);
  if (!gst_buffer_pool_set_config (pool, config)) {
    config = gst_buffer_pool_get_config (pool);

    /* If change are not acceptable, fallback to generic pool */
    if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
            max)) {
      GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");

      gst_object_unref (pool);
      pool = gst_video_buffer_pool_new ();
      gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
      gst_buffer_pool_config_set_allocator (config, allocator, &params);
    }

    if (!gst_buffer_pool_set_config (pool, config))
      goto config_failed;
  }

  /* write the decided allocator/pool back into the query, either
   * replacing the downstream proposal or adding a fresh entry */
  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);
  if (allocator)
    gst_object_unref (allocator);

  if (update_pool)
    gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
  else
    gst_query_add_allocation_pool (query, pool, size, min, max);

  if (pool)
    gst_object_unref (pool);

  return TRUE;

config_failed:
  if (allocator)
    gst_object_unref (allocator);
  if (pool)
    gst_object_unref (pool);
  GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
      ("Failed to configure the buffer pool"),
      ("Configuration is most likely invalid, please report this issue."));
  return FALSE;
}
/* Default propose_allocation: the base class has nothing to offer on the
 * sink side, so simply accept the query unchanged. */
static gboolean
gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
    GstQuery * query)
{
  return TRUE;
}
/* Runs the ALLOCATION query for @caps against downstream, lets the
 * subclass decide the final allocator/pool via decide_allocation, then
 * installs and activates the resulting pool and allocator in priv.
 * Returns FALSE when no usable allocation could be decided. */
static gboolean
gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
{
  GstVideoDecoderClass *klass;
  GstQuery *query = NULL;
  GstBufferPool *pool = NULL;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean ret = TRUE;

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  query = gst_query_new_allocation (caps, TRUE);

  GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");

  /* a failed peer query is not fatal; decide_allocation can still fill
   * in defaults */
  if (!gst_pad_peer_query (decoder->srcpad, query)) {
    GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
  }

  g_assert (klass->decide_allocation != NULL);
  ret = klass->decide_allocation (decoder, query);

  GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
      query);

  if (!ret)
    goto no_decide_allocation;

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
  }

  if (gst_query_get_n_allocation_pools (query) > 0)
    gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
  if (!pool) {
    if (allocator)
      gst_object_unref (allocator);
    ret = FALSE;
    goto no_decide_allocation;
  }

  /* install the new allocator, releasing the previous one */
  if (decoder->priv->allocator)
    gst_object_unref (decoder->priv->allocator);
  decoder->priv->allocator = allocator;
  decoder->priv->params = params;

  if (decoder->priv->pool) {
    /* do not set the bufferpool to inactive here, it will be done
     * on its finalize function. As videodecoder do late renegotiation
     * it might happen that some element downstream is already using this
     * same bufferpool and deactivating it will make it fail.
     * Happens when a downstream element changes from passthrough to
     * non-passthrough and gets this same bufferpool to use */
    GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
        decoder->priv->pool);
    gst_object_unref (decoder->priv->pool);
  }
  decoder->priv->pool = pool;

  /* and activate */
  GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
  gst_buffer_pool_set_active (pool, TRUE);

done:
  if (query)
    gst_query_unref (query);

  return ret;

  /* Errors */
no_decide_allocation:
  {
    GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
    goto done;
  }
}
/* Default negotiate implementation: builds src caps from the output state
 * (carrying over upstream HDR-related caps fields), flushes pending
 * pre-caps serialized events, sets the caps on the src pad if they changed,
 * and finally negotiates the buffer pool. Called with the stream lock. */
static gboolean
gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
{
  GstVideoCodecState *state = decoder->priv->output_state;
  gboolean ret = TRUE;
  GstVideoCodecFrame *frame;
  GstCaps *prevcaps;
  GstCaps *incaps;

  if (!state) {
    GST_DEBUG_OBJECT (decoder,
        "Trying to negotiate the pool with out setting the o/p format");
    ret = gst_video_decoder_negotiate_pool (decoder, NULL);
    goto done;
  }

  g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
  g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);

  /* If the base class didn't set any multiview params, assume mono
   * now */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
      GST_VIDEO_MULTIVIEW_MODE_NONE) {
    GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
        GST_VIDEO_MULTIVIEW_MODE_MONO;
    GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
        GST_VIDEO_MULTIVIEW_FLAGS_NONE;
  }

  GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);

  if (state->caps == NULL)
    state->caps = gst_video_info_to_caps (&state->info);

  /* carry HDR metadata fields over from the sink caps when present,
   * preferring upstream information over whatever the subclass set */
  incaps = gst_pad_get_current_caps (GST_VIDEO_DECODER_SINK_PAD (decoder));
  if (incaps) {
    GstStructure *in_struct;

    in_struct = gst_caps_get_structure (incaps, 0);
    if (gst_structure_has_field (in_struct, "mastering-display-info") ||
        gst_structure_has_field (in_struct, "content-light-level")) {
      const gchar *s;

      /* prefer upstream information */
      state->caps = gst_caps_make_writable (state->caps);
      if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
        gst_caps_set_simple (state->caps,
            "mastering-display-info", G_TYPE_STRING, s, NULL);
      }

      if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
        gst_caps_set_simple (state->caps,
            "content-light-level", G_TYPE_STRING, s, NULL);
      }
    }

    if (gst_structure_has_field (in_struct, "hdr-format")) {
      const gchar *s;

      state->caps = gst_caps_make_writable (state->caps);
      if ((s = gst_structure_get_string (in_struct, "hdr-format"))) {
        gst_caps_set_simple (state->caps, "hdr-format", G_TYPE_STRING, s, NULL);
      }
    }

    gst_caps_unref (incaps);
  }

  if (state->allocation_caps == NULL)
    state->allocation_caps = gst_caps_ref (state->caps);

  GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);

  /* Push all pending pre-caps events of the oldest frame before
   * setting caps */
  frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
  if (frame || decoder->priv->current_frame_events) {
    GList **events, *l;

    if (frame) {
      events = &frame->events;
    } else {
      events = &decoder->priv->current_frame_events;
    }

    /* events are stored newest-first; walk from the tail to push them
     * in original order, removing the ones that must precede CAPS */
    for (l = g_list_last (*events); l;) {
      GstEvent *event = GST_EVENT (l->data);
      GList *tmp;

      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
        gst_video_decoder_push_event (decoder, event);
        tmp = l;
        l = l->prev;
        *events = g_list_delete_link (*events, tmp);
      } else {
        l = l->prev;
      }
    }
  }

  prevcaps = gst_pad_get_current_caps (decoder->srcpad);
  if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
    if (!prevcaps) {
      GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
    }
    ret = gst_pad_set_caps (decoder->srcpad, state->caps);
  } else {
    ret = TRUE;
    GST_DEBUG_OBJECT (decoder,
        "current src pad and output state caps are the same");
  }
  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (!ret)
    goto done;
  decoder->priv->output_state_changed = FALSE;
  /* Negotiate pool */
  ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);

done:
  return ret;
}
4233 gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
4235 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4236 gboolean ret = TRUE;
4238 if (G_LIKELY (klass->negotiate))
4239 ret = klass->negotiate (decoder);
/**
 * gst_video_decoder_negotiate:
 * @decoder: a #GstVideoDecoder
 *
 * Negotiate with downstream elements to currently configured #GstVideoCodecState.
 * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
 * negotiate fails.
 *
 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
 */
gboolean
gst_video_decoder_negotiate (GstVideoDecoder * decoder)
{
  GstVideoDecoderClass *klass;
  gboolean ret = TRUE;

  g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);

  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  /* clear the need-reconfigure flag up front; re-set it on failure so a
   * later attempt will renegotiate */
  gst_pad_check_reconfigure (decoder->srcpad);
  if (klass->negotiate) {
    ret = klass->negotiate (decoder);
    if (!ret)
      gst_pad_mark_reconfigure (decoder->srcpad);
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return ret;
}
/**
 * gst_video_decoder_allocate_output_buffer:
 * @decoder: a #GstVideoDecoder
 *
 * Helper function that allocates a buffer to hold a video frame for @decoder's
 * current #GstVideoCodecState.
 *
 * You should use gst_video_decoder_allocate_output_frame() instead of this
 * function, if possible at all.
 *
 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
 *     allocated (e.g. when downstream is flushing or shutting down)
 */
GstBuffer *
gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
{
  GstFlowReturn flow;
  GstBuffer *buffer = NULL;
  gboolean needs_reconfigure = FALSE;

  GST_DEBUG ("alloc src buffer");

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  /* renegotiate first when the output state changed or the srcpad was
   * marked for reconfiguration */
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (!decoder->priv->output_state
          || decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      if (decoder->priv->output_state) {
        /* we still know the frame size, so a pool-less buffer can be
         * handed out while reconfiguration is retried later */
        GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
        gst_pad_mark_reconfigure (decoder->srcpad);
        goto fallback;
      } else {
        GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
        goto failed_allocation;
      }
    }
  }

  flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);

  if (flow != GST_FLOW_OK) {
    GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
        gst_flow_get_name (flow));
    if (decoder->priv->output_state && decoder->priv->output_state->info.size)
      goto fallback;
    else
      goto failed_allocation;
  }
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;

fallback:
  GST_INFO_OBJECT (decoder,
      "Fallback allocation, creating new buffer which doesn't belongs to any buffer pool");
  buffer =
      gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
      NULL);

failed_allocation:
  GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return buffer;
}
/**
 * gst_video_decoder_allocate_output_frame:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Helper function that allocates a buffer to hold a video frame for @decoder's
 * current #GstVideoCodecState. Subclass should already have configured video
 * state and set src pad caps.
 *
 * The buffer allocated here is owned by the frame and you should only
 * keep references to the frame, not the buffer.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
{
  /* simple wrapper: no special buffer-pool acquire params */
  return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
      NULL);
}
/**
 * gst_video_decoder_allocate_output_frame_with_params:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @params: a #GstBufferPoolAcquireParams
 *
 * Same as #gst_video_decoder_allocate_output_frame except it allows passing
 * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
 *
 * Returns: %GST_FLOW_OK if an output buffer could be allocated
 *
 * Since: 1.12
 */
GstFlowReturn
gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
{
  GstFlowReturn flow_ret;
  GstVideoCodecState *state;
  int num_bytes;
  gboolean needs_reconfigure = FALSE;

  g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
  g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  state = decoder->priv->output_state;
  if (state == NULL) {
    g_warning ("Output state should be set before allocating frame");
    goto error;
  }
  num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
  if (num_bytes == 0) {
    g_warning ("Frame size should not be 0");
    goto error;
  }

  /* renegotiate if the output state changed or reconfiguration was
   * requested on the srcpad */
  needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
  if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
    if (!gst_video_decoder_negotiate_unlocked (decoder)) {
      gst_pad_mark_reconfigure (decoder->srcpad);
      if (GST_PAD_IS_FLUSHING (decoder->srcpad)) {
        GST_DEBUG_OBJECT (decoder,
            "Failed to negotiate a pool: pad is flushing");
        goto flushing;
      } else if (!decoder->priv->pool || decoder->priv->output_state_changed) {
        GST_DEBUG_OBJECT (decoder,
            "Failed to negotiate a pool and no previous pool to reuse");
        goto error;
      } else {
        /* keep using the previously negotiated pool */
        GST_DEBUG_OBJECT (decoder,
            "Failed to negotiate a pool, falling back to the previous pool");
      }
    }
  }

  GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);

  flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
      &frame->output_buffer, params);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  return flow_ret;

flushing:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_FLUSHING;

error:
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  return GST_FLOW_ERROR;
}
/**
 * gst_video_decoder_get_max_decode_time:
 * @decoder: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 *
 * Determines maximum possible decoding time for @frame that will
 * allow it to decode and arrive in time (as determined by QoS events).
 * In particular, a negative result means decoding in time is no longer
 * possible and should therefore happen as soon as possible, e.g. by
 * skipping inessential decoding work.
 *
 * Returns: max decoding time.
 */
gst_video_decoder_get_max_decode_time (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame)
  GstClockTimeDiff deadline;
  GstClockTime earliest_time;
  /* earliest_time is updated from QoS handling; read it under the object
   * lock for a consistent snapshot. */
  GST_OBJECT_LOCK (decoder);
  earliest_time = decoder->priv->earliest_time;
  if (GST_CLOCK_TIME_IS_VALID (earliest_time)
      && GST_CLOCK_TIME_IS_VALID (frame->deadline))
    /* Time remaining until the frame's running-time deadline. */
    deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
    /* NOTE(review): this fallback (no usable QoS info -> effectively
     * unlimited decode time) presumably sits in an "else" branch whose
     * keyword is not visible in this excerpt — confirm in the full file. */
    deadline = G_MAXINT64;
  GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
      ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
      GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
      GST_STIME_ARGS (deadline));
  GST_OBJECT_UNLOCK (decoder);
4477 * gst_video_decoder_get_qos_proportion:
4478 * @decoder: a #GstVideoDecoder
4479 * current QoS proportion, or %NULL
4481 * Returns: The current QoS proportion.
4486 gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
4490 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);
4492 GST_OBJECT_LOCK (decoder);
4493 proportion = decoder->priv->proportion;
4494 GST_OBJECT_UNLOCK (decoder);
/* Error-accounting helper (apparently the backing implementation of the
 * GST_VIDEO_DECODER_ERROR() macro, given the file/function/line parameters):
 * logs the error, counts it against the tolerated maximum, and escalates to
 * a fatal element error once priv->max_errors (when >= 0) is exceeded. */
_gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
    GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
    const gchar * function, gint line)
  /* NOTE(review): these two warnings are presumably guarded by
   * "if (txt)" / "if (dbg)" checks not visible in this excerpt — confirm. */
  GST_WARNING_OBJECT (dec, "error: %s", txt);
  GST_WARNING_OBJECT (dec, "error: %s", dbg);
  /* Each error contributes @weight to the running count; any error also
   * marks the stream discontinuous. */
  dec->priv->error_count += weight;
  dec->priv->discont = TRUE;
  /* max_errors < 0 means "never fatal". */
  if (dec->priv->max_errors >= 0 &&
      dec->priv->error_count > dec->priv->max_errors) {
    /* Too many errors: post a fatal ERROR message on the bus and abort
     * the flow. */
    gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
        domain, code, txt, dbg, file, function, line);
    return GST_FLOW_ERROR;
4523 * gst_video_decoder_set_max_errors:
4524 * @dec: a #GstVideoDecoder
4525 * @num: max tolerated errors
4527 * Sets numbers of tolerated decoder errors, where a tolerated one is then only
4528 * warned about, but more than tolerated will lead to fatal error. You can set
4529 * -1 for never returning fatal errors. Default is set to
4530 * GST_VIDEO_DECODER_MAX_ERRORS.
4532 * The '-1' option was added in 1.4
4535 gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
4537 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4539 dec->priv->max_errors = num;
4543 * gst_video_decoder_get_max_errors:
4544 * @dec: a #GstVideoDecoder
4546 * Returns: currently configured decoder tolerated error count.
4549 gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
4551 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4553 return dec->priv->max_errors;
4557 * gst_video_decoder_set_needs_format:
4558 * @dec: a #GstVideoDecoder
4559 * @enabled: new state
4561 * Configures decoder format needs. If enabled, subclass needs to be
4562 * negotiated with format caps before it can process any data. It will then
4563 * never be handed any data before it has been configured.
4564 * Otherwise, it might be handed data without having been configured and
4565 * is then expected being able to do so either by default
4566 * or based on the input data.
4571 gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
4573 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4575 dec->priv->needs_format = enabled;
4579 * gst_video_decoder_get_needs_format:
4580 * @dec: a #GstVideoDecoder
4582 * Queries decoder required format handling.
4584 * Returns: %TRUE if required format handling is enabled.
4589 gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
4593 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4595 result = dec->priv->needs_format;
4601 * gst_video_decoder_set_packetized:
4602 * @decoder: a #GstVideoDecoder
4603 * @packetized: whether the input data should be considered as packetized.
4605 * Allows baseclass to consider input data as packetized or not. If the
4606 * input is packetized, then the @parse method will not be called.
4609 gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
4610 gboolean packetized)
4612 decoder->priv->packetized = packetized;
4616 * gst_video_decoder_get_packetized:
4617 * @decoder: a #GstVideoDecoder
4619 * Queries whether input data is considered packetized or not by the
4622 * Returns: TRUE if input data is considered packetized.
4625 gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
4627 return decoder->priv->packetized;
4631 * gst_video_decoder_set_estimate_rate:
4632 * @dec: a #GstVideoDecoder
4633 * @enabled: whether to enable byte to time conversion
4635 * Allows baseclass to perform byte to time estimated conversion.
4638 gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
4640 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4642 dec->priv->do_estimate_rate = enabled;
4646 * gst_video_decoder_get_estimate_rate:
4647 * @dec: a #GstVideoDecoder
4649 * Returns: currently configured byte to time conversion setting
4652 gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
4654 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4656 return dec->priv->do_estimate_rate;
4660 * gst_video_decoder_set_latency:
4661 * @decoder: a #GstVideoDecoder
4662 * @min_latency: minimum latency
4663 * @max_latency: maximum latency
4665 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
4666 * latency is. Will also post a LATENCY message on the bus so the pipeline
4667 * can reconfigure its global latency.
4670 gst_video_decoder_set_latency (GstVideoDecoder * decoder,
4671 GstClockTime min_latency, GstClockTime max_latency)
4673 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
4674 g_return_if_fail (max_latency >= min_latency);
4676 GST_OBJECT_LOCK (decoder);
4677 decoder->priv->min_latency = min_latency;
4678 decoder->priv->max_latency = max_latency;
4679 GST_OBJECT_UNLOCK (decoder);
4681 gst_element_post_message (GST_ELEMENT_CAST (decoder),
4682 gst_message_new_latency (GST_OBJECT_CAST (decoder)));
4686 * gst_video_decoder_get_latency:
4687 * @decoder: a #GstVideoDecoder
4688 * @min_latency: (out) (allow-none): address of variable in which to store the
4689 * configured minimum latency, or %NULL
4690 * @max_latency: (out) (allow-none): address of variable in which to store the
4691 * configured mximum latency, or %NULL
4693 * Query the configured decoder latency. Results will be returned via
4694 * @min_latency and @max_latency.
4697 gst_video_decoder_get_latency (GstVideoDecoder * decoder,
4698 GstClockTime * min_latency, GstClockTime * max_latency)
4700 GST_OBJECT_LOCK (decoder);
4702 *min_latency = decoder->priv->min_latency;
4704 *max_latency = decoder->priv->max_latency;
4705 GST_OBJECT_UNLOCK (decoder);
/**
 * gst_video_decoder_merge_tags:
 * @decoder: a #GstVideoDecoder
 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
 *     previously-set tags
 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 *
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
 *
 * Note that this is provided for convenience, and the subclass is
 * not required to use this and can still do tag handling on its own.
 */
gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
    const GstTagList * tags, GstTagMergeMode mode)
  g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
  g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
  /* A merge mode only makes sense when there are tags to merge. */
  g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  if (decoder->priv->tags != tags) {
    /* Drop the previously stored list and reset the merge mode... */
    if (decoder->priv->tags) {
      gst_tag_list_unref (decoder->priv->tags);
      decoder->priv->tags = NULL;
      decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
    /* ...then keep a reference to the new list (when non-NULL) and remember
     * how it should be merged with upstream tags. */
    decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
    decoder->priv->tags_merge_mode = mode;
  /* Mark tags as changed; presumably consumed when the next tag event is
   * pushed downstream. */
  GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
  decoder->priv->tags_changed = TRUE;
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4751 * gst_video_decoder_get_buffer_pool:
4752 * @decoder: a #GstVideoDecoder
4754 * Returns: (transfer full): the instance of the #GstBufferPool used
4755 * by the decoder; free it after use it
4758 gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
4760 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);
4762 if (decoder->priv->pool)
4763 return gst_object_ref (decoder->priv->pool);
4769 * gst_video_decoder_get_allocator:
4770 * @decoder: a #GstVideoDecoder
4771 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
4773 * @params: (out) (allow-none) (transfer full): the
4774 * #GstAllocationParams of @allocator
4776 * Lets #GstVideoDecoder sub-classes to know the memory @allocator
4777 * used by the base class and its @params.
4779 * Unref the @allocator after use it.
4782 gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
4783 GstAllocator ** allocator, GstAllocationParams * params)
4785 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
4788 *allocator = decoder->priv->allocator ?
4789 gst_object_ref (decoder->priv->allocator) : NULL;
4792 *params = decoder->priv->params;
4796 * gst_video_decoder_set_use_default_pad_acceptcaps:
4797 * @decoder: a #GstVideoDecoder
4798 * @use: if the default pad accept-caps query handling should be used
4800 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
4801 * to use the default pad query handler to reply to accept-caps queries.
4803 * By setting this to true it is possible to further customize the default
4804 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
4805 * %GST_PAD_SET_ACCEPT_TEMPLATE
4810 gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
4813 decoder->priv->use_default_pad_acceptcaps = use;
/**
 * gst_video_decoder_request_sync_point:
 * @dec: a #GstVideoDecoder
 * @frame: a #GstVideoCodecFrame
 * @flags: #GstVideoDecoderRequestSyncPointFlags
 *
 * Allows the #GstVideoDecoder subclass to request from the base class that
 * a new sync should be requested from upstream, and that @frame was the frame
 * when the subclass noticed that a new sync point is required. A reason for
 * the subclass to do this could be missing reference frames, for example.
 *
 * The base class will then request a new sync point from upstream as long as
 * the time that passed since the last one is exceeding
 * #GstVideoDecoder:min-force-key-unit-interval.
 *
 * The subclass can signal via @flags how the frames until the next sync point
 * should be handled:
 *
 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
 *   all following input frames until the next sync point are discarded.
 *   This can be useful if the lack of a sync point will prevent all further
 *   decoding and the decoder implementation is not very robust in handling
 *   missing references frames.
 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
 *   then all output frames following @frame are marked as corrupted via
 *   %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
 *   dropped by the base class, see #GstVideoDecoder:discard-corrupted-frames.
 *   Subclasses can manually mark frames as corrupted via
 *   %GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED before calling
 *   gst_video_decoder_finish_frame().
 *
 * NOTE(review): a few statements (e.g. the "priv = dec->priv;" assignment,
 * the tail of the force-key-unit event construction and the "if (fku)"
 * guard before pushing it) are not visible in this excerpt; comments below
 * describe the visible flow only.
 */
gst_video_decoder_request_sync_point (GstVideoDecoder * dec,
    GstVideoCodecFrame * frame, GstVideoDecoderRequestSyncPointFlags flags)
  GstEvent *fku = NULL;
  GstVideoDecoderPrivate *priv;
  g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
  g_return_if_fail (frame != NULL);
  GST_OBJECT_LOCK (dec);
  /* Check if we're allowed to send a new force-keyunit event.
   * frame->deadline is set to the running time of the PTS. */
  if (priv->min_force_key_unit_interval == 0 ||
      frame->deadline == GST_CLOCK_TIME_NONE ||
      (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
          (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
              || (priv->last_force_key_unit_time +
                  priv->min_force_key_unit_interval >= frame->deadline)))) {
    GST_DEBUG_OBJECT (dec,
        "Requesting a new key-unit for frame with PTS %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->pts));
    /* Build the upstream force-key-unit event (sent after the lock is
     * released, below). */
        gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE, FALSE,
    /* Remember when we last asked, to rate-limit future requests. */
    priv->last_force_key_unit_time = frame->deadline;
    GST_DEBUG_OBJECT (dec,
        "Can't request a new key-unit for frame with PTS %" GST_TIME_FORMAT,
        GST_TIME_ARGS (frame->pts));
  /* Record how frames up to the next sync point should be treated. */
  priv->request_sync_point_flags |= flags;
  /* We don't know yet the frame number of the sync point so set it to a
   * frame number higher than any allowed frame number */
  priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
  GST_OBJECT_UNLOCK (dec);
    /* Push the event upstream outside the object lock. */
    gst_pad_push_event (dec->sinkpad, fku);
4893 * gst_video_decoder_set_needs_sync_point:
4894 * @dec: a #GstVideoDecoder
4895 * @enabled: new state
4897 * Configures whether the decoder requires a sync point before it starts
4898 * outputting data in the beginning. If enabled, the base class will discard
4899 * all non-sync point frames in the beginning and after a flush and does not
4900 * pass it to the subclass.
4902 * If the first frame is not a sync point, the base class will request a sync
4903 * point via the force-key-unit event.
4908 gst_video_decoder_set_needs_sync_point (GstVideoDecoder * dec, gboolean enabled)
4910 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4912 dec->priv->needs_sync_point = enabled;
4916 * gst_video_decoder_get_needs_sync_point:
4917 * @dec: a #GstVideoDecoder
4919 * Queries if the decoder requires a sync point before it starts outputting
4920 * data in the beginning.
4922 * Returns: %TRUE if a sync point is required in the beginning.
4927 gst_video_decoder_get_needs_sync_point (GstVideoDecoder * dec)
4931 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4933 result = dec->priv->needs_sync_point;