2 * Copyright (C) 2008 David Schleef <ds@schleef.org>
3 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
4 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
5 * Contact: Stefan Kost <stefan.kost@nokia.com>
6 * Copyright (C) 2012 Collabora Ltd.
7 * Author : Edward Hervey <edward@collabora.com>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:gstvideodecoder
27 * @title: GstVideoDecoder
28 * @short_description: Base class for video decoders
30 * This base class is for video decoders turning encoded data into raw video
33 * The GstVideoDecoder base class and derived subclasses should cooperate as
38 * * Initially, GstVideoDecoder calls @start when the decoder element
39 * is activated, which allows the subclass to perform any global setup.
41 * * GstVideoDecoder calls @set_format to inform the subclass of caps
42 * describing input video data that it is about to receive, including
43 * possibly configuration data.
44 * While unlikely, it might be called more than once, if changing input
45 * parameters require reconfiguration.
47 * * Incoming data buffers are processed as needed, described in Data
50 * * GstVideoDecoder calls @stop at end of all processing.
54 * * The base class gathers input data, and optionally allows subclass
55 * to parse this into subsequently manageable chunks, typically
56 * corresponding to and referred to as 'frames'.
58 * * Each input frame is provided in turn to the subclass' @handle_frame
60 * The ownership of the frame is given to the @handle_frame callback.
62 * * If codec processing results in decoded data, the subclass should call
 * @gst_video_decoder_finish_frame to have decoded data pushed
 * downstream. Otherwise, the subclass must call
65 * @gst_video_decoder_drop_frame, to allow the base class to do timestamp
66 * and offset tracking, and possibly to requeue the frame for a later
67 * attempt in the case of reverse playback.
71 * * The GstVideoDecoder class calls @stop to inform the subclass that data
72 * parsing will be stopped.
78 * * When the pipeline is seeked or otherwise flushed, the subclass is
79 * informed via a call to its @reset callback, with the hard parameter
80 * set to true. This indicates the subclass should drop any internal data
81 * queues and timestamps and prepare for a fresh set of buffers to arrive
82 * for parsing and decoding.
86 * * At end-of-stream, the subclass @parse function may be called some final
87 * times with the at_eos parameter set to true, indicating that the element
 * should not expect any more data to be arriving, and it should parse any
 * remaining frames and call gst_video_decoder_have_frame() if possible.
91 * The subclass is responsible for providing pad template caps for
92 * source and sink pads. The pads need to be named "sink" and "src". It also
93 * needs to provide information about the output caps, when they are known.
94 * This may be when the base class calls the subclass' @set_format function,
95 * though it might be during decoding, before calling
96 * @gst_video_decoder_finish_frame. This is done via
97 * @gst_video_decoder_set_output_state
99 * The subclass is also responsible for providing (presentation) timestamps
100 * (likely based on corresponding input ones). If that is not applicable
101 * or possible, the base class provides limited framerate based interpolation.
103 * Similarly, the base class provides some limited (legacy) seeking support
104 * if specifically requested by the subclass, as full-fledged support
105 * should rather be left to upstream demuxer, parser or alike. This simple
106 * approach caters for seeking and duration reporting using estimated input
107 * bitrates. To enable it, a subclass should call
108 * @gst_video_decoder_set_estimate_rate to enable handling of incoming
111 * The base class provides some support for reverse playback, in particular
112 * in case incoming data is not packetized or upstream does not provide
113 * fragments on keyframe boundaries. However, the subclass should then be
114 * prepared for the parsing and frame processing stage to occur separately
 * (in normal forward processing, the latter immediately follows the former).
 * The subclass also needs to ensure the parsing stage properly marks
117 * keyframes, unless it knows the upstream elements will do so properly for
120 * The bare minimum that a functional subclass needs to implement is:
122 * * Provide pad templates
123 * * Inform the base class of output caps via
124 * @gst_video_decoder_set_output_state
126 * * Parse input data, if it is not considered packetized from upstream
127 * Data will be provided to @parse which should invoke
128 * @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
129 * separate the data belonging to each video frame.
131 * * Accept data in @handle_frame and provide decoded results to
132 * @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
141 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
142 * features, like applying QoS on input (as opposed to after the frame is
144 * * Add a flag/boolean for decoders that require keyframes, so the base
145 * class can automatically discard non-keyframes before one has arrived
146 * * Detect reordered frame/timestamps and fix the pts/dts
147 * * Support for GstIndex (or shall we not care ?)
148 * * Calculate actual latency based on input/output timestamp/frame_number
149 * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
150 * * Emit latency message when it changes
154 /* Implementation notes:
155 * The Video Decoder base class operates in 2 primary processing modes, depending
156 * on whether forward or reverse playback is requested.
159 * * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
160 * handle_frame() -> push downstream
162 * Reverse playback is more complicated, since it involves gathering incoming
163 * data regions as we loop backwards through the upstream data. The processing
164 * concept (using incoming buffers as containing one frame each to simplify
167 * Upstream data we want to play:
168 * Buffer encoded order: 1 2 3 4 5 6 7 8 9 EOS
170 * Groupings: AAAAAAA BBBBBBB CCCCCCC
173 * Buffer reception order: 7 8 9 4 5 6 1 2 3 EOS
175 * Discont flag: D D D
177 * - Each Discont marks a discont in the decoding order.
178 * - The keyframes mark where we can start decoding.
180 * Initially, we prepend incoming buffers to the gather queue. Whenever the
181 * discont flag is set on an incoming buffer, the gather queue is flushed out
182 * before the new buffer is collected.
184 * The above data will be accumulated in the gather queue like this:
186 * gather queue: 9 8 7
189 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
193 * take head of queue and prepend to parse queue (this reverses the
194 * sequence, so parse queue is 7 -> 8 -> 9)
196 * Next, we process the parse queue, which now contains all un-parsed packets
197 * (including any leftover ones from the previous decode section)
199 * for each buffer now in the parse queue:
200 * Call the subclass parse function, prepending each resulting frame to
201 * the parse_gather queue. Buffers which precede the first one that
202 * produces a parsed frame are retained in the parse queue for
203 * re-processing on the next cycle of parsing.
205 * The parse_gather queue now contains frame objects ready for decoding,
207 * parse_gather: 9 -> 8 -> 7
209 * while (parse_gather)
210 * Take the head of the queue and prepend it to the decode queue
211 * If the frame was a keyframe, process the decode queue
212 * decode is now 7-8-9
214 * Processing the decode queue results in frames with attached output buffers
215 * stored in the 'output_queue' ready for outputting in reverse order.
217 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
218 * gather queue. We get the following situation:
221 * decode queue: 7 8 9
223 * After we received 5 (Keyframe) and 6:
225 * gather queue: 6 5 4
226 * decode queue: 7 8 9
228 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
230 * Copy head of the gather queue (6) to decode queue:
233 * decode queue: 6 7 8 9
235 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
236 * can start decoding.
239 * decode queue: 5 6 7 8 9
241 * Decode frames in decode queue, store raw decoded data in output queue, we
242 * can take the head of the decode queue and prepend the decoded result in the
247 * output queue: 9 8 7 6 5
249 * Now output all the frames in the output queue, picking a frame from the
252 * Copy head of the gather queue (4) to decode queue, we flushed the gather
253 * queue and can now store input buffer in the gather queue:
258 * When we receive EOS, the queue looks like:
260 * gather queue: 3 2 1
263 * Fill decode queue, first keyframe we copy is 2:
266 * decode queue: 2 3 4
272 * output queue: 4 3 2
274 * Leftover buffer 1 cannot be decoded and must be discarded.
277 #include "gstvideodecoder.h"
278 #include "gstvideoutils.h"
279 #include "gstvideoutilsprivate.h"
281 #include <gst/video/video.h>
282 #include <gst/video/video-event.h>
283 #include <gst/video/gstvideopool.h>
284 #include <gst/video/gstvideometa.h>
/* Debug category used by all GST_* logging macros in this file. */
GST_DEBUG_CATEGORY (videodecoder_debug);
#define GST_CAT_DEFAULT videodecoder_debug

/* Default values for the GObject properties installed in class_init. */
#define DEFAULT_QOS TRUE
#define DEFAULT_MAX_ERRORS GST_VIDEO_DECODER_MAX_ERRORS
#define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
#define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE
/* Used for request_sync_point_frame_number. These are out of range for the
 * frame numbers and can be given special meaning */
/* NOTE: the guint64 cast is essential.  A bare `G_MAXUINT + 1` is evaluated
 * in unsigned int arithmetic and wraps around to 0, which is a perfectly
 * valid frame number and therefore NOT "out of range".  Widening first gives
 * 0x100000000, which can never collide with a 32-bit frame number and stays
 * distinct from REQUEST_SYNC_POINT_UNSET.  The whole expansion is
 * parenthesized so the macro is safe inside any surrounding expression. */
#define REQUEST_SYNC_POINT_PENDING (((guint64) G_MAXUINT) + 1)
#define REQUEST_SYNC_POINT_UNSET G_MAXUINT64
  /* Property IDs (the leading PROP_ values of this enum are not visible in
   * this chunk). */
  PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
  PROP_DISCARD_CORRUPTED_FRAMES
/* Instance-private state of the base class.  NOTE(review): several fields and
 * the closing of this struct are not visible in this chunk. */
struct _GstVideoDecoderPrivate
  /* FIXME introduce a context ? */

  /* Negotiated allocator and params for output buffers. */
  GstAllocator *allocator;
  GstAllocationParams params;

  /* accumulates not-yet-parsed input data (presumably — confirm upstream) */
  GstAdapter *input_adapter;
  /* assembles current frame */
  GstAdapter *output_adapter;

  /* Whether we attempt to convert newsegment from bytes to
   * time using a bitrate estimation */
  gboolean do_estimate_rate;

  /* Whether input is considered packetized or not */

  gboolean had_output_data;
  gboolean had_input_data;

  gboolean needs_format;
  /* input_segment and output_segment identical */
  gboolean in_out_segment_sync;

  /* TRUE if we have an active set of instant rate flags */
  gboolean decode_flags_override;
  GstSegmentFlags decode_flags;

  /* ... being tracked here;
   * only available during parsing */
  GstVideoCodecFrame *current_frame;
  /* events that should apply to the current frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
  GList *current_frame_events;
  /* events that should be pushed before the next frame */
  /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
  GList *pending_events;

  /* relative offset of input data */
  guint64 input_offset;
  /* relative offset of frame */
  guint64 frame_offset;
  /* tracking ts and offsets */

  /* last outgoing ts */
  GstClockTime last_timestamp_out;
  /* incoming pts - dts */
  GstClockTime pts_delta;
  gboolean reordered_output;

  /* FIXME: Consider using a GQueue or other better fitting data structure */
  /* reverse playback */
  /* collected parsed frames */
  /* frames to be handled == decoded */
  /* collected output - of buffer objects, not frames */
  GList *output_queued;

  /* base_picture_number is the picture number of the reference picture */
  guint64 base_picture_number;
  /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
  GstClockTime base_timestamp;

  GstClockTime min_force_key_unit_interval;
  gboolean discard_corrupted_frames;

  /* Key unit related state */
  gboolean needs_sync_point;
  GstVideoDecoderRequestSyncPointFlags request_sync_point_flags;
  guint64 request_sync_point_frame_number;
  GstClockTime last_force_key_unit_time;
  /* -1 if we saw no sync point yet */
  guint64 distance_from_sync;

  /* monotonically increasing counters for incoming/decoded frames */
  guint32 system_frame_number;
  guint32 decode_frame_number;

  GQueue frames;                /* Protected with OBJECT_LOCK */
  GstVideoCodecState *input_state;
  GstVideoCodecState *output_state;     /* OBJECT_LOCK and STREAM_LOCK */
  gboolean output_state_changed;

  /* QoS state fed by downstream QOS events */
  gdouble proportion;           /* OBJECT_LOCK */
  GstClockTime earliest_time;   /* OBJECT_LOCK */
  GstClockTime qos_frame_duration;      /* OBJECT_LOCK */
  /* qos messages: frames dropped/processed */

  /* Outgoing byte size ? */

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  GstTagMergeMode tags_merge_mode;

  gboolean tags_changed;

  gboolean use_default_pad_acceptcaps;

#ifndef GST_DISABLE_DEBUG
  /* Diagnostic time for reporting the time
   * from flush to first output */
  GstClockTime last_reset_time;
/* Bookkeeping for the manual GType registration in get_type() below. */
static GstElementClass *parent_class = NULL;
static gint private_offset = 0;

/* cached quark to avoid contention on the global quark table lock */
#define META_TAG_VIDEO meta_tag_video_quark
static GQuark meta_tag_video_quark;

/* Forward declarations.  NOTE(review): the continuation lines of several
 * prototypes are not visible in this chunk. */
static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
static void gst_video_decoder_init (GstVideoDecoder * dec,
    GstVideoDecoderClass * klass);

static void gst_video_decoder_finalize (GObject * object);
static void gst_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec);
static void gst_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec);

static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
    element, GstStateChange transition);
static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
    gboolean flush_hard);

static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame);

static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame);
static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
    decoder, GstBuffer * buf);
static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,

static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);

static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
    decoder, GstQuery * query);
static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
    gboolean at_eos, gboolean new_buffer);
static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,

static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
    decoder, GstVideoCodecFrame * frame, GstMeta * meta);
/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
 * method to get to the padtemplates */
/* Thread-safe one-shot registration of the GstVideoDecoder abstract type via
 * g_once_init_enter/leave.  NOTE(review): return type, some GTypeInfo
 * initializers and the closing of this function are not visible here. */
gst_video_decoder_get_type (void)
  static volatile gsize type = 0;

  if (g_once_init_enter (&type)) {
    static const GTypeInfo info = {
      sizeof (GstVideoDecoderClass),
      (GClassInitFunc) gst_video_decoder_class_init,
      sizeof (GstVideoDecoder),
      (GInstanceInitFunc) gst_video_decoder_init,
    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);
    /* Reserve per-instance private storage; the returned offset is adjusted
     * later in class_init via g_type_class_adjust_private_offset(). */
    g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));
    g_once_init_leave (&type, _type);
/* Resolve the per-instance private struct from the instance pointer.
 * G_STRUCT_MEMBER_P performs the raw offset arithmetic; the gpointer result
 * converts implicitly to the declared return type. */
static inline GstVideoDecoderPrivate *
gst_video_decoder_get_instance_private (GstVideoDecoder * self)
  return (G_STRUCT_MEMBER_P (self, private_offset));
/* Class initializer: wires up GObject property handling, the element
 * state-change handler, the overridable class vfunc defaults, and installs
 * the public properties.  NOTE(review): some lines (braces, gtk-doc comment
 * delimiters) are not visible in this chunk. */
gst_video_decoder_class_init (GstVideoDecoderClass * klass)
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;

  gobject_class = G_OBJECT_CLASS (klass);
  gstelement_class = GST_ELEMENT_CLASS (klass);

  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
      "Base Video Decoder");

  parent_class = g_type_class_peek_parent (klass);

  /* Finish the private-struct registration started in get_type(). */
  if (private_offset != 0)
    g_type_class_adjust_private_offset (klass, &private_offset);

  gobject_class->finalize = gst_video_decoder_finalize;
  gobject_class->get_property = gst_video_decoder_get_property;
  gobject_class->set_property = gst_video_decoder_set_property;

  gstelement_class->change_state =
      GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);

  /* Default implementations that subclasses may override. */
  klass->sink_event = gst_video_decoder_sink_event_default;
  klass->src_event = gst_video_decoder_src_event_default;
  klass->decide_allocation = gst_video_decoder_decide_allocation_default;
  klass->propose_allocation = gst_video_decoder_propose_allocation_default;
  klass->negotiate = gst_video_decoder_negotiate_default;
  klass->sink_query = gst_video_decoder_sink_query_default;
  klass->src_query = gst_video_decoder_src_query_default;
  klass->transform_meta = gst_video_decoder_transform_meta_default;

   * GstVideoDecoder:qos:
   *
   * If set to %TRUE the decoder will handle QoS events received
   * from downstream elements.
   * This includes dropping output frames which are detected as late
   * using the metrics reported by those events.

  g_object_class_install_property (gobject_class, PROP_QOS,
      g_param_spec_boolean ("qos", "Quality of Service",
          "Handle Quality-of-Service events from downstream",
          DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

   * GstVideoDecoder:max-errors:
   *
   * Maximum number of tolerated consecutive decode errors. See
   * gst_video_decoder_set_max_errors() for more details.

  g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
      g_param_spec_int ("max-errors", "Max errors",
          "Max consecutive decoder errors before returning flow error",
          -1, G_MAXINT, DEFAULT_MAX_ERRORS,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

   * GstVideoDecoder:min-force-key-unit-interval:
   *
   * Minimum interval between force-key-unit events sent upstream by the
   * decoder. Setting this to 0 will cause every event to be handled, setting
   * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
   *
   * See gst_video_event_new_upstream_force_key_unit() for more details about
   * force-key-unit events.

  g_object_class_install_property (gobject_class,
      PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
      g_param_spec_uint64 ("min-force-key-unit-interval",
          "Minimum Force Keyunit Interval",
          "Minimum interval between force-keyunit requests in nanoseconds", 0,
          G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

   * GstVideoDecoder:discard-corrupted-frames:
   *
   * If set to %TRUE the decoder will discard frames that are marked as
   * corrupted instead of outputting them.

  g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
      g_param_spec_boolean ("discard-corrupted-frames",
          "Discard Corrupted Frames",
          "Discard frames marked as corrupted instead of outputting them",
          DEFAULT_DISCARD_CORRUPTED_FRAMES,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /* Interned once here so hot paths avoid the global quark table lock. */
  meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
/* Instance initializer: creates sink/src pads from the subclass' pad
 * templates, initializes segments, locks, adapters and queues, and applies
 * property defaults before performing a full reset.
 * NOTE(review): braces and a few lines (e.g. `pad_template =` assignments,
 * `GstPad *pad;`) are not visible in this chunk. */
gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
  GstPadTemplate *pad_template;

  GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");

  decoder->priv = gst_video_decoder_get_instance_private (decoder);

      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");

  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);

      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");

  gst_pad_set_event_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
  gst_pad_set_query_function (pad,
      GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);

  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);

  g_rec_mutex_init (&decoder->stream_lock);

  decoder->priv->input_adapter = gst_adapter_new ();
  decoder->priv->output_adapter = gst_adapter_new ();
  /* Until told otherwise, assume each input buffer holds whole frames. */
  decoder->priv->packetized = TRUE;
  decoder->priv->needs_format = FALSE;

  g_queue_init (&decoder->priv->frames);
  g_queue_init (&decoder->priv->timestamps);

  decoder->priv->do_qos = DEFAULT_QOS;
  decoder->priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;

  decoder->priv->min_latency = 0;
  decoder->priv->max_latency = 0;

  /* full reset, including hard flush, to reach a well-defined start state */
  gst_video_decoder_reset (decoder, TRUE, TRUE);
/* Build a new refcounted input GstVideoCodecState from sink caps, capturing
 * any codec_data buffer from the caps.  Returns NULL when the caps cannot be
 * parsed into a GstVideoInfo.  NOTE(review): the goto target / labels and the
 * return statements are not visible in this chunk. */
static GstVideoCodecState *
_new_input_state (GstCaps * caps)
  GstVideoCodecState *state;
  GstStructure *structure;
  const GValue *codec_data;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
  state->caps = gst_caps_ref (caps);

  structure = gst_caps_get_structure (caps, 0);

  codec_data = gst_structure_get_value (structure, "codec_data");
  if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
    state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));

  /* error path: release the partially-built state */
  g_slice_free (GstVideoCodecState, state);
/* Build a new refcounted output GstVideoCodecState for the given format /
 * interlace mode / dimensions, copying "inherited" fields (PAR, framerate,
 * colorimetry, multiview, ...) from an optional reference (input) state.
 * Only non-unknown reference values are copied so defaults survive.
 * NOTE(review): braces, the `tgt =` assignment and an `else` branch around
 * the MONO default appear to be missing from this chunk — confirm against
 * the full file before relying on the exact control flow. */
static GstVideoCodecState *
_new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
    guint width, guint height, GstVideoCodecState * reference,
    gboolean copy_interlace_mode)
  GstVideoCodecState *state;

  state = g_slice_new0 (GstVideoCodecState);
  state->ref_count = 1;
  gst_video_info_init (&state->info);
  if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
    g_slice_free (GstVideoCodecState, state);

    GstVideoInfo *tgt, *ref;

    ref = &reference->info;

    /* Copy over extra fields from reference state */
    if (copy_interlace_mode)
      tgt->interlace_mode = ref->interlace_mode;
    tgt->flags = ref->flags;
    /* only copy values that are not unknown so that we don't override the
     * defaults. subclasses should really fill these in when they know. */
    if (ref->chroma_site)
      tgt->chroma_site = ref->chroma_site;
    if (ref->colorimetry.range)
      tgt->colorimetry.range = ref->colorimetry.range;
    if (ref->colorimetry.matrix)
      tgt->colorimetry.matrix = ref->colorimetry.matrix;
    if (ref->colorimetry.transfer)
      tgt->colorimetry.transfer = ref->colorimetry.transfer;
    if (ref->colorimetry.primaries)
      tgt->colorimetry.primaries = ref->colorimetry.primaries;
    GST_DEBUG ("reference par %d/%d fps %d/%d",
        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
    tgt->par_n = ref->par_n;
    tgt->par_d = ref->par_d;
    tgt->fps_n = ref->fps_n;
    tgt->fps_d = ref->fps_d;
    tgt->views = ref->views;

    GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);

    if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
          GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
      /* Default to MONO, overridden as needed by sub-classes */
      GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
      GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;

  GST_DEBUG ("reference par %d/%d fps %d/%d",
      state->info.par_n, state->info.par_d,
      state->info.fps_n, state->info.fps_d);
/* Handle new sink caps: short-circuit if caps are unchanged, otherwise build
 * a new input state and offer it to the subclass via set_format().  All work
 * happens under the stream lock.  NOTE(review): the goto labels
 * (caps_not_changed / parse-failure / refused-caps paths, visible by their
 * bodies below) and the return statements are not visible in this chunk. */
gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
  GstVideoDecoderClass *decoder_class;
  GstVideoCodecState *state;

  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);

  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);

  GST_VIDEO_DECODER_STREAM_LOCK (decoder);

  if (decoder->priv->input_state) {
    GST_DEBUG_OBJECT (decoder,
        "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
        decoder->priv->input_state->caps, caps);
    if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
      goto caps_not_changed;

  state = _new_input_state (caps);

  if (G_UNLIKELY (state == NULL))

  if (decoder_class->set_format)
    ret = decoder_class->set_format (decoder, state);

  /* success: the new state replaces (and unrefs) the previous one */
  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  decoder->priv->input_state = state;

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_WARNING_OBJECT (decoder, "Failed to parse caps");
  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
  GST_WARNING_OBJECT (decoder, "Subclass refused caps");
  gst_video_codec_state_unref (state);
/* GObject finalize: release adapters, codec states, pool and allocator, and
 * clear the stream lock, then chain up to the parent class. */
gst_video_decoder_finalize (GObject * object)
  GstVideoDecoder *decoder;

  decoder = GST_VIDEO_DECODER (object);

  GST_DEBUG_OBJECT (object, "finalize");

  g_rec_mutex_clear (&decoder->stream_lock);

  if (decoder->priv->input_adapter) {
    g_object_unref (decoder->priv->input_adapter);
    decoder->priv->input_adapter = NULL;
  if (decoder->priv->output_adapter) {
    g_object_unref (decoder->priv->output_adapter);
    decoder->priv->output_adapter = NULL;

  if (decoder->priv->input_state)
    gst_video_codec_state_unref (decoder->priv->input_state);
  if (decoder->priv->output_state)
    gst_video_codec_state_unref (decoder->priv->output_state);

  if (decoder->priv->pool) {
    gst_object_unref (decoder->priv->pool);
    decoder->priv->pool = NULL;

  if (decoder->priv->allocator) {
    gst_object_unref (decoder->priv->allocator);
    decoder->priv->allocator = NULL;

  /* chain up so GstElement/GObject cleanup runs */
  G_OBJECT_CLASS (parent_class)->finalize (object);
/* GObject property getter.  NOTE(review): the `case PROP_QOS:` label and the
 * `break;` statements of this switch are not visible in this chunk. */
gst_video_decoder_get_property (GObject * object, guint property_id,
    GValue * value, GParamSpec * pspec)
  GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
  GstVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
      g_value_set_boolean (value, priv->do_qos);
    case PROP_MAX_ERRORS:
      g_value_set_int (value, gst_video_decoder_get_max_errors (dec));
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      g_value_set_uint64 (value, priv->min_force_key_unit_interval);
    case PROP_DISCARD_CORRUPTED_FRAMES:
      g_value_set_boolean (value, priv->discard_corrupted_frames);
      /* default: unknown property id */
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
/* GObject property setter.  NOTE(review): the `case PROP_QOS:` label and the
 * `break;` statements of this switch are not visible in this chunk. */
gst_video_decoder_set_property (GObject * object, guint property_id,
    const GValue * value, GParamSpec * pspec)
  GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
  GstVideoDecoderPrivate *priv = dec->priv;

  switch (property_id) {
      priv->do_qos = g_value_get_boolean (value);
    case PROP_MAX_ERRORS:
      gst_video_decoder_set_max_errors (dec, g_value_get_int (value));
    case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
      priv->min_force_key_unit_interval = g_value_get_uint64 (value);
    case PROP_DISCARD_CORRUPTED_FRAMES:
      priv->discard_corrupted_frames = g_value_get_boolean (value);
      /* default: unknown property id */
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
/* hard == FLUSH, otherwise discont */
/* Flush the decoder: notify the subclass (via the deprecated reset() vfunc
 * when implemented — its guard is not visible in this chunk) and then reset
 * base-class state for the upcoming data. */
gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
  GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstFlowReturn ret = GST_FLOW_OK;

  GST_LOG_OBJECT (dec, "flush hard %d", hard);

  /* Inform subclass */
  GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
  klass->reset (dec, hard);

  /* and get (re)set for the sequel */
  gst_video_decoder_reset (dec, FALSE, hard);
/* Merge upstream stream tags with decoder-set tags according to the
 * configured merge mode and wrap the result in a TAG event.  Returns NULL
 * when there is nothing (non-empty) to send. */
gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
  GstTagList *merged_tags;

  GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
  GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->tags);
  GST_LOG_OBJECT (dec, "mode : %d", dec->priv->tags_merge_mode);

      gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
      dec->priv->tags_merge_mode);

  GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);

  if (merged_tags == NULL)

  /* an empty merged list is dropped rather than pushed */
  if (gst_tag_list_is_empty (merged_tags)) {
    gst_tag_list_unref (merged_tags);

  /* gst_event_new_tag takes ownership of merged_tags */
  return gst_event_new_tag (merged_tags);
/* Push an event on the source pad, intercepting SEGMENT events to mirror the
 * outgoing segment into decoder state (and to detect input/output segment
 * sync) before forwarding.  NOTE(review): the `GstSegment segment;`
 * declaration, braces and `break;`s of this switch are not visible here. */
gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_SEGMENT:

      gst_event_copy_segment (event, &segment);

      GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);

      if (segment.format != GST_FORMAT_TIME) {
        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");

      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
      decoder->output_segment = segment;
      decoder->priv->in_out_segment_sync =
          gst_segment_is_equal (&decoder->input_segment, &segment);
      /* new segment invalidates cached timestamp/QoS state */
      decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
      decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);

  GST_DEBUG_OBJECT (decoder, "pushing event %s",
      gst_event_type_get_name (GST_EVENT_TYPE (event)));

  return gst_pad_push_event (decoder->srcpad, event);
/* Drain the input adapter by repeatedly calling the subclass parse() vfunc
 * until no data remains or the subclass stops making progress.  A subclass
 * that returns GST_FLOW_OK twice in a row without consuming bytes or
 * submitting a frame is treated as buggy and aborted with GST_FLOW_ERROR.
 * NOTE(review): the `inactive` counter declaration, loop braces and the
 * `error_inactive:` label are not visible in this chunk. */
static GstFlowReturn
gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
    gboolean new_buffer)
  GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
  GstVideoDecoderPrivate *priv = dec->priv;
  GstFlowReturn ret = GST_FLOW_OK;
  gsize was_available, available;

  available = gst_adapter_available (priv->input_adapter);

  while (available || new_buffer) {

    /* current frame may have been parsed and handled,
     * so we need to set up a new one when asking subclass to parse */
    if (priv->current_frame == NULL)
      priv->current_frame = gst_video_decoder_new_frame (dec);

    was_available = available;
    ret = decoder_class->parse (dec, priv->current_frame,
        priv->input_adapter, at_eos);
    if (ret != GST_FLOW_OK)

    /* if the subclass returned success (GST_FLOW_OK), it is expected
     * to have collected and submitted a frame, i.e. it should have
     * called gst_video_decoder_have_frame(), or at least consumed a
     * few bytes through gst_video_decoder_add_to_frame().
     *
     * Otherwise, this is an implementation bug, and we error out
     * after 2 failed attempts */
    available = gst_adapter_available (priv->input_adapter);
    if (!priv->current_frame || available != was_available)
    else if (++inactive == 2)
      goto error_inactive;

  GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
  return GST_FLOW_ERROR;
1094 /* This function has to be called with the stream lock taken. */
/* Drain all pending data out of the decoder.  Forward playback: parse any
 * leftover adapter data (unpacketized case), then call ->finish() (at EOS)
 * or ->drain() on the subclass.  Reverse playback: flush the gathered data
 * through gst_video_decoder_flush_parse().
 * NOTE(review): the at_eos/else branching between finish and drain is
 * partially elided in this sampled chunk. */
1095 static GstFlowReturn
1096 gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
1098 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
1099 GstVideoDecoderPrivate *priv = dec->priv;
1100 GstFlowReturn ret = GST_FLOW_OK;
1102 if (dec->input_segment.rate > 0.0) {
1103 /* Forward mode, if unpacketized, give the child class
1104 * a final chance to flush out packets */
1105 if (!priv->packetized) {
1106 ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
1110 if (decoder_class->finish)
1111 ret = decoder_class->finish (dec);
1113 if (decoder_class->drain) {
1114 ret = decoder_class->drain (dec);
1116 GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
1120 /* Reverse playback mode */
1121 ret = gst_video_decoder_flush_parse (dec, TRUE);
/* Walk a list of pending events: sticky events other than EOS/SEGMENT are
 * stored back on @pad (so they survive the flush), everything is unreffed,
 * and the list itself is freed.  Returns the (emptied) list — presumably
 * NULL; the return statement is in an elided line. */
1128 _flush_events (GstPad * pad, GList * events)
1132 for (tmp = events; tmp; tmp = tmp->next) {
1133 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
1134 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
1135 GST_EVENT_IS_STICKY (tmp->data)) {
/* keep sticky state on the pad even though the pending list is dropped */
1136 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
1138 gst_event_unref (tmp->data);
1140 g_list_free (events);
1145 /* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
/* Pick a sane default output state before any real caps exist (used when a
 * GAP arrives pre-negotiation): intersect the src template with peer caps,
 * seed width/height from the sink caps when present, fixate the remainder
 * towards I420 1280x720, and install the result as output state. */
1147 gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
1149 GstCaps *caps, *templcaps;
1150 GstVideoCodecState *state;
1154 GstStructure *structure;
1156 templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
1157 caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
1159 gst_caps_unref (templcaps);
/* nothing usable from the peer -> bail (error path elided) */
1164 if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
1167 GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);
1169 /* before fixating, try to use whatever upstream provided */
1170 caps = gst_caps_make_writable (caps);
1171 caps_size = gst_caps_get_size (caps);
1172 if (decoder->priv->input_state && decoder->priv->input_state->caps) {
1173 GstCaps *sinkcaps = decoder->priv->input_state->caps;
1174 GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
/* propagate upstream width/height into every candidate structure */
1177 if (gst_structure_get_int (structure, "width", &width)) {
1178 for (i = 0; i < caps_size; i++) {
1179 gst_structure_set (gst_caps_get_structure (caps, i), "width",
1180 G_TYPE_INT, width, NULL);
1184 if (gst_structure_get_int (structure, "height", &height)) {
1185 for (i = 0; i < caps_size; i++) {
1186 gst_structure_set (gst_caps_get_structure (caps, i), "height",
1187 G_TYPE_INT, height, NULL);
1192 for (i = 0; i < caps_size; i++) {
1193 structure = gst_caps_get_structure (caps, i);
1194 /* Random I420 1280x720 for fixation */
1195 if (gst_structure_has_field (structure, "format"))
1196 gst_structure_fixate_field_string (structure, "format", "I420");
1198 gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);
1200 if (gst_structure_has_field (structure, "width"))
1201 gst_structure_fixate_field_nearest_int (structure, "width", 1280);
1203 gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);
1205 if (gst_structure_has_field (structure, "height"))
1206 gst_structure_fixate_field_nearest_int (structure, "height", 720);
1208 gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
1210 caps = gst_caps_fixate (caps);
1212 if (!caps || !gst_video_info_from_caps (&info, caps))
1215 GST_INFO_OBJECT (decoder,
1216 "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
/* install the fixated caps as the default output state */
1218 gst_video_decoder_set_output_state (decoder, info.finfo->format,
1219 info.width, info.height, decoder->priv->input_state);
1220 gst_video_codec_state_unref (state);
1221 gst_caps_unref (caps);
/* failure cleanup path */
1228 gst_caps_unref (caps);
/* Default sink-pad event handler.  The recurring pattern: drain the decoder
 * under the stream lock for events that terminate or break the data flow
 * (STREAM_START, SEGMENT_DONE, EOS, GAP, still-frame), then either forward
 * the event immediately (forward_immediate) or queue it in
 * current_frame_events so it is pushed in order with decoded frames.
 * NOTE(review): this chunk is line-sampled — many 'break;' statements,
 * braces and some declarations are in elided lines. */
1234 gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
1237 GstVideoDecoderPrivate *priv;
1238 gboolean ret = FALSE;
1239 gboolean forward_immediate = FALSE;
1241 priv = decoder->priv;
1243 switch (GST_EVENT_TYPE (event)) {
1244 case GST_EVENT_STREAM_START:
1246 GstFlowReturn flow_ret = GST_FLOW_OK;
1248 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1249 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1250 ret = (flow_ret == GST_FLOW_OK);
1252 GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
1253 /* Flush upstream tags after a STREAM_START */
1254 if (priv->upstream_tags) {
1255 gst_tag_list_unref (priv->upstream_tags);
1256 priv->upstream_tags = NULL;
1257 priv->tags_changed = TRUE;
1259 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1261 /* Forward STREAM_START immediately. Everything is drained after
1262 * the STREAM_START event and we can forward this event immediately
1263 * now without having buffers out of order.
1265 forward_immediate = TRUE;
1268 case GST_EVENT_CAPS:
/* caps are consumed here (setcaps) and never forwarded as-is */
1272 gst_event_parse_caps (event, &caps);
1273 ret = gst_video_decoder_setcaps (decoder, caps);
1274 gst_event_unref (event);
1278 case GST_EVENT_SEGMENT_DONE:
1280 GstFlowReturn flow_ret = GST_FLOW_OK;
1282 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1283 flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
1284 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1285 ret = (flow_ret == GST_FLOW_OK);
1287 /* Forward SEGMENT_DONE immediately. This is required
1288 * because no buffer or serialized event might come
1289 * after SEGMENT_DONE and nothing could trigger another
1290 * _finish_frame() call.
1292 * The subclass can override this behaviour by overriding
1293 * the ::sink_event() vfunc and not chaining up to the
1294 * parent class' ::sink_event() until a later time.
1296 forward_immediate = TRUE;
/* GST_EVENT_EOS case (case label in elided line) */
1301 GstFlowReturn flow_ret = GST_FLOW_OK;
1303 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1304 flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
1305 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1306 ret = (flow_ret == GST_FLOW_OK);
1308 /* Error out even if EOS was ok when we had input, but no output */
1309 if (ret && priv->had_input_data && !priv->had_output_data) {
1310 GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
1311 ("No valid frames decoded before end of stream"),
1312 ("no valid frames found"));
1315 /* Forward EOS immediately. This is required because no
1316 * buffer or serialized event will come after EOS and
1317 * nothing could trigger another _finish_frame() call.
1319 * The subclass can override this behaviour by overriding
1320 * the ::sink_event() vfunc and not chaining up to the
1321 * parent class' ::sink_event() until a later time.
1323 forward_immediate = TRUE;
/* GST_EVENT_GAP case (case label in elided line) */
1328 GstFlowReturn flow_ret = GST_FLOW_OK;
1329 gboolean needs_reconfigure = FALSE;
1331 GList *frame_events;
1333 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* in trickmode-keyunits, drain on GAP as well */
1334 if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
1335 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1336 ret = (flow_ret == GST_FLOW_OK);
1338 /* Ensure we have caps before forwarding the event */
1339 if (!decoder->priv->output_state) {
1340 if (!gst_video_decoder_negotiate_default_caps (decoder)) {
1341 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1342 GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
1343 ("Decoder output not negotiated before GAP event."));
1344 forward_immediate = TRUE;
1347 needs_reconfigure = TRUE;
1350 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
1351 || needs_reconfigure;
1352 if (decoder->priv->output_state_changed || needs_reconfigure) {
1353 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
1354 GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
1355 gst_pad_mark_reconfigure (decoder->srcpad);
/* flush all queued serialized events ahead of the GAP */
1359 GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
1361 events = decoder->priv->pending_events;
1362 frame_events = decoder->priv->current_frame_events;
1363 decoder->priv->pending_events = NULL;
1364 decoder->priv->current_frame_events = NULL;
1366 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1368 gst_video_decoder_push_event_list (decoder, events);
1369 gst_video_decoder_push_event_list (decoder, frame_events);
1371 /* Forward GAP immediately. Everything is drained after
1372 * the GAP event and we can forward this event immediately
1373 * now without having buffers out of order.
1375 forward_immediate = TRUE;
1378 case GST_EVENT_CUSTOM_DOWNSTREAM:
1381 GstFlowReturn flow_ret = GST_FLOW_OK;
/* only the still-frame custom event is handled specially */
1383 if (gst_video_event_parse_still_frame (event, &in_still)) {
1385 GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
1386 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1387 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1388 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1389 ret = (flow_ret == GST_FLOW_OK);
1391 /* Forward STILL_FRAME immediately. Everything is drained after
1392 * the STILL_FRAME event and we can forward this event immediately
1393 * now without having buffers out of order.
1395 forward_immediate = TRUE;
1399 case GST_EVENT_SEGMENT:
1403 gst_event_copy_segment (event, &segment);
1405 if (segment.format == GST_FORMAT_TIME) {
1406 GST_DEBUG_OBJECT (decoder,
1407 "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1411 GST_DEBUG_OBJECT (decoder,
1412 "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1414 /* handle newsegment as a result from our legacy simple seeking */
1415 /* note that initial 0 should convert to 0 in any case */
1416 if (priv->do_estimate_rate &&
1417 gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1418 segment.start, GST_FORMAT_TIME, &start)) {
1419 /* best attempt convert */
1420 /* as these are only estimates, stop is kept open-ended to avoid
1421 * premature cutting */
1422 GST_DEBUG_OBJECT (decoder,
1423 "converted to TIME start %" GST_TIME_FORMAT,
1424 GST_TIME_ARGS (start));
1425 segment.start = start;
1426 segment.stop = GST_CLOCK_TIME_NONE;
1427 segment.time = start;
/* replace the BYTES event with a freshly built TIME segment event */
1429 gst_event_unref (event);
1430 event = gst_event_new_segment (&segment);
1432 goto newseg_wrong_format;
1436 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1438 /* Update the decode flags in the segment if we have an instant-rate
1439 * override active */
1440 GST_OBJECT_LOCK (decoder);
1441 if (!priv->decode_flags_override)
1442 priv->decode_flags = segment.flags;
1444 segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1445 segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1448 priv->base_timestamp = GST_CLOCK_TIME_NONE;
1449 priv->base_picture_number = 0;
1451 decoder->input_segment = segment;
1452 decoder->priv->in_out_segment_sync = FALSE;
1454 GST_OBJECT_UNLOCK (decoder);
1455 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1459 case GST_EVENT_INSTANT_RATE_CHANGE:
1461 GstSegmentFlags flags;
1464 gst_event_parse_instant_rate_change (event, NULL, &flags);
1466 GST_OBJECT_LOCK (decoder);
/* remember override so later SEGMENTs keep these instant-rate flags */
1467 priv->decode_flags_override = TRUE;
1468 priv->decode_flags = flags;
1470 /* Update the input segment flags */
1471 seg = &decoder->input_segment;
1472 seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1473 seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1474 GST_OBJECT_UNLOCK (decoder);
1477 case GST_EVENT_FLUSH_STOP:
1481 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* preserve sticky events attached to queued frames, drop the rest */
1482 for (l = priv->frames.head; l; l = l->next) {
1483 GstVideoCodecFrame *frame = l->data;
1485 frame->events = _flush_events (decoder->srcpad, frame->events);
1487 priv->current_frame_events = _flush_events (decoder->srcpad,
1488 decoder->priv->current_frame_events);
1490 /* well, this is kind of worse than a DISCONT */
1491 gst_video_decoder_flush (decoder, TRUE);
1492 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1493 /* Forward FLUSH_STOP immediately. This is required because it is
1494 * expected to be forwarded immediately and no buffers are queued
1497 forward_immediate = TRUE;
/* GST_EVENT_TAG case (case label in elided line) */
1504 gst_event_parse_tag (event, &tags);
1506 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1507 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1508 if (priv->upstream_tags != tags) {
1509 if (priv->upstream_tags)
1510 gst_tag_list_unref (priv->upstream_tags);
1511 priv->upstream_tags = gst_tag_list_ref (tags);
1512 GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
/* replace the incoming tag event with the merged (upstream+decoder) one */
1514 gst_event_unref (event);
1515 event = gst_video_decoder_create_merged_tags_event (decoder);
1516 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1526 /* Forward non-serialized events immediately, and all other
1527 * events which can be forwarded immediately without potentially
1528 * causing the event to go out of order with other events and
1529 * buffers as decided above.
1532 if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1533 ret = gst_video_decoder_push_event (decoder, event);
1535 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* serialized events wait until the next frame is finished */
1536 decoder->priv->current_frame_events =
1537 g_list_prepend (decoder->priv->current_frame_events, event);
1538 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1545 newseg_wrong_format:
1547 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
1548 gst_event_unref (event);
/* Sink-pad event chain function: logs the event and dispatches it to the
 * subclass' (or default) ->sink_event vfunc.  Return handling after the
 * vfunc call is in elided lines. */
1555 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1558 GstVideoDecoder *decoder;
1559 GstVideoDecoderClass *decoder_class;
1560 gboolean ret = FALSE;
1562 decoder = GST_VIDEO_DECODER (parent);
1563 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1565 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1566 GST_EVENT_TYPE_NAME (event));
1568 if (decoder_class->sink_event)
1569 ret = decoder_class->sink_event (decoder, event);
1574 /* perform upstream byte <-> time conversion (duration, seeking)
1575 * if subclass allows and if enough data for moderately decent conversion */
/* TRUE when rate estimation is enabled, some bytes were output, and more
 * than one second of media has been seen (checked under the object lock). */
1576 static inline gboolean
1577 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1581 GST_OBJECT_LOCK (dec);
1582 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1583 && (dec->priv->time > GST_SECOND);
1584 GST_OBJECT_UNLOCK (dec);
/* Legacy "simple" seek helper: accept only a flushing, open-ended,
 * SET-start TIME seek, convert the target time to a byte offset via the
 * sinkpad, and push an equivalent BYTES seek upstream (same seqnum). */
1590 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1594 GstSeekType start_type, end_type;
1596 gint64 start, start_time, end_time;
1597 GstSegment seek_segment;
1600 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1601 &start_time, &end_type, &end_time);
1603 /* we'll handle plain open-ended flushing seeks with the simple approach */
/* rate check body elided; only this debug line is visible */
1605 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1609 if (start_type != GST_SEEK_TYPE_SET) {
1610 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1614 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1615 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1616 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1620 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1621 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
/* simulate the seek on a copy of the output segment to get the target */
1625 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1626 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1627 start_time, end_type, end_time, NULL);
1628 start_time = seek_segment.position;
1630 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1631 GST_FORMAT_BYTES, &start)) {
1632 GST_DEBUG_OBJECT (dec, "conversion failed");
/* keep the original seqnum so upstream correlates the derived seek */
1636 seqnum = gst_event_get_seqnum (event);
1637 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1638 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1639 gst_event_set_seqnum (event, seqnum);
1641 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1642 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1644 return gst_pad_push_event (dec->sinkpad, event);
/* Default src-pad event handler.  SEEK: try upstream first, then help with
 * a byte<->time conversion (TIME seeks) or convert other formats to TIME
 * and retry.  QOS: record proportion/earliest_time for QoS decisions, then
 * forward upstream.  Everything else is forwarded to the sinkpad. */
1648 gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
1651 GstVideoDecoderPrivate *priv;
1652 gboolean res = FALSE;
1654 priv = decoder->priv;
1656 GST_DEBUG_OBJECT (decoder,
1657 "received event %d, %s", GST_EVENT_TYPE (event),
1658 GST_EVENT_TYPE_NAME (event));
1660 switch (GST_EVENT_TYPE (event)) {
1661 case GST_EVENT_SEEK:
1666 GstSeekType start_type, stop_type;
1668 gint64 tstart, tstop;
1671 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1673 seqnum = gst_event_get_seqnum (event);
1675 /* upstream gets a chance first */
1676 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1679 /* if upstream fails for a time seek, maybe we can help if allowed */
1680 if (format == GST_FORMAT_TIME) {
1681 if (gst_video_decoder_do_byte (decoder))
1682 res = gst_video_decoder_do_seek (decoder, event);
1686 /* ... though a non-time seek can be aided as well */
1687 /* First bring the requested format to time */
1689 gst_pad_query_convert (decoder->srcpad, format, start,
1690 GST_FORMAT_TIME, &tstart)))
1693 gst_pad_query_convert (decoder->srcpad, format, stop,
1694 GST_FORMAT_TIME, &tstop)))
1697 /* then seek with time on the peer */
1698 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1699 flags, start_type, tstart, stop_type, tstop);
1700 gst_event_set_seqnum (event, seqnum);
1702 res = gst_pad_push_event (decoder->sinkpad, event);
/* GST_EVENT_QOS case (case label in elided line) */
1709 GstClockTimeDiff diff;
1710 GstClockTime timestamp;
1712 gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1714 GST_OBJECT_LOCK (decoder);
1715 priv->proportion = proportion;
1716 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
/* late (diff > 0): project further ahead to catch up */
1717 if (G_UNLIKELY (diff > 0)) {
1718 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1720 priv->earliest_time = timestamp + diff;
1723 priv->earliest_time = GST_CLOCK_TIME_NONE;
1725 GST_OBJECT_UNLOCK (decoder);
1727 GST_DEBUG_OBJECT (decoder,
1728 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1729 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1731 res = gst_pad_push_event (decoder->sinkpad, event);
/* default: forward upstream untouched */
1735 res = gst_pad_push_event (decoder->sinkpad, event);
/* convert_error label (elided) */
1742 GST_DEBUG_OBJECT (decoder, "could not convert format");
/* Src-pad event chain function: logs and dispatches to ->src_event. */
1747 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1749 GstVideoDecoder *decoder;
1750 GstVideoDecoderClass *decoder_class;
1751 gboolean ret = FALSE;
1753 decoder = GST_VIDEO_DECODER (parent);
1754 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1756 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1757 GST_EVENT_TYPE_NAME (event));
1759 if (decoder_class->src_event)
1760 ret = decoder_class->src_event (decoder, event);
/* Default src-pad query handler: POSITION (peer first, else derive from
 * last output timestamp), DURATION (peer first, else BYTES->TIME estimate),
 * CONVERT (via output_state raw-video conversion), LATENCY (peer values
 * plus our min/max latency), everything else to the pad default. */
1766 gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
1768 GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
1769 gboolean res = TRUE;
1771 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1773 switch (GST_QUERY_TYPE (query)) {
1774 case GST_QUERY_POSITION:
1779 /* upstream gets a chance first */
1780 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1781 GST_LOG_OBJECT (dec, "returning peer response");
1785 /* Refuse BYTES format queries. If it made sense to
1786 * answer them, upstream would have already */
1787 gst_query_parse_position (query, &format, NULL);
1789 if (format == GST_FORMAT_BYTES) {
1790 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1794 /* we start from the last seen time */
1795 time = dec->priv->last_timestamp_out;
1796 /* correct for the segment values */
1797 time = gst_segment_to_stream_time (&dec->output_segment,
1798 GST_FORMAT_TIME, time);
1800 GST_LOG_OBJECT (dec,
1801 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
1803 /* and convert to the final format */
1804 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
1808 gst_query_set_position (query, format, value);
1810 GST_LOG_OBJECT (dec,
1811 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
1815 case GST_QUERY_DURATION:
1819 /* upstream in any case */
1820 if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
1823 gst_query_parse_duration (query, &format, NULL);
1824 /* try answering TIME by converting from BYTE if subclass allows */
1825 if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
1828 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
1830 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
1831 if (gst_pad_query_convert (dec->sinkpad,
1832 GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
1833 gst_query_set_duration (query, GST_FORMAT_TIME, value);
1840 case GST_QUERY_CONVERT:
1842 GstFormat src_fmt, dest_fmt;
1843 gint64 src_val, dest_val;
1845 GST_DEBUG_OBJECT (dec, "convert query");
1847 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1848 GST_OBJECT_LOCK (dec);
/* conversion needs a negotiated output state (raw-video geometry) */
1849 if (dec->priv->output_state != NULL)
1850 res = __gst_video_rawvideo_convert (dec->priv->output_state,
1851 src_fmt, src_val, &dest_fmt, &dest_val);
1854 GST_OBJECT_UNLOCK (dec);
1857 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1860 case GST_QUERY_LATENCY:
1863 GstClockTime min_latency, max_latency;
1865 res = gst_pad_peer_query (dec->sinkpad, query);
1867 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
1868 GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
1869 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
1870 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
1872 GST_OBJECT_LOCK (dec);
1873 min_latency += dec->priv->min_latency;
/* NONE on either side keeps max open-ended */
1874 if (max_latency == GST_CLOCK_TIME_NONE
1875 || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
1876 max_latency = GST_CLOCK_TIME_NONE;
1878 max_latency += dec->priv->max_latency;
1879 GST_OBJECT_UNLOCK (dec);
1881 gst_query_set_latency (query, live, min_latency, max_latency);
1886 res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
/* error label (elided) */
1891 GST_ERROR_OBJECT (dec, "query failed");
/* Src-pad query chain function: logs and dispatches to ->src_query. */
1896 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
1898 GstVideoDecoder *decoder;
1899 GstVideoDecoderClass *decoder_class;
1900 gboolean ret = FALSE;
1902 decoder = GST_VIDEO_DECODER (parent);
1903 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1905 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1906 GST_QUERY_TYPE_NAME (query));
1908 if (decoder_class->src_query)
1909 ret = decoder_class->src_query (decoder, query);
1915 * gst_video_decoder_proxy_getcaps:
1916 * @decoder: a #GstVideoDecoder
1917 * @caps: (allow-none): initial caps
1918 * @filter: (allow-none): filter caps
1920 * Returns caps that express @caps (or sink template caps if @caps == NULL)
1921 * restricted to resolution/format/... combinations supported by downstream
1924 * Returns: (transfer full): a #GstCaps owned by caller
/* Thin wrapper delegating the sink->src caps proxying to the shared
 * video-element helper. */
1929 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
1932 return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
1933 GST_VIDEO_DECODER_SINK_PAD (decoder),
1934 GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
/* Compute sink caps: use the subclass ->getcaps vfunc when provided,
 * otherwise fall back to proxying against downstream via
 * gst_video_decoder_proxy_getcaps().  The if/else around the two calls is
 * in elided lines. */
1938 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
1940 GstVideoDecoderClass *klass;
1943 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1946 caps = klass->getcaps (decoder, filter);
1948 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
1950 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
/* Default sink-pad query handler: CONVERT via encoded-video byte/time
 * stats, ALLOCATION via the subclass propose_allocation vfunc, CAPS via
 * sink_getcaps, ACCEPT_CAPS either through the default pad behaviour or a
 * subset/intersect check, everything else to the pad default. */
1956 gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
1959 GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
1960 GstVideoDecoderPrivate *priv;
1961 gboolean res = FALSE;
1963 priv = decoder->priv;
1965 GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
1967 switch (GST_QUERY_TYPE (query)) {
1968 case GST_QUERY_CONVERT:
1970 GstFormat src_fmt, dest_fmt;
1971 gint64 src_val, dest_val;
1973 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1974 GST_OBJECT_LOCK (decoder);
/* uses accumulated bytes_out/time stats for the estimate */
1976 __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
1977 src_fmt, src_val, &dest_fmt, &dest_val);
1978 GST_OBJECT_UNLOCK (decoder);
1981 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1984 case GST_QUERY_ALLOCATION:{
1985 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1987 if (klass->propose_allocation)
1988 res = klass->propose_allocation (decoder, query);
1991 case GST_QUERY_CAPS:{
1992 GstCaps *filter, *caps;
1994 gst_query_parse_caps (query, &filter);
1995 caps = gst_video_decoder_sink_getcaps (decoder, filter);
1996 gst_query_set_caps_result (query, caps);
1997 gst_caps_unref (caps);
2001 case GST_QUERY_ACCEPT_CAPS:{
2002 if (decoder->priv->use_default_pad_acceptcaps) {
2004 gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
2005 GST_OBJECT_CAST (decoder), query);
2008 GstCaps *allowed_caps;
2009 GstCaps *template_caps;
2012 gst_query_parse_accept_caps (query, &caps);
/* fast path: caps within the template are accepted outright */
2014 template_caps = gst_pad_get_pad_template_caps (pad);
2015 accept = gst_caps_is_subset (caps, template_caps);
2016 gst_caps_unref (template_caps);
/* otherwise intersect with the currently queryable pad caps */
2020 gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);
2022 accept = gst_caps_can_intersect (caps, allowed_caps);
2024 gst_caps_unref (allowed_caps);
2027 gst_query_set_accept_caps_result (query, accept);
2033 res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
/* error label (elided) */
2040 GST_DEBUG_OBJECT (decoder, "query failed");
/* Sink-pad query chain function: logs and dispatches to ->sink_query. */
2046 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2049 GstVideoDecoder *decoder;
2050 GstVideoDecoderClass *decoder_class;
2051 gboolean ret = FALSE;
2053 decoder = GST_VIDEO_DECODER (parent);
2054 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2056 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2057 GST_QUERY_TYPE_NAME (query));
2059 if (decoder_class->sink_query)
2060 ret = decoder_class->sink_query (decoder, query);
/* Per-input-buffer timestamp record kept in priv->timestamps; full member
 * list (offset, pts, dts, flags) is in elided lines. */
2065 typedef struct _Timestamp Timestamp;
2071 GstClockTime duration;
/* Release one Timestamp record (g_slice allocation). */
2076 timestamp_free (Timestamp * ts)
2078 g_slice_free (Timestamp, ts);
/* Record an input buffer's PTS/DTS/duration/flags keyed by the current
 * input byte offset so they can be matched back to parsed frames later
 * (see _get_buffer_info_at_offset).  Buffers with no timing info and no
 * flags are skipped to save memory. */
2082 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
2085 GstVideoDecoderPrivate *priv = decoder->priv;
2088 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2089 !GST_BUFFER_DTS_IS_VALID (buffer) &&
2090 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2091 GST_BUFFER_FLAGS (buffer) == 0) {
2092 /* Save memory - don't bother storing info
2093 * for buffers with no distinguishing info */
2097 ts = g_slice_new (Timestamp);
2099 GST_LOG_OBJECT (decoder,
2100 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2101 " (offset:%" G_GUINT64_FORMAT ")",
2102 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2103 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2105 ts->offset = priv->input_offset;
2106 ts->pts = GST_BUFFER_PTS (buffer);
2107 ts->dts = GST_BUFFER_DTS (buffer);
2108 ts->duration = GST_BUFFER_DURATION (buffer);
2109 ts->flags = GST_BUFFER_FLAGS (buffer);
/* FIFO order matches input byte order */
2111 g_queue_push_tail (&priv->timestamps, ts);
/* Look up (and consume) the stored timestamp info for the newest record
 * whose byte offset is <= @offset; outputs default to NONE when no record
 * matches.  Consumed records are removed from the queue and freed.
 * NOTE(review): loop setup and the pts/dts/flags copy lines are elided. */
2115 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
2116 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2117 GstClockTime * duration, guint * flags)
2119 #ifndef GST_DISABLE_GST_DEBUG
2120 guint64 got_offset = 0;
/* defaults when nothing is found */
2125 *pts = GST_CLOCK_TIME_NONE;
2126 *dts = GST_CLOCK_TIME_NONE;
2127 *duration = GST_CLOCK_TIME_NONE;
2130 g = decoder->priv->timestamps.head;
2133 if (ts->offset <= offset) {
2134 GList *next = g->next;
2135 #ifndef GST_DISABLE_GST_DEBUG
2136 got_offset = ts->offset;
2140 *duration = ts->duration;
/* drop consumed entries so the queue stays bounded */
2142 g_queue_delete_link (&decoder->priv->timestamps, g);
2144 timestamp_free (ts);
2150 GST_LOG_OBJECT (decoder,
2151 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2152 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2153 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
2156 #if !GLIB_CHECK_VERSION(2, 60, 0)
/* Compatibility shim: provide g_queue_clear_full() for GLib < 2.60 by
 * popping and freeing each element.  NOTE(review): the free_func call on
 * each popped element is in an elided line. */
2157 #define g_queue_clear_full queue_clear_full
2159 queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2163 while ((data = g_queue_pop_head (queue)) != NULL)
/* Drop every internal queue/list: queued output buffers, the reverse-mode
 * gather/decode/parse/parse_gather lists, and the frames queue.  Buffers
 * are released via gst_mini_object_unref, frames via
 * gst_video_codec_frame_unref. */
2169 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
2171 GstVideoDecoderPrivate *priv = dec->priv;
2173 g_list_free_full (priv->output_queued,
2174 (GDestroyNotify) gst_mini_object_unref);
2175 priv->output_queued = NULL;
2177 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2178 priv->gather = NULL;
2179 g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
2180 priv->decode = NULL;
2181 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2183 g_list_free_full (priv->parse_gather,
2184 (GDestroyNotify) gst_video_codec_frame_unref);
2185 priv->parse_gather = NULL;
2186 g_queue_clear_full (&priv->frames,
2187 (GDestroyNotify) gst_video_codec_frame_unref);
/* Reset decoder state under the stream lock.  @full (or @flush_hard)
 * reinitialises segments, clears all queues/events, drops input/output
 * state, tags, the buffer pool and allocator; the non-full tail resets
 * per-stream bookkeeping (timestamps, adapters, offsets, counters).
 * NOTE(review): sampled chunk — several brace/assignment lines between the
 * full and non-full sections are elided. */
2191 gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
2192 gboolean flush_hard)
2194 GstVideoDecoderPrivate *priv = decoder->priv;
2196 GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2198 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2200 if (full || flush_hard) {
2201 gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2202 gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2203 gst_video_decoder_clear_queues (decoder);
2204 decoder->priv->in_out_segment_sync = TRUE;
2206 if (priv->current_frame) {
2207 gst_video_codec_frame_unref (priv->current_frame);
2208 priv->current_frame = NULL;
2211 g_list_free_full (priv->current_frame_events,
2212 (GDestroyNotify) gst_event_unref);
2213 priv->current_frame_events = NULL;
2214 g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2215 priv->pending_events = NULL;
2217 priv->error_count = 0;
2218 priv->had_output_data = FALSE;
2219 priv->had_input_data = FALSE;
/* QoS / sync-point state is guarded by the object lock */
2221 GST_OBJECT_LOCK (decoder);
2222 priv->earliest_time = GST_CLOCK_TIME_NONE;
2223 priv->proportion = 0.5;
2224 priv->decode_flags_override = FALSE;
2226 priv->request_sync_point_flags = 0;
2227 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2228 priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2229 GST_OBJECT_UNLOCK (decoder);
2230 priv->distance_from_sync = -1;
/* full-reset-only section: drop negotiated states, tags, pool, allocator */
2234 if (priv->input_state)
2235 gst_video_codec_state_unref (priv->input_state);
2236 priv->input_state = NULL;
2237 GST_OBJECT_LOCK (decoder);
2238 if (priv->output_state)
2239 gst_video_codec_state_unref (priv->output_state);
2240 priv->output_state = NULL;
2242 priv->qos_frame_duration = 0;
2243 GST_OBJECT_UNLOCK (decoder);
2246 gst_tag_list_unref (priv->tags);
2248 priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2249 if (priv->upstream_tags) {
2250 gst_tag_list_unref (priv->upstream_tags);
2251 priv->upstream_tags = NULL;
2253 priv->tags_changed = FALSE;
2254 priv->reordered_output = FALSE;
2257 priv->processed = 0;
2259 priv->decode_frame_number = 0;
2260 priv->base_picture_number = 0;
2263 GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2265 gst_buffer_pool_set_active (priv->pool, FALSE);
2266 gst_object_unref (priv->pool);
2270 if (priv->allocator) {
2271 gst_object_unref (priv->allocator);
2272 priv->allocator = NULL;
/* non-full tail: per-stream bookkeeping reset */
2276 priv->discont = TRUE;
2278 priv->base_timestamp = GST_CLOCK_TIME_NONE;
2279 priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2280 priv->pts_delta = GST_CLOCK_TIME_NONE;
2282 priv->input_offset = 0;
2283 priv->frame_offset = 0;
2284 gst_adapter_clear (priv->input_adapter);
2285 gst_adapter_clear (priv->output_adapter);
2286 g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2288 GST_OBJECT_LOCK (decoder);
2289 priv->bytes_out = 0;
2291 GST_OBJECT_UNLOCK (decoder);
2293 #ifndef GST_DISABLE_DEBUG
2294 priv->last_reset_time = gst_util_get_timestamp ();
2297 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* Feed one input buffer into the decoder in the forward direction.
 * Packetized input: each buffer is a complete frame — decoded immediately
 * at forward rate, or prepended to priv->parse_gather at reverse rate.
 * Non-packetized input: the buffer goes into the input adapter and is
 * handed to the subclass parse vfunc via parse_available().
 * NOTE(review): several original lines (closing braces / final return)
 * are missing from this extract; comments only added here. */
2300 static GstFlowReturn
2301 gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
2302 GstBuffer * buf, gboolean at_eos)
2304 GstVideoDecoderPrivate *priv;
2305 GstVideoDecoderClass *klass;
2306 GstFlowReturn ret = GST_FLOW_OK;
2308 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2309 priv = decoder->priv;
/* A subclass must either take whole packets or provide a parse vfunc. */
2311 g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2313 /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
2314 * and this function would only be called to get everything collected GOP
2315 * by GOP in the parse_gather list */
2316 if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2317 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2318 ret = gst_video_decoder_drain_out (decoder, FALSE);
/* Lazily open a frame to collect this buffer's data/events. */
2320 if (priv->current_frame == NULL)
2321 priv->current_frame = gst_video_decoder_new_frame (decoder);
2323 if (!priv->packetized)
2324 gst_video_decoder_add_buffer_info (decoder, buf);
2326 priv->input_offset += gst_buffer_get_size (buf);
2328 if (priv->packetized) {
2329 gboolean was_keyframe = FALSE;
/* Absence of DELTA_UNIT means this packet is a keyframe/sync point. */
2330 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2331 was_keyframe = TRUE;
2332 GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2333 GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
/* Frame takes ownership of the input buffer reference here. */
2336 priv->current_frame->input_buffer = buf;
2338 if (decoder->input_segment.rate < 0.0) {
2339 priv->parse_gather =
2340 g_list_prepend (priv->parse_gather, priv->current_frame);
2342 ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
2344 priv->current_frame = NULL;
2345 /* If in trick mode and it was a keyframe, drain decoder to avoid extra
2346 * latency. Only do this for forwards playback as reverse playback handles
2347 * draining on keyframes in flush_parse(), and would otherwise call back
2348 * from drain_out() to here causing an infinite loop.
2349 * Also this function is only called for reverse playback to gather frames
2350 * GOP by GOP, and does not do any actual decoding. That would be done by
2352 if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2353 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2354 ret = gst_video_decoder_drain_out (decoder, FALSE);
/* Non-packetized path: accumulate and let the subclass parse. */
2356 gst_adapter_push (priv->input_adapter, buf);
2358 ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
/* NEED_DATA is not an error — it just means "feed me more input". */
2361 if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
/* Decode every frame queued on priv->decode (reverse playback).
 * Frames were placed there front-first by flush_parse(); each is unlinked
 * from the list and passed to decode_frame(), whose output ends up
 * prepended to the output queue for reversed pushing.  Stops on the first
 * non-OK flow return.
 * NOTE(review): loop header/closing lines are missing from this extract. */
2367 static GstFlowReturn
2368 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2370 GstVideoDecoderPrivate *priv = dec->priv;
2371 GstFlowReturn res = GST_FLOW_OK;
2374 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2376 walk = priv->decode;
2379 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2381 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2382 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2383 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2384 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
/* Unlink the node before decoding; the frame ref moves to decode_frame(). */
2388 priv->decode = g_list_delete_link (priv->decode, walk);
2390 /* decode buffer, resulting data prepended to queue */
2391 res = gst_video_decoder_decode_frame (dec, frame);
2392 if (res != GST_FLOW_OK)
2401 /* gst_video_decoder_flush_parse is called from the
2402 * chain_reverse() function when a buffer containing
2403 * a DISCONT - indicating that reverse playback
2404 * looped back to the next data block, and therefore
2405 * all available data should be fed through the
2406 * decoder and frames gathered for reversed output
 *
 * Overall flow (reviewer summary of the visible code):
 *  1. gather list is reversed and prepended onto the parse list;
 *  2. each parse buffer is re-fed through chain_forward() to produce
 *     frames on parse_gather;
 *  3. SEGMENT events attached to frames are moved to pending_events
 *     (pushed later, after negotiation, to avoid segment-before-caps);
 *  4. frames are moved onto the decode list until a keyframe is found,
 *     then flush_decode() + subclass drain()/finish() run;
 *  5. queued output buffers are timestamped backwards and pushed.
 * NOTE(review): loop headers, braces and returns are missing from this
 * extract; comments only added. */
2408 static GstFlowReturn
2409 gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
2411 GstVideoDecoderPrivate *priv = dec->priv;
2412 GstFlowReturn res = GST_FLOW_OK;
2414 GstVideoDecoderClass *decoder_class;
2416 decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
2418 GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2420 /* Reverse the gather list, and prepend it to the parse list,
2421 * then flush to parse whatever we can */
2422 priv->gather = g_list_reverse (priv->gather);
2423 priv->parse = g_list_concat (priv->gather, priv->parse);
2424 priv->gather = NULL;
2426 /* clear buffer and decoder state */
2427 gst_video_decoder_flush (dec, FALSE);
2431 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2432 GList *next = walk->next;
2434 GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2435 ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2436 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2437 GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2439 /* parse buffer, resulting frames prepended to parse_gather queue */
/* Extra ref: chain_forward() consumes one, we may keep buf on the list. */
2440 gst_buffer_ref (buf);
2441 res = gst_video_decoder_chain_forward (dec, buf, at_eos);
2443 /* if we generated output, we can discard the buffer, else we
2444 * keep it in the queue */
2445 if (priv->parse_gather) {
2446 GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2447 priv->parse = g_list_delete_link (priv->parse, walk);
2448 gst_buffer_unref (buf);
2450 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
2455 walk = priv->parse_gather;
2457 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2460 /* this is reverse playback, check if we need to apply some segment
2461 * to the output before decoding, as during decoding the segment.rate
2462 * must be used to determine if a buffer should be pushed or added to
2463 * the output list for reverse pushing.
2465 * The new segment is not immediately pushed here because we must
2466 * wait for negotiation to happen before it can be pushed to avoid
2467 * pushing a segment before caps event. Negotiation only happens
2468 * when finish_frame is called.
2470 for (walk2 = frame->events; walk2;) {
2472 GstEvent *event = walk2->data;
/* Advance before possibly deleting the current node from frame->events. */
2474 walk2 = g_list_next (walk2);
2475 if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2477 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2480 GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2481 frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2482 gst_event_copy_segment (event, &segment);
2483 if (segment.format == GST_FORMAT_TIME) {
2484 dec->output_segment = segment;
2485 dec->priv->in_out_segment_sync =
2486 gst_segment_is_equal (&dec->input_segment, &segment);
/* Event ownership moves from the frame to the pending_events list. */
2489 dec->priv->pending_events =
2490 g_list_append (dec->priv->pending_events, event);
2491 frame->events = g_list_delete_link (frame->events, cur);
2498 /* now we can process frames. Start by moving each frame from the parse_gather
2499 * to the decode list, reverse the order as we go, and stopping when/if we
2500 * copy a keyframe. */
2501 GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2502 walk = priv->parse_gather;
2504 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2506 /* remove from the gather list */
2507 priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2509 /* move it to the front of the decode queue */
/* The unlinked node itself is reused, so no alloc/free churn here. */
2510 priv->decode = g_list_concat (walk, priv->decode);
2512 /* if we copied a keyframe, flush and decode the decode queue */
2513 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2514 GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2515 ", DTS %" GST_TIME_FORMAT, frame,
2516 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2517 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2518 res = gst_video_decoder_flush_decode (dec);
2519 if (res != GST_FLOW_OK)
2522 /* We need to tell the subclass to drain now.
2523 * We prefer the drain vfunc, but for backward-compat
2524 * we use a finish() vfunc if drain isn't implemented */
2525 if (decoder_class->drain) {
2526 GST_DEBUG_OBJECT (dec, "Draining");
2527 res = decoder_class->drain (dec);
2528 } else if (decoder_class->finish) {
2529 GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2530 "Calling finish() for backwards-compat");
2531 res = decoder_class->finish (dec);
2534 if (res != GST_FLOW_OK)
2537 /* now send queued data downstream */
2538 walk = priv->output_queued;
2540 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2542 priv->output_queued =
2543 g_list_delete_link (priv->output_queued, priv->output_queued);
2545 if (G_LIKELY (res == GST_FLOW_OK)) {
2546 /* avoid stray DISCONT from forward processing,
2547 * which have no meaning in reverse pushing */
2548 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2550 /* Last chance to calculate a timestamp as we loop backwards
2551 * through the list */
2552 if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2553 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2554 else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2555 GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
/* Working backwards: this buffer ends where the previous one started. */
2556 GST_BUFFER_TIMESTAMP (buf) =
2557 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2558 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2559 GST_LOG_OBJECT (dec,
2560 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2561 GST_TIME_ARGS (priv->last_timestamp_out));
2564 res = gst_video_decoder_clip_and_push_buf (dec, buf);
/* On error flow the queued buffer is dropped instead of pushed. */
2566 gst_buffer_unref (buf);
2569 walk = priv->output_queued;
2572 /* clear buffer and decoder state again
2573 * before moving to the previous keyframe */
2574 gst_video_decoder_flush (dec, FALSE);
2577 walk = priv->parse_gather;
/* Reverse-playback chain entry: on a DISCONT (or NULL buf, meaning EOS
 * drain), flush everything gathered so far through flush_parse(); any
 * non-NULL buffer is then prepended to the gather queue (taking ownership
 * of the caller's reference).
 * NOTE(review): closing lines/returns are missing from this extract. */
2584 static GstFlowReturn
2585 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2587 GstVideoDecoderPrivate *priv = dec->priv;
2588 GstFlowReturn result = GST_FLOW_OK;
2590 /* if we have a discont, move buffers to the decode list */
2591 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2592 GST_DEBUG_OBJECT (dec, "received discont");
2594 /* parse and decode stuff in the gather and parse queues */
2595 result = gst_video_decoder_flush_parse (dec, FALSE);
2598 if (G_LIKELY (buf)) {
2599 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2600 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2601 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2602 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2603 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2604 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2606 /* add buffer to gather queue */
/* Prepend keeps the gather list in reverse arrival order. */
2607 priv->gather = g_list_prepend (priv->gather, buf);
/* Sink-pad chain function: validates negotiation state, synthesizes a
 * default time segment if upstream never sent one, then dispatches to
 * the forward or reverse chain path based on input_segment.rate.
 * Takes ownership of @buf (unreffed on the not_negotiated error path).
 * NOTE(review): some closing braces/returns are missing from this
 * extract; comments only added. */
2613 static GstFlowReturn
2614 gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2616 GstVideoDecoder *decoder;
2617 GstFlowReturn ret = GST_FLOW_OK;
2619 decoder = GST_VIDEO_DECODER (parent);
/* Subclasses that need caps before data reject unnegotiated input. */
2621 if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2622 goto not_negotiated;
2624 GST_LOG_OBJECT (decoder,
2625 "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2626 GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2627 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2628 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2629 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2630 gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2632 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2635 * requiring the pad to be negotiated makes it impossible to use
2636 * oggdemux or filesrc ! decoder */
2638 if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2640 GstSegment *segment = &decoder->input_segment;
2642 GST_WARNING_OBJECT (decoder,
2643 "Received buffer without a new-segment. "
2644 "Assuming timestamps start from 0.");
2646 gst_segment_init (segment, GST_FORMAT_TIME);
2648 event = gst_event_new_segment (segment);
/* The synthesized segment event is queued on the next frame's events. */
2650 decoder->priv->current_frame_events =
2651 g_list_prepend (decoder->priv->current_frame_events, event);
2654 decoder->priv->had_input_data = TRUE;
2656 if (decoder->input_segment.rate > 0.0)
2657 ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
2659 ret = gst_video_decoder_chain_reverse (decoder, buf);
2661 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* not_negotiated error label: drop the buffer and signal the failure. */
2667 GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2668 ("decoder not initialized"));
2669 gst_buffer_unref (buf);
2670 return GST_FLOW_NOT_NEGOTIATED;
/* GstElement state-change handler: maps element state transitions onto the
 * subclass vfuncs open/start (upward) and stop/close (downward), resetting
 * decoder state around READY<->PAUSED.  Failure of any vfunc jumps to the
 * matching error label which posts an element error and returns
 * GST_STATE_CHANGE_FAILURE.
 * NOTE(review): break statements, goto labels and the final return are
 * missing from this extract; comments only added. */
2674 static GstStateChangeReturn
2675 gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
2677 GstVideoDecoder *decoder;
2678 GstVideoDecoderClass *decoder_class;
2679 GstStateChangeReturn ret;
2681 decoder = GST_VIDEO_DECODER (element);
2682 decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);
/* Upward transitions are handled before chaining to the parent class. */
2684 switch (transition) {
2685 case GST_STATE_CHANGE_NULL_TO_READY:
2686 /* open device/library if needed */
2687 if (decoder_class->open && !decoder_class->open (decoder))
2690 case GST_STATE_CHANGE_READY_TO_PAUSED:
2691 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2692 gst_video_decoder_reset (decoder, TRUE, TRUE);
2693 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2695 /* Initialize device/library if needed */
2696 if (decoder_class->start && !decoder_class->start (decoder))
2703 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
/* Downward transitions run after the parent class has changed state. */
2705 switch (transition) {
2706 case GST_STATE_CHANGE_PAUSED_TO_READY:{
2707 gboolean stopped = TRUE;
2709 if (decoder_class->stop)
2710 stopped = decoder_class->stop (decoder);
2712 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2713 gst_video_decoder_reset (decoder, TRUE, TRUE);
2714 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2721 case GST_STATE_CHANGE_READY_TO_NULL:
2722 /* close device/library if needed */
2723 if (decoder_class->close && !decoder_class->close (decoder))
/* Error labels: post a descriptive element error and fail the change. */
2735 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2736 ("Failed to open decoder"));
2737 return GST_STATE_CHANGE_FAILURE;
2742 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2743 ("Failed to start decoder"));
2744 return GST_STATE_CHANGE_FAILURE;
2749 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2750 ("Failed to stop decoder"));
2751 return GST_STATE_CHANGE_FAILURE;
2756 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2757 ("Failed to close decoder"));
2758 return GST_STATE_CHANGE_FAILURE;
/* Allocate and initialize a new GstVideoCodecFrame (ref_count 1).
 * Under the stream lock it assigns sequential system/decode frame numbers
 * and transfers ownership of the pending current_frame_events list to the
 * new frame.  Timestamps start out as GST_CLOCK_TIME_NONE until parsing
 * fills them in.
 * NOTE(review): the final return statement is missing from this extract. */
2762 static GstVideoCodecFrame *
2763 gst_video_decoder_new_frame (GstVideoDecoder * decoder)
2765 GstVideoDecoderPrivate *priv = decoder->priv;
2766 GstVideoCodecFrame *frame;
2768 frame = g_slice_new0 (GstVideoCodecFrame);
2770 frame->ref_count = 1;
2772 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2773 frame->system_frame_number = priv->system_frame_number;
2774 priv->system_frame_number++;
2775 frame->decode_frame_number = priv->decode_frame_number;
2776 priv->decode_frame_number++;
2778 frame->dts = GST_CLOCK_TIME_NONE;
2779 frame->pts = GST_CLOCK_TIME_NONE;
2780 frame->duration = GST_CLOCK_TIME_NONE;
/* Events queued since the previous frame now belong to this frame. */
2781 frame->events = priv->current_frame_events;
2782 priv->current_frame_events = NULL;
2784 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2786 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
2787 frame, frame->system_frame_number);
/* Push a list of events downstream and free the list.
 * The list is stored newest-first, so it is walked from the tail to push
 * events in their original arrival order.
 * NOTE(review): the function's storage class/return type line is missing
 * from this extract. */
2793 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
2797 /* events are stored in reverse order */
2798 for (l = g_list_last (events); l; l = g_list_previous (l)) {
2799 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
2800 gst_video_decoder_push_event (decoder, l->data);
/* Only the list cells are freed; event refs were consumed by push. */
2802 g_list_free (events);
/* Common bookkeeping before a frame is finished or dropped:
 *  - pushes (or defers to pending_events) the events gathered on frames
 *    up to and including this one;
 *  - fills in missing frame->duration and frame->pts using the tracked
 *    DTS/PTS queues, pts_delta, last output timestamp or segment start;
 *  - detects and compensates decreasing timestamps (reordered output).
 * @dropping: TRUE when called from drop_frame(), in which case events are
 * deferred rather than pushed.
 * NOTE(review): several lines (braces, goto targets, some declarations)
 * are missing from this extract; comments only added. */
2806 gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
2807 decoder, GstVideoCodecFrame * frame, gboolean dropping)
2809 GstVideoDecoderPrivate *priv = decoder->priv;
2810 GList *l, *events = NULL;
2813 #ifndef GST_DISABLE_GST_DEBUG
2814 GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
2815 priv->frames.length,
2816 gst_adapter_available (priv->input_adapter),
2817 gst_adapter_available (priv->output_adapter));
2820 sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
2822 GST_LOG_OBJECT (decoder,
2823 "finish frame %p (#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
2825 frame, frame->system_frame_number,
2826 sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
2828 /* Push all pending events that arrived before this frame */
2829 for (l = priv->frames.head; l; l = l->next) {
2830 GstVideoCodecFrame *tmp = l->data;
/* Event ownership transfers from each frame into the combined list. */
2833 events = g_list_concat (tmp->events, events);
2841 if (dropping || !decoder->priv->output_state) {
2842 /* Push before the next frame that is not dropped */
2843 decoder->priv->pending_events =
2844 g_list_concat (events, decoder->priv->pending_events);
2846 gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
2847 decoder->priv->pending_events = NULL;
2849 gst_video_decoder_push_event_list (decoder, events);
2852 /* Check if the data should not be displayed. For example altref/invisible
2853 * frame in vp8. In this case we should not update the timestamps. */
2854 if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
2857 /* If the frame is meant to be output but we don't have an output_buffer
2858 * we have a problem :) */
2859 if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
2860 goto no_output_buffer;
2862 if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
2863 if (frame->pts != priv->base_timestamp) {
2864 GST_DEBUG_OBJECT (decoder,
2865 "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
2866 GST_TIME_ARGS (frame->pts),
2867 GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
2868 decoder->output_segment.start)));
2869 priv->base_timestamp = frame->pts;
2870 priv->base_picture_number = frame->decode_frame_number;
2874 if (frame->duration == GST_CLOCK_TIME_NONE) {
2875 frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
2876 GST_LOG_OBJECT (decoder,
2877 "Guessing duration %" GST_TIME_FORMAT " for frame...",
2878 GST_TIME_ARGS (frame->duration));
2881 /* PTS is expected montone ascending,
2882 * so a good guess is lowest unsent DTS */
2884 GstClockTime min_ts = GST_CLOCK_TIME_NONE;
2885 GstVideoCodecFrame *oframe = NULL;
2886 gboolean seen_none = FALSE;
2888 /* some maintenance regardless */
/* First pass: abidata.ABI.ts holds per-frame DTS; find the minimum. */
2889 for (l = priv->frames.head; l; l = l->next) {
2890 GstVideoCodecFrame *tmp = l->data;
2892 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
2897 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
2898 min_ts = tmp->abidata.ABI.ts;
2902 /* save a ts if needed */
2903 if (oframe && oframe != frame) {
2904 oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
2907 /* and set if needed;
2908 * valid delta means we have reasonable DTS input */
2909 /* also, if we ended up reordered, means this approach is conflicting
2910 * with some sparse existing PTS, and so it does not work out */
2911 if (!priv->reordered_output &&
2912 !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
2913 GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
2914 frame->pts = min_ts + priv->pts_delta;
2915 GST_DEBUG_OBJECT (decoder,
2916 "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
2917 GST_TIME_ARGS (frame->pts));
2920 /* some more maintenance, ts2 holds PTS */
/* Second pass: abidata.ABI.ts2 holds per-frame PTS; find the minimum. */
2921 min_ts = GST_CLOCK_TIME_NONE;
2923 for (l = priv->frames.head; l; l = l->next) {
2924 GstVideoCodecFrame *tmp = l->data;
2926 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
2931 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
2932 min_ts = tmp->abidata.ABI.ts2;
2936 /* save a ts if needed */
2937 if (oframe && oframe != frame) {
2938 oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
2941 /* if we detected reordered output, then PTS are void,
2942 * however those were obtained; bogus input, subclass etc */
2943 if (priv->reordered_output && !seen_none) {
2944 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
2945 frame->pts = GST_CLOCK_TIME_NONE;
2948 if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
2949 frame->pts = min_ts;
2950 GST_DEBUG_OBJECT (decoder,
2951 "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
2952 GST_TIME_ARGS (frame->pts));
2957 if (frame->pts == GST_CLOCK_TIME_NONE) {
2958 /* Last ditch timestamp guess: Just add the duration to the previous
2959 * frame. If it's the first frame, just use the segment start. */
2960 if (frame->duration != GST_CLOCK_TIME_NONE) {
2961 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
2962 frame->pts = priv->last_timestamp_out + frame->duration;
2963 else if (decoder->output_segment.rate > 0.0)
2964 frame->pts = decoder->output_segment.start;
2965 GST_LOG_OBJECT (decoder,
2966 "Guessing timestamp %" GST_TIME_FORMAT " for frame...",
2967 GST_TIME_ARGS (frame->pts));
2968 } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
/* On a sync point DTS == PTS, so DTS is a safe fallback. */
2969 frame->pts = frame->dts;
2970 GST_LOG_OBJECT (decoder,
2971 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
2972 GST_TIME_ARGS (frame->pts));
2976 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
2977 if (frame->pts < priv->last_timestamp_out) {
2978 GST_WARNING_OBJECT (decoder,
2979 "decreasing timestamp (%" GST_TIME_FORMAT " < %"
2980 GST_TIME_FORMAT ")",
2981 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
2982 priv->reordered_output = TRUE;
2983 /* make it a bit less weird downstream */
2984 frame->pts = priv->last_timestamp_out;
2988 if (GST_CLOCK_TIME_IS_VALID (frame->pts))
2989 priv->last_timestamp_out = frame->pts;
/* no_output_buffer error label. */
2996 GST_ERROR_OBJECT (decoder, "No buffer to output !");
3001 * gst_video_decoder_release_frame:
3002 * @dec: a #GstVideoDecoder
3003 * @frame: (transfer full): the #GstVideoCodecFrame to release
3005 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
3006 * without any processing other than removing it from list of pending frames,
3007 * after which it is considered finished and released.
3012 gst_video_decoder_release_frame (GstVideoDecoder * dec,
3013 GstVideoCodecFrame * frame)
3017 /* unref once from the list */
3018 GST_VIDEO_DECODER_STREAM_LOCK (dec);
3019 link = g_queue_find (&dec->priv->frames, frame);
3021 gst_video_codec_frame_unref (frame);
3022 g_queue_delete_link (&dec->priv->frames, link);
/* Preserve the frame's undelivered events for the next finished frame. */
3024 if (frame->events) {
3025 dec->priv->pending_events =
3026 g_list_concat (frame->events, dec->priv->pending_events);
3027 frame->events = NULL;
3029 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3031 /* unref because this function takes ownership */
3032 gst_video_codec_frame_unref (frame);
3035 /* called with STREAM_LOCK */
/* Account a dropped frame and post a QoS message on the bus with the
 * running-time, stream-time, jitter vs. earliest_time, current proportion
 * and processed/dropped statistics for the given @timestamp. */
3037 gst_video_decoder_post_qos_drop (GstVideoDecoder * dec, GstClockTime timestamp)
3039 GstClockTime stream_time, jitter, earliest_time, qostime;
3040 GstSegment *segment;
3041 GstMessage *qos_msg;
3043 dec->priv->dropped++;
3045 /* post QoS message */
/* Snapshot QoS state under the object lock for consistency. */
3046 GST_OBJECT_LOCK (dec);
3047 proportion = dec->priv->proportion;
3048 earliest_time = dec->priv->earliest_time;
3049 GST_OBJECT_UNLOCK (dec);
3051 segment = &dec->output_segment;
/* Fall back to the input segment if output was never configured. */
3052 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
3053 segment = &dec->input_segment;
3055 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
3056 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
3057 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
3059 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
3060 timestamp, GST_CLOCK_TIME_NONE);
3061 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
3062 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
3063 dec->priv->processed, dec->priv->dropped);
3064 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
3068 * gst_video_decoder_drop_frame:
3069 * @dec: a #GstVideoDecoder
3070 * @frame: (transfer full): the #GstVideoCodecFrame to drop
3072 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
3073 * case and posts a QoS message with the frame's details on the bus.
3074 * In any case, the frame is considered finished and released.
3076 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3079 gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
3081 GST_LOG_OBJECT (dec, "drop frame %p", frame);
3083 GST_VIDEO_DECODER_STREAM_LOCK (dec);
/* dropping=TRUE: defer this frame's events, fix up its timestamps. */
3085 gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);
3087 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
3088 GST_TIME_ARGS (frame->pts));
3090 gst_video_decoder_post_qos_drop (dec, frame->pts);
3092 /* now free the frame */
3093 gst_video_decoder_release_frame (dec, frame);
3095 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
/* Default transform_meta implementation: a meta is copied to the output
 * only if every tag on its API type is one of the generic video tags
 * (video, orientation, size) — i.e. metas with codec- or memory-specific
 * tags are rejected.
 * NOTE(review): the tag-iteration loop and returns are partly missing
 * from this extract. */
3101 gst_video_decoder_transform_meta_default (GstVideoDecoder *
3102 decoder, GstVideoCodecFrame * frame, GstMeta * meta)
3104 const GstMetaInfo *info = meta->info;
3105 const gchar *const *tags;
3106 const gchar *const supported_tags[] = {
3107 GST_META_TAG_VIDEO_STR,
3108 GST_META_TAG_VIDEO_ORIENTATION_STR,
3109 GST_META_TAG_VIDEO_SIZE_STR,
3113 tags = gst_meta_api_type_get_tags (info->api);
/* Any tag outside supported_tags disqualifies the meta from copying. */
3119 if (!g_strv_contains (supported_tags, *tags))
3129 GstVideoDecoder *decoder;
3130 GstVideoCodecFrame *frame;
/* gst_buffer_foreach_meta callback used by finish_frame(): decides per
 * input-buffer meta whether to copy it onto frame->output_buffer.
 * Memory-specific metas are never copied; otherwise the subclass
 * transform_meta vfunc decides, and the copy is done via the meta's own
 * transform_func with a full-buffer copy region. */
3134 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
3136 CopyMetaData *data = user_data;
3137 GstVideoDecoder *decoder = data->decoder;
3138 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3139 GstVideoCodecFrame *frame = data->frame;
3140 const GstMetaInfo *info = (*meta)->info;
3141 gboolean do_copy = FALSE;
3143 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
3144 /* never call the transform_meta with memory specific metadata */
3145 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
3146 g_type_name (info->api));
3148 } else if (klass->transform_meta) {
3149 do_copy = klass->transform_meta (decoder, frame, *meta);
3150 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
3151 g_type_name (info->api), do_copy);
3154 /* we only copy metadata when the subclass implemented a transform_meta
3155 * function and when it returns %TRUE */
3156 if (do_copy && info->transform_func) {
/* { FALSE, 0, -1 } = no region restriction: copy covering whole buffer. */
3157 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
3158 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
3159 /* simply copy then */
3160 info->transform_func (frame->output_buffer, *meta, inbuf,
3161 _gst_meta_transform_copy, ©_data);
3167 * gst_video_decoder_finish_frame:
3168 * @decoder: a #GstVideoDecoder
3169 * @frame: (transfer full): a decoded #GstVideoCodecFrame
3171 * @frame should have a valid decoded data buffer, whose metadata fields
3172 * are then appropriately set according to frame data and pushed downstream.
3173 * If no output data is provided, @frame is considered skipped.
3174 * In any case, the frame is considered finished and released.
3176 * After calling this function the output buffer of the frame is to be
3177 * considered read-only. This function will also change the metadata
3180 * Returns: a #GstFlowReturn resulting from sending data downstream
3183 gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
3184 GstVideoCodecFrame * frame)
3186 GstFlowReturn ret = GST_FLOW_OK;
3187 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3188 GstVideoDecoderPrivate *priv = decoder->priv;
3189 GstBuffer *output_buffer;
3190 gboolean needs_reconfigure = FALSE;
3192 GST_LOG_OBJECT (decoder, "finish frame %p", frame);
3194 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* Renegotiate output caps first if state changed or downstream asked. */
3196 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3197 if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
3198 && needs_reconfigure))) {
3199 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
3200 gst_pad_mark_reconfigure (decoder->srcpad);
3201 if (GST_PAD_IS_FLUSHING (decoder->srcpad))
3202 ret = GST_FLOW_FLUSHING;
3204 ret = GST_FLOW_NOT_NEGOTIATED;
/* Events + timestamp fixups shared with drop_frame(). */
3209 gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
3212 if (priv->tags_changed) {
3213 GstEvent *tags_event;
3215 tags_event = gst_video_decoder_create_merged_tags_event (decoder);
3217 if (tags_event != NULL)
3218 gst_video_decoder_push_event (decoder, tags_event);
3220 priv->tags_changed = FALSE;
3223 /* no buffer data means this frame is skipped */
3224 if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
3225 GST_DEBUG_OBJECT (decoder,
3226 "skipping frame %" GST_TIME_FORMAT " because not output was produced",
3227 GST_TIME_ARGS (frame->pts));
3231 /* Mark output as corrupted if the subclass requested so and we're either
3232 * still before the sync point after the request, or we don't even know the
3233 * frame number of the sync point yet (it is 0) */
3234 GST_OBJECT_LOCK (decoder);
3235 if (frame->system_frame_number <= priv->request_sync_point_frame_number
3236 && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
3237 if (priv->request_sync_point_flags &
3238 GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
3239 GST_DEBUG_OBJECT (decoder,
3240 "marking frame %" GST_TIME_FORMAT
3241 " as corrupted because it is still before the sync point",
3242 GST_TIME_ARGS (frame->pts));
3243 GST_VIDEO_CODEC_FRAME_FLAG_SET (frame,
3244 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3247 /* Reset to -1 to mark it as unset now that we've reached the frame */
3248 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
3250 GST_OBJECT_UNLOCK (decoder);
3252 if (priv->discard_corrupted_frames
3253 && (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3254 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
3255 || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
3256 GST_BUFFER_FLAG_CORRUPTED))) {
3257 GST_DEBUG_OBJECT (decoder,
3258 "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
3259 GST_TIME_ARGS (frame->pts));
3263 /* We need a writable buffer for the metadata changes below */
3264 output_buffer = frame->output_buffer =
3265 gst_buffer_make_writable (frame->output_buffer);
/* Decoded output is always displayable, never a delta unit. */
3267 GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
3269 GST_BUFFER_PTS (output_buffer) = frame->pts;
/* DTS is meaningless on decoded (display-ordered) output. */
3270 GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
3271 GST_BUFFER_DURATION (output_buffer) = frame->duration;
3273 GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
3274 GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
3276 if (priv->discont) {
3277 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
3280 if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3281 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
3282 GST_DEBUG_OBJECT (decoder,
3283 "marking frame %" GST_TIME_FORMAT " as corrupted",
3284 GST_TIME_ARGS (frame->pts));
3285 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
3288 if (decoder_class->transform_meta) {
3289 if (G_LIKELY (frame->input_buffer)) {
3292 data.decoder = decoder;
3294 gst_buffer_foreach_meta (frame->input_buffer, foreach_metadata, &data);
3296 GST_WARNING_OBJECT (decoder,
3297 "Can't copy metadata because input frame disappeared");
3301 /* Get an additional ref to the buffer, which is going to be pushed
3302 * downstream, the original ref is owned by the frame
3304 output_buffer = gst_buffer_ref (output_buffer);
3306 /* Release frame so the buffer is writable when we push it downstream
3307 * if possible, i.e. if the subclass does not hold additional references
3310 gst_video_decoder_release_frame (decoder, frame);
/* Reverse playback (without keyframe trick mode) queues instead of pushes. */
3313 if (decoder->output_segment.rate < 0.0
3314 && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
3315 GST_LOG_OBJECT (decoder, "queued frame");
3316 priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
3318 ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
/* Common exit: release the frame and drop the stream lock. */
3323 gst_video_decoder_release_frame (decoder, frame);
3324 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3328 /* With stream lock, takes the frame reference */
/* Clips the decoded buffer against the output segment, drops it if QoS says
 * it is too late, updates bitrate/discont bookkeeping, and pushes it on the
 * src pad.  Consumes @buf.  The STREAM_LOCK is released only around the
 * actual gst_pad_push() so upstream is not blocked while pushing. */
3329 static GstFlowReturn
3330 gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
3332   GstFlowReturn ret = GST_FLOW_OK;
3333   GstVideoDecoderPrivate *priv = decoder->priv;
3334   guint64 start, stop;
3335   guint64 cstart, cstop;
3336   GstSegment *segment;
3337   GstClockTime duration;
3339   /* Check for clipping */
3340   start = GST_BUFFER_PTS (buf);
3341   duration = GST_BUFFER_DURATION (buf);
3343   /* store that we have valid decoded data */
3344   priv->had_output_data = TRUE;
3346   stop = GST_CLOCK_TIME_NONE;
3348   if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
3349     stop = start + duration;
3350   } else if (GST_CLOCK_TIME_IS_VALID (start)
3351       && !GST_CLOCK_TIME_IS_VALID (duration)) {
3352     /* If we don't clip away buffers that far before the segment we
3353      * can cause the pipeline to lockup. This can happen if audio is
3354      * properly clipped, and thus the audio sink does not preroll yet
3355      * but the video sink prerolls because we already outputted a
3356      * buffer here... and then queues run full.
3358      * In the worst case we will clip one buffer too many here now if no
3359      * framerate is given, no buffer duration is given and the actual
3360      * framerate is lower than 25fps */
     /* 40ms = one frame at 25fps; a synthetic stop time for clipping only */
3361     stop = start + 40 * GST_MSECOND;
3364   segment = &decoder->output_segment;
3365   if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
3366     GST_BUFFER_PTS (buf) = cstart;
3368     if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
3369       GST_BUFFER_DURATION (buf) = cstop - cstart;
3371     GST_LOG_OBJECT (decoder,
3372         "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
3373         GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3374         " time %" GST_TIME_FORMAT,
3375         GST_TIME_ARGS (cstart),
3376         GST_TIME_ARGS (cstop),
3377         GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
3378         GST_TIME_ARGS (segment->time));
3380     GST_LOG_OBJECT (decoder,
3381         "dropping buffer outside segment: %" GST_TIME_FORMAT
3382         " %" GST_TIME_FORMAT
3383         " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3384         " time %" GST_TIME_FORMAT,
3385         GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
3386         GST_TIME_ARGS (segment->start),
3387         GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
3388     /* only check and return EOS if upstream still
3389      * in the same segment and interested as such */
3390     if (decoder->priv->in_out_segment_sync) {
3391       if (segment->rate >= 0) {
3392         if (GST_BUFFER_PTS (buf) >= segment->stop)
     /* reverse playback: past the segment start means we are done */
3394       } else if (GST_BUFFER_PTS (buf) < segment->start) {
3398     gst_buffer_unref (buf);
3402   /* Is buffer too late (QoS) ? */
3403   if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
3404       && GST_CLOCK_TIME_IS_VALID (cstart)) {
3405     GstClockTime deadline =
3406         gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
3407     if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
3408       GST_WARNING_OBJECT (decoder,
3409           "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
3410           GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
3411           GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
3412           GST_TIME_ARGS (priv->earliest_time));
3413       gst_video_decoder_post_qos_drop (decoder, cstart);
3414       gst_buffer_unref (buf);
     /* next pushed buffer must carry DISCONT since we dropped this one */
3415       priv->discont = TRUE;
3420   /* Set DISCONT flag here ! */
3422   if (priv->discont) {
3423     GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
3424     GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
3425     priv->discont = FALSE;
3428   /* update rate estimate */
  /* bytes_out/time are shared with the query handler; guard with object lock */
3429   GST_OBJECT_LOCK (decoder);
3430   priv->bytes_out += gst_buffer_get_size (buf);
3431   if (GST_CLOCK_TIME_IS_VALID (duration)) {
3432     priv->time += duration;
3434     /* FIXME : Use difference between current and previous outgoing
3435      * timestamp, and relate to difference between current and previous
3437     /* better none than nothing valid */
3438     priv->time = GST_CLOCK_TIME_NONE;
3440   GST_OBJECT_UNLOCK (decoder);
3442   GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
3443       "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
3444       gst_buffer_get_size (buf),
3445       GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
3446       GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
3448   /* we got data, so note things are looking up again, reduce
3449    * the error count, if there is one */
3450   if (G_UNLIKELY (priv->error_count))
3451     priv->error_count = 0;
3453 #ifndef GST_DISABLE_DEBUG
3454   if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
3455     GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;
3457     /* First buffer since reset, report how long we took */
3458     GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
3459         " to produce", GST_TIME_ARGS (elapsed));
3460     priv->last_reset_time = GST_CLOCK_TIME_NONE;
3464   /* release STREAM_LOCK not to block upstream
3465    * while pushing buffer downstream */
3466   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3467   ret = gst_pad_push (decoder->srcpad, buf);
3468   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3475  * gst_video_decoder_add_to_frame:
3476  * @decoder: a #GstVideoDecoder
3477  * @n_bytes: the number of bytes to add
3479  * Removes next @n_bytes of input data and adds it to currently parsed frame.
3482 gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
3484   GstVideoDecoderPrivate *priv = decoder->priv;
3487   GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3492   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  /* first bytes of a new frame: remember the input offset the frame
   * started at, so timestamps can be looked up later by offset */
3493   if (gst_adapter_available (priv->output_adapter) == 0) {
3494     priv->frame_offset =
3495         priv->input_offset - gst_adapter_available (priv->input_adapter);
  /* move n_bytes from the input adapter into the output (frame) adapter */
3497   buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3499   gst_adapter_push (priv->output_adapter, buf);
3500   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3504  * gst_video_decoder_get_pending_frame_size:
3505  * @decoder: a #GstVideoDecoder
3507  * Returns the number of bytes previously added to the current frame
3508  * by calling gst_video_decoder_add_to_frame().
3510  * Returns: The number of bytes pending for the current frame
3515 gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
3517   GstVideoDecoderPrivate *priv = decoder->priv;
  /* the pending frame size is simply what has accumulated in the
   * output adapter; take the stream lock for a consistent read */
3520   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3521   ret = gst_adapter_available (priv->output_adapter);
3522   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3524   GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT "bytes",
/* Computes a frame duration (in nanoseconds) from the output state's
 * framerate, or GST_CLOCK_TIME_NONE when no output state or no valid
 * framerate is available yet. */
3531 gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
3532     GstVideoCodecFrame * frame)
3534   GstVideoCodecState *state = decoder->priv->output_state;
3536   /* it's possible that we don't have a state yet when we are dropping the
3537    * initial buffers */
3539     return GST_CLOCK_TIME_NONE;
3541   if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3542     return GST_CLOCK_TIME_NONE;
3545   /* FIXME: For interlaced frames this needs to take into account
3546    * the number of valid fields in the frame
  /* duration = GST_SECOND * fps_d / fps_n */
3549   return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3554  * gst_video_decoder_have_frame:
3555  * @decoder: a #GstVideoDecoder
3557  * Gathers all data collected for currently parsed frame, gathers corresponding
3558  * metadata and passes it along for further processing, i.e. @handle_frame.
3560  * Returns: a #GstFlowReturn
3563 gst_video_decoder_have_frame (GstVideoDecoder * decoder)
3565   GstVideoDecoderPrivate *priv = decoder->priv;
3568   GstClockTime pts, dts, duration;
3570   GstFlowReturn ret = GST_FLOW_OK;
3572   GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
3573       priv->frame_offset);
3575   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3577   n_available = gst_adapter_available (priv->output_adapter);
3579     buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
  /* no parsed data gathered: hand the subclass an empty buffer */
3581     buffer = gst_buffer_new_and_alloc (0);
3584   priv->current_frame->input_buffer = buffer;
  /* look up timestamps/flags recorded for the input offset where
   * this frame started */
3586   gst_video_decoder_get_buffer_info_at_offset (decoder,
3587       priv->frame_offset, &pts, &dts, &duration, &flags);
3589   GST_BUFFER_PTS (buffer) = pts;
3590   GST_BUFFER_DTS (buffer) = dts;
3591   GST_BUFFER_DURATION (buffer) = duration;
3592   GST_BUFFER_FLAGS (buffer) = flags;
3594   GST_LOG_OBJECT (decoder, "collected frame size %d, "
3595       "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
3596       GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
3597       GST_TIME_ARGS (duration));
  /* a non-delta unit is by definition a sync point */
3599   if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
3600     GST_DEBUG_OBJECT (decoder, "Marking as sync point");
3601     GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
3604   if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
3605     GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
3606     GST_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
3607         GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3610   /* In reverse playback, just capture and queue frames for later processing */
3611   if (decoder->input_segment.rate < 0.0) {
3612     priv->parse_gather =
3613         g_list_prepend (priv->parse_gather, priv->current_frame);
3615     /* Otherwise, decode the frame, which gives away our ref */
3616     ret = gst_video_decoder_decode_frame (decoder, priv->current_frame);
3618   /* Current frame is gone now, either way */
3619   priv->current_frame = NULL;
3621   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3626 /* Pass the frame in priv->current_frame through the
3627  * handle_frame() callback for decoding and passing to gvd_finish_frame(),
3628  * or dropping by passing to gvd_drop_frame() */
/* Takes ownership of @frame.  Fills in the frame timestamps from its input
 * buffer, maintains keyframe/sync-point accounting, then invokes the
 * subclass handle_frame() vfunc.  Called with the STREAM_LOCK held. */
3629 static GstFlowReturn
3630 gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
3631     GstVideoCodecFrame * frame)
3633   GstVideoDecoderPrivate *priv = decoder->priv;
3634   GstVideoDecoderClass *decoder_class;
3635   GstFlowReturn ret = GST_FLOW_OK;
3637   decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3639   /* FIXME : This should only have to be checked once (either the subclass has an
3640    * implementation, or it doesn't) */
3641   g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
3643   frame->pts = GST_BUFFER_PTS (frame->input_buffer);
3644   frame->dts = GST_BUFFER_DTS (frame->input_buffer);
3645   frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
3647   /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
3649   /* FIXME upstream can be quite wrong about the keyframe aspect,
3650    * so we could be going off here as well,
3651    * maybe let subclass decide if it really is/was a keyframe */
3652   if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
3653     priv->distance_from_sync = 0;
  /* a sync point satisfies any pending request-sync-point state */
3655     GST_OBJECT_LOCK (decoder);
3656     priv->request_sync_point_flags &=
3657         ~GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
3658     if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
3659       priv->request_sync_point_frame_number = frame->system_frame_number;
3660     GST_OBJECT_UNLOCK (decoder);
3662     if (GST_CLOCK_TIME_IS_VALID (frame->pts)
3663         && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
3664       /* just in case they are not equal as might ideally be,
3665        * e.g. quicktime has a (positive) delta approach */
3666       priv->pts_delta = frame->pts - frame->dts;
3667       GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
3668           (gint) (priv->pts_delta / GST_MSECOND));
3671     GST_OBJECT_LOCK (decoder);
  /* discard input until a sync point arrives, when the subclass needs one
   * or explicitly requested input to be dropped */
3672     if ((priv->needs_sync_point && priv->distance_from_sync == -1)
3673         || (priv->request_sync_point_flags &
3674             GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
3675       GST_WARNING_OBJECT (decoder,
3676           "Subclass requires a sync point but we didn't receive one yet, discarding input");
3677       GST_OBJECT_UNLOCK (decoder);
3678       gst_video_decoder_release_frame (decoder, frame);
3681     GST_OBJECT_UNLOCK (decoder);
3683     priv->distance_from_sync++;
3686   frame->distance_from_sync = priv->distance_from_sync;
  /* stash original timestamps in the ABI scratch fields; presumably used
   * later for timestamp reconstruction -- elided lines not visible here */
3688   frame->abidata.ABI.ts = frame->dts;
3689   frame->abidata.ABI.ts2 = frame->pts;
3691   GST_LOG_OBJECT (decoder, "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT
3692       ", dist %d", GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
3693       frame->distance_from_sync);
  /* keep a ref in the pending-frames queue; the frame itself is handed
   * to the subclass below */
3695   g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));
3697   if (priv->frames.length > 10) {
3698     GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
3699         "possible internal leaking?", priv->frames.length);
3703       gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
3706   /* do something with frame */
3707   ret = decoder_class->handle_frame (decoder, frame);
3708   if (ret != GST_FLOW_OK)
3709     GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
3711   /* the frame has either been added to parse_gather or sent to
3712      handle frame so there is no need to unref it */
3718  * gst_video_decoder_get_output_state:
3719  * @decoder: a #GstVideoDecoder
3721  * Get the #GstVideoCodecState currently describing the output stream.
3723  * Returns: (transfer full): #GstVideoCodecState describing format of video data.
3725 GstVideoCodecState *
3726 gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
3728   GstVideoCodecState *state = NULL;
  /* take a ref under the object lock so the state cannot be replaced
   * concurrently; caller owns the returned ref */
3730   GST_OBJECT_LOCK (decoder);
3731   if (decoder->priv->output_state)
3732     state = gst_video_codec_state_ref (decoder->priv->output_state);
3733   GST_OBJECT_UNLOCK (decoder);
/* Shared worker for gst_video_decoder_set_output_state() and
 * gst_video_decoder_set_interlaced_output_state(): builds a new output
 * state, installs it as priv->output_state and precomputes the per-frame
 * QoS duration from its framerate. */
3738 static GstVideoCodecState *
3739 _set_interlaced_output_state (GstVideoDecoder * decoder,
3740     GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
3741     guint height, GstVideoCodecState * reference, gboolean copy_interlace_mode)
3743   GstVideoDecoderPrivate *priv = decoder->priv;
3744   GstVideoCodecState *state;
  /* when copying the interlace mode from @reference, the caller must pass
   * PROGRESSIVE as the (ignored) explicit mode */
3746   g_assert ((copy_interlace_mode
3747           && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
3748       || !copy_interlace_mode);
3750   GST_DEBUG_OBJECT (decoder,
3751       "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
3752       width, height, gst_video_interlace_mode_to_string (interlace_mode),
3755   /* Create the new output state */
3757       _new_output_state (fmt, interlace_mode, width, height, reference,
3758       copy_interlace_mode);
3762   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3764   GST_OBJECT_LOCK (decoder);
3765   /* Replace existing output state by new one */
3766   if (priv->output_state)
3767     gst_video_codec_state_unref (priv->output_state);
3768   priv->output_state = gst_video_codec_state_ref (state);
  /* cache qos frame duration = GST_SECOND * fps_d / fps_n, or 0 when
   * no valid framerate is set */
3770   if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
3771     priv->qos_frame_duration =
3772         gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
3773         priv->output_state->info.fps_n);
3775     priv->qos_frame_duration = 0;
3777   priv->output_state_changed = TRUE;
3778   GST_OBJECT_UNLOCK (decoder);
3780   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3786  * gst_video_decoder_set_output_state:
3787  * @decoder: a #GstVideoDecoder
3788  * @fmt: a #GstVideoFormat
3789  * @width: The width in pixels
3790  * @height: The height in pixels
3791  * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
3793  * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
3794  * as the output state for the decoder.
3795  * Any previously set output state on @decoder will be replaced by the newly
3798  * If the subclass wishes to copy over existing fields (like pixel aspec ratio,
3799  * or framerate) from an existing #GstVideoCodecState, it can be provided as a
3802  * If the subclass wishes to override some fields from the output state (like
3803  * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
3805  * The new output state will only take effect (set on pads and buffers) starting
3806  * from the next call to #gst_video_decoder_finish_frame().
3808  * Returns: (transfer full): the newly configured output state.
3810 GstVideoCodecState *
3811 gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
3812     GstVideoFormat fmt, guint width, guint height,
3813     GstVideoCodecState * reference)
  /* progressive convenience wrapper; interlace mode is copied from
   * @reference (copy_interlace_mode = TRUE) */
3815   return _set_interlaced_output_state (decoder, fmt,
3816       GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
3820  * gst_video_decoder_set_interlaced_output_state:
3821  * @decoder: a #GstVideoDecoder
3822  * @fmt: a #GstVideoFormat
3823  * @width: The width in pixels
3824  * @height: The height in pixels
3825  * @interlace_mode: A #GstVideoInterlaceMode
3826  * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
3828  * Same as #gst_video_decoder_set_output_state() but also allows you to also set
3829  * the interlacing mode.
3831  * Returns: (transfer full): the newly configured output state.
3835 GstVideoCodecState *
3836 gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
3837     GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
3838     guint height, GstVideoCodecState * reference)
  /* explicit interlace mode requested: do not copy it from @reference */
3840   return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
3841       height, reference, FALSE);
3846  * gst_video_decoder_get_oldest_frame:
3847  * @decoder: a #GstVideoDecoder
3849  * Get the oldest pending unfinished #GstVideoCodecFrame
3851  * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
3853 GstVideoCodecFrame *
3854 gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
3856   GstVideoCodecFrame *frame = NULL;
  /* the head of priv->frames is the oldest queued frame; ref it under
   * the stream lock so it stays valid for the caller */
3858   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3859   if (decoder->priv->frames.head)
3860     frame = gst_video_codec_frame_ref (decoder->priv->frames.head->data);
3861   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3863   return (GstVideoCodecFrame *) frame;
3867  * gst_video_decoder_get_frame:
3868  * @decoder: a #GstVideoDecoder
3869  * @frame_number: system_frame_number of a frame
3871  * Get a pending unfinished #GstVideoCodecFrame
3873  * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
3875 GstVideoCodecFrame *
3876 gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
3879   GstVideoCodecFrame *frame = NULL;
3881   GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
  /* linear search of the pending-frames queue by system_frame_number;
   * returns a new ref, or NULL if not found */
3883   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3884   for (g = decoder->priv->frames.head; g; g = g->next) {
3885     GstVideoCodecFrame *tmp = g->data;
3887     if (tmp->system_frame_number == frame_number) {
3888       frame = gst_video_codec_frame_ref (tmp);
3892   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3898  * gst_video_decoder_get_frames:
3899  * @decoder: a #GstVideoDecoder
3901  * Get all pending unfinished #GstVideoCodecFrame
3903  * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
3906 gst_video_decoder_get_frames (GstVideoDecoder * decoder)
  /* deep-copy the queue, taking a ref on each frame so the caller owns
   * the resulting list and its elements */
3910   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3912       g_list_copy_deep (decoder->priv->frames.head,
3913       (GCopyFunc) gst_video_codec_frame_ref, NULL);
3914   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* Default decide_allocation vfunc: picks (or creates) a buffer pool and
 * allocator from the downstream ALLOCATION query result, configures the
 * pool for the negotiated caps/size, and writes the choice back into the
 * query.  Falls back to a fresh GstVideoBufferPool when downstream's pool
 * rejects our configuration. */
3920 gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
3923   GstCaps *outcaps = NULL;
3924   GstBufferPool *pool = NULL;
3925   guint size, min, max;
3926   GstAllocator *allocator = NULL;
3927   GstAllocationParams params;
3928   GstStructure *config;
3929   gboolean update_pool, update_allocator;
3932   gst_query_parse_allocation (query, &outcaps, NULL);
3933   gst_video_info_init (&vinfo);
3935     gst_video_info_from_caps (&vinfo, outcaps);
3937   /* we got configuration from our peer or the decide_allocation method,
3939   if (gst_query_get_n_allocation_params (query) > 0) {
3940     /* try the allocator */
3941     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
3942     update_allocator = TRUE;
3945     gst_allocation_params_init (&params);
3946     update_allocator = FALSE;
3949   if (gst_query_get_n_allocation_pools (query) > 0) {
3950     gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
  /* never use a size smaller than what the video info requires */
3951     size = MAX (size, vinfo.size);
3958     update_pool = FALSE;
3962     /* no pool, we can make our own */
3963     GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
3964     pool = gst_video_buffer_pool_new ();
3968   config = gst_buffer_pool_get_config (pool);
3969   gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
3970   gst_buffer_pool_config_set_allocator (config, allocator, &params);
3972   GST_DEBUG_OBJECT (decoder,
3973       "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
3975   if (!gst_buffer_pool_set_config (pool, config)) {
  /* pool may have adjusted the config; re-read it to inspect the result */
3976     config = gst_buffer_pool_get_config (pool);
3978     /* If change are not acceptable, fallback to generic pool */
3979     if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
3981       GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");
3983       gst_object_unref (pool);
3984       pool = gst_video_buffer_pool_new ();
3985       gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
3986       gst_buffer_pool_config_set_allocator (config, allocator, &params);
3989     if (!gst_buffer_pool_set_config (pool, config))
  /* write our decision back into the query, replacing or appending
   * depending on whether downstream supplied entries */
3993   if (update_allocator)
3994     gst_query_set_nth_allocation_param (query, 0, allocator, &params);
3996     gst_query_add_allocation_param (query, allocator, &params);
3998     gst_object_unref (allocator);
4001     gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
4003     gst_query_add_allocation_pool (query, pool, size, min, max);
4006     gst_object_unref (pool);
  /* error path (reached via elided goto): drop refs and post an error */
4012     gst_object_unref (allocator);
4014     gst_object_unref (pool);
4015   GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
4016       ("Failed to configure the buffer pool"),
4017       ("Configuration is most likely invalid, please report this issue."));
/* Default propose_allocation vfunc implementation (body lies in lines
 * elided from this view). */
4022 gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
/* Runs the ALLOCATION query against downstream for @caps, lets the
 * subclass decide_allocation() vfunc pick pool/allocator, and installs
 * the result into priv->allocator / priv->params / priv->pool, activating
 * the pool. */
4029 gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
4031   GstVideoDecoderClass *klass;
4032   GstQuery *query = NULL;
4033   GstBufferPool *pool = NULL;
4034   GstAllocator *allocator;
4035   GstAllocationParams params;
4036   gboolean ret = TRUE;
4038   klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4040   query = gst_query_new_allocation (caps, TRUE);
4042   GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
  /* a failed peer query is not fatal: we continue with an empty query
   * and let decide_allocation fill in defaults */
4044   if (!gst_pad_peer_query (decoder->srcpad, query)) {
4045     GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
4048   g_assert (klass->decide_allocation != NULL);
4049   ret = klass->decide_allocation (decoder, query);
4051   GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
4055     goto no_decide_allocation;
4057   /* we got configuration from our peer or the decide_allocation method,
4059   if (gst_query_get_n_allocation_params (query) > 0) {
4060     gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
4063     gst_allocation_params_init (&params);
4066   if (gst_query_get_n_allocation_pools (query) > 0)
4067     gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
4070       gst_object_unref (allocator);
4072     goto no_decide_allocation;
  /* swap in the newly decided allocator and params */
4075   if (decoder->priv->allocator)
4076     gst_object_unref (decoder->priv->allocator);
4077   decoder->priv->allocator = allocator;
4078   decoder->priv->params = params;
4080   if (decoder->priv->pool) {
4081     /* do not set the bufferpool to inactive here, it will be done
4082      * on its finalize function. As videodecoder do late renegotiation
4083      * it might happen that some element downstream is already using this
4084      * same bufferpool and deactivating it will make it fail.
4085      * Happens when a downstream element changes from passthrough to
4086      * non-passthrough and gets this same bufferpool to use */
4087     GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
4088         decoder->priv->pool);
4089     gst_object_unref (decoder->priv->pool);
4091   decoder->priv->pool = pool;
4094     GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
4095     gst_buffer_pool_set_active (pool, TRUE);
4099     gst_query_unref (query);
4104 no_decide_allocation:
4106     GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
/* Default negotiate vfunc: turns the configured output state into src-pad
 * caps (preserving upstream HDR metadata fields), flushes pre-caps events,
 * sets the caps downstream and negotiates the buffer pool. */
4112 gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
4114   GstVideoCodecState *state = decoder->priv->output_state;
4115   gboolean ret = TRUE;
4116   GstVideoCodecFrame *frame;
  /* no output state yet: only a pool negotiation is attempted */
4121     GST_DEBUG_OBJECT (decoder,
4122         "Trying to negotiate the pool with out setting the o/p format");
4123     ret = gst_video_decoder_negotiate_pool (decoder, NULL);
4127   g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
4128   g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);
4130   /* If the base class didn't set any multiview params, assume mono
4132   if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
4133       GST_VIDEO_MULTIVIEW_MODE_NONE) {
4134     GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
4135         GST_VIDEO_MULTIVIEW_MODE_MONO;
4136     GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
4137         GST_VIDEO_MULTIVIEW_FLAGS_NONE;
4140   GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
4141       state->info.par_n, state->info.par_d,
4142       state->info.fps_n, state->info.fps_d);
4144   if (state->caps == NULL)
4145     state->caps = gst_video_info_to_caps (&state->info);
4147   incaps = gst_pad_get_current_caps (GST_VIDEO_DECODER_SINK_PAD (decoder));
4149     GstStructure *in_struct;
4151     in_struct = gst_caps_get_structure (incaps, 0);
4152     if (gst_structure_has_field (in_struct, "mastering-display-info") ||
4153         gst_structure_has_field (in_struct, "content-light-level")) {
4156       /* prefer upstream information */
4157       state->caps = gst_caps_make_writable (state->caps);
4158       if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
4159         gst_caps_set_simple (state->caps,
4160             "mastering-display-info", G_TYPE_STRING, s, NULL);
4163       if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
4164         gst_caps_set_simple (state->caps,
4165             "content-light-level", G_TYPE_STRING, s, NULL);
4169     gst_caps_unref (incaps);
4172   if (state->allocation_caps == NULL)
4173     state->allocation_caps = gst_caps_ref (state->caps);
4175   GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);
4177   /* Push all pending pre-caps events of the oldest frame before
4179   frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
4180   if (frame || decoder->priv->current_frame_events) {
4184       events = &frame->events;
4186       events = &decoder->priv->current_frame_events;
  /* events were prepended, so walk from the tail to push them in
   * their original arrival order */
4189     for (l = g_list_last (*events); l;) {
4190       GstEvent *event = GST_EVENT (l->data);
  /* only serialized events ordered before CAPS may be pushed now */
4193       if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
4194         gst_video_decoder_push_event (decoder, event);
4197         *events = g_list_delete_link (*events, tmp);
  /* skip the caps update when the pad already carries identical caps */
4204   prevcaps = gst_pad_get_current_caps (decoder->srcpad);
4205   if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
4207       GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
4209     ret = gst_pad_set_caps (decoder->srcpad, state->caps);
4212     GST_DEBUG_OBJECT (decoder,
4213         "current src pad and output state caps are the same");
4216     gst_caps_unref (prevcaps);
4220     decoder->priv->output_state_changed = FALSE;
4221     /* Negotiate pool */
4222     ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);
/* Invokes the subclass negotiate vfunc (if any) without touching the
 * srcpad reconfigure flag; callers already hold the STREAM_LOCK. */
4229 gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
4231   GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4232   gboolean ret = TRUE;
4234   if (G_LIKELY (klass->negotiate))
4235     ret = klass->negotiate (decoder);
4241  * gst_video_decoder_negotiate:
4242  * @decoder: a #GstVideoDecoder
4244  * Negotiate with downstream elements to currently configured #GstVideoCodecState.
4245  * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
4248  * Returns: %TRUE if the negotiation succeeded, else %FALSE.
4251 gst_video_decoder_negotiate (GstVideoDecoder * decoder)
4253   GstVideoDecoderClass *klass;
4254   gboolean ret = TRUE;
4256   g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);
4258   klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4260   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  /* clear NEED_RECONFIGURE before negotiating; re-mark below on failure
   * so a later attempt retries the negotiation */
4261   gst_pad_check_reconfigure (decoder->srcpad);
4262   if (klass->negotiate) {
4263     ret = klass->negotiate (decoder);
4265       gst_pad_mark_reconfigure (decoder->srcpad);
4267   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4273  * gst_video_decoder_allocate_output_buffer:
4274  * @decoder: a #GstVideoDecoder
4276  * Helper function that allocates a buffer to hold a video frame for @decoder's
4277  * current #GstVideoCodecState.
4279  * You should use gst_video_decoder_allocate_output_frame() instead of this
4280  * function, if possible at all.
4282  * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
4283  *     allocated (e.g. when downstream is flushing or shutting down)
4286 gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
4289   GstBuffer *buffer = NULL;
4290   gboolean needs_reconfigure = FALSE;
4292   GST_DEBUG ("alloc src buffer");
4294   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
  /* (re)negotiate first when the output state changed or the srcpad was
   * marked for reconfiguration */
4295   needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4296   if (G_UNLIKELY (!decoder->priv->output_state
4297           || decoder->priv->output_state_changed || needs_reconfigure)) {
4298     if (!gst_video_decoder_negotiate_unlocked (decoder)) {
4299       if (decoder->priv->output_state) {
4300         GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
  /* keep the reconfigure flag set so negotiation is retried later */
4301         gst_pad_mark_reconfigure (decoder->srcpad);
4304         GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
4305         goto failed_allocation;
4310   flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
4312   if (flow != GST_FLOW_OK) {
4313     GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
4314         gst_flow_get_name (flow));
  /* fall back to a pool-less allocation only if we know the frame size */
4315     if (decoder->priv->output_state && decoder->priv->output_state->info.size)
4318       goto failed_allocation;
4320   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4325   GST_INFO_OBJECT (decoder,
4326       "Fallback allocation, creating new buffer which doesn't belongs to any buffer pool");
4328       gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
4332   GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
4333   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4339  * gst_video_decoder_allocate_output_frame:
4340  * @decoder: a #GstVideoDecoder
4341  * @frame: a #GstVideoCodecFrame
4343  * Helper function that allocates a buffer to hold a video frame for @decoder's
4344  * current #GstVideoCodecState.  Subclass should already have configured video
4345  * state and set src pad caps.
4347  * The buffer allocated here is owned by the frame and you should only
4348  * keep references to the frame, not the buffer.
4350  * Returns: %GST_FLOW_OK if an output buffer could be allocated
4353 gst_video_decoder_allocate_output_frame (GstVideoDecoder *
4354     decoder, GstVideoCodecFrame * frame)
  /* convenience wrapper: same as the _with_params variant with no
   * buffer-pool acquire params */
4356   return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
4361  * gst_video_decoder_allocate_output_frame_with_params:
4362  * @decoder: a #GstVideoDecoder
4363  * @frame: a #GstVideoCodecFrame
4364  * @params: a #GstBufferPoolAcquireParams
4366  * Same as #gst_video_decoder_allocate_output_frame except it allows passing
4367  * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
4369  * Returns: %GST_FLOW_OK if an output buffer could be allocated
4374 gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
4375     decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
4377   GstFlowReturn flow_ret;
4378   GstVideoCodecState *state;
4380   gboolean needs_reconfigure = FALSE;
4382   g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
4383   g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);
4385   GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4387   state = decoder->priv->output_state;
4388   if (state == NULL) {
4389     g_warning ("Output state should be set before allocating frame");
4392   num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
4393   if (num_bytes == 0) {
4394     g_warning ("Frame size should not be 0");
  /* renegotiate if the output state changed or the srcpad was marked
   * for reconfiguration since the last allocation */
4398   needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4399   if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
4400     if (!gst_video_decoder_negotiate_unlocked (decoder)) {
4401       GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
4402       gst_pad_mark_reconfigure (decoder->srcpad);
4406   GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
  /* the acquired buffer is stored on the frame, which owns it */
4408   flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
4409       &frame->output_buffer, params);
4411   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4416   GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4417   return GST_FLOW_ERROR;
4421  * gst_video_decoder_get_max_decode_time:
4422  * @decoder: a #GstVideoDecoder
4423  * @frame: a #GstVideoCodecFrame
4425  * Determines maximum possible decoding time for @frame that will
4426  * allow it to decode and arrive in time (as determined by QoS events).
4427  * In particular, a negative result means decoding in time is no longer possible
4428  * and should therefore occur as soon/skippy as possible.
4430  * Returns: max decoding time.
4433 gst_video_decoder_get_max_decode_time (GstVideoDecoder *
4434     decoder, GstVideoCodecFrame * frame)
4436   GstClockTimeDiff deadline;
4437   GstClockTime earliest_time;
  /* earliest_time comes from QoS events and is protected by the object
   * lock; when either time is invalid there is effectively no deadline */
4439   GST_OBJECT_LOCK (decoder);
4440   earliest_time = decoder->priv->earliest_time;
4441   if (GST_CLOCK_TIME_IS_VALID (earliest_time)
4442       && GST_CLOCK_TIME_IS_VALID (frame->deadline))
4443     deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
4445     deadline = G_MAXINT64;
4447   GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
4448       ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
4449       GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
4450       GST_STIME_ARGS (deadline));
4452   GST_OBJECT_UNLOCK (decoder);
4458 * gst_video_decoder_get_qos_proportion:
4459 * @decoder: a #GstVideoDecoder
4460 * current QoS proportion, or %NULL
4462 * Returns: The current QoS proportion.
4467 gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
4471 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);
4473 GST_OBJECT_LOCK (decoder);
4474 proportion = decoder->priv->proportion;
4475 GST_OBJECT_UNLOCK (decoder);
4481 _gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
4482 GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
4483 const gchar * function, gint line)
4486 GST_WARNING_OBJECT (dec, "error: %s", txt);
4488 GST_WARNING_OBJECT (dec, "error: %s", dbg);
4489 dec->priv->error_count += weight;
4490 dec->priv->discont = TRUE;
4491 if (dec->priv->max_errors >= 0 &&
4492 dec->priv->error_count > dec->priv->max_errors) {
4493 gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
4494 domain, code, txt, dbg, file, function, line);
4495 return GST_FLOW_ERROR;
4504 * gst_video_decoder_set_max_errors:
4505 * @dec: a #GstVideoDecoder
4506 * @num: max tolerated errors
4508 * Sets numbers of tolerated decoder errors, where a tolerated one is then only
4509 * warned about, but more than tolerated will lead to fatal error. You can set
4510 * -1 for never returning fatal errors. Default is set to
4511 * GST_VIDEO_DECODER_MAX_ERRORS.
4513 * The '-1' option was added in 1.4
4516 gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
4518 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4520 dec->priv->max_errors = num;
4524 * gst_video_decoder_get_max_errors:
4525 * @dec: a #GstVideoDecoder
4527 * Returns: currently configured decoder tolerated error count.
4530 gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
4532 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4534 return dec->priv->max_errors;
4538 * gst_video_decoder_set_needs_format:
4539 * @dec: a #GstVideoDecoder
4540 * @enabled: new state
4542 * Configures decoder format needs. If enabled, subclass needs to be
4543 * negotiated with format caps before it can process any data. It will then
4544 * never be handed any data before it has been configured.
4545 * Otherwise, it might be handed data without having been configured and
4546 * is then expected being able to do so either by default
4547 * or based on the input data.
4552 gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
4554 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4556 dec->priv->needs_format = enabled;
4560 * gst_video_decoder_get_needs_format:
4561 * @dec: a #GstVideoDecoder
4563 * Queries decoder required format handling.
4565 * Returns: %TRUE if required format handling is enabled.
4570 gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
4574 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4576 result = dec->priv->needs_format;
4582 * gst_video_decoder_set_packetized:
4583 * @decoder: a #GstVideoDecoder
4584 * @packetized: whether the input data should be considered as packetized.
4586 * Allows baseclass to consider input data as packetized or not. If the
4587 * input is packetized, then the @parse method will not be called.
4590 gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
4591 gboolean packetized)
4593 decoder->priv->packetized = packetized;
4597 * gst_video_decoder_get_packetized:
4598 * @decoder: a #GstVideoDecoder
4600 * Queries whether input data is considered packetized or not by the
4603 * Returns: TRUE if input data is considered packetized.
4606 gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
4608 return decoder->priv->packetized;
4612 * gst_video_decoder_set_estimate_rate:
4613 * @dec: a #GstVideoDecoder
4614 * @enabled: whether to enable byte to time conversion
4616 * Allows baseclass to perform byte to time estimated conversion.
4619 gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
4621 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4623 dec->priv->do_estimate_rate = enabled;
4627 * gst_video_decoder_get_estimate_rate:
4628 * @dec: a #GstVideoDecoder
4630 * Returns: currently configured byte to time conversion setting
4633 gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
4635 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4637 return dec->priv->do_estimate_rate;
4641 * gst_video_decoder_set_latency:
4642 * @decoder: a #GstVideoDecoder
4643 * @min_latency: minimum latency
4644 * @max_latency: maximum latency
4646 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
4647 * latency is. Will also post a LATENCY message on the bus so the pipeline
4648 * can reconfigure its global latency.
4651 gst_video_decoder_set_latency (GstVideoDecoder * decoder,
4652 GstClockTime min_latency, GstClockTime max_latency)
4654 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
4655 g_return_if_fail (max_latency >= min_latency);
4657 GST_OBJECT_LOCK (decoder);
4658 decoder->priv->min_latency = min_latency;
4659 decoder->priv->max_latency = max_latency;
4660 GST_OBJECT_UNLOCK (decoder);
4662 gst_element_post_message (GST_ELEMENT_CAST (decoder),
4663 gst_message_new_latency (GST_OBJECT_CAST (decoder)));
4667 * gst_video_decoder_get_latency:
4668 * @decoder: a #GstVideoDecoder
4669 * @min_latency: (out) (allow-none): address of variable in which to store the
4670 * configured minimum latency, or %NULL
4671 * @max_latency: (out) (allow-none): address of variable in which to store the
 *     configured maximum latency, or %NULL
4674 * Query the configured decoder latency. Results will be returned via
4675 * @min_latency and @max_latency.
4678 gst_video_decoder_get_latency (GstVideoDecoder * decoder,
4679 GstClockTime * min_latency, GstClockTime * max_latency)
4681 GST_OBJECT_LOCK (decoder);
4683 *min_latency = decoder->priv->min_latency;
4685 *max_latency = decoder->priv->max_latency;
4686 GST_OBJECT_UNLOCK (decoder);
4690 * gst_video_decoder_merge_tags:
4691 * @decoder: a #GstVideoDecoder
4692 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
4693 * previously-set tags
4694 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
 * Sets the video decoder tags and how they should be merged with any
 * upstream stream tags. This will override any tags previously-set
 * with gst_video_decoder_merge_tags().
4700 * Note that this is provided for convenience, and the subclass is
4701 * not required to use this and can still do tag handling on its own.
4706 gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
4707 const GstTagList * tags, GstTagMergeMode mode)
4709 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
4710 g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
4711 g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
4713 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4714 if (decoder->priv->tags != tags) {
4715 if (decoder->priv->tags) {
4716 gst_tag_list_unref (decoder->priv->tags);
4717 decoder->priv->tags = NULL;
4718 decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
4721 decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
4722 decoder->priv->tags_merge_mode = mode;
4725 GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
4726 decoder->priv->tags_changed = TRUE;
4728 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4732 * gst_video_decoder_get_buffer_pool:
4733 * @decoder: a #GstVideoDecoder
4735 * Returns: (transfer full): the instance of the #GstBufferPool used
4736 * by the decoder; free it after use it
4739 gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
4741 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);
4743 if (decoder->priv->pool)
4744 return gst_object_ref (decoder->priv->pool);
4750 * gst_video_decoder_get_allocator:
4751 * @decoder: a #GstVideoDecoder
4752 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
4754 * @params: (out) (allow-none) (transfer full): the
4755 * #GstAllocationParams of @allocator
4757 * Lets #GstVideoDecoder sub-classes to know the memory @allocator
4758 * used by the base class and its @params.
4760 * Unref the @allocator after use it.
4763 gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
4764 GstAllocator ** allocator, GstAllocationParams * params)
4766 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
4769 *allocator = decoder->priv->allocator ?
4770 gst_object_ref (decoder->priv->allocator) : NULL;
4773 *params = decoder->priv->params;
4777 * gst_video_decoder_set_use_default_pad_acceptcaps:
4778 * @decoder: a #GstVideoDecoder
4779 * @use: if the default pad accept-caps query handling should be used
4781 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
4782 * to use the default pad query handler to reply to accept-caps queries.
4784 * By setting this to true it is possible to further customize the default
4785 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
4786 * %GST_PAD_SET_ACCEPT_TEMPLATE
4791 gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
4794 decoder->priv->use_default_pad_acceptcaps = use;
4798 * gst_video_decoder_request_sync_point:
4799 * @dec: a #GstVideoDecoder
4800 * @frame: a #GstVideoCodecFrame
4801 * @flags: #GstVideoDecoderRequestSyncPointFlags
4803 * Allows the #GstVideoDecoder subclass to request from the base class that
4804 * a new sync should be requested from upstream, and that @frame was the frame
4805 * when the subclass noticed that a new sync point is required. A reason for
4806 * the subclass to do this could be missing reference frames, for example.
4808 * The base class will then request a new sync point from upstream as long as
4809 * the time that passed since the last one is exceeding
4810 * #GstVideoDecoder:min-force-key-unit-interval.
4812 * The subclass can signal via @flags how the frames until the next sync point
4813 * should be handled:
4815 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
4816 * all following input frames until the next sync point are discarded.
4817 * This can be useful if the lack of a sync point will prevent all further
4818 * decoding and the decoder implementation is not very robust in handling
4819 * missing references frames.
4820 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
4821 * then all output frames following @frame are marked as corrupted via
4822 * %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
4823 * dropped by the base class, see #GstVideoDecoder:discard-corrupted-frames.
4824 * Subclasses can manually mark frames as corrupted via %GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED
4825 * before calling gst_video_decoder_finish_frame().
4830 gst_video_decoder_request_sync_point (GstVideoDecoder * dec,
4831 GstVideoCodecFrame * frame, GstVideoDecoderRequestSyncPointFlags flags)
4833 GstEvent *fku = NULL;
4834 GstVideoDecoderPrivate *priv;
4836 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4837 g_return_if_fail (frame != NULL);
4841 GST_OBJECT_LOCK (dec);
4843 /* Check if we're allowed to send a new force-keyunit event.
4844 * frame->deadline is set to the running time of the PTS. */
4845 if (priv->min_force_key_unit_interval == 0 ||
4846 frame->deadline == GST_CLOCK_TIME_NONE ||
4847 (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
4848 (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
4849 || (priv->last_force_key_unit_time +
4850 priv->min_force_key_unit_interval >= frame->deadline)))) {
4851 GST_DEBUG_OBJECT (dec,
4852 "Requesting a new key-unit for frame with PTS %" GST_TIME_FORMAT,
4853 GST_TIME_ARGS (frame->pts));
4855 gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE, FALSE,
4857 priv->last_force_key_unit_time = frame->deadline;
4859 GST_DEBUG_OBJECT (dec,
4860 "Can't request a new key-unit for frame with PTS %" GST_TIME_FORMAT,
4861 GST_TIME_ARGS (frame->pts));
4863 priv->request_sync_point_flags |= flags;
4864 /* We don't know yet the frame number of the sync point so set it to a
4865 * frame number higher than any allowed frame number */
4866 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
4867 GST_OBJECT_UNLOCK (dec);
4870 gst_pad_push_event (dec->sinkpad, fku);
4874 * gst_video_decoder_set_needs_sync_point:
4875 * @dec: a #GstVideoDecoder
4876 * @enabled: new state
4878 * Configures whether the decoder requires a sync point before it starts
4879 * outputting data in the beginning. If enabled, the base class will discard
4880 * all non-sync point frames in the beginning and after a flush and does not
4881 * pass it to the subclass.
4883 * If the first frame is not a sync point, the base class will request a sync
4884 * point via the force-key-unit event.
4889 gst_video_decoder_set_needs_sync_point (GstVideoDecoder * dec, gboolean enabled)
4891 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4893 dec->priv->needs_sync_point = enabled;
4897 * gst_video_decoder_get_needs_sync_point:
4898 * @dec: a #GstVideoDecoder
4900 * Queries if the decoder requires a sync point before it starts outputting
4901 * data in the beginning.
4903 * Returns: %TRUE if a sync point is required in the beginning.
4908 gst_video_decoder_get_needs_sync_point (GstVideoDecoder * dec)
4912 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4914 result = dec->priv->needs_sync_point;