2 * Copyright (C) 2008 David Schleef <ds@schleef.org>
3 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
4 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
5 * Contact: Stefan Kost <stefan.kost@nokia.com>
6 * Copyright (C) 2012 Collabora Ltd.
7 * Author : Edward Hervey <edward@collabora.com>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Library General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Library General Public License for more details.
19 * You should have received a copy of the GNU Library General Public
20 * License along with this library; if not, write to the
21 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
22 * Boston, MA 02110-1301, USA.
26 * SECTION:gstvideodecoder
27 * @title: GstVideoDecoder
28 * @short_description: Base class for video decoders
30 * This base class is for video decoders turning encoded data into raw video
33 * The GstVideoDecoder base class and derived subclasses should cooperate as
38 * * Initially, GstVideoDecoder calls @start when the decoder element
39 * is activated, which allows the subclass to perform any global setup.
41 * * GstVideoDecoder calls @set_format to inform the subclass of caps
42 * describing input video data that it is about to receive, including
43 * possibly configuration data.
44 * While unlikely, it might be called more than once, if changing input
45 * parameters require reconfiguration.
47 * * Incoming data buffers are processed as needed, described in Data
50 * * GstVideoDecoder calls @stop at end of all processing.
54 * * The base class gathers input data, and optionally allows subclass
55 * to parse this into subsequently manageable chunks, typically
56 * corresponding to and referred to as 'frames'.
58 * * Each input frame is provided in turn to the subclass' @handle_frame
60 * * When the subclass enables the subframe mode with `gst_video_decoder_set_subframe_mode`,
61 * the base class will provide to the subclass the same input frame with
62 * different input buffers to the subclass @handle_frame
63 * callback. During this call, the subclass needs to take
64 * ownership of the input_buffer as @GstVideoCodecFrame.input_buffer
65 * will have been changed before the next subframe buffer is received.
66 * The subclass will call `gst_video_decoder_have_last_subframe`
67 * when a new input frame can be created by the base class.
68 * Every subframe will share the same @GstVideoCodecFrame.output_buffer
69 * to write the decoding result. The subclass is responsible to protect
72 * * If codec processing results in decoded data, the subclass should call
73 * @gst_video_decoder_finish_frame to have decoded data pushed
74 * downstream. In subframe mode
75 * the subclass should call @gst_video_decoder_finish_subframe until the
76 * last subframe where it should call @gst_video_decoder_finish_frame.
77 * The subclass can detect the last subframe using GST_VIDEO_BUFFER_FLAG_MARKER
78 * on buffers or using its own logic to collect the subframes.
79 * In case of decoding failure, the subclass must call
80 * @gst_video_decoder_drop_frame or @gst_video_decoder_drop_subframe,
81 * to allow the base class to do timestamp and offset tracking, and possibly
82 * to requeue the frame for a later attempt in the case of reverse playback.
86 * * The GstVideoDecoder class calls @stop to inform the subclass that data
87 * parsing will be stopped.
93 * * When the pipeline is seeked or otherwise flushed, the subclass is
94 * informed via a call to its @reset callback, with the hard parameter
95 * set to true. This indicates the subclass should drop any internal data
96 * queues and timestamps and prepare for a fresh set of buffers to arrive
97 * for parsing and decoding.
101 * * At end-of-stream, the subclass @parse function may be called some final
102 * times with the at_eos parameter set to true, indicating that the element
103 * should not expect any more data to be arriving, and it should parse any
104 * remaining frames and call gst_video_decoder_have_frame() if possible.
106 * The subclass is responsible for providing pad template caps for
107 * source and sink pads. The pads need to be named "sink" and "src". It also
108 * needs to provide information about the output caps, when they are known.
109 * This may be when the base class calls the subclass' @set_format function,
110 * though it might be during decoding, before calling
111 * @gst_video_decoder_finish_frame. This is done via
112 * @gst_video_decoder_set_output_state
114 * The subclass is also responsible for providing (presentation) timestamps
115 * (likely based on corresponding input ones). If that is not applicable
116 * or possible, the base class provides limited framerate based interpolation.
118 * Similarly, the base class provides some limited (legacy) seeking support
119 * if specifically requested by the subclass, as full-fledged support
120 * should rather be left to upstream demuxer, parser or alike. This simple
121 * approach caters for seeking and duration reporting using estimated input
122 * bitrates. To enable it, a subclass should call
123 * @gst_video_decoder_set_estimate_rate to enable handling of incoming
126 * The base class provides some support for reverse playback, in particular
127 * in case incoming data is not packetized or upstream does not provide
128 * fragments on keyframe boundaries. However, the subclass should then be
129 * prepared for the parsing and frame processing stage to occur separately
130 * (in normal forward processing, the latter immediately follows the former),
131 * The subclass also needs to ensure the parsing stage properly marks
132 * keyframes, unless it knows the upstream elements will do so properly for
135 * The bare minimum that a functional subclass needs to implement is:
137 * * Provide pad templates
138 * * Inform the base class of output caps via
139 * @gst_video_decoder_set_output_state
141 * * Parse input data, if it is not considered packetized from upstream
142 * Data will be provided to @parse which should invoke
143 * @gst_video_decoder_add_to_frame and @gst_video_decoder_have_frame to
144 * separate the data belonging to each video frame.
146 * * Accept data in @handle_frame and provide decoded results to
147 * @gst_video_decoder_finish_frame, or call @gst_video_decoder_drop_frame.
156 * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
157 * features, like applying QoS on input (as opposed to after the frame is
159 * * Add a flag/boolean for decoders that require keyframes, so the base
160 * class can automatically discard non-keyframes before one has arrived
161 * * Detect reordered frame/timestamps and fix the pts/dts
162 * * Support for GstIndex (or shall we not care ?)
163 * * Calculate actual latency based on input/output timestamp/frame_number
164 * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
165 * * Emit latency message when it changes
169 /* Implementation notes:
170 * The Video Decoder base class operates in 2 primary processing modes, depending
171 * on whether forward or reverse playback is requested.
174 * * Incoming buffer -> @parse() -> add_to_frame()/have_frame() ->
175 * handle_frame() -> push downstream
177 * Reverse playback is more complicated, since it involves gathering incoming
178 * data regions as we loop backwards through the upstream data. The processing
179 * concept (using incoming buffers as containing one frame each to simplify
182 * Upstream data we want to play:
183 * Buffer encoded order: 1 2 3 4 5 6 7 8 9 EOS
185 * Groupings: AAAAAAA BBBBBBB CCCCCCC
188 * Buffer reception order: 7 8 9 4 5 6 1 2 3 EOS
190 * Discont flag: D D D
192 * - Each Discont marks a discont in the decoding order.
193 * - The keyframes mark where we can start decoding.
195 * Initially, we prepend incoming buffers to the gather queue. Whenever the
196 * discont flag is set on an incoming buffer, the gather queue is flushed out
197 * before the new buffer is collected.
199 * The above data will be accumulated in the gather queue like this:
201 * gather queue: 9 8 7
204 * When buffer 4 is received (with a DISCONT), we flush the gather queue like
208 * take head of queue and prepend to parse queue (this reverses the
209 * sequence, so parse queue is 7 -> 8 -> 9)
211 * Next, we process the parse queue, which now contains all un-parsed packets
212 * (including any leftover ones from the previous decode section)
214 * for each buffer now in the parse queue:
215 * Call the subclass parse function, prepending each resulting frame to
216 * the parse_gather queue. Buffers which precede the first one that
217 * produces a parsed frame are retained in the parse queue for
218 * re-processing on the next cycle of parsing.
220 * The parse_gather queue now contains frame objects ready for decoding,
222 * parse_gather: 9 -> 8 -> 7
224 * while (parse_gather)
225 * Take the head of the queue and prepend it to the decode queue
226 * If the frame was a keyframe, process the decode queue
227 * decode is now 7-8-9
229 * Processing the decode queue results in frames with attached output buffers
230 * stored in the 'output_queue' ready for outputting in reverse order.
232 * After we flushed the gather queue and parsed it, we add 4 to the (now empty)
233 * gather queue. We get the following situation:
236 * decode queue: 7 8 9
238 * After we received 5 (Keyframe) and 6:
240 * gather queue: 6 5 4
241 * decode queue: 7 8 9
243 * When we receive 1 (DISCONT) which triggers a flush of the gather queue:
245 * Copy head of the gather queue (6) to decode queue:
248 * decode queue: 6 7 8 9
250 * Copy head of the gather queue (5) to decode queue. This is a keyframe so we
251 * can start decoding.
254 * decode queue: 5 6 7 8 9
256 * Decode frames in decode queue, store raw decoded data in output queue, we
257 * can take the head of the decode queue and prepend the decoded result in the
262 * output queue: 9 8 7 6 5
264 * Now output all the frames in the output queue, picking a frame from the
267 * Copy head of the gather queue (4) to decode queue, we flushed the gather
268 * queue and can now store input buffer in the gather queue:
273 * When we receive EOS, the queue looks like:
275 * gather queue: 3 2 1
278 * Fill decode queue, first keyframe we copy is 2:
281 * decode queue: 2 3 4
287 * output queue: 4 3 2
289 * Leftover buffer 1 cannot be decoded and must be discarded.
292 #include "gstvideodecoder.h"
293 #include "gstvideoutils.h"
294 #include "gstvideoutilsprivate.h"
296 #include <gst/video/video.h>
297 #include <gst/video/video-event.h>
298 #include <gst/video/gstvideopool.h>
299 #include <gst/video/gstvideometa.h>
GST_DEBUG_CATEGORY (videodecoder_debug);
#define GST_CAT_DEFAULT videodecoder_debug

/* Default property values */
#define DEFAULT_QOS TRUE
#define DEFAULT_MAX_ERRORS GST_VIDEO_DECODER_MAX_ERRORS
#define DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL 0
#define DEFAULT_DISCARD_CORRUPTED_FRAMES FALSE

/* Used for request_sync_point_frame_number. These are out of range for the
 * (32-bit) frame numbers and can be given special meaning.
 *
 * NOTE: the PENDING value must be computed in 64-bit arithmetic and be
 * fully parenthesized: a bare `G_MAXUINT + 1` wraps around to 0 in
 * unsigned int arithmetic, which collides with the first valid frame
 * number (request_sync_point_frame_number is a guint64). */
#define REQUEST_SYNC_POINT_PENDING ((guint64) G_MAXUINT + 1)
#define REQUEST_SYNC_POINT_UNSET G_MAXUINT64

/* Property IDs installed in gst_video_decoder_class_init() */
enum
{
  PROP_0,
  PROP_QOS,
  PROP_MAX_ERRORS,
  PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
  PROP_DISCARD_CORRUPTED_FRAMES
};
/* NOTE(review): this struct is an extraction-garbled fragment — every line
 * carries a stray original line number, and many members (plus the closing
 * brace) were dropped from this view.  Kept byte-identical; only comments
 * were added/clarified.  Reconstruct against upstream before compiling. */
325 struct _GstVideoDecoderPrivate
327 /* FIXME introduce a context ? */
330 GstAllocator *allocator;
331 GstAllocationParams params;
335 GstAdapter *input_adapter;
336 /* assembles current frame */
337 GstAdapter *output_adapter;
339 /* Whether we attempt to convert newsegment from bytes to
340 * time using a bitrate estimation */
341 gboolean do_estimate_rate;
343 /* Whether input is considered packetized or not */
346 /* whether input is considered as subframes */
347 gboolean subframe_mode;
352 gboolean had_output_data;
353 gboolean had_input_data;
355 gboolean needs_format;
356 /* input_segment and output_segment are identical */
357 gboolean in_out_segment_sync;
359 /* TRUE if we have an active set of instant rate flags */
360 gboolean decode_flags_override;
361 GstSegmentFlags decode_flags;
363 /* ... being tracked here;
364 * only available during parsing or when doing subframe decoding */
365 GstVideoCodecFrame *current_frame;
366 /* events that should apply to the current frame */
367 /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
368 GList *current_frame_events;
369 /* events that should be pushed before the next frame */
370 /* FIXME 2.0: Use a GQueue or similar, see GstVideoCodecFrame::events */
371 GList *pending_events;
373 /* relative offset of input data */
374 guint64 input_offset;
375 /* relative offset of frame */
376 guint64 frame_offset;
377 /* tracking ts and offsets */
380 /* last outgoing ts */
381 GstClockTime last_timestamp_out;
382 /* incoming pts - dts */
383 GstClockTime pts_delta;
384 gboolean reordered_output;
386 /* FIXME: Consider using a GQueue or other better fitting data structure */
387 /* reverse playback */
392 /* collected parsed frames */
394 /* frames to be handled == decoded */
396 /* collected output - of buffer objects, not frames */
397 GList *output_queued;
400 /* base_picture_number is the picture number of the reference picture */
401 guint64 base_picture_number;
402 /* combine with base_picture_number, framerate and calcs to yield (presentation) ts */
403 GstClockTime base_timestamp;
406 GstClockTime min_force_key_unit_interval;
407 gboolean discard_corrupted_frames;
409 /* Key unit related state */
410 gboolean needs_sync_point;
411 GstVideoDecoderRequestSyncPointFlags request_sync_point_flags;
412 guint64 request_sync_point_frame_number;
413 GstClockTime last_force_key_unit_time;
414 /* -1 if we saw no sync point yet */
415 guint64 distance_from_sync;
417 guint32 system_frame_number;
418 guint32 decode_frame_number;
420 GQueue frames; /* Protected with OBJECT_LOCK */
421 GstVideoCodecState *input_state;
422 GstVideoCodecState *output_state; /* OBJECT_LOCK and STREAM_LOCK */
423 gboolean output_state_changed;
427 gdouble proportion; /* OBJECT_LOCK */
428 GstClockTime earliest_time; /* OBJECT_LOCK */
429 GstClockTime qos_frame_duration; /* OBJECT_LOCK */
431 /* qos messages: frames dropped/processed */
435 /* Outgoing byte size ? */
442 /* upstream stream tags (global tags are passed through as-is) */
443 GstTagList *upstream_tags;
447 GstTagMergeMode tags_merge_mode;
449 gboolean tags_changed;
452 gboolean use_default_pad_acceptcaps;
454 #ifndef GST_DISABLE_DEBUG
455 /* Diagnostic time for reporting the time
456 * from flush to first output */
457 GstClockTime last_reset_time;
461 static GstElementClass *parent_class = NULL;
462 static gint private_offset = 0;
464 /* cached quark to avoid contention on the global quark table lock */
465 #define META_TAG_VIDEO meta_tag_video_quark
466 static GQuark meta_tag_video_quark;
468 static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
469 static void gst_video_decoder_init (GstVideoDecoder * dec,
470 GstVideoDecoderClass * klass);
472 static void gst_video_decoder_finalize (GObject * object);
473 static void gst_video_decoder_get_property (GObject * object, guint property_id,
474 GValue * value, GParamSpec * pspec);
475 static void gst_video_decoder_set_property (GObject * object, guint property_id,
476 const GValue * value, GParamSpec * pspec);
478 static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
480 static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
482 static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
484 static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
486 static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
488 static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
489 element, GstStateChange transition);
490 static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
492 static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
493 gboolean flush_hard);
495 static GstFlowReturn gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
496 GstVideoCodecFrame * frame);
498 static void gst_video_decoder_push_event_list (GstVideoDecoder * decoder,
500 static GstClockTime gst_video_decoder_get_frame_duration (GstVideoDecoder *
501 decoder, GstVideoCodecFrame * frame);
502 static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
504 static GstFlowReturn gst_video_decoder_clip_and_push_buf (GstVideoDecoder *
505 decoder, GstBuffer * buf);
506 static GstFlowReturn gst_video_decoder_flush_parse (GstVideoDecoder * dec,
509 static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
511 static gboolean gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
513 static gboolean gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
515 static gboolean gst_video_decoder_decide_allocation_default (GstVideoDecoder *
516 decoder, GstQuery * query);
517 static gboolean gst_video_decoder_propose_allocation_default (GstVideoDecoder *
518 decoder, GstQuery * query);
519 static gboolean gst_video_decoder_negotiate_default (GstVideoDecoder * decoder);
520 static GstFlowReturn gst_video_decoder_parse_available (GstVideoDecoder * dec,
521 gboolean at_eos, gboolean new_buffer);
522 static gboolean gst_video_decoder_negotiate_unlocked (GstVideoDecoder *
524 static gboolean gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
526 static gboolean gst_video_decoder_src_query_default (GstVideoDecoder * decoder,
529 static gboolean gst_video_decoder_transform_meta_default (GstVideoDecoder *
530 decoder, GstVideoCodecFrame * frame, GstMeta * meta);
532 static void gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
533 GstVideoCodecFrame * frame, GstBuffer * src_buffer,
534 GstBuffer * dest_buffer);
536 /* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
537 * method to get to the padtemplates */
539 gst_video_decoder_get_type (void)
541 static gsize type = 0;
543 if (g_once_init_enter (&type)) {
545 static const GTypeInfo info = {
546 sizeof (GstVideoDecoderClass),
549 (GClassInitFunc) gst_video_decoder_class_init,
552 sizeof (GstVideoDecoder),
554 (GInstanceInitFunc) gst_video_decoder_init,
557 _type = g_type_register_static (GST_TYPE_ELEMENT,
558 "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);
561 g_type_add_instance_private (_type, sizeof (GstVideoDecoderPrivate));
563 g_once_init_leave (&type, _type);
568 static inline GstVideoDecoderPrivate *
569 gst_video_decoder_get_instance_private (GstVideoDecoder * self)
571 return (G_STRUCT_MEMBER_P (self, private_offset));
575 gst_video_decoder_class_init (GstVideoDecoderClass * klass)
577 GObjectClass *gobject_class;
578 GstElementClass *gstelement_class;
580 gobject_class = G_OBJECT_CLASS (klass);
581 gstelement_class = GST_ELEMENT_CLASS (klass);
583 GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
584 "Base Video Decoder");
586 parent_class = g_type_class_peek_parent (klass);
588 if (private_offset != 0)
589 g_type_class_adjust_private_offset (klass, &private_offset);
591 gobject_class->finalize = gst_video_decoder_finalize;
592 gobject_class->get_property = gst_video_decoder_get_property;
593 gobject_class->set_property = gst_video_decoder_set_property;
595 gstelement_class->change_state =
596 GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);
598 klass->sink_event = gst_video_decoder_sink_event_default;
599 klass->src_event = gst_video_decoder_src_event_default;
600 klass->decide_allocation = gst_video_decoder_decide_allocation_default;
601 klass->propose_allocation = gst_video_decoder_propose_allocation_default;
602 klass->negotiate = gst_video_decoder_negotiate_default;
603 klass->sink_query = gst_video_decoder_sink_query_default;
604 klass->src_query = gst_video_decoder_src_query_default;
605 klass->transform_meta = gst_video_decoder_transform_meta_default;
608 * GstVideoDecoder:qos:
610 * If set to %TRUE the decoder will handle QoS events received
611 * from downstream elements.
612 * This includes dropping output frames which are detected as late
613 * using the metrics reported by those events.
617 g_object_class_install_property (gobject_class, PROP_QOS,
618 g_param_spec_boolean ("qos", "Quality of Service",
619 "Handle Quality-of-Service events from downstream",
620 DEFAULT_QOS, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
623 * GstVideoDecoder:max-errors:
625 * Maximum number of tolerated consecutive decode errors. See
626 * gst_video_decoder_set_max_errors() for more details.
630 g_object_class_install_property (gobject_class, PROP_MAX_ERRORS,
631 g_param_spec_int ("max-errors", "Max errors",
632 "Max consecutive decoder errors before returning flow error",
633 -1, G_MAXINT, DEFAULT_MAX_ERRORS,
634 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
637 * GstVideoDecoder:min-force-key-unit-interval:
639 * Minimum interval between force-key-unit events sent upstream by the
640 * decoder. Setting this to 0 will cause every event to be handled, setting
641 * this to %GST_CLOCK_TIME_NONE will cause every event to be ignored.
643 * See gst_video_event_new_upstream_force_key_unit() for more details about
644 * force-key-unit events.
648 g_object_class_install_property (gobject_class,
649 PROP_MIN_FORCE_KEY_UNIT_INTERVAL,
650 g_param_spec_uint64 ("min-force-key-unit-interval",
651 "Minimum Force Keyunit Interval",
652 "Minimum interval between force-keyunit requests in nanoseconds", 0,
653 G_MAXUINT64, DEFAULT_MIN_FORCE_KEY_UNIT_INTERVAL,
654 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
657 * GstVideoDecoder:discard-corrupted-frames:
659 * If set to %TRUE the decoder will discard frames that are marked as
660 * corrupted instead of outputting them.
664 g_object_class_install_property (gobject_class, PROP_DISCARD_CORRUPTED_FRAMES,
665 g_param_spec_boolean ("discard-corrupted-frames",
666 "Discard Corrupted Frames",
667 "Discard frames marked as corrupted instead of outputting them",
668 DEFAULT_DISCARD_CORRUPTED_FRAMES,
669 G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
671 meta_tag_video_quark = g_quark_from_static_string (GST_META_TAG_VIDEO_STR);
675 gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
677 GstPadTemplate *pad_template;
680 GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");
682 decoder->priv = gst_video_decoder_get_instance_private (decoder);
685 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
686 g_return_if_fail (pad_template != NULL);
688 decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");
690 gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
691 gst_pad_set_event_function (pad,
692 GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
693 gst_pad_set_query_function (pad,
694 GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
695 gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);
698 gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
699 g_return_if_fail (pad_template != NULL);
701 decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
703 gst_pad_set_event_function (pad,
704 GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
705 gst_pad_set_query_function (pad,
706 GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
707 gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);
709 gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
710 gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);
712 g_rec_mutex_init (&decoder->stream_lock);
714 decoder->priv->input_adapter = gst_adapter_new ();
715 decoder->priv->output_adapter = gst_adapter_new ();
716 decoder->priv->packetized = TRUE;
717 decoder->priv->needs_format = FALSE;
719 g_queue_init (&decoder->priv->frames);
720 g_queue_init (&decoder->priv->timestamps);
723 decoder->priv->do_qos = DEFAULT_QOS;
724 decoder->priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;
726 decoder->priv->min_latency = 0;
727 decoder->priv->max_latency = 0;
729 gst_video_decoder_reset (decoder, TRUE, TRUE);
732 static GstVideoCodecState *
733 _new_input_state (GstCaps * caps)
735 GstVideoCodecState *state;
736 GstStructure *structure;
737 const GValue *codec_data;
739 state = g_slice_new0 (GstVideoCodecState);
740 state->ref_count = 1;
741 gst_video_info_init (&state->info);
742 if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
744 state->caps = gst_caps_ref (caps);
746 structure = gst_caps_get_structure (caps, 0);
748 codec_data = gst_structure_get_value (structure, "codec_data");
749 if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
750 state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));
756 g_slice_free (GstVideoCodecState, state);
761 static GstVideoCodecState *
762 _new_output_state (GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode,
763 guint width, guint height, GstVideoCodecState * reference,
764 gboolean copy_interlace_mode)
766 GstVideoCodecState *state;
768 state = g_slice_new0 (GstVideoCodecState);
769 state->ref_count = 1;
770 gst_video_info_init (&state->info);
771 if (!gst_video_info_set_interlaced_format (&state->info, fmt, interlace_mode,
773 g_slice_free (GstVideoCodecState, state);
778 GstVideoInfo *tgt, *ref;
781 ref = &reference->info;
783 /* Copy over extra fields from reference state */
784 if (copy_interlace_mode)
785 tgt->interlace_mode = ref->interlace_mode;
786 tgt->flags = ref->flags;
787 tgt->chroma_site = ref->chroma_site;
788 tgt->colorimetry = ref->colorimetry;
789 GST_DEBUG ("reference par %d/%d fps %d/%d",
790 ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
791 tgt->par_n = ref->par_n;
792 tgt->par_d = ref->par_d;
793 tgt->fps_n = ref->fps_n;
794 tgt->fps_d = ref->fps_d;
795 tgt->views = ref->views;
797 GST_VIDEO_INFO_FIELD_ORDER (tgt) = GST_VIDEO_INFO_FIELD_ORDER (ref);
799 if (GST_VIDEO_INFO_MULTIVIEW_MODE (ref) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
800 GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_INFO_MULTIVIEW_MODE (ref);
801 GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) =
802 GST_VIDEO_INFO_MULTIVIEW_FLAGS (ref);
804 /* Default to MONO, overridden as needed by sub-classes */
805 GST_VIDEO_INFO_MULTIVIEW_MODE (tgt) = GST_VIDEO_MULTIVIEW_MODE_MONO;
806 GST_VIDEO_INFO_MULTIVIEW_FLAGS (tgt) = GST_VIDEO_MULTIVIEW_FLAGS_NONE;
810 GST_DEBUG ("reference par %d/%d fps %d/%d",
811 state->info.par_n, state->info.par_d,
812 state->info.fps_n, state->info.fps_d);
818 gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
820 GstVideoDecoderClass *decoder_class;
821 GstVideoCodecState *state;
824 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
826 GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);
828 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
830 if (decoder->priv->input_state) {
831 GST_DEBUG_OBJECT (decoder,
832 "Checking if caps changed old %" GST_PTR_FORMAT " new %" GST_PTR_FORMAT,
833 decoder->priv->input_state->caps, caps);
834 if (gst_caps_is_equal (decoder->priv->input_state->caps, caps))
835 goto caps_not_changed;
838 state = _new_input_state (caps);
840 if (G_UNLIKELY (state == NULL))
843 if (decoder_class->set_format)
844 ret = decoder_class->set_format (decoder, state);
849 if (decoder->priv->input_state)
850 gst_video_codec_state_unref (decoder->priv->input_state);
851 decoder->priv->input_state = state;
853 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
859 GST_DEBUG_OBJECT (decoder, "Caps did not change - ignore");
860 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
867 GST_WARNING_OBJECT (decoder, "Failed to parse caps");
868 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
874 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
875 GST_WARNING_OBJECT (decoder, "Subclass refused caps");
876 gst_video_codec_state_unref (state);
882 gst_video_decoder_finalize (GObject * object)
884 GstVideoDecoder *decoder;
886 decoder = GST_VIDEO_DECODER (object);
888 GST_DEBUG_OBJECT (object, "finalize");
890 g_rec_mutex_clear (&decoder->stream_lock);
892 if (decoder->priv->input_adapter) {
893 g_object_unref (decoder->priv->input_adapter);
894 decoder->priv->input_adapter = NULL;
896 if (decoder->priv->output_adapter) {
897 g_object_unref (decoder->priv->output_adapter);
898 decoder->priv->output_adapter = NULL;
901 if (decoder->priv->input_state)
902 gst_video_codec_state_unref (decoder->priv->input_state);
903 if (decoder->priv->output_state)
904 gst_video_codec_state_unref (decoder->priv->output_state);
906 if (decoder->priv->pool) {
907 gst_object_unref (decoder->priv->pool);
908 decoder->priv->pool = NULL;
911 if (decoder->priv->allocator) {
912 gst_object_unref (decoder->priv->allocator);
913 decoder->priv->allocator = NULL;
916 G_OBJECT_CLASS (parent_class)->finalize (object);
920 gst_video_decoder_get_property (GObject * object, guint property_id,
921 GValue * value, GParamSpec * pspec)
923 GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
924 GstVideoDecoderPrivate *priv = dec->priv;
926 switch (property_id) {
928 g_value_set_boolean (value, priv->do_qos);
930 case PROP_MAX_ERRORS:
931 g_value_set_int (value, gst_video_decoder_get_max_errors (dec));
933 case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
934 g_value_set_uint64 (value, priv->min_force_key_unit_interval);
936 case PROP_DISCARD_CORRUPTED_FRAMES:
937 g_value_set_boolean (value, priv->discard_corrupted_frames);
940 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
946 gst_video_decoder_set_property (GObject * object, guint property_id,
947 const GValue * value, GParamSpec * pspec)
949 GstVideoDecoder *dec = GST_VIDEO_DECODER (object);
950 GstVideoDecoderPrivate *priv = dec->priv;
952 switch (property_id) {
954 priv->do_qos = g_value_get_boolean (value);
956 case PROP_MAX_ERRORS:
957 gst_video_decoder_set_max_errors (dec, g_value_get_int (value));
959 case PROP_MIN_FORCE_KEY_UNIT_INTERVAL:
960 priv->min_force_key_unit_interval = g_value_get_uint64 (value);
962 case PROP_DISCARD_CORRUPTED_FRAMES:
963 priv->discard_corrupted_frames = g_value_get_boolean (value);
966 G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
971 /* hard == FLUSH, otherwise discont */
973 gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
975 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (dec);
976 GstFlowReturn ret = GST_FLOW_OK;
978 GST_LOG_OBJECT (dec, "flush hard %d", hard);
980 /* Inform subclass */
982 GST_FIXME_OBJECT (dec, "GstVideoDecoder::reset() is deprecated");
983 klass->reset (dec, hard);
989 /* and get (re)set for the sequel */
990 gst_video_decoder_reset (dec, FALSE, hard);
996 gst_video_decoder_create_merged_tags_event (GstVideoDecoder * dec)
998 GstTagList *merged_tags;
1000 GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, dec->priv->upstream_tags);
1001 GST_LOG_OBJECT (dec, "decoder : %" GST_PTR_FORMAT, dec->priv->tags);
1002 GST_LOG_OBJECT (dec, "mode : %d", dec->priv->tags_merge_mode);
1005 gst_tag_list_merge (dec->priv->upstream_tags, dec->priv->tags,
1006 dec->priv->tags_merge_mode);
1008 GST_DEBUG_OBJECT (dec, "merged : %" GST_PTR_FORMAT, merged_tags);
1010 if (merged_tags == NULL)
1013 if (gst_tag_list_is_empty (merged_tags)) {
1014 gst_tag_list_unref (merged_tags);
1018 return gst_event_new_tag (merged_tags);
/* Push @event on the source pad, intercepting SEGMENT events so the base
 * class can track the output segment. Non-TIME segments are logged and
 * (in elided code) presumably handled/rejected — confirm against full file. */
1022 gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
1024 switch (GST_EVENT_TYPE (event)) {
1025 case GST_EVENT_SEGMENT:
1029 gst_event_copy_segment (event, &segment);
1031 GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
1033 if (segment.format != GST_FORMAT_TIME) {
1034 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
/* Record the outgoing segment and whether it matches the input segment,
 * and invalidate timestamp/QoS bookkeeping under the stream lock. */
1038 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1039 decoder->output_segment = segment;
1040 decoder->priv->in_out_segment_sync =
1041 gst_segment_is_equal (&decoder->input_segment, &segment);
1042 decoder->priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
1043 decoder->priv->earliest_time = GST_CLOCK_TIME_NONE;
1044 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1051 GST_DEBUG_OBJECT (decoder, "pushing event %s",
1052 gst_event_type_get_name (GST_EVENT_TYPE (event)));
/* Forward the (possibly inspected) event downstream. */
1054 return gst_pad_push_event (decoder->srcpad, event);
/* Repeatedly hand adapter contents to the subclass parse() vfunc until no
 * data remains or an error occurs. A fresh GstVideoCodecFrame is created
 * whenever the previous one was consumed. Guards against a subclass that
 * returns OK without consuming anything: after two consecutive no-progress
 * iterations we error out (error_inactive). */
1057 static GstFlowReturn
1058 gst_video_decoder_parse_available (GstVideoDecoder * dec, gboolean at_eos,
1059 gboolean new_buffer)
1061 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
1062 GstVideoDecoderPrivate *priv = dec->priv;
1063 GstFlowReturn ret = GST_FLOW_OK;
1064 gsize was_available, available;
1067 available = gst_adapter_available (priv->input_adapter);
/* new_buffer forces at least one parse() call even with an empty adapter. */
1069 while (available || new_buffer) {
1071 /* current frame may have been parsed and handled,
1072 * so we need to set up a new one when asking subclass to parse */
1073 if (priv->current_frame == NULL)
1074 priv->current_frame = gst_video_decoder_new_frame (dec);
1076 was_available = available;
1077 ret = decoder_class->parse (dec, priv->current_frame,
1078 priv->input_adapter, at_eos);
1079 if (ret != GST_FLOW_OK)
1082 /* if the subclass returned success (GST_FLOW_OK), it is expected
1083 * to have collected and submitted a frame, i.e. it should have
1084 * called gst_video_decoder_have_frame(), or at least consumed a
1085 * few bytes through gst_video_decoder_add_to_frame().
1087 * Otherwise, this is an implementation bug, and we error out
1088 * after 2 failed attempts */
1089 available = gst_adapter_available (priv->input_adapter);
/* Progress = frame was submitted (current_frame cleared) or bytes were
 * consumed from the adapter; otherwise count an inactive iteration. */
1090 if (!priv->current_frame || available != was_available)
1092 else if (++inactive == 2)
1093 goto error_inactive;
/* ERRORS (error_inactive label; surrounding lines elided) */
1101 GST_ERROR_OBJECT (dec, "Failed to consume data. Error in subclass?");
1102 return GST_FLOW_ERROR;
1106 /* This function has to be called with the stream lock taken. */
/* Drain all pending data out of the decoder. Forward playback: give an
 * unpacketized subclass a last parse pass, then call finish() (at EOS) or
 * drain() (mid-stream); reverse playback: flush the gathered/parse lists
 * via flush_parse(). Returns the resulting GstFlowReturn. */
1107 static GstFlowReturn
1108 gst_video_decoder_drain_out (GstVideoDecoder * dec, gboolean at_eos)
1110 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
1111 GstVideoDecoderPrivate *priv = dec->priv;
1112 GstFlowReturn ret = GST_FLOW_OK;
1114 if (dec->input_segment.rate > 0.0) {
1115 /* Forward mode, if unpacketized, give the child class
1116 * a final chance to flush out packets */
1117 if (!priv->packetized) {
/* at_eos=TRUE so parse() knows no more data will follow. */
1118 ret = gst_video_decoder_parse_available (dec, TRUE, FALSE);
/* At real EOS prefer the subclass finish() vfunc; otherwise drain(). */
1122 if (decoder_class->finish)
1123 ret = decoder_class->finish (dec);
1125 if (decoder_class->drain) {
1126 ret = decoder_class->drain (dec);
1128 GST_FIXME_OBJECT (dec, "Sub-class should implement drain()");
1132 /* Reverse playback mode */
1133 ret = gst_video_decoder_flush_parse (dec, TRUE);
/* Dispose of a list of pending events on flush: sticky events (other than
 * EOS and SEGMENT) are re-stored on @pad so they survive the flush, then
 * every event is unreffed and the list freed. Returns the new (emptied)
 * list — presumably NULL; tail of function elided, confirm in full file. */
1140 _flush_events (GstPad * pad, GList * events)
1144 for (tmp = events; tmp; tmp = tmp->next) {
1145 if (GST_EVENT_TYPE (tmp->data) != GST_EVENT_EOS &&
1146 GST_EVENT_TYPE (tmp->data) != GST_EVENT_SEGMENT &&
1147 GST_EVENT_IS_STICKY (tmp->data)) {
1148 gst_pad_store_sticky_event (pad, GST_EVENT_CAST (tmp->data));
1150 gst_event_unref (tmp->data);
1152 g_list_free (events);
1157 /* Must be called holding the GST_VIDEO_DECODER_STREAM_LOCK */
/* Choose and set default output caps before any buffer was decoded
 * (needed e.g. to forward an initial GAP event). Strategy: intersect the
 * src template with downstream peer caps, seed width/height from the sink
 * caps when upstream provided them, then fixate with arbitrary defaults
 * (I420, 1280x720). On success installs the output state; returns a
 * boolean (exact returns elided in this view). */
1159 gst_video_decoder_negotiate_default_caps (GstVideoDecoder * decoder)
1161 GstCaps *caps, *templcaps;
1162 GstVideoCodecState *state;
1166 GstStructure *structure;
1168 templcaps = gst_pad_get_pad_template_caps (decoder->srcpad);
1169 caps = gst_pad_peer_query_caps (decoder->srcpad, templcaps);
1171 gst_caps_unref (templcaps);
/* No usable downstream caps -> bail out (error path elided). */
1176 if (!caps || gst_caps_is_empty (caps) || gst_caps_is_any (caps))
1179 GST_LOG_OBJECT (decoder, "peer caps %" GST_PTR_FORMAT, caps);
1181 /* before fixating, try to use whatever upstream provided */
1182 caps = gst_caps_make_writable (caps);
1183 caps_size = gst_caps_get_size (caps);
1184 if (decoder->priv->input_state && decoder->priv->input_state->caps) {
1185 GstCaps *sinkcaps = decoder->priv->input_state->caps;
1186 GstStructure *structure = gst_caps_get_structure (sinkcaps, 0);
/* Propagate upstream-known width/height into every candidate structure. */
1189 if (gst_structure_get_int (structure, "width", &width)) {
1190 for (i = 0; i < caps_size; i++) {
1191 gst_structure_set (gst_caps_get_structure (caps, i), "width",
1192 G_TYPE_INT, width, NULL);
1196 if (gst_structure_get_int (structure, "height", &height)) {
1197 for (i = 0; i < caps_size; i++) {
1198 gst_structure_set (gst_caps_get_structure (caps, i), "height",
1199 G_TYPE_INT, height, NULL);
1204 for (i = 0; i < caps_size; i++) {
1205 structure = gst_caps_get_structure (caps, i);
1206 /* Random I420 1280x720 for fixation */
1207 if (gst_structure_has_field (structure, "format"))
1208 gst_structure_fixate_field_string (structure, "format", "I420");
1210 gst_structure_set (structure, "format", G_TYPE_STRING, "I420", NULL);
1212 if (gst_structure_has_field (structure, "width"))
1213 gst_structure_fixate_field_nearest_int (structure, "width", 1280);
1215 gst_structure_set (structure, "width", G_TYPE_INT, 1280, NULL);
1217 if (gst_structure_has_field (structure, "height"))
1218 gst_structure_fixate_field_nearest_int (structure, "height", 720);
1220 gst_structure_set (structure, "height", G_TYPE_INT, 720, NULL);
1222 caps = gst_caps_fixate (caps);
/* Fixated caps must parse into a valid GstVideoInfo. */
1224 if (!caps || !gst_video_info_from_caps (&info, caps))
1227 GST_INFO_OBJECT (decoder,
1228 "Chose default caps %" GST_PTR_FORMAT " for initial gap", caps);
1230 gst_video_decoder_set_output_state (decoder, info.finfo->format,
1231 info.width, info.height, decoder->priv->input_state);
1232 gst_video_codec_state_unref (state);
1233 gst_caps_unref (caps);
/* error path (elided label): release caps on failure */
1240 gst_caps_unref (caps);
/* Default sink-pad event handler. Serialized events that would otherwise
 * reorder against queued output are either forwarded immediately (after a
 * drain) when safe — flagged via forward_immediate — or queued on
 * current_frame_events to be pushed with the next decoded frame.
 * Returns TRUE when the event was handled/forwarded successfully. */
1246 gst_video_decoder_sink_event_default (GstVideoDecoder * decoder,
1249 GstVideoDecoderPrivate *priv;
1250 gboolean ret = FALSE;
1251 gboolean forward_immediate = FALSE;
1253 priv = decoder->priv;
1255 switch (GST_EVENT_TYPE (event)) {
1256 case GST_EVENT_STREAM_START:
1258 GstFlowReturn flow_ret = GST_FLOW_OK;
/* Drain everything from the previous stream before the new one starts. */
1260 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1261 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1262 ret = (flow_ret == GST_FLOW_OK);
1264 GST_DEBUG_OBJECT (decoder, "received STREAM_START. Clearing taglist");
1265 /* Flush upstream tags after a STREAM_START */
1266 if (priv->upstream_tags) {
1267 gst_tag_list_unref (priv->upstream_tags);
1268 priv->upstream_tags = NULL;
1269 priv->tags_changed = TRUE;
1271 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1273 /* Forward STREAM_START immediately. Everything is drained after
1274 * the STREAM_START event and we can forward this event immediately
1275 * now without having buffers out of order.
1277 forward_immediate = TRUE;
1280 case GST_EVENT_CAPS:
1284 gst_event_parse_caps (event, &caps);
/* CAPS is consumed here (setcaps does the work), not forwarded as-is. */
1285 ret = gst_video_decoder_setcaps (decoder, caps);
1286 gst_event_unref (event);
1290 case GST_EVENT_SEGMENT_DONE:
1292 GstFlowReturn flow_ret = GST_FLOW_OK;
1294 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1295 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1296 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1297 ret = (flow_ret == GST_FLOW_OK);
1299 /* Forward SEGMENT_DONE immediately. This is required
1300 * because no buffer or serialized event might come
1301 * after SEGMENT_DONE and nothing could trigger another
1302 * _finish_frame() call.
1304 * The subclass can override this behaviour by overriding
1305 * the ::sink_event() vfunc and not chaining up to the
1306 * parent class' ::sink_event() until a later time.
1308 forward_immediate = TRUE;
/* (case GST_EVENT_EOS — case label elided in this view) */
1313 GstFlowReturn flow_ret = GST_FLOW_OK;
/* at_eos=TRUE: this is the final drain for the stream. */
1315 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1316 flow_ret = gst_video_decoder_drain_out (decoder, TRUE);
1317 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1318 ret = (flow_ret == GST_FLOW_OK);
1320 /* Error out even if EOS was ok when we had input, but no output */
1321 if (ret && priv->had_input_data && !priv->had_output_data) {
1322 GST_ELEMENT_ERROR (decoder, STREAM, DECODE,
1323 ("No valid frames decoded before end of stream"),
1324 ("no valid frames found"));
1327 /* Forward EOS immediately. This is required because no
1328 * buffer or serialized event will come after EOS and
1329 * nothing could trigger another _finish_frame() call.
1331 * The subclass can override this behaviour by overriding
1332 * the ::sink_event() vfunc and not chaining up to the
1333 * parent class' ::sink_event() until a later time.
1335 forward_immediate = TRUE;
/* (case GST_EVENT_GAP — case label elided in this view) */
1340 GstFlowReturn flow_ret = GST_FLOW_OK;
1341 gboolean needs_reconfigure = FALSE;
1343 GList *frame_events;
1345 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* In trickmode-key-units only keyframes are decoded; drain first. */
1346 if (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)
1347 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1348 ret = (flow_ret == GST_FLOW_OK);
1350 /* Ensure we have caps before forwarding the event */
1351 if (!decoder->priv->output_state) {
1352 if (!gst_video_decoder_negotiate_default_caps (decoder)) {
1353 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1354 GST_ELEMENT_ERROR (decoder, STREAM, FORMAT, (NULL),
1355 ("Decoder output not negotiated before GAP event."));
/* Still forward the GAP so downstream is not starved. */
1356 forward_immediate = TRUE;
1359 needs_reconfigure = TRUE;
1362 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad)
1363 || needs_reconfigure;
1364 if (decoder->priv->output_state_changed || needs_reconfigure) {
1365 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
1366 GST_WARNING_OBJECT (decoder, "Failed to negotiate with downstream");
1367 gst_pad_mark_reconfigure (decoder->srcpad);
/* Flush out any pending serialized events before the GAP itself. */
1371 GST_DEBUG_OBJECT (decoder, "Pushing all pending serialized events"
1373 events = decoder->priv->pending_events;
1374 frame_events = decoder->priv->current_frame_events;
1375 decoder->priv->pending_events = NULL;
1376 decoder->priv->current_frame_events = NULL;
1378 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1380 gst_video_decoder_push_event_list (decoder, events);
1381 gst_video_decoder_push_event_list (decoder, frame_events);
1383 /* Forward GAP immediately. Everything is drained after
1384 * the GAP event and we can forward this event immediately
1385 * now without having buffers out of order.
1387 forward_immediate = TRUE;
1390 case GST_EVENT_CUSTOM_DOWNSTREAM:
1393 GstFlowReturn flow_ret = GST_FLOW_OK;
/* Still-frame custom event: drain so the still frame is complete. */
1395 if (gst_video_event_parse_still_frame (event, &in_still)) {
1397 GST_DEBUG_OBJECT (decoder, "draining current data for still-frame");
1398 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1399 flow_ret = gst_video_decoder_drain_out (decoder, FALSE);
1400 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1401 ret = (flow_ret == GST_FLOW_OK);
1403 /* Forward STILL_FRAME immediately. Everything is drained after
1404 * the STILL_FRAME event and we can forward this event immediately
1405 * now without having buffers out of order.
1407 forward_immediate = TRUE;
1411 case GST_EVENT_SEGMENT:
1415 gst_event_copy_segment (event, &segment);
1417 if (segment.format == GST_FORMAT_TIME) {
1418 GST_DEBUG_OBJECT (decoder,
1419 "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1423 GST_DEBUG_OBJECT (decoder,
1424 "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
1426 /* handle newsegment as a result from our legacy simple seeking */
1427 /* note that initial 0 should convert to 0 in any case */
1428 if (priv->do_estimate_rate &&
1429 gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
1430 segment.start, GST_FORMAT_TIME, &start)) {
1431 /* best attempt convert */
1432 /* as these are only estimates, stop is kept open-ended to avoid
1433 * premature cutting */
1434 GST_DEBUG_OBJECT (decoder,
1435 "converted to TIME start %" GST_TIME_FORMAT,
1436 GST_TIME_ARGS (start));
1437 segment.start = start;
1438 segment.stop = GST_CLOCK_TIME_NONE;
1439 segment.time = start;
/* Replace the BYTES event with an equivalent TIME segment event. */
1441 gst_event_unref (event);
1442 event = gst_event_new_segment (&segment);
1444 goto newseg_wrong_format;
1448 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1450 /* Update the decode flags in the segment if we have an instant-rate
1451 * override active */
1452 GST_OBJECT_LOCK (decoder);
1453 if (!priv->decode_flags_override)
1454 priv->decode_flags = segment.flags;
/* Override active: keep only the instant-rate flags from decode_flags. */
1456 segment.flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1457 segment.flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1460 priv->base_timestamp = GST_CLOCK_TIME_NONE;
1461 priv->base_picture_number = 0;
1463 decoder->input_segment = segment;
1464 decoder->priv->in_out_segment_sync = FALSE;
1466 GST_OBJECT_UNLOCK (decoder);
1467 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1471 case GST_EVENT_INSTANT_RATE_CHANGE:
1473 GstSegmentFlags flags;
1476 gst_event_parse_instant_rate_change (event, NULL, &flags);
1478 GST_OBJECT_LOCK (decoder);
1479 priv->decode_flags_override = TRUE;
1480 priv->decode_flags = flags;
1482 /* Update the input segment flags */
1483 seg = &decoder->input_segment;
1484 seg->flags &= ~GST_SEGMENT_INSTANT_FLAGS;
1485 seg->flags |= priv->decode_flags & GST_SEGMENT_INSTANT_FLAGS;
1486 GST_OBJECT_UNLOCK (decoder);
1489 case GST_EVENT_FLUSH_STOP:
1493 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* Preserve sticky events attached to queued frames across the flush. */
1494 for (l = priv->frames.head; l; l = l->next) {
1495 GstVideoCodecFrame *frame = l->data;
1497 frame->events = _flush_events (decoder->srcpad, frame->events);
1499 priv->current_frame_events = _flush_events (decoder->srcpad,
1500 decoder->priv->current_frame_events);
1502 /* well, this is kind of worse than a DISCONT */
1503 gst_video_decoder_flush (decoder, TRUE);
1504 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1505 /* Forward FLUSH_STOP immediately. This is required because it is
1506 * expected to be forwarded immediately and no buffers are queued
1509 forward_immediate = TRUE;
/* (case GST_EVENT_TAG — case label elided in this view) */
1516 gst_event_parse_tag (event, &tags);
1518 if (gst_tag_list_get_scope (tags) == GST_TAG_SCOPE_STREAM) {
1519 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1520 if (priv->upstream_tags != tags) {
1521 if (priv->upstream_tags)
1522 gst_tag_list_unref (priv->upstream_tags);
1523 priv->upstream_tags = gst_tag_list_ref (tags);
1524 GST_INFO_OBJECT (decoder, "upstream tags: %" GST_PTR_FORMAT, tags);
/* Replace the upstream TAG event with the merged decoder+upstream one. */
1526 gst_event_unref (event);
1527 event = gst_video_decoder_create_merged_tags_event (decoder);
1528 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
1538 /* Forward non-serialized events immediately, and all other
1539 * events which can be forwarded immediately without potentially
1540 * causing the event to go out of order with other events and
1541 * buffers as decided above.
1544 if (!GST_EVENT_IS_SERIALIZED (event) || forward_immediate) {
1545 ret = gst_video_decoder_push_event (decoder, event);
/* Otherwise queue it; it will travel with the next finished frame. */
1547 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
1548 decoder->priv->current_frame_events =
1549 g_list_prepend (decoder->priv->current_frame_events, event);
1550 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* ERRORS */
1557 newseg_wrong_format:
1559 GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
1560 gst_event_unref (event);
/* GstPad event function for the sink pad: logs the event and dispatches
 * to the subclass' (or default) ::sink_event vfunc. */
1567 gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
1570 GstVideoDecoder *decoder;
1571 GstVideoDecoderClass *decoder_class;
1572 gboolean ret = FALSE;
1574 decoder = GST_VIDEO_DECODER (parent);
1575 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1577 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1578 GST_EVENT_TYPE_NAME (event));
/* Vfunc defaults to gst_video_decoder_sink_event_default unless the
 * subclass overrode it. */
1580 if (decoder_class->sink_event)
1581 ret = decoder_class->sink_event (decoder, event);
1586 /* perform upstream byte <-> time conversion (duration, seeking)
1587 * if subclass allows and if enough data for moderately decent conversion */
/* TRUE when rate estimation is enabled, some bytes have been output and
 * more than one second of media has been seen — i.e. the estimate is
 * meaningful. Takes the object lock around the priv reads. */
1588 static inline gboolean
1589 gst_video_decoder_do_byte (GstVideoDecoder * dec)
1593 GST_OBJECT_LOCK (dec);
1594 ret = dec->priv->do_estimate_rate && (dec->priv->bytes_out > 0)
1595 && (dec->priv->time > GST_SECOND);
1596 GST_OBJECT_UNLOCK (dec);
/* Translate a simple flushing, open-ended TIME seek into a BYTES seek on
 * the sink pad, using the decoder's byte<->time estimate. Unsupported
 * seek shapes (non-SET start, bounded end, non-flushing, rate — early
 * checks partly elided) are refused. Returns result of pushing the
 * converted seek upstream. */
1602 gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
1606 GstSeekType start_type, end_type;
1608 gint64 start, start_time, end_time;
1609 GstSegment seek_segment;
1612 gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
1613 &start_time, &end_type, &end_time);
1615 /* we'll handle plain open-ended flushing seeks with the simple approach */
1617 GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
1621 if (start_type != GST_SEEK_TYPE_SET) {
1622 GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
1626 if ((end_type != GST_SEEK_TYPE_SET && end_type != GST_SEEK_TYPE_NONE) ||
1627 (end_type == GST_SEEK_TYPE_SET && end_time != GST_CLOCK_TIME_NONE)) {
1628 GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
1632 if (!(flags & GST_SEEK_FLAG_FLUSH)) {
1633 GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
/* Apply the seek to a copy of the output segment to resolve the actual
 * target position. */
1637 memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
1638 gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
1639 start_time, end_type, end_time, NULL);
1640 start_time = seek_segment.position;
1642 if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
1643 GST_FORMAT_BYTES, &start)) {
1644 GST_DEBUG_OBJECT (dec, "conversion failed");
/* Keep the original seqnum so upstream can correlate the seek. */
1648 seqnum = gst_event_get_seqnum (event);
1649 event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
1650 GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
1651 gst_event_set_seqnum (event, seqnum);
1653 GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
1654 G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
1656 return gst_pad_push_event (dec->sinkpad, event);
/* Default src-pad event handler: SEEK events get upstream a first chance,
 * then a byte-estimate fallback; QoS events update the proportion /
 * earliest_time bookkeeping used for frame dropping; everything else is
 * forwarded to the sink pad. */
1660 gst_video_decoder_src_event_default (GstVideoDecoder * decoder,
1663 GstVideoDecoderPrivate *priv;
1664 gboolean res = FALSE;
1666 priv = decoder->priv;
1668 GST_DEBUG_OBJECT (decoder,
1669 "received event %d, %s", GST_EVENT_TYPE (event),
1670 GST_EVENT_TYPE_NAME (event));
1672 switch (GST_EVENT_TYPE (event)) {
1673 case GST_EVENT_SEEK:
1678 GstSeekType start_type, stop_type;
1680 gint64 tstart, tstop;
1683 gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
1685 seqnum = gst_event_get_seqnum (event);
1687 /* upstream gets a chance first */
1688 if ((res = gst_pad_push_event (decoder->sinkpad, event)))
1691 /* if upstream fails for a time seek, maybe we can help if allowed */
1692 if (format == GST_FORMAT_TIME) {
1693 if (gst_video_decoder_do_byte (decoder))
1694 res = gst_video_decoder_do_seek (decoder, event);
1698 /* ... though a non-time seek can be aided as well */
1699 /* First bring the requested format to time */
1701 gst_pad_query_convert (decoder->srcpad, format, start,
1702 GST_FORMAT_TIME, &tstart)))
1705 gst_pad_query_convert (decoder->srcpad, format, stop,
1706 GST_FORMAT_TIME, &tstop)))
1709 /* then seek with time on the peer */
1710 event = gst_event_new_seek (rate, GST_FORMAT_TIME,
1711 flags, start_type, tstart, stop_type, tstop);
1712 gst_event_set_seqnum (event, seqnum);
1714 res = gst_pad_push_event (decoder->sinkpad, event);
/* (case GST_EVENT_QOS — case label elided in this view) */
1721 GstClockTimeDiff diff;
1722 GstClockTime timestamp;
1724 gst_event_parse_qos (event, &type, &proportion, &diff, &timestamp);
1726 GST_OBJECT_LOCK (decoder);
1727 priv->proportion = proportion;
1728 if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
1729 if (G_UNLIKELY (diff > 0)) {
/* Late: project ahead by 2*jitter plus one frame duration so we skip
 * enough to catch up. */
1730 priv->earliest_time = timestamp + 2 * diff + priv->qos_frame_duration;
1732 priv->earliest_time = timestamp + diff;
1735 priv->earliest_time = GST_CLOCK_TIME_NONE;
1737 GST_OBJECT_UNLOCK (decoder);
1739 GST_DEBUG_OBJECT (decoder,
1740 "got QoS %" GST_TIME_FORMAT ", %" GST_STIME_FORMAT ", %g",
1741 GST_TIME_ARGS (timestamp), GST_STIME_ARGS (diff), proportion);
1743 res = gst_pad_push_event (decoder->sinkpad, event);
/* default: forward upstream unchanged */
1747 res = gst_pad_push_event (decoder->sinkpad, event);
/* ERRORS (conversion failure label; surrounding lines elided) */
1754 GST_DEBUG_OBJECT (decoder, "could not convert format");
/* GstPad event function for the source pad: logs and dispatches to the
 * subclass' (or default) ::src_event vfunc. */
1759 gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
1761 GstVideoDecoder *decoder;
1762 GstVideoDecoderClass *decoder_class;
1763 gboolean ret = FALSE;
1765 decoder = GST_VIDEO_DECODER (parent);
1766 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1768 GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
1769 GST_EVENT_TYPE_NAME (event));
1771 if (decoder_class->src_event)
1772 ret = decoder_class->src_event (decoder, event);
/* Default src-pad query handler: answers POSITION from the last output
 * timestamp, DURATION via upstream or a byte->time estimate, CONVERT
 * through the raw-video converter, LATENCY by adding the decoder's own
 * configured latency to the peer's; everything else goes to the default
 * pad query handler. */
1778 gst_video_decoder_src_query_default (GstVideoDecoder * dec, GstQuery * query)
1780 GstPad *pad = GST_VIDEO_DECODER_SRC_PAD (dec);
1781 gboolean res = TRUE;
1783 GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
1785 switch (GST_QUERY_TYPE (query)) {
1786 case GST_QUERY_POSITION:
1791 /* upstream gets a chance first */
1792 if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
1793 GST_LOG_OBJECT (dec, "returning peer response");
1797 /* Refuse BYTES format queries. If it made sense to
1798 * answer them, upstream would have already */
1799 gst_query_parse_position (query, &format, NULL);
1801 if (format == GST_FORMAT_BYTES) {
1802 GST_LOG_OBJECT (dec, "Ignoring BYTES position query");
1806 /* we start from the last seen time */
1807 time = dec->priv->last_timestamp_out;
1808 /* correct for the segment values */
1809 time = gst_segment_to_stream_time (&dec->output_segment,
1810 GST_FORMAT_TIME, time);
1812 GST_LOG_OBJECT (dec,
1813 "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
1815 /* and convert to the final format */
1816 if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
1820 gst_query_set_position (query, format, value);
1822 GST_LOG_OBJECT (dec,
1823 "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
1827 case GST_QUERY_DURATION:
1831 /* upstream in any case */
1832 if ((res = gst_pad_query_default (pad, GST_OBJECT (dec), query)))
1835 gst_query_parse_duration (query, &format, NULL);
1836 /* try answering TIME by converting from BYTE if subclass allows */
1837 if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
/* Ask upstream for the stream size in bytes, then estimate duration. */
1840 if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
1842 GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
1843 if (gst_pad_query_convert (dec->sinkpad,
1844 GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
1845 gst_query_set_duration (query, GST_FORMAT_TIME, value);
1852 case GST_QUERY_CONVERT:
1854 GstFormat src_fmt, dest_fmt;
1855 gint64 src_val, dest_val;
1857 GST_DEBUG_OBJECT (dec, "convert query");
1859 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1860 GST_OBJECT_LOCK (dec);
/* Raw-video conversion needs a negotiated output state. */
1861 if (dec->priv->output_state != NULL)
1862 res = __gst_video_rawvideo_convert (dec->priv->output_state,
1863 src_fmt, src_val, &dest_fmt, &dest_val);
1866 GST_OBJECT_UNLOCK (dec);
1869 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1872 case GST_QUERY_LATENCY:
1875 GstClockTime min_latency, max_latency;
1877 res = gst_pad_peer_query (dec->sinkpad, query);
1879 gst_query_parse_latency (query, &live, &min_latency, &max_latency);
1880 GST_DEBUG_OBJECT (dec, "Peer qlatency: live %d, min %"
1881 GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
1882 GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
1884 GST_OBJECT_LOCK (dec);
1885 min_latency += dec->priv->min_latency;
/* NONE on either side means the total max latency is unbounded. */
1886 if (max_latency == GST_CLOCK_TIME_NONE
1887 || dec->priv->max_latency == GST_CLOCK_TIME_NONE)
1888 max_latency = GST_CLOCK_TIME_NONE;
1890 max_latency += dec->priv->max_latency;
1891 GST_OBJECT_UNLOCK (dec);
1893 gst_query_set_latency (query, live, min_latency, max_latency);
/* default: chain to the pad's default query handling */
1898 res = gst_pad_query_default (pad, GST_OBJECT (dec), query);
/* error label (surrounding lines elided) */
1903 GST_ERROR_OBJECT (dec, "query failed");
/* GstPad query function for the source pad: logs and dispatches to the
 * subclass' (or default) ::src_query vfunc. */
1908 gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
1910 GstVideoDecoder *decoder;
1911 GstVideoDecoderClass *decoder_class;
1912 gboolean ret = FALSE;
1914 decoder = GST_VIDEO_DECODER (parent);
1915 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
1917 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
1918 GST_QUERY_TYPE_NAME (query));
1920 if (decoder_class->src_query)
1921 ret = decoder_class->src_query (decoder, query);
* gst_video_decoder_proxy_getcaps:
1928 * @decoder: a #GstVideoDecoder
1929 * @caps: (allow-none): initial caps
1930 * @filter: (allow-none): filter caps
1932 * Returns caps that express @caps (or sink template caps if @caps == NULL)
1933 * restricted to resolution/format/... combinations supported by downstream
1936 * Returns: (transfer full): a #GstCaps owned by caller
/* Thin wrapper around the shared element-level proxy helper, passing this
 * decoder's sink and src pads. */
1941 gst_video_decoder_proxy_getcaps (GstVideoDecoder * decoder, GstCaps * caps,
1944 return __gst_video_element_proxy_getcaps (GST_ELEMENT_CAST (decoder),
1945 GST_VIDEO_DECODER_SINK_PAD (decoder),
1946 GST_VIDEO_DECODER_SRC_PAD (decoder), caps, filter);
/* Compute the sink-pad caps: delegate to the subclass getcaps() vfunc when
 * implemented, otherwise use the default downstream-restricted proxy caps.
 * Returns (transfer full) caps. */
1950 gst_video_decoder_sink_getcaps (GstVideoDecoder * decoder, GstCaps * filter)
1952 GstVideoDecoderClass *klass;
1955 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1958 caps = klass->getcaps (decoder, filter);
1960 caps = gst_video_decoder_proxy_getcaps (decoder, NULL, filter);
1962 GST_LOG_OBJECT (decoder, "Returning caps %" GST_PTR_FORMAT, caps);
/* Default sink-pad query handler: CONVERT via the encoded-video byte/time
 * estimator, ALLOCATION via the subclass propose_allocation(), CAPS via
 * sink_getcaps(), ACCEPT_CAPS either by the default pad behaviour or by a
 * subset/intersect check against template resp. allowed caps; anything
 * else falls through to the default pad query handler. */
1968 gst_video_decoder_sink_query_default (GstVideoDecoder * decoder,
1971 GstPad *pad = GST_VIDEO_DECODER_SINK_PAD (decoder);
1972 GstVideoDecoderPrivate *priv;
1973 gboolean res = FALSE;
1975 priv = decoder->priv;
1977 GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
1979 switch (GST_QUERY_TYPE (query)) {
1980 case GST_QUERY_CONVERT:
1982 GstFormat src_fmt, dest_fmt;
1983 gint64 src_val, dest_val;
1985 gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
1986 GST_OBJECT_LOCK (decoder);
/* Estimate using accumulated bytes_out/time under the object lock. */
1988 __gst_video_encoded_video_convert (priv->bytes_out, priv->time,
1989 src_fmt, src_val, &dest_fmt, &dest_val);
1990 GST_OBJECT_UNLOCK (decoder);
1993 gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
1996 case GST_QUERY_ALLOCATION:{
1997 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
1999 if (klass->propose_allocation)
2000 res = klass->propose_allocation (decoder, query);
2003 case GST_QUERY_CAPS:{
2004 GstCaps *filter, *caps;
2006 gst_query_parse_caps (query, &filter);
2007 caps = gst_video_decoder_sink_getcaps (decoder, filter);
2008 gst_query_set_caps_result (query, caps);
2009 gst_caps_unref (caps);
2013 case GST_QUERY_ACCEPT_CAPS:{
2014 if (decoder->priv->use_default_pad_acceptcaps) {
2016 gst_pad_query_default (GST_VIDEO_DECODER_SINK_PAD (decoder),
2017 GST_OBJECT_CAST (decoder), query);
2020 GstCaps *allowed_caps;
2021 GstCaps *template_caps;
2024 gst_query_parse_accept_caps (query, &caps);
/* Fast path: caps inside the template are always acceptable. */
2026 template_caps = gst_pad_get_pad_template_caps (pad);
2027 accept = gst_caps_is_subset (caps, template_caps);
2028 gst_caps_unref (template_caps);
/* Otherwise (elided branch) intersect with the currently allowed caps. */
2032 gst_pad_query_caps (GST_VIDEO_DECODER_SINK_PAD (decoder), caps);
2034 accept = gst_caps_can_intersect (caps, allowed_caps);
2036 gst_caps_unref (allowed_caps);
2039 gst_query_set_accept_caps_result (query, accept);
/* default: chain to the pad's default query handling */
2045 res = gst_pad_query_default (pad, GST_OBJECT (decoder), query);
/* error label (surrounding lines elided) */
2052 GST_DEBUG_OBJECT (decoder, "query failed");
/* GstPad query function for the sink pad: logs and dispatches to the
 * subclass' (or default) ::sink_query vfunc. */
2058 gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
2061 GstVideoDecoder *decoder;
2062 GstVideoDecoderClass *decoder_class;
2063 gboolean ret = FALSE;
2065 decoder = GST_VIDEO_DECODER (parent);
2066 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
2068 GST_DEBUG_OBJECT (decoder, "received query %d, %s", GST_QUERY_TYPE (query),
2069 GST_QUERY_TYPE_NAME (query));
2071 if (decoder_class->sink_query)
2072 ret = decoder_class->sink_query (decoder, query);
/* Per-input-buffer timestamp record, keyed by input byte offset; other
 * fields (offset, pts, dts, flags) are declared on lines elided from this
 * view — confirm against full file. */
2077 typedef struct _Timestamp Timestamp;
2083 GstClockTime duration;
/* Release a Timestamp allocated with g_slice_new(). */
2088 timestamp_free (Timestamp * ts)
2090 g_slice_free (Timestamp, ts);
/* Record @buffer's PTS/DTS/duration/flags keyed by the current input byte
 * offset, so they can be matched back to parsed frames later. Buffers with
 * no timing info and no flags are skipped to save memory. Warns when the
 * queue grows past 40 entries (suggests the subclass is not consuming). */
2094 gst_video_decoder_add_buffer_info (GstVideoDecoder * decoder,
2097 GstVideoDecoderPrivate *priv = decoder->priv;
2100 if (!GST_BUFFER_PTS_IS_VALID (buffer) &&
2101 !GST_BUFFER_DTS_IS_VALID (buffer) &&
2102 !GST_BUFFER_DURATION_IS_VALID (buffer) &&
2103 GST_BUFFER_FLAGS (buffer) == 0) {
2104 /* Save memory - don't bother storing info
2105 * for buffers with no distinguishing info */
2109 ts = g_slice_new (Timestamp);
2111 GST_LOG_OBJECT (decoder,
2112 "adding PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT
2113 " (offset:%" G_GUINT64_FORMAT ")",
2114 GST_TIME_ARGS (GST_BUFFER_PTS (buffer)),
2115 GST_TIME_ARGS (GST_BUFFER_DTS (buffer)), priv->input_offset);
2117 ts->offset = priv->input_offset;
2118 ts->pts = GST_BUFFER_PTS (buffer);
2119 ts->dts = GST_BUFFER_DTS (buffer);
2120 ts->duration = GST_BUFFER_DURATION (buffer);
2121 ts->flags = GST_BUFFER_FLAGS (buffer);
2123 g_queue_push_tail (&priv->timestamps, ts);
2125 if (g_queue_get_length (&priv->timestamps) > 40) {
2126 GST_WARNING_OBJECT (decoder,
2127 "decoder timestamp list getting long: %d timestamps,"
2128 "possible internal leaking?", g_queue_get_length (&priv->timestamps));
/* Look up and consume the buffer-info record(s) at or before @offset:
 * outputs default to GST_CLOCK_TIME_NONE, then matching entries are copied
 * out and removed from the queue. got_offset is debug-only bookkeeping. */
2133 gst_video_decoder_get_buffer_info_at_offset (GstVideoDecoder *
2134 decoder, guint64 offset, GstClockTime * pts, GstClockTime * dts,
2135 GstClockTime * duration, guint * flags)
2137 #ifndef GST_DISABLE_GST_DEBUG
2138 guint64 got_offset = 0;
/* Initialise outputs so a miss yields NONE values. */
2143 *pts = GST_CLOCK_TIME_NONE;
2144 *dts = GST_CLOCK_TIME_NONE;
2145 *duration = GST_CLOCK_TIME_NONE;
2148 g = decoder->priv->timestamps.head;
/* Walk the queue; entries at or before @offset are consumed (copy-out
 * partly elided), later entries end the scan. */
2151 if (ts->offset <= offset) {
2152 GList *next = g->next;
2153 #ifndef GST_DISABLE_GST_DEBUG
2154 got_offset = ts->offset;
2158 *duration = ts->duration;
2160 g_queue_delete_link (&decoder->priv->timestamps, g);
2162 timestamp_free (ts);
2168 GST_LOG_OBJECT (decoder,
2169 "got PTS %" GST_TIME_FORMAT " DTS %" GST_TIME_FORMAT " flags %x @ offs %"
2170 G_GUINT64_FORMAT " (wanted offset:%" G_GUINT64_FORMAT ")",
2171 GST_TIME_ARGS (*pts), GST_TIME_ARGS (*dts), *flags, got_offset, offset);
/* Compat shim: g_queue_clear_full() only exists since GLib 2.60; emulate
 * it by popping every element and freeing it with @free_func. */
2174 #if !GLIB_CHECK_VERSION(2, 60, 0)
2175 #define g_queue_clear_full queue_clear_full
2177 queue_clear_full (GQueue * queue, GDestroyNotify free_func)
2181 while ((data = g_queue_pop_head (queue)) != NULL)
/* Drop everything queued for reverse playback and output: output_queued /
 * gather / parse hold GstMiniObjects (buffers/events), decode /
 * parse_gather / frames hold GstVideoCodecFrames; each list is freed with
 * the matching unref and reset to NULL. */
2187 gst_video_decoder_clear_queues (GstVideoDecoder * dec)
2189 GstVideoDecoderPrivate *priv = dec->priv;
2191 g_list_free_full (priv->output_queued,
2192 (GDestroyNotify) gst_mini_object_unref);
2193 priv->output_queued = NULL;
2195 g_list_free_full (priv->gather, (GDestroyNotify) gst_mini_object_unref);
2196 priv->gather = NULL;
2197 g_list_free_full (priv->decode, (GDestroyNotify) gst_video_codec_frame_unref);
2198 priv->decode = NULL;
2199 g_list_free_full (priv->parse, (GDestroyNotify) gst_mini_object_unref);
2201 g_list_free_full (priv->parse_gather,
2202 (GDestroyNotify) gst_video_codec_frame_unref);
2203 priv->parse_gather = NULL;
2204 g_queue_clear_full (&priv->frames,
2205 (GDestroyNotify) gst_video_codec_frame_unref);
/* Reset decoder state under the stream lock.
 * @full or @flush_hard: clear segments, queued data and the pending
 * frame/event lists, and reset QoS/error bookkeeping.
 * @full additionally drops the negotiated input/output states, tags,
 * counters and the allocated buffer pool/allocator.
 * Streaming-position state (adapters, offsets, timestamps queue) is
 * always reset at the end. */
2209 gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full,
2210 gboolean flush_hard)
2212 GstVideoDecoderPrivate *priv = decoder->priv;
2214 GST_DEBUG_OBJECT (decoder, "reset full %d", full);
2216 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2218 if (full || flush_hard) {
2219 gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
2220 gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
2221 gst_video_decoder_clear_queues (decoder);
2222 decoder->priv->in_out_segment_sync = TRUE;
2224 if (priv->current_frame) {
2225 gst_video_codec_frame_unref (priv->current_frame);
2226 priv->current_frame = NULL;
2229 g_list_free_full (priv->current_frame_events,
2230 (GDestroyNotify) gst_event_unref);
2231 priv->current_frame_events = NULL;
2232 g_list_free_full (priv->pending_events, (GDestroyNotify) gst_event_unref);
2233 priv->pending_events = NULL;
2235 priv->error_count = 0;
2236 priv->had_output_data = FALSE;
2237 priv->had_input_data = FALSE;
/* QoS / sync-point fields are shared with other threads, hence the
 * object lock. */
2239 GST_OBJECT_LOCK (decoder);
2240 priv->earliest_time = GST_CLOCK_TIME_NONE;
2241 priv->proportion = 0.5;
2242 priv->decode_flags_override = FALSE;
2244 priv->request_sync_point_flags = 0;
2245 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
2246 priv->last_force_key_unit_time = GST_CLOCK_TIME_NONE;
2247 GST_OBJECT_UNLOCK (decoder);
2248 priv->distance_from_sync = -1;
/* full-only section: drop negotiated states, tags, counters, pool. */
2252 if (priv->input_state)
2253 gst_video_codec_state_unref (priv->input_state);
2254 priv->input_state = NULL;
2255 GST_OBJECT_LOCK (decoder);
2256 if (priv->output_state)
2257 gst_video_codec_state_unref (priv->output_state);
2258 priv->output_state = NULL;
2260 priv->qos_frame_duration = 0;
2261 GST_OBJECT_UNLOCK (decoder);
2264 gst_tag_list_unref (priv->tags);
2266 priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
2267 if (priv->upstream_tags) {
2268 gst_tag_list_unref (priv->upstream_tags);
2269 priv->upstream_tags = NULL;
2271 priv->tags_changed = FALSE;
2272 priv->reordered_output = FALSE;
2275 priv->processed = 0;
2277 priv->decode_frame_number = 0;
2278 priv->base_picture_number = 0;
/* Deactivate and release the negotiated buffer pool (guard elided). */
2281 GST_DEBUG_OBJECT (decoder, "deactivate pool %" GST_PTR_FORMAT,
2283 gst_buffer_pool_set_active (priv->pool, FALSE);
2284 gst_object_unref (priv->pool);
2288 if (priv->allocator) {
2289 gst_object_unref (priv->allocator);
2290 priv->allocator = NULL;
/* Always-reset section: streaming position and adapters. */
2294 priv->discont = TRUE;
2296 priv->base_timestamp = GST_CLOCK_TIME_NONE;
2297 priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
2298 priv->pts_delta = GST_CLOCK_TIME_NONE;
2300 priv->input_offset = 0;
2301 priv->frame_offset = 0;
2302 gst_adapter_clear (priv->input_adapter);
2303 gst_adapter_clear (priv->output_adapter);
2304 g_queue_clear_full (&priv->timestamps, (GDestroyNotify) timestamp_free);
2306 GST_OBJECT_LOCK (decoder);
2307 priv->bytes_out = 0;
2309 GST_OBJECT_UNLOCK (decoder);
2311 #ifndef GST_DISABLE_DEBUG
2312 priv->last_reset_time = gst_util_get_timestamp ();
2315 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* Forward-direction chain processing for one input buffer.
 * Packetized mode: each buffer is (part of) one frame and is attached to
 * priv->current_frame, then decoded (or gathered for reverse playback).
 * Non-packetized mode: the buffer is pushed into the input adapter and the
 * subclass parse() is driven via gst_video_decoder_parse_available().
 * @at_eos is forwarded to the parser to allow draining at EOS.
 * NOTE(review): interior lines are elided in this excerpt; comments cover
 * only the visible statements. */
2318 static GstFlowReturn
2319 gst_video_decoder_chain_forward (GstVideoDecoder * decoder,
2320 GstBuffer * buf, gboolean at_eos)
2322 GstVideoDecoderPrivate *priv;
2323 GstVideoDecoderClass *klass;
2324 GstFlowReturn ret = GST_FLOW_OK;
2326 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
2327 priv = decoder->priv;
/* A subclass must either be packetized or provide a parse() vfunc. */
2329 g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
2331 /* Draining on DISCONT is handled in chain_reverse() for reverse playback,
2332 * and this function would only be called to get everything collected GOP
2333 * by GOP in the parse_gather list */
2334 if (decoder->input_segment.rate > 0.0 && GST_BUFFER_IS_DISCONT (buf)
2335 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2336 ret = gst_video_decoder_drain_out (decoder, FALSE);
/* Lazily start a new frame; it will collect pending serialized events. */
2338 if (priv->current_frame == NULL)
2339 priv->current_frame = gst_video_decoder_new_frame (decoder);
2341 if (!priv->packetized)
2342 gst_video_decoder_add_buffer_info (decoder, buf);
2344 priv->input_offset += gst_buffer_get_size (buf);
2346 if (priv->packetized) {
2347 GstVideoCodecFrame *frame;
2348 gboolean was_keyframe = FALSE;
2350 frame = priv->current_frame;
2352 frame->abidata.ABI.num_subframes++;
2353 if (gst_video_decoder_get_subframe_mode (decoder)) {
2354 /* End the frame if the marker flag is set */
2355 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_VIDEO_BUFFER_FLAG_MARKER)
2356 && (decoder->input_segment.rate > 0.0))
/* Keep an extra ref: more subframes of this frame are still expected. */
2357 priv->current_frame = gst_video_codec_frame_ref (frame);
2359 priv->current_frame = NULL;
2361 priv->current_frame = frame;
/* Buffers without the DELTA_UNIT flag are keyframes / sync points. */
2364 if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
2365 was_keyframe = TRUE;
2366 GST_DEBUG_OBJECT (decoder, "Marking current_frame as sync point");
2367 GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
/* Replace the frame's input buffer, preserving metadata from the old one. */
2370 if (frame->input_buffer) {
2371 gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer, buf);
2372 gst_buffer_unref (frame->input_buffer);
2374 frame->input_buffer = buf;
/* Reverse playback: gather frames instead of decoding them now. */
2376 if (decoder->input_segment.rate < 0.0) {
2377 priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
2378 priv->current_frame = NULL;
2380 ret = gst_video_decoder_decode_frame (decoder, frame);
2381 if (!gst_video_decoder_get_subframe_mode (decoder))
2382 priv->current_frame = NULL;
2384 /* If in trick mode and it was a keyframe, drain decoder to avoid extra
2385 * latency. Only do this for forwards playback as reverse playback handles
2386 * draining on keyframes in flush_parse(), and would otherwise call back
2387 * from drain_out() to here causing an infinite loop.
2388 * Also this function is only called for reverse playback to gather frames
2389 * GOP by GOP, and does not do any actual decoding. That would be done by
2391 if (ret == GST_FLOW_OK && was_keyframe && decoder->input_segment.rate > 0.0
2392 && (decoder->input_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS))
2393 ret = gst_video_decoder_drain_out (decoder, FALSE);
/* Non-packetized path: accumulate and let the subclass parse. */
2395 gst_adapter_push (priv->input_adapter, buf);
2397 ret = gst_video_decoder_parse_available (decoder, at_eos, TRUE);
/* NEED_DATA is not an error for callers; presumably normalized to OK in the
 * elided return path — TODO confirm against full source. */
2400 if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
/* Reverse playback: feed the frames accumulated on priv->decode (ordered
 * keyframe-first) into the subclass decoder one by one.
 * In subframe mode, consecutive list entries belonging to the same frame are
 * merged onto a single "current" frame before decoding.
 * Stops early and returns the error on the first non-OK decode result.
 * NOTE(review): loop header and closing braces are elided in this excerpt. */
2406 static GstFlowReturn
2407 gst_video_decoder_flush_decode (GstVideoDecoder * dec)
2409 GstVideoDecoderPrivate *priv = dec->priv;
2410 GstFlowReturn res = GST_FLOW_OK;
2412 GstVideoCodecFrame *current_frame = NULL;
2413 gboolean last_subframe;
2414 GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
2416 walk = priv->decode;
2419 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2420 last_subframe = TRUE;
2421 /* In subframe mode, we need to get rid of intermediary frames
2422 * created during the buffer gather stage. That's why that we keep a current
2423 * frame as the main frame and drop all the frame afterwhile until the end
2424 * of the subframes batch.
2426 if (gst_video_decoder_get_subframe_mode (dec)) {
2427 if (current_frame == NULL) {
/* First subframe of a batch becomes the main frame. */
2428 current_frame = gst_video_codec_frame_ref (frame);
/* Subsequent subframes: move their input buffer (and metas) onto the
 * main frame, then drop the intermediary frame. */
2430 if (current_frame->input_buffer) {
2431 gst_video_decoder_copy_metas (dec, current_frame,
2432 current_frame->input_buffer, current_frame->output_buffer);
2433 gst_buffer_unref (current_frame->input_buffer);
2435 current_frame->input_buffer = gst_buffer_ref (frame->input_buffer);
2436 gst_video_codec_frame_unref (frame);
/* MARKER flag on the input buffer signals the last subframe of the frame. */
2438 last_subframe = GST_BUFFER_FLAG_IS_SET (current_frame->input_buffer,
2439 GST_VIDEO_BUFFER_FLAG_MARKER);
2441 current_frame = frame;
2444 GST_DEBUG_OBJECT (dec, "decoding frame %p buffer %p, PTS %" GST_TIME_FORMAT
2445 ", DTS %" GST_TIME_FORMAT, frame, frame->input_buffer,
2446 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2447 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
/* Remove the processed entry from the decode list before decoding. */
2451 priv->decode = g_list_delete_link (priv->decode, walk);
2453 /* decode buffer, resulting data prepended to queue */
2454 res = gst_video_decoder_decode_frame (dec, current_frame);
2455 if (res != GST_FLOW_OK)
2457 if (!gst_video_decoder_get_subframe_mode (dec)
2459 current_frame = NULL;
2466 /* gst_video_decoder_flush_parse is called from the
2467 * chain_reverse() function when a buffer containing
2468 * a DISCONT - indicating that reverse playback
2469 * looped back to the next data block, and therefore
2470 * all available data should be fed through the
2471 * decoder and frames gathered for reversed output
2473 static GstFlowReturn
2474 gst_video_decoder_flush_parse (GstVideoDecoder * dec, gboolean at_eos)
2476 GstVideoDecoderPrivate *priv = dec->priv;
2477 GstFlowReturn res = GST_FLOW_OK;
2479 GstVideoDecoderClass *decoder_class;
2481 decoder_class = GST_VIDEO_DECODER_GET_CLASS (dec);
2483 GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
2485 /* Reverse the gather list, and prepend it to the parse list,
2486 * then flush to parse whatever we can */
2487 priv->gather = g_list_reverse (priv->gather);
2488 priv->parse = g_list_concat (priv->gather, priv->parse);
2489 priv->gather = NULL;
2491 /* clear buffer and decoder state */
2492 gst_video_decoder_flush (dec, FALSE);
/* Pass 1: run every queued raw buffer through the forward chain so the
 * parser turns them into frames on priv->parse_gather.
 * NOTE(review): the loop header over priv->parse is elided here. */
2496 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2497 GList *next = walk->next;
2499 GST_DEBUG_OBJECT (dec, "parsing buffer %p, PTS %" GST_TIME_FORMAT
2500 ", DTS %" GST_TIME_FORMAT " flags %x", buf,
2501 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2502 GST_TIME_ARGS (GST_BUFFER_DTS (buf)), GST_BUFFER_FLAGS (buf));
2504 /* parse buffer, resulting frames prepended to parse_gather queue */
2505 gst_buffer_ref (buf);
2506 res = gst_video_decoder_chain_forward (dec, buf, at_eos);
2508 /* if we generated output, we can discard the buffer, else we
2509 * keep it in the queue */
2510 if (priv->parse_gather) {
2511 GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
2512 priv->parse = g_list_delete_link (priv->parse, walk);
2513 gst_buffer_unref (buf);
2515 GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
/* Pass 2: extract segment events from the gathered frames so the output
 * segment is known before any frame is decoded/pushed. */
2520 walk = priv->parse_gather;
2522 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2525 /* this is reverse playback, check if we need to apply some segment
2526 * to the output before decoding, as during decoding the segment.rate
2527 * must be used to determine if a buffer should be pushed or added to
2528 * the output list for reverse pushing.
2530 * The new segment is not immediately pushed here because we must
2531 * wait for negotiation to happen before it can be pushed to avoid
2532 * pushing a segment before caps event. Negotiation only happens
2533 * when finish_frame is called.
2535 for (walk2 = frame->events; walk2;) {
2537 GstEvent *event = walk2->data;
2539 walk2 = g_list_next (walk2);
/* Only events up to and including SEGMENT are moved to pending_events. */
2540 if (GST_EVENT_TYPE (event) <= GST_EVENT_SEGMENT) {
2542 if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
2545 GST_DEBUG_OBJECT (dec, "Segment at frame %p %" GST_TIME_FORMAT,
2546 frame, GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)));
2547 gst_event_copy_segment (event, &segment);
2548 if (segment.format == GST_FORMAT_TIME) {
2549 dec->output_segment = segment;
2550 dec->priv->in_out_segment_sync =
2551 gst_segment_is_equal (&dec->input_segment, &segment);
2554 dec->priv->pending_events =
2555 g_list_append (dec->priv->pending_events, event);
2556 frame->events = g_list_delete_link (frame->events, cur);
2563 /* now we can process frames. Start by moving each frame from the parse_gather
2564 * to the decode list, reverse the order as we go, and stopping when/if we
2565 * copy a keyframe. */
2566 GST_DEBUG_OBJECT (dec, "checking parsed frames for a keyframe to decode");
2567 walk = priv->parse_gather;
2569 GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
2571 /* remove from the gather list */
2572 priv->parse_gather = g_list_remove_link (priv->parse_gather, walk);
2574 /* move it to the front of the decode queue */
2575 priv->decode = g_list_concat (walk, priv->decode);
2577 /* if we copied a keyframe, flush and decode the decode queue */
2578 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
2579 GST_DEBUG_OBJECT (dec, "found keyframe %p with PTS %" GST_TIME_FORMAT
2580 ", DTS %" GST_TIME_FORMAT, frame,
2581 GST_TIME_ARGS (GST_BUFFER_PTS (frame->input_buffer)),
2582 GST_TIME_ARGS (GST_BUFFER_DTS (frame->input_buffer)));
2583 res = gst_video_decoder_flush_decode (dec);
2584 if (res != GST_FLOW_OK)
2587 /* We need to tell the subclass to drain now.
2588 * We prefer the drain vfunc, but for backward-compat
2589 * we use a finish() vfunc if drain isn't implemented */
2590 if (decoder_class->drain) {
2591 GST_DEBUG_OBJECT (dec, "Draining");
2592 res = decoder_class->drain (dec);
2593 } else if (decoder_class->finish) {
2594 GST_FIXME_OBJECT (dec, "Sub-class should implement drain(). "
2595 "Calling finish() for backwards-compat");
2596 res = decoder_class->finish (dec);
2599 if (res != GST_FLOW_OK)
2602 /* now send queued data downstream */
2603 walk = priv->output_queued;
2605 GstBuffer *buf = GST_BUFFER_CAST (walk->data);
2607 priv->output_queued =
2608 g_list_delete_link (priv->output_queued, priv->output_queued);
2610 if (G_LIKELY (res == GST_FLOW_OK)) {
2611 /* avoid stray DISCONT from forward processing,
2612 * which have no meaning in reverse pushing */
2613 GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
2615 /* Last chance to calculate a timestamp as we loop backwards
2616 * through the list */
2617 if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE)
2618 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2619 else if (priv->last_timestamp_out != GST_CLOCK_TIME_NONE &&
2620 GST_BUFFER_DURATION (buf) != GST_CLOCK_TIME_NONE) {
/* Derive a PTS by subtracting the duration from the following buffer's
 * timestamp (we are iterating in reverse presentation order). */
2621 GST_BUFFER_TIMESTAMP (buf) =
2622 priv->last_timestamp_out - GST_BUFFER_DURATION (buf);
2623 priv->last_timestamp_out = GST_BUFFER_TIMESTAMP (buf);
2624 GST_LOG_OBJECT (dec,
2625 "Calculated TS %" GST_TIME_FORMAT " working backwards",
2626 GST_TIME_ARGS (priv->last_timestamp_out));
2629 res = gst_video_decoder_clip_and_push_buf (dec, buf);
/* On error, the buffer is dropped instead of pushed. */
2631 gst_buffer_unref (buf);
2634 walk = priv->output_queued;
2637 /* clear buffer and decoder state again
2638 * before moving to the previous keyframe */
2639 gst_video_decoder_flush (dec, FALSE);
2642 walk = priv->parse_gather;
/* Reverse playback chain entry: buffers are accumulated (prepended) on the
 * gather list until a DISCONT buffer (or NULL, meaning drain) signals that
 * the upstream seek looped back, at which point everything gathered is
 * parsed/decoded via gst_video_decoder_flush_parse().
 * @buf may be NULL to force a flush of gathered data. */
2649 static GstFlowReturn
2650 gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
2652 GstVideoDecoderPrivate *priv = dec->priv;
2653 GstFlowReturn result = GST_FLOW_OK;
2655 /* if we have a discont, move buffers to the decode list */
2656 if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
2657 GST_DEBUG_OBJECT (dec, "received discont");
2659 /* parse and decode stuff in the gather and parse queues */
2660 result = gst_video_decoder_flush_parse (dec, FALSE);
2663 if (G_LIKELY (buf)) {
2664 GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
2665 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
2666 GST_TIME_FORMAT, buf, gst_buffer_get_size (buf),
2667 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2668 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2669 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
2671 /* add buffer to gather queue */
/* Prepend: gather list is kept in reverse arrival order. */
2672 priv->gather = g_list_prepend (priv->gather, buf);
/* Sink pad chain function. Validates that a format has been negotiated (if
 * the subclass requires one), synthesizes a default TIME segment when
 * upstream never sent one, then dispatches to the forward or reverse chain
 * path depending on the input segment rate.
 * Returns GST_FLOW_NOT_NEGOTIATED (consuming @buf) when input caps are
 * required but missing. */
2678 static GstFlowReturn
2679 gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
2681 GstVideoDecoder *decoder;
2682 GstFlowReturn ret = GST_FLOW_OK;
2684 decoder = GST_VIDEO_DECODER (parent);
2686 if (G_UNLIKELY (!decoder->priv->input_state && decoder->priv->needs_format))
2687 goto not_negotiated;
2689 GST_LOG_OBJECT (decoder,
2690 "chain PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT " duration %"
2691 GST_TIME_FORMAT " size %" G_GSIZE_FORMAT " flags %x",
2692 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
2693 GST_TIME_ARGS (GST_BUFFER_DTS (buf)),
2694 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)),
2695 gst_buffer_get_size (buf), GST_BUFFER_FLAGS (buf));
2697 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2700 * requiring the pad to be negotiated makes it impossible to use
2701 * oggdemux or filesrc ! decoder */
2703 if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
2705 GstSegment *segment = &decoder->input_segment;
2707 GST_WARNING_OBJECT (decoder,
2708 "Received buffer without a new-segment. "
2709 "Assuming timestamps start from 0.");
2711 gst_segment_init (segment, GST_FORMAT_TIME);
/* Queue the synthesized segment event so it is pushed with the next frame. */
2713 event = gst_event_new_segment (segment);
2715 decoder->priv->current_frame_events =
2716 g_list_prepend (decoder->priv->current_frame_events, event);
2719 decoder->priv->had_input_data = TRUE;
/* Positive rate: normal forward decoding; negative: reverse gathering. */
2721 if (decoder->input_segment.rate > 0.0)
2722 ret = gst_video_decoder_chain_forward (decoder, buf, FALSE);
2724 ret = gst_video_decoder_chain_reverse (decoder, buf);
2726 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* not_negotiated error path: report and drop the buffer. */
2732 GST_ELEMENT_ERROR (decoder, CORE, NEGOTIATION, (NULL),
2733 ("decoder not initialized"));
2734 gst_buffer_unref (buf);
2735 return GST_FLOW_NOT_NEGOTIATED;
/* GstElement state-change handler. Upward transitions call the subclass
 * open()/start() vfuncs (with a full reset before start); downward
 * transitions call stop()/close() and reset again. Error paths (elided
 * labels) post an element error and return GST_STATE_CHANGE_FAILURE. */
2739 static GstStateChangeReturn
2740 gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
2742 GstVideoDecoder *decoder;
2743 GstVideoDecoderClass *decoder_class;
2744 GstStateChangeReturn ret;
2746 decoder = GST_VIDEO_DECODER (element);
2747 decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);
2749 switch (transition) {
2750 case GST_STATE_CHANGE_NULL_TO_READY:
2751 /* open device/library if needed */
2752 if (decoder_class->open && !decoder_class->open (decoder))
2755 case GST_STATE_CHANGE_READY_TO_PAUSED:
/* Full hard reset before starting so the subclass sees clean state. */
2756 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2757 gst_video_decoder_reset (decoder, TRUE, TRUE);
2758 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2760 /* Initialize device/library if needed */
2761 if (decoder_class->start && !decoder_class->start (decoder))
/* Chain up to the parent class for the default state handling. */
2768 ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
2770 switch (transition) {
2771 case GST_STATE_CHANGE_PAUSED_TO_READY:{
2772 gboolean stopped = TRUE;
2774 if (decoder_class->stop)
2775 stopped = decoder_class->stop (decoder);
2777 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2778 gst_video_decoder_reset (decoder, TRUE, TRUE);
2779 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2786 case GST_STATE_CHANGE_READY_TO_NULL:
2787 /* close device/library if needed */
2788 if (decoder_class->close && !decoder_class->close (decoder))
/* Error labels (open/start/stop/close failures), targets of the elided
 * gotos above. */
2800 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2801 ("Failed to open decoder"));
2802 return GST_STATE_CHANGE_FAILURE;
2807 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2808 ("Failed to start decoder"));
2809 return GST_STATE_CHANGE_FAILURE;
2814 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2815 ("Failed to stop decoder"));
2816 return GST_STATE_CHANGE_FAILURE;
2821 GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
2822 ("Failed to close decoder"));
2823 return GST_STATE_CHANGE_FAILURE;
/* Allocate and initialize a new GstVideoCodecFrame with refcount 1.
 * Under the STREAM lock it assigns monotonically increasing system/decode
 * frame numbers and transfers ownership of the pending serialized events
 * (priv->current_frame_events) to the new frame. Timestamps start unset. */
2827 static GstVideoCodecFrame *
2828 gst_video_decoder_new_frame (GstVideoDecoder * decoder)
2830 GstVideoDecoderPrivate *priv = decoder->priv;
2831 GstVideoCodecFrame *frame;
2833 frame = g_slice_new0 (GstVideoCodecFrame);
2835 frame->ref_count = 1;
2837 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
2838 frame->system_frame_number = priv->system_frame_number;
2839 priv->system_frame_number++;
2840 frame->decode_frame_number = priv->decode_frame_number;
2841 priv->decode_frame_number++;
2843 frame->dts = GST_CLOCK_TIME_NONE;
2844 frame->pts = GST_CLOCK_TIME_NONE;
2845 frame->duration = GST_CLOCK_TIME_NONE;
/* Events gathered since the previous frame now belong to this frame. */
2846 frame->events = priv->current_frame_events;
2847 priv->current_frame_events = NULL;
2849 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
2851 GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
2852 frame, frame->system_frame_number);
/* Push a list of events downstream, oldest first (the list is stored in
 * reverse order, so iterate from the tail), then free the list container.
 * Takes ownership of @events; each event ref is consumed by the push. */
2858 gst_video_decoder_push_event_list (GstVideoDecoder * decoder, GList * events)
2862 /* events are stored in reverse order */
2863 for (l = g_list_last (events); l; l = g_list_previous (l)) {
2864 GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
2865 gst_video_decoder_push_event (decoder, l->data);
2867 g_list_free (events);
/* Common bookkeeping before a frame is finished (pushed) or dropped:
 *  - flush events attached to this and earlier pending frames downstream
 *    (or queue them on pending_events when @dropping or not yet negotiated);
 *  - reconstruct a missing PTS/duration from queued DTS/PTS candidates,
 *    the previous output timestamp, or the segment start;
 *  - enforce non-decreasing output timestamps and remember the last one.
 * Called with the STREAM lock held by the caller.
 * NOTE(review): interior lines are elided; comments describe visible code. */
2871 gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
2872 decoder, GstVideoCodecFrame * frame, gboolean dropping)
2874 GstVideoDecoderPrivate *priv = decoder->priv;
2875 GList *l, *events = NULL;
2878 #ifndef GST_DISABLE_GST_DEBUG
2879 GST_LOG_OBJECT (decoder, "n %d in %" G_GSIZE_FORMAT " out %" G_GSIZE_FORMAT,
2880 priv->frames.length,
2881 gst_adapter_available (priv->input_adapter),
2882 gst_adapter_available (priv->output_adapter));
2885 sync = GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame);
2887 GST_LOG_OBJECT (decoder,
2888 "finish frame %p (#%d)(sub=#%d) sync:%d PTS:%" GST_TIME_FORMAT " DTS:%"
2890 frame, frame->system_frame_number, frame->abidata.ABI.num_subframes,
2891 sync, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts));
2893 /* Push all pending events that arrived before this frame */
2894 for (l = priv->frames.head; l; l = l->next) {
2895 GstVideoCodecFrame *tmp = l->data;
/* Collect events from frames up to (and presumably including) this one;
 * the loop-termination test is elided — TODO confirm against full source. */
2898 events = g_list_concat (tmp->events, events);
2906 if (dropping || !decoder->priv->output_state) {
2907 /* Push before the next frame that is not dropped */
2908 decoder->priv->pending_events =
2909 g_list_concat (events, decoder->priv->pending_events);
2911 gst_video_decoder_push_event_list (decoder, decoder->priv->pending_events);
2912 decoder->priv->pending_events = NULL;
2914 gst_video_decoder_push_event_list (decoder, events);
2917 /* Check if the data should not be displayed. For example altref/invisible
2918 * frame in vp8. In this case we should not update the timestamps. */
2919 if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
2922 /* If the frame is meant to be output but we don't have an output_buffer
2923 * we have a problem :) */
2924 if (G_UNLIKELY ((frame->output_buffer == NULL) && !dropping))
2925 goto no_output_buffer;
/* Track a new timestamp baseline whenever the subclass-set PTS jumps. */
2927 if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
2928 if (frame->pts != priv->base_timestamp) {
2929 GST_DEBUG_OBJECT (decoder,
2930 "sync timestamp %" GST_TIME_FORMAT " diff %" GST_STIME_FORMAT,
2931 GST_TIME_ARGS (frame->pts),
2932 GST_STIME_ARGS (GST_CLOCK_DIFF (frame->pts,
2933 decoder->output_segment.start)));
2934 priv->base_timestamp = frame->pts;
2935 priv->base_picture_number = frame->decode_frame_number;
2939 if (frame->duration == GST_CLOCK_TIME_NONE) {
2940 frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
2941 GST_LOG_OBJECT (decoder,
2942 "Guessing duration %" GST_TIME_FORMAT " for frame...",
2943 GST_TIME_ARGS (frame->duration));
2946 /* PTS is expected montone ascending,
2947 * so a good guess is lowest unsent DTS */
2949 GstClockTime min_ts = GST_CLOCK_TIME_NONE;
2950 GstVideoCodecFrame *oframe = NULL;
2951 gboolean seen_none = FALSE;
2953 /* some maintenance regardless */
/* Scan pending frames for the smallest queued DTS (abidata.ABI.ts);
 * seen_none records that some frame had no valid ts (set in elided code). */
2954 for (l = priv->frames.head; l; l = l->next) {
2955 GstVideoCodecFrame *tmp = l->data;
2957 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts)) {
2962 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts < min_ts) {
2963 min_ts = tmp->abidata.ABI.ts;
2967 /* save a ts if needed */
2968 if (oframe && oframe != frame) {
2969 oframe->abidata.ABI.ts = frame->abidata.ABI.ts;
2972 /* and set if needed;
2973 * valid delta means we have reasonable DTS input */
2974 /* also, if we ended up reordered, means this approach is conflicting
2975 * with some sparse existing PTS, and so it does not work out */
2976 if (!priv->reordered_output &&
2977 !GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none &&
2978 GST_CLOCK_TIME_IS_VALID (priv->pts_delta)) {
2979 frame->pts = min_ts + priv->pts_delta;
2980 GST_DEBUG_OBJECT (decoder,
2981 "no valid PTS, using oldest DTS %" GST_TIME_FORMAT,
2982 GST_TIME_ARGS (frame->pts));
2985 /* some more maintenance, ts2 holds PTS */
/* Second scan: same bookkeeping over the queued PTS values (ts2). */
2986 min_ts = GST_CLOCK_TIME_NONE;
2988 for (l = priv->frames.head; l; l = l->next) {
2989 GstVideoCodecFrame *tmp = l->data;
2991 if (!GST_CLOCK_TIME_IS_VALID (tmp->abidata.ABI.ts2)) {
2996 if (!GST_CLOCK_TIME_IS_VALID (min_ts) || tmp->abidata.ABI.ts2 < min_ts) {
2997 min_ts = tmp->abidata.ABI.ts2;
3001 /* save a ts if needed */
3002 if (oframe && oframe != frame) {
3003 oframe->abidata.ABI.ts2 = frame->abidata.ABI.ts2;
3006 /* if we detected reordered output, then PTS are void,
3007 * however those were obtained; bogus input, subclass etc */
3008 if (priv->reordered_output && !seen_none) {
3009 GST_DEBUG_OBJECT (decoder, "invalidating PTS");
3010 frame->pts = GST_CLOCK_TIME_NONE;
3013 if (!GST_CLOCK_TIME_IS_VALID (frame->pts) && !seen_none) {
3014 frame->pts = min_ts;
3015 GST_DEBUG_OBJECT (decoder,
3016 "no valid PTS, using oldest PTS %" GST_TIME_FORMAT,
3017 GST_TIME_ARGS (frame->pts));
3022 if (frame->pts == GST_CLOCK_TIME_NONE) {
3023 /* Last ditch timestamp guess: Just add the duration to the previous
3024 * frame. If it's the first frame, just use the segment start. */
3025 if (frame->duration != GST_CLOCK_TIME_NONE) {
3026 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out))
3027 frame->pts = priv->last_timestamp_out + frame->duration;
3028 else if (frame->dts != GST_CLOCK_TIME_NONE) {
3029 frame->pts = frame->dts;
3030 GST_LOG_OBJECT (decoder,
3031 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3032 GST_TIME_ARGS (frame->pts));
3033 } else if (decoder->output_segment.rate > 0.0)
3034 frame->pts = decoder->output_segment.start;
3035 GST_INFO_OBJECT (decoder,
3036 "Guessing PTS=%" GST_TIME_FORMAT " for frame... DTS=%"
3037 GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts),
3038 GST_TIME_ARGS (frame->dts));
3039 } else if (sync && frame->dts != GST_CLOCK_TIME_NONE) {
/* No duration available: on keyframes, fall back to DTS as PTS. */
3040 frame->pts = frame->dts;
3041 GST_LOG_OBJECT (decoder,
3042 "Setting DTS as PTS %" GST_TIME_FORMAT " for frame...",
3043 GST_TIME_ARGS (frame->pts));
/* Clamp decreasing timestamps: warn, flag reordering, and reuse the last
 * pushed timestamp so downstream sees a monotone sequence. */
3047 if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
3048 if (frame->pts < priv->last_timestamp_out) {
3049 GST_WARNING_OBJECT (decoder,
3050 "decreasing timestamp (%" GST_TIME_FORMAT " < %"
3051 GST_TIME_FORMAT ")",
3052 GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
3053 priv->reordered_output = TRUE;
3054 /* make it a bit less weird downstream */
3055 frame->pts = priv->last_timestamp_out;
3059 if (GST_CLOCK_TIME_IS_VALID (frame->pts))
3060 priv->last_timestamp_out = frame->pts;
/* no_output_buffer error label. */
3067 GST_ERROR_OBJECT (decoder, "No buffer to output !");
3072 * gst_video_decoder_release_frame:
3073 * @dec: a #GstVideoDecoder
3074 * @frame: (transfer full): the #GstVideoCodecFrame to release
3076 * Similar to gst_video_decoder_drop_frame(), but simply releases @frame
3077 * without any processing other than removing it from list of pending frames,
3078 * after which it is considered finished and released.
3083 gst_video_decoder_release_frame (GstVideoDecoder * dec,
3084 GstVideoCodecFrame * frame)
3088 /* unref once from the list */
3089 GST_VIDEO_DECODER_STREAM_LOCK (dec);
3090 link = g_queue_find (&dec->priv->frames, frame);
/* If the frame was still queued, drop the queue's reference and link. */
3092 gst_video_codec_frame_unref (frame);
3093 g_queue_delete_link (&dec->priv->frames, link);
/* Keep the frame's undelivered events: they will be pushed before the
 * next finished frame rather than being lost. */
3095 if (frame->events) {
3096 dec->priv->pending_events =
3097 g_list_concat (frame->events, dec->priv->pending_events);
3098 frame->events = NULL;
3100 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3102 /* unref because this function takes ownership */
3103 gst_video_codec_frame_unref (frame);
3106 /* called with STREAM_LOCK */
/* Record a dropped frame and post a QoS message on the bus carrying the
 * drop's running/stream time, jitter against the QoS earliest_time, the
 * current proportion and the processed/dropped counters. */
3108 gst_video_decoder_post_qos_drop (GstVideoDecoder * dec, GstClockTime timestamp)
3110 GstClockTime stream_time, jitter, earliest_time, qostime;
3111 GstSegment *segment;
3112 GstMessage *qos_msg;
3114 dec->priv->dropped++;
3116 /* post QoS message */
/* Snapshot QoS fields under the object lock; they are updated from QoS
 * events on another thread. */
3117 GST_OBJECT_LOCK (dec);
3118 proportion = dec->priv->proportion;
3119 earliest_time = dec->priv->earliest_time;
3120 GST_OBJECT_UNLOCK (dec);
/* Fall back to the input segment if output was never negotiated. */
3122 segment = &dec->output_segment;
3123 if (G_UNLIKELY (segment->format == GST_FORMAT_UNDEFINED))
3124 segment = &dec->input_segment;
3126 gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
3127 qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
3128 jitter = GST_CLOCK_DIFF (qostime, earliest_time);
3130 gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
3131 timestamp, GST_CLOCK_TIME_NONE);
3132 gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
3133 gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
3134 dec->priv->processed, dec->priv->dropped);
3135 gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
3139 * gst_video_decoder_drop_frame:
3140 * @dec: a #GstVideoDecoder
3141 * @frame: (transfer full): the #GstVideoCodecFrame to drop
3143 * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
3144 * case and posts a QoS message with the frame's details on the bus.
3145 * In any case, the frame is considered finished and released.
3147 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3150 gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
3152 GST_LOG_OBJECT (dec, "drop frame %p", frame);
3154 if (gst_video_decoder_get_subframe_mode (dec))
3155 GST_DEBUG_OBJECT (dec, "Drop subframe %d. Must be the last one.",
3156 frame->abidata.ABI.num_subframes);
3158 GST_VIDEO_DECODER_STREAM_LOCK (dec);
/* Run the shared finish bookkeeping in "dropping" mode: events are kept
 * pending and timestamps are reconstructed, but nothing is pushed. */
3160 gst_video_decoder_prepare_finish_frame (dec, frame, TRUE);
3162 GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
3163 GST_TIME_ARGS (frame->pts));
/* Account for the drop and notify the application via a QoS message. */
3165 gst_video_decoder_post_qos_drop (dec, frame->pts);
3167 /* now free the frame */
3168 gst_video_decoder_release_frame (dec, frame);
3170 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
3176 * gst_video_decoder_drop_subframe:
3177 * @dec: a #GstVideoDecoder
3178 * @frame: (transfer full): the #GstVideoCodecFrame
3181 * The frame is not considered finished until the whole frame
3182 * is finished or dropped by the subclass.
3184 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3189 gst_video_decoder_drop_subframe (GstVideoDecoder * dec,
3190 GstVideoCodecFrame * frame)
/* Only meaningful in subframe mode; reject the call otherwise. */
3192 g_return_val_if_fail (gst_video_decoder_get_subframe_mode (dec),
3193 GST_FLOW_NOT_SUPPORTED);
3195 GST_LOG_OBJECT (dec, "drop subframe %p num=%d", frame->input_buffer,
3196 gst_video_decoder_get_input_subframe_index (dec, frame));
3198 GST_VIDEO_DECODER_STREAM_LOCK (dec);
/* Consume only the caller's reference; the frame itself stays pending
 * until the final subframe is finished or dropped. */
3200 gst_video_codec_frame_unref (frame);
3202 GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
/* Default transform_meta implementation: a meta is copied to the output
 * buffer only if every API tag it declares is one of the generic video
 * tags below (i.e. it carries no memory-/colorspace-/size-specific data
 * beyond what decoding preserves). */
3208 gst_video_decoder_transform_meta_default (GstVideoDecoder *
3209 decoder, GstVideoCodecFrame * frame, GstMeta * meta)
3211 const GstMetaInfo *info = meta->info;
3212 const gchar *const *tags;
3213 const gchar *const supported_tags[] = {
3214 GST_META_TAG_VIDEO_STR,
3215 GST_META_TAG_VIDEO_ORIENTATION_STR,
3216 GST_META_TAG_VIDEO_SIZE_STR,
3220 tags = gst_meta_api_type_get_tags (info->api);
/* Any tag outside the supported set vetoes the copy. */
3226 if (!g_strv_contains (supported_tags, *tags))
/* Fields of the CopyMetaData closure passed to foreach_metadata()
 * (the struct's opening line is elided from this excerpt). */
3236 GstVideoDecoder *decoder;
3237 GstVideoCodecFrame *frame;
/* gst_buffer_foreach_meta() callback: decide per-meta whether to copy it
 * from @inbuf to the destination buffer in @user_data.
 * Memory-specific metas are never offered to the subclass; otherwise the
 * subclass transform_meta() vfunc decides, and the copy is performed via
 * the meta's own transform_func. */
3242 foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
3244 CopyMetaData *data = user_data;
3245 GstVideoDecoder *decoder = data->decoder;
3246 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
3247 GstVideoCodecFrame *frame = data->frame;
3248 GstBuffer *buffer = data->buffer;
3249 const GstMetaInfo *info = (*meta)->info;
3250 gboolean do_copy = FALSE;
3252 if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
3253 /* never call the transform_meta with memory specific metadata */
3254 GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
3255 g_type_name (info->api));
3257 } else if (klass->transform_meta) {
3258 do_copy = klass->transform_meta (decoder, frame, *meta);
3259 GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
3260 g_type_name (info->api), do_copy);
3263 /* we only copy metadata when the subclass implemented a transform_meta
3264 * function and when it returns %TRUE */
3265 if (do_copy && info->transform_func) {
3266 GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
3267 GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
3268 /* simply copy then */
3270 info->transform_func (buffer, *meta, inbuf, _gst_meta_transform_copy,
/* Copy transformable metadata from @src_buffer to @dest_buffer for @frame,
 * iterating src_buffer's metas via foreach_metadata(). Skipped entirely
 * when the subclass provides no transform_meta vfunc; warns when @frame is
 * NULL since the per-frame context required by the vfunc is missing. */
3277 gst_video_decoder_copy_metas (GstVideoDecoder * decoder,
3278 GstVideoCodecFrame * frame, GstBuffer * src_buffer, GstBuffer * dest_buffer)
3280 GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3282 if (decoder_class->transform_meta) {
3283 if (G_LIKELY (frame)) {
3286 data.decoder = decoder;
3288 data.buffer = dest_buffer;
3289 gst_buffer_foreach_meta (src_buffer, foreach_metadata, &data);
3291 GST_WARNING_OBJECT (decoder,
3292 "Can't copy metadata because input frame disappeared");
3298 * gst_video_decoder_finish_frame:
3299 * @decoder: a #GstVideoDecoder
3300 * @frame: (transfer full): a decoded #GstVideoCodecFrame
3302 * @frame should have a valid decoded data buffer, whose metadata fields
3303 * are then appropriately set according to frame data and pushed downstream.
3304 * If no output data is provided, @frame is considered skipped.
3305 * In any case, the frame is considered finished and released.
3307 * After calling this function the output buffer of the frame is to be
3308 * considered read-only. This function will also change the metadata
3311 * Returns: a #GstFlowReturn resulting from sending data downstream
3314 gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
3315 GstVideoCodecFrame * frame)
3317 GstFlowReturn ret = GST_FLOW_OK;
3318 GstVideoDecoderPrivate *priv = decoder->priv;
3319 GstBuffer *output_buffer;
3320 gboolean needs_reconfigure = FALSE;
3322 GST_LOG_OBJECT (decoder, "finish frame %p", frame);
3324 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* Renegotiate first if the output state changed or the srcpad was marked
 * for reconfiguration; on failure keep the reconfigure flag set so the
 * next frame retries, and report FLUSHING vs NOT_NEGOTIATED accordingly */
3326 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
3327 if (G_UNLIKELY (priv->output_state_changed || (priv->output_state
3328 && needs_reconfigure))) {
3329 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
3330 gst_pad_mark_reconfigure (decoder->srcpad);
3331 if (GST_PAD_IS_FLUSHING (decoder->srcpad))
3332 ret = GST_FLOW_FLUSHING;
3334 ret = GST_FLOW_NOT_NEGOTIATED;
3339 gst_video_decoder_prepare_finish_frame (decoder, frame, FALSE);
/* Push any pending merged tag event downstream before the buffer */
3342 if (priv->tags_changed) {
3343 GstEvent *tags_event;
3345 tags_event = gst_video_decoder_create_merged_tags_event (decoder);
3347 if (tags_event != NULL)
3348 gst_video_decoder_push_event (decoder, tags_event);
3350 priv->tags_changed = FALSE;
3353 /* no buffer data means this frame is skipped */
3354 if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
3355 GST_DEBUG_OBJECT (decoder,
/* NOTE(review): message typo — should read "no output was produced";
 * left as-is here since this is a runtime string */
3356 "skipping frame %" GST_TIME_FORMAT " because not output was produced",
3357 GST_TIME_ARGS (frame->pts));
3361 /* Mark output as corrupted if the subclass requested so and we're either
3362 * still before the sync point after the request, or we don't even know the
3363 * frame number of the sync point yet (it is 0) */
3364 GST_OBJECT_LOCK (decoder);
3365 if (frame->system_frame_number <= priv->request_sync_point_frame_number
3366 && priv->request_sync_point_frame_number != REQUEST_SYNC_POINT_UNSET) {
3367 if (priv->request_sync_point_flags &
3368 GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT) {
3369 GST_DEBUG_OBJECT (decoder,
3370 "marking frame %" GST_TIME_FORMAT
3371 " as corrupted because it is still before the sync point",
3372 GST_TIME_ARGS (frame->pts));
3373 GST_VIDEO_CODEC_FRAME_FLAG_SET (frame,
3374 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3377 /* Reset to -1 to mark it as unset now that we've reached the frame */
3378 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_UNSET;
3380 GST_OBJECT_UNLOCK (decoder);
/* Optionally drop frames flagged corrupted (by us above, or by the
 * subclass on the output buffer) instead of pushing them */
3382 if (priv->discard_corrupted_frames
3383 && (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3384 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)
3385 || GST_BUFFER_FLAG_IS_SET (frame->output_buffer,
3386 GST_BUFFER_FLAG_CORRUPTED))) {
3387 GST_DEBUG_OBJECT (decoder,
3388 "skipping frame %" GST_TIME_FORMAT " because it is corrupted",
3389 GST_TIME_ARGS (frame->pts));
3393 /* We need a writable buffer for the metadata changes below */
3394 output_buffer = frame->output_buffer =
3395 gst_buffer_make_writable (frame->output_buffer);
3397 GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
/* Output buffers carry PTS/duration only; DTS and offsets are meaningless
 * on raw decoded video, so they are cleared */
3399 GST_BUFFER_PTS (output_buffer) = frame->pts;
3400 GST_BUFFER_DTS (output_buffer) = GST_CLOCK_TIME_NONE;
3401 GST_BUFFER_DURATION (output_buffer) = frame->duration;
3403 GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
3404 GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
3406 if (priv->discont) {
3407 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
3410 if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
3411 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED)) {
3412 GST_DEBUG_OBJECT (decoder,
3413 "marking frame %" GST_TIME_FORMAT " as corrupted",
3414 GST_TIME_ARGS (frame->pts));
3415 GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_CORRUPTED);
3418 gst_video_decoder_copy_metas (decoder, frame, frame->input_buffer,
3419 frame->output_buffer);
3421 /* Get an additional ref to the buffer, which is going to be pushed
3422 * downstream, the original ref is owned by the frame
3424 output_buffer = gst_buffer_ref (output_buffer);
3426 /* Release frame so the buffer is writable when we push it downstream
3427 * if possible, i.e. if the subclass does not hold additional references
3430 gst_video_decoder_release_frame (decoder, frame);
/* In reverse playback (except keyframe trickmode) buffers are queued and
 * flushed later in the correct order instead of being pushed immediately */
3433 if (decoder->output_segment.rate < 0.0
3434 && !(decoder->output_segment.flags & GST_SEEK_FLAG_TRICKMODE_KEY_UNITS)) {
3435 GST_LOG_OBJECT (decoder, "queued frame");
3436 priv->output_queued = g_list_prepend (priv->output_queued, output_buffer);
3438 ret = gst_video_decoder_clip_and_push_buf (decoder, output_buffer);
3443 gst_video_decoder_release_frame (decoder, frame);
3444 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3449 * gst_video_decoder_finish_subframe:
3450 * @decoder: a #GstVideoDecoder
3451 * @frame: (transfer full): the #GstVideoCodecFrame
3453 * Indicate that a subframe has been finished to be decoded
3454 * by the subclass. This method should be called for all subframes
3455 * except the last subframe where @gst_video_decoder_finish_frame
3456 * should be called instead.
3458 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
3463 gst_video_decoder_finish_subframe (GstVideoDecoder * decoder,
3464 GstVideoCodecFrame * frame)
/* Only valid in subframe mode; calling it otherwise is a programming error */
3466 g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
3467 GST_FLOW_NOT_SUPPORTED);
3469 GST_LOG_OBJECT (decoder, "finish subframe %p num=%d", frame->input_buffer,
3470 gst_video_decoder_get_input_subframe_index (decoder, frame));
3472 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* Count the processed subframe, then drop the caller's (transfer full)
 * reference; the frame itself lives on until finish_frame/drop_frame */
3473 frame->abidata.ABI.subframes_processed++;
3474 gst_video_codec_frame_unref (frame);
3476 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3481 /* With stream lock, takes the frame reference */
/* Clips @buf against the output segment, applies QoS dropping, updates
 * rate-estimation bookkeeping, and pushes the buffer on the srcpad.
 * Consumes the reference to @buf in all paths (push, drop, or clip-away).
 * NOTE(review): "takes the frame reference" above presumably means the
 * buffer reference passed in — confirm against callers. */
3482 static GstFlowReturn
3483 gst_video_decoder_clip_and_push_buf (GstVideoDecoder * decoder, GstBuffer * buf)
3485 GstFlowReturn ret = GST_FLOW_OK;
3486 GstVideoDecoderPrivate *priv = decoder->priv;
3487 guint64 start, stop;
3488 guint64 cstart, cstop;
3489 GstSegment *segment;
3490 GstClockTime duration;
3492 /* Check for clipping */
3493 start = GST_BUFFER_PTS (buf);
3494 duration = GST_BUFFER_DURATION (buf);
3496 /* store that we have valid decoded data */
3497 priv->had_output_data = TRUE;
3499 stop = GST_CLOCK_TIME_NONE;
3501 if (GST_CLOCK_TIME_IS_VALID (start) && GST_CLOCK_TIME_IS_VALID (duration)) {
3502 stop = start + duration;
3503 } else if (GST_CLOCK_TIME_IS_VALID (start)
3504 && !GST_CLOCK_TIME_IS_VALID (duration)) {
3505 /* If we don't clip away buffers that far before the segment we
3506 * can cause the pipeline to lockup. This can happen if audio is
3507 * properly clipped, and thus the audio sink does not preroll yet
3508 * but the video sink prerolls because we already outputted a
3509 * buffer here... and then queues run full.
3511 * In the worst case we will clip one buffer too many here now if no
3512 * framerate is given, no buffer duration is given and the actual
3513 * framerate is lower than 25fps */
/* 40ms is one frame at an assumed 25fps fallback rate */
3514 stop = start + 40 * GST_MSECOND;
3517 segment = &decoder->output_segment;
3518 if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
3519 GST_BUFFER_PTS (buf) = cstart;
/* Only shrink duration when the original duration was valid; the
 * synthesized 40ms stop above must not become a real duration */
3521 if (stop != GST_CLOCK_TIME_NONE && GST_CLOCK_TIME_IS_VALID (duration))
3522 GST_BUFFER_DURATION (buf) = cstop - cstart;
3524 GST_LOG_OBJECT (decoder,
3525 "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
3526 GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3527 " time %" GST_TIME_FORMAT,
3528 GST_TIME_ARGS (cstart),
3529 GST_TIME_ARGS (cstop),
3530 GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
3531 GST_TIME_ARGS (segment->time));
3533 GST_LOG_OBJECT (decoder,
3534 "dropping buffer outside segment: %" GST_TIME_FORMAT
3535 " %" GST_TIME_FORMAT
3536 " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
3537 " time %" GST_TIME_FORMAT,
3538 GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
3539 GST_TIME_ARGS (segment->start),
3540 GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
3541 /* only check and return EOS if upstream still
3542 * in the same segment and interested as such */
3543 if (decoder->priv->in_out_segment_sync) {
3544 if (segment->rate >= 0) {
3545 if (GST_BUFFER_PTS (buf) >= segment->stop)
3547 } else if (GST_BUFFER_PTS (buf) < segment->start) {
3551 gst_buffer_unref (buf);
3555 /* Is buffer too late (QoS) ? */
3556 if (priv->do_qos && GST_CLOCK_TIME_IS_VALID (priv->earliest_time)
3557 && GST_CLOCK_TIME_IS_VALID (cstart)) {
3558 GstClockTime deadline =
3559 gst_segment_to_running_time (segment, GST_FORMAT_TIME, cstart);
3560 if (GST_CLOCK_TIME_IS_VALID (deadline) && deadline < priv->earliest_time) {
3561 GST_WARNING_OBJECT (decoder,
3562 "Dropping frame due to QoS. start:%" GST_TIME_FORMAT " deadline:%"
3563 GST_TIME_FORMAT " earliest_time:%" GST_TIME_FORMAT,
3564 GST_TIME_ARGS (start), GST_TIME_ARGS (deadline),
3565 GST_TIME_ARGS (priv->earliest_time));
3566 gst_video_decoder_post_qos_drop (decoder, cstart);
3567 gst_buffer_unref (buf);
/* Next pushed buffer must carry DISCONT since we dropped one here */
3568 priv->discont = TRUE;
3573 /* Set DISCONT flag here ! */
3575 if (priv->discont) {
3576 GST_DEBUG_OBJECT (decoder, "Setting discont on output buffer");
3577 GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
3578 priv->discont = FALSE;
3581 /* update rate estimate */
3582 GST_OBJECT_LOCK (decoder);
3583 priv->bytes_out += gst_buffer_get_size (buf);
3584 if (GST_CLOCK_TIME_IS_VALID (duration)) {
3585 priv->time += duration;
3587 /* FIXME : Use difference between current and previous outgoing
3588 * timestamp, and relate to difference between current and previous
3590 /* better none than nothing valid */
3591 priv->time = GST_CLOCK_TIME_NONE;
3593 GST_OBJECT_UNLOCK (decoder);
3595 GST_DEBUG_OBJECT (decoder, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
3596 "PTS %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
3597 gst_buffer_get_size (buf),
3598 GST_TIME_ARGS (GST_BUFFER_PTS (buf)),
3599 GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
3601 /* we got data, so note things are looking up again, reduce
3602 * the error count, if there is one */
3603 if (G_UNLIKELY (priv->error_count))
3604 priv->error_count = 0;
3606 #ifndef GST_DISABLE_DEBUG
3607 if (G_UNLIKELY (priv->last_reset_time != GST_CLOCK_TIME_NONE)) {
3608 GstClockTime elapsed = gst_util_get_timestamp () - priv->last_reset_time;
3610 /* First buffer since reset, report how long we took */
3611 GST_INFO_OBJECT (decoder, "First buffer since flush took %" GST_TIME_FORMAT
3612 " to produce", GST_TIME_ARGS (elapsed));
3613 priv->last_reset_time = GST_CLOCK_TIME_NONE;
3617 /* release STREAM_LOCK not to block upstream
3618 * while pushing buffer downstream */
3619 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3620 ret = gst_pad_push (decoder->srcpad, buf);
3621 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3628 * gst_video_decoder_add_to_frame:
3629 * @decoder: a #GstVideoDecoder
3630 * @n_bytes: the number of bytes to add
3632 * Removes next @n_bytes of input data and adds it to currently parsed frame.
3635 gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
3637 GstVideoDecoderPrivate *priv = decoder->priv;
3640 GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
3645 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* On the first bytes of a new frame, record the byte offset where the
 * frame starts within the input stream (input offset minus what is
 * still sitting unconsumed in the input adapter) */
3646 if (gst_adapter_available (priv->output_adapter) == 0) {
3647 priv->frame_offset =
3648 priv->input_offset - gst_adapter_available (priv->input_adapter);
/* Move the bytes from the input adapter into the output adapter, which
 * accumulates the data of the frame currently being parsed */
3650 buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
3652 gst_adapter_push (priv->output_adapter, buf);
3653 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3657 * gst_video_decoder_get_pending_frame_size:
3658 * @decoder: a #GstVideoDecoder
3660 * Returns the number of bytes previously added to the current frame
3661 * by calling gst_video_decoder_add_to_frame().
3663 * Returns: The number of bytes pending for the current frame
3668 gst_video_decoder_get_pending_frame_size (GstVideoDecoder * decoder)
3670 GstVideoDecoderPrivate *priv = decoder->priv;
/* Take the stream lock only around the adapter query; the log below
 * uses the snapshotted value */
3673 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3674 ret = gst_adapter_available (priv->output_adapter);
3675 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* NOTE(review): format string lacks a space before "bytes" ("123bytes");
 * runtime string, so not changed here */
3677 GST_LOG_OBJECT (decoder, "Current pending frame has %" G_GSIZE_FORMAT "bytes",
/* Computes the nominal duration of one frame (fps_d / fps_n seconds) from
 * the current output state, or GST_CLOCK_TIME_NONE when no state or no
 * valid framerate is available yet. */
3684 gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
3685 GstVideoCodecFrame * frame)
3687 GstVideoCodecState *state = decoder->priv->output_state;
3689 /* it's possible that we don't have a state yet when we are dropping the
3690 * initial buffers */
3692 return GST_CLOCK_TIME_NONE;
3694 if (state->info.fps_d == 0 || state->info.fps_n == 0) {
3695 return GST_CLOCK_TIME_NONE;
3698 /* FIXME: For interlaced frames this needs to take into account
3699 * the number of valid fields in the frame
/* duration = GST_SECOND * fps_d / fps_n, computed without overflow */
3702 return gst_util_uint64_scale (GST_SECOND, state->info.fps_d,
3707 * gst_video_decoder_have_frame:
3708 * @decoder: a #GstVideoDecoder
3710 * Gathers all data collected for currently parsed frame, gathers corresponding
3711 * metadata and passes it along for further processing, i.e. @handle_frame.
3713 * Returns: a #GstFlowReturn
3716 gst_video_decoder_have_frame (GstVideoDecoder * decoder)
3718 GstVideoDecoderPrivate *priv = decoder->priv;
3721 GstClockTime pts, dts, duration;
3723 GstFlowReturn ret = GST_FLOW_OK;
3725 GST_LOG_OBJECT (decoder, "have_frame at offset %" G_GUINT64_FORMAT,
3726 priv->frame_offset);
3728 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
/* Drain everything accumulated by gst_video_decoder_add_to_frame() into a
 * single buffer; an empty (zero-sized) buffer is used when nothing was
 * collected */
3730 n_available = gst_adapter_available (priv->output_adapter);
3732 buffer = gst_adapter_take_buffer (priv->output_adapter, n_available);
3734 buffer = gst_buffer_new_and_alloc (0);
/* The collected buffer replaces the frame's input buffer; metas from the
 * previous input buffer are carried over first */
3737 if (priv->current_frame->input_buffer) {
3738 gst_video_decoder_copy_metas (decoder, priv->current_frame,
3739 priv->current_frame->input_buffer, buffer);
3740 gst_buffer_unref (priv->current_frame->input_buffer);
3742 priv->current_frame->input_buffer = buffer;
/* Recover the timestamps/flags that were recorded for the byte offset at
 * which this frame started */
3744 gst_video_decoder_get_buffer_info_at_offset (decoder,
3745 priv->frame_offset, &pts, &dts, &duration, &flags);
3747 GST_BUFFER_PTS (buffer) = pts;
3748 GST_BUFFER_DTS (buffer) = dts;
3749 GST_BUFFER_DURATION (buffer) = duration;
3750 GST_BUFFER_FLAGS (buffer) = flags;
3752 GST_LOG_OBJECT (decoder, "collected frame size %d, "
3753 "PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dur %"
3754 GST_TIME_FORMAT, n_available, GST_TIME_ARGS (pts), GST_TIME_ARGS (dts),
3755 GST_TIME_ARGS (duration));
/* A buffer without DELTA_UNIT is a keyframe: mark frame as sync point */
3757 if (!GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DELTA_UNIT)) {
3758 GST_DEBUG_OBJECT (decoder, "Marking as sync point");
3759 GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
3762 if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_CORRUPTED)) {
3763 GST_DEBUG_OBJECT (decoder, "Marking as corrupted");
3764 GST_VIDEO_CODEC_FRAME_FLAG_SET (priv->current_frame,
3765 GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED);
3768 /* In reverse playback, just capture and queue frames for later processing */
3769 if (decoder->input_segment.rate < 0.0) {
3770 priv->parse_gather =
3771 g_list_prepend (priv->parse_gather, priv->current_frame);
3772 priv->current_frame = NULL;
3774 GstVideoCodecFrame *frame = priv->current_frame;
3775 frame->abidata.ABI.num_subframes++;
3776 /* In subframe mode, we keep a ref for ourselves
3777 * as this frame will be kept during the data collection
3778 * in parsed mode. The frame reference will be released by
3779 * finish_(sub)frame or drop_(sub)frame.*/
3780 if (gst_video_decoder_get_subframe_mode (decoder))
3781 gst_video_codec_frame_ref (priv->current_frame);
3783 priv->current_frame = NULL;
3785 /* Decode the frame, which gives away our ref */
3786 ret = gst_video_decoder_decode_frame (decoder, frame);
3789 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3794 /* Pass the frame in priv->current_frame through the
3795 * handle_frame() callback for decoding and passing to gvd_finish_frame(),
3796 * or dropping by passing to gvd_drop_frame() */
/* Called with the stream lock held; consumes the caller's frame reference
 * (it is either handed to the subclass or released on the discard path). */
3797 static GstFlowReturn
3798 gst_video_decoder_decode_frame (GstVideoDecoder * decoder,
3799 GstVideoCodecFrame * frame)
3801 GstVideoDecoderPrivate *priv = decoder->priv;
3802 GstVideoDecoderClass *decoder_class;
3803 GstFlowReturn ret = GST_FLOW_OK;
3805 decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
3807 /* FIXME : This should only have to be checked once (either the subclass has an
3808 * implementation, or it doesn't) */
3809 g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
3810 g_return_val_if_fail (frame != NULL, GST_FLOW_ERROR);
/* Seed frame timing from the (parsed) input buffer */
3812 frame->pts = GST_BUFFER_PTS (frame->input_buffer);
3813 frame->dts = GST_BUFFER_DTS (frame->input_buffer);
3814 frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
3816 /* For keyframes, PTS = DTS + constant_offset, usually 0 to 3 frame
3818 /* FIXME upstream can be quite wrong about the keyframe aspect,
3819 * so we could be going off here as well,
3820 * maybe let subclass decide if it really is/was a keyframe */
3821 if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
3822 priv->distance_from_sync = 0;
/* Sync point received: stop discarding input and, if a sync-point request
 * is pending, record this frame's number as the sync point */
3824 GST_OBJECT_LOCK (decoder);
3825 priv->request_sync_point_flags &=
3826 ~GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT;
3827 if (priv->request_sync_point_frame_number == REQUEST_SYNC_POINT_PENDING)
3828 priv->request_sync_point_frame_number = frame->system_frame_number;
3829 GST_OBJECT_UNLOCK (decoder);
3831 if (GST_CLOCK_TIME_IS_VALID (frame->pts)
3832 && GST_CLOCK_TIME_IS_VALID (frame->dts)) {
3833 /* just in case they are not equal as might ideally be,
3834 * e.g. quicktime has a (positive) delta approach */
3835 priv->pts_delta = frame->pts - frame->dts;
3836 GST_DEBUG_OBJECT (decoder, "PTS delta %d ms",
3837 (gint) (priv->pts_delta / GST_MSECOND));
/* Discard input until a sync point arrives if the subclass needs one
 * (distance_from_sync == -1 means none seen yet) or explicitly requested
 * input discarding */
3840 GST_OBJECT_LOCK (decoder);
3841 if ((priv->needs_sync_point && priv->distance_from_sync == -1)
3842 || (priv->request_sync_point_flags &
3843 GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT)) {
3844 GST_WARNING_OBJECT (decoder,
3845 "Subclass requires a sync point but we didn't receive one yet, discarding input");
3846 GST_OBJECT_UNLOCK (decoder);
3847 gst_video_decoder_release_frame (decoder, frame);
3850 GST_OBJECT_UNLOCK (decoder);
3852 priv->distance_from_sync++;
3855 frame->distance_from_sync = priv->distance_from_sync;
/* Remember original input timestamps of the first subframe; these back the
 * ts/ts2 ABI fields used for timestamp reconstruction later */
3857 if (frame->abidata.ABI.num_subframes == 1) {
3858 frame->abidata.ABI.ts = frame->dts;
3859 frame->abidata.ABI.ts2 = frame->pts;
3862 GST_LOG_OBJECT (decoder,
3863 "frame %p PTS %" GST_TIME_FORMAT ", DTS %" GST_TIME_FORMAT ", dist %d",
3864 frame, GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
3865 frame->distance_from_sync);
3866 /* FIXME: suboptimal way to add a unique frame to the list, in case of subframe mode. */
3867 if (!g_queue_find (&priv->frames, frame)) {
3868 g_queue_push_tail (&priv->frames, gst_video_codec_frame_ref (frame));
3870 GST_LOG_OBJECT (decoder,
3871 "Do not add an existing frame used to decode subframes");
3874 if (priv->frames.length > 10) {
3875 GST_DEBUG_OBJECT (decoder, "decoder frame list getting long: %d frames,"
3876 "possible internal leaking?", priv->frames.length);
3880 gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
3883 /* do something with frame */
3884 ret = decoder_class->handle_frame (decoder, frame);
3885 if (ret != GST_FLOW_OK)
3886 GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
3888 /* the frame has either been added to parse_gather or sent to
3889 handle frame so there is no need to unref it */
3895 * gst_video_decoder_get_output_state:
3896 * @decoder: a #GstVideoDecoder
3898 * Get the #GstVideoCodecState currently describing the output stream.
3900 * Returns: (transfer full): #GstVideoCodecState describing format of video data.
3902 GstVideoCodecState *
3903 gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
3905 GstVideoCodecState *state = NULL;
/* Take a ref under the object lock so the state cannot be replaced
 * concurrently; caller owns the returned reference (may be NULL) */
3907 GST_OBJECT_LOCK (decoder);
3908 if (decoder->priv->output_state)
3909 state = gst_video_codec_state_ref (decoder->priv->output_state);
3910 GST_OBJECT_UNLOCK (decoder);
3915 static GstVideoCodecState *
/* Common implementation behind gst_video_decoder_set_output_state() and
 * gst_video_decoder_set_interlaced_output_state(): builds a new output
 * state, installs it as priv->output_state and precomputes the QoS frame
 * duration. Returns (transfer full) the new state. */
3916 _set_interlaced_output_state (GstVideoDecoder * decoder,
3917 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
3918 guint height, GstVideoCodecState * reference, gboolean copy_interlace_mode)
3920 GstVideoDecoderPrivate *priv = decoder->priv;
3921 GstVideoCodecState *state;
/* When copying the interlace mode from @reference, the caller must pass
 * PROGRESSIVE as the (ignored) explicit mode */
3923 g_assert ((copy_interlace_mode
3924 && interlace_mode == GST_VIDEO_INTERLACE_MODE_PROGRESSIVE)
3925 || !copy_interlace_mode);
3927 GST_DEBUG_OBJECT (decoder,
3928 "fmt:%d, width:%d, height:%d, interlace-mode: %s, reference:%p", fmt,
3929 width, height, gst_video_interlace_mode_to_string (interlace_mode),
3932 /* Create the new output state */
3934 _new_output_state (fmt, interlace_mode, width, height, reference,
3935 copy_interlace_mode);
3939 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
3941 GST_OBJECT_LOCK (decoder);
3942 /* Replace existing output state by new one */
3943 if (priv->output_state)
3944 gst_video_codec_state_unref (priv->output_state);
3945 priv->output_state = gst_video_codec_state_ref (state);
/* Precompute per-frame duration (fps_d/fps_n seconds) for QoS purposes;
 * 0 when no valid framerate is known */
3947 if (priv->output_state != NULL && priv->output_state->info.fps_n > 0) {
3948 priv->qos_frame_duration =
3949 gst_util_uint64_scale (GST_SECOND, priv->output_state->info.fps_d,
3950 priv->output_state->info.fps_n);
3952 priv->qos_frame_duration = 0;
3954 priv->output_state_changed = TRUE;
3955 GST_OBJECT_UNLOCK (decoder);
3957 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
3963 * gst_video_decoder_set_output_state:
3964 * @decoder: a #GstVideoDecoder
3965 * @fmt: a #GstVideoFormat
3966 * @width: The width in pixels
3967 * @height: The height in pixels
3968 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
3970 * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
3971 * as the output state for the decoder.
3972 * Any previously set output state on @decoder will be replaced by the newly
3975 * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
3976 * or framerate) from an existing #GstVideoCodecState, it can be provided as a
3979 * If the subclass wishes to override some fields from the output state (like
3980 * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
3982 * The new output state will only take effect (set on pads and buffers) starting
3983 * from the next call to #gst_video_decoder_finish_frame().
3985 * Returns: (transfer full): the newly configured output state.
3987 GstVideoCodecState *
3988 gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
3989 GstVideoFormat fmt, guint width, guint height,
3990 GstVideoCodecState * reference)
/* Delegates to the common helper; interlace mode is copied from
 * @reference (copy_interlace_mode = TRUE), defaulting to progressive */
3992 return _set_interlaced_output_state (decoder, fmt,
3993 GST_VIDEO_INTERLACE_MODE_PROGRESSIVE, width, height, reference, TRUE);
3997 * gst_video_decoder_set_interlaced_output_state:
3998 * @decoder: a #GstVideoDecoder
3999 * @fmt: a #GstVideoFormat
4000 * @width: The width in pixels
4001 * @height: The height in pixels
4002 * @interlace_mode: A #GstVideoInterlaceMode
4003 * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
4005 * Same as #gst_video_decoder_set_output_state() but also allows you to also set
4006 * the interlacing mode.
4008 * Returns: (transfer full): the newly configured output state.
4012 GstVideoCodecState *
4013 gst_video_decoder_set_interlaced_output_state (GstVideoDecoder * decoder,
4014 GstVideoFormat fmt, GstVideoInterlaceMode interlace_mode, guint width,
4015 guint height, GstVideoCodecState * reference)
/* Explicit @interlace_mode is used as-is (copy_interlace_mode = FALSE) */
4017 return _set_interlaced_output_state (decoder, fmt, interlace_mode, width,
4018 height, reference, FALSE);
4023 * gst_video_decoder_get_oldest_frame:
4024 * @decoder: a #GstVideoDecoder
4026 * Get the oldest pending unfinished #GstVideoCodecFrame
4028 * Returns: (transfer full): oldest pending unfinished #GstVideoCodecFrame.
4030 GstVideoCodecFrame *
4031 gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
4033 GstVideoCodecFrame *frame = NULL;
/* priv->frames is ordered oldest-first; ref the head under the stream
 * lock so it cannot be released concurrently. May return NULL. */
4035 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4036 if (decoder->priv->frames.head)
4037 frame = gst_video_codec_frame_ref (decoder->priv->frames.head->data);
4038 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4040 return (GstVideoCodecFrame *) frame;
4044 * gst_video_decoder_get_frame:
4045 * @decoder: a #GstVideoDecoder
4046 * @frame_number: system_frame_number of a frame
4048 * Get a pending unfinished #GstVideoCodecFrame
4050 * Returns: (transfer full): pending unfinished #GstVideoCodecFrame identified by @frame_number.
4052 GstVideoCodecFrame *
4053 gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
4056 GstVideoCodecFrame *frame = NULL;
4058 GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
/* Linear scan of the pending-frame queue under the stream lock; returns
 * a new reference to the match, or NULL if not found */
4060 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4061 for (g = decoder->priv->frames.head; g; g = g->next) {
4062 GstVideoCodecFrame *tmp = g->data;
4064 if (tmp->system_frame_number == frame_number) {
4065 frame = gst_video_codec_frame_ref (tmp);
4069 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4075 * gst_video_decoder_get_frames:
4076 * @decoder: a #GstVideoDecoder
4078 * Get all pending unfinished #GstVideoCodecFrame
4080 * Returns: (transfer full) (element-type GstVideoCodecFrame): pending unfinished #GstVideoCodecFrame.
4083 gst_video_decoder_get_frames (GstVideoDecoder * decoder)
/* Deep-copy the queue while holding the stream lock, taking a ref on each
 * frame; caller owns both the list and the frame references */
4087 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4089 g_list_copy_deep (decoder->priv->frames.head,
4090 (GCopyFunc) gst_video_codec_frame_ref, NULL);
4091 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
/* Default decide_allocation vfunc: picks (or creates) a buffer pool and
 * allocator from the downstream ALLOCATION query results, configures the
 * pool for the negotiated caps, and writes the decision back into @query.
 * Falls back to a fresh GstVideoBufferPool when downstream's pool cannot
 * accept the required configuration. */
4097 gst_video_decoder_decide_allocation_default (GstVideoDecoder * decoder,
4100 GstCaps *outcaps = NULL;
4101 GstBufferPool *pool = NULL;
4102 guint size, min, max;
4103 GstAllocator *allocator = NULL;
4104 GstAllocationParams params;
4105 GstStructure *config;
4106 gboolean update_pool, update_allocator;
4109 gst_query_parse_allocation (query, &outcaps, NULL);
4110 gst_video_info_init (&vinfo);
4112 gst_video_info_from_caps (&vinfo, outcaps);
4114 /* we got configuration from our peer or the decide_allocation method,
/* NOTE(review): several lines below contain "¶ms", which looks like a
 * mis-encoded "&params" (HTML entity mangling) — verify against upstream */
4116 if (gst_query_get_n_allocation_params (query) > 0) {
4117 /* try the allocator */
4118 gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
4119 update_allocator = TRUE;
4122 gst_allocation_params_init (¶ms);
4123 update_allocator = FALSE;
4126 if (gst_query_get_n_allocation_pools (query) > 0) {
4127 gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
/* Ensure the pool's buffer size is at least what a full frame requires */
4128 size = MAX (size, vinfo.size);
4135 update_pool = FALSE;
4139 /* no pool, we can make our own */
4140 GST_DEBUG_OBJECT (decoder, "no pool, making new pool");
4141 pool = gst_video_buffer_pool_new ();
4145 config = gst_buffer_pool_get_config (pool);
4146 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4147 gst_buffer_pool_config_set_allocator (config, allocator, ¶ms);
4149 GST_DEBUG_OBJECT (decoder,
4150 "setting config %" GST_PTR_FORMAT " in pool %" GST_PTR_FORMAT, config,
4152 if (!gst_buffer_pool_set_config (pool, config)) {
4153 config = gst_buffer_pool_get_config (pool);
4155 /* If change are not acceptable, fallback to generic pool */
4156 if (!gst_buffer_pool_config_validate_params (config, outcaps, size, min,
4158 GST_DEBUG_OBJECT (decoder, "unsupported pool, making new pool");
4160 gst_object_unref (pool);
4161 pool = gst_video_buffer_pool_new ();
4162 gst_buffer_pool_config_set_params (config, outcaps, size, min, max);
4163 gst_buffer_pool_config_set_allocator (config, allocator, ¶ms);
4166 if (!gst_buffer_pool_set_config (pool, config))
/* Record the decision in the query: update the downstream-provided
 * entries in place, or append new ones */
4170 if (update_allocator)
4171 gst_query_set_nth_allocation_param (query, 0, allocator, ¶ms);
4173 gst_query_add_allocation_param (query, allocator, ¶ms);
4175 gst_object_unref (allocator);
4178 gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
4180 gst_query_add_allocation_pool (query, pool, size, min, max);
4183 gst_object_unref (pool);
/* Error path: drop remaining refs and post an element error */
4189 gst_object_unref (allocator);
4191 gst_object_unref (pool);
4192 GST_ELEMENT_ERROR (decoder, RESOURCE, SETTINGS,
4193 ("Failed to configure the buffer pool"),
4194 ("Configuration is most likely invalid, please report this issue."));
4199 gst_video_decoder_propose_allocation_default (GstVideoDecoder * decoder,
/* Runs the downstream ALLOCATION query for @caps, lets the subclass'
 * decide_allocation vfunc pick allocator/pool, then installs the results
 * in priv->allocator / priv->pool and activates the pool.
 * Returns TRUE on success. */
4206 gst_video_decoder_negotiate_pool (GstVideoDecoder * decoder, GstCaps * caps)
4208 GstVideoDecoderClass *klass;
4209 GstQuery *query = NULL;
4210 GstBufferPool *pool = NULL;
4211 GstAllocator *allocator;
4212 GstAllocationParams params;
4213 gboolean ret = TRUE;
4215 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4217 query = gst_query_new_allocation (caps, TRUE);
4219 GST_DEBUG_OBJECT (decoder, "do query ALLOCATION");
/* A failed peer query is not fatal: the subclass still decides with an
 * empty query below */
4221 if (!gst_pad_peer_query (decoder->srcpad, query)) {
4222 GST_DEBUG_OBJECT (decoder, "didn't get downstream ALLOCATION hints");
4225 g_assert (klass->decide_allocation != NULL);
4226 ret = klass->decide_allocation (decoder, query);
4228 GST_DEBUG_OBJECT (decoder, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, ret,
4232 goto no_decide_allocation;
4234 /* we got configuration from our peer or the decide_allocation method,
/* NOTE(review): "¶ms" below appears to be a mis-encoded "&params" —
 * verify against the upstream source */
4236 if (gst_query_get_n_allocation_params (query) > 0) {
4237 gst_query_parse_nth_allocation_param (query, 0, &allocator, ¶ms);
4240 gst_allocation_params_init (¶ms);
4243 if (gst_query_get_n_allocation_pools (query) > 0)
4244 gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
4247 gst_object_unref (allocator);
4249 goto no_decide_allocation;
/* Swap in the newly decided allocator and pool */
4252 if (decoder->priv->allocator)
4253 gst_object_unref (decoder->priv->allocator);
4254 decoder->priv->allocator = allocator;
4255 decoder->priv->params = params;
4257 if (decoder->priv->pool) {
4258 /* do not set the bufferpool to inactive here, it will be done
4259 * on its finalize function. As videodecoder do late renegotiation
4260 * it might happen that some element downstream is already using this
4261 * same bufferpool and deactivating it will make it fail.
4262 * Happens when a downstream element changes from passthrough to
4263 * non-passthrough and gets this same bufferpool to use */
4264 GST_DEBUG_OBJECT (decoder, "unref pool %" GST_PTR_FORMAT,
4265 decoder->priv->pool);
4266 gst_object_unref (decoder->priv->pool);
4268 decoder->priv->pool = pool;
4271 GST_DEBUG_OBJECT (decoder, "activate pool %" GST_PTR_FORMAT, pool);
4272 gst_buffer_pool_set_active (pool, TRUE);
4276 gst_query_unref (query);
4281 no_decide_allocation:
4283 GST_WARNING_OBJECT (decoder, "Subclass failed to decide allocation");
/* Default negotiate vfunc: finalizes the output caps from the current
 * output state (propagating HDR-related fields from the sink caps), pushes
 * pending pre-caps serialized events, sets the caps on the srcpad and
 * negotiates the buffer pool. Returns TRUE on success. */
4289 gst_video_decoder_negotiate_default (GstVideoDecoder * decoder)
4291 GstVideoCodecState *state = decoder->priv->output_state;
4292 gboolean ret = TRUE;
4293 GstVideoCodecFrame *frame;
/* No output state yet: only negotiate the pool, without output caps */
4298 GST_DEBUG_OBJECT (decoder,
4299 "Trying to negotiate the pool with out setting the o/p format");
4300 ret = gst_video_decoder_negotiate_pool (decoder, NULL);
4304 g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
4305 g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);
4307 /* If the base class didn't set any multiview params, assume mono
4309 if (GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) ==
4310 GST_VIDEO_MULTIVIEW_MODE_NONE) {
4311 GST_VIDEO_INFO_MULTIVIEW_MODE (&state->info) =
4312 GST_VIDEO_MULTIVIEW_MODE_MONO;
4313 GST_VIDEO_INFO_MULTIVIEW_FLAGS (&state->info) =
4314 GST_VIDEO_MULTIVIEW_FLAGS_NONE;
4317 GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
4318 state->info.par_n, state->info.par_d,
4319 state->info.fps_n, state->info.fps_d);
4321 if (state->caps == NULL)
4322 state->caps = gst_video_info_to_caps (&state->info);
/* Carry HDR metadata fields from the input caps to the output caps,
 * preferring what upstream signalled over anything derived locally */
4324 incaps = gst_pad_get_current_caps (GST_VIDEO_DECODER_SINK_PAD (decoder));
4326 GstStructure *in_struct;
4328 in_struct = gst_caps_get_structure (incaps, 0);
4329 if (gst_structure_has_field (in_struct, "mastering-display-info") ||
4330 gst_structure_has_field (in_struct, "content-light-level")) {
4333 /* prefer upstream information */
4334 state->caps = gst_caps_make_writable (state->caps);
4335 if ((s = gst_structure_get_string (in_struct, "mastering-display-info"))) {
4336 gst_caps_set_simple (state->caps,
4337 "mastering-display-info", G_TYPE_STRING, s, NULL);
4340 if ((s = gst_structure_get_string (in_struct, "content-light-level"))) {
4341 gst_caps_set_simple (state->caps,
4342 "content-light-level", G_TYPE_STRING, s, NULL);
4345 if (gst_structure_has_field (in_struct, "hdr-format")) {
4347 state->caps = gst_caps_make_writable (state->caps);
4349 if ((s = gst_structure_get_string (in_struct, "hdr-format"))) {
4350 gst_caps_set_simple (state->caps, "hdr-format", G_TYPE_STRING, s, NULL);
4354 gst_caps_unref (incaps);
4357 if (state->allocation_caps == NULL)
4358 state->allocation_caps = gst_caps_ref (state->caps);
4360 GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);
4362 /* Push all pending pre-caps events of the oldest frame before
/* Events serialized before CAPS (event type < GST_EVENT_CAPS) must go
 * downstream before the new caps; iterate the stored list oldest-first
 * (it is kept in reverse order, hence g_list_last) */
4364 frame = decoder->priv->frames.head ? decoder->priv->frames.head->data : NULL;
4365 if (frame || decoder->priv->current_frame_events) {
4369 events = &frame->events;
4371 events = &decoder->priv->current_frame_events;
4374 for (l = g_list_last (*events); l;) {
4375 GstEvent *event = GST_EVENT (l->data);
4378 if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
4379 gst_video_decoder_push_event (decoder, event);
4382 *events = g_list_delete_link (*events, tmp);
/* Avoid a redundant caps event if the srcpad already has equal caps */
4389 prevcaps = gst_pad_get_current_caps (decoder->srcpad);
4390 if (!prevcaps || !gst_caps_is_equal (prevcaps, state->caps)) {
4392 GST_DEBUG_OBJECT (decoder, "decoder src pad has currently NULL caps");
4394 ret = gst_pad_set_caps (decoder->srcpad, state->caps);
4397 GST_DEBUG_OBJECT (decoder,
4398 "current src pad and output state caps are the same");
4401 gst_caps_unref (prevcaps);
4405 decoder->priv->output_state_changed = FALSE;
4406 /* Negotiate pool */
4407 ret = gst_video_decoder_negotiate_pool (decoder, state->allocation_caps);
4414 gst_video_decoder_negotiate_unlocked (GstVideoDecoder * decoder)
4416 GstVideoDecoderClass *klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4417 gboolean ret = TRUE;
4419 if (G_LIKELY (klass->negotiate))
4420 ret = klass->negotiate (decoder);
4426 * gst_video_decoder_negotiate:
4427 * @decoder: a #GstVideoDecoder
4429 * Negotiate with downstream elements to currently configured #GstVideoCodecState.
4430 * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
4433 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
4436 gst_video_decoder_negotiate (GstVideoDecoder * decoder)
4438 GstVideoDecoderClass *klass;
4439 gboolean ret = TRUE;
4441 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), FALSE);
4443 klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
4445 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4446 gst_pad_check_reconfigure (decoder->srcpad);
4447 if (klass->negotiate) {
4448 ret = klass->negotiate (decoder);
4450 gst_pad_mark_reconfigure (decoder->srcpad);
4452 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4458 * gst_video_decoder_allocate_output_buffer:
4459 * @decoder: a #GstVideoDecoder
4461 * Helper function that allocates a buffer to hold a video frame for @decoder's
4462 * current #GstVideoCodecState.
4464 * You should use gst_video_decoder_allocate_output_frame() instead of this
4465 * function, if possible at all.
4467 * Returns: (transfer full): allocated buffer, or NULL if no buffer could be
4468 * allocated (e.g. when downstream is flushing or shutting down)
4471 gst_video_decoder_allocate_output_buffer (GstVideoDecoder * decoder)
4474 GstBuffer *buffer = NULL;
4475 gboolean needs_reconfigure = FALSE;
4477 GST_DEBUG ("alloc src buffer");
4479 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4480 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4481 if (G_UNLIKELY (!decoder->priv->output_state
4482 || decoder->priv->output_state_changed || needs_reconfigure)) {
4483 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
4484 if (decoder->priv->output_state) {
4485 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, fallback allocation");
4486 gst_pad_mark_reconfigure (decoder->srcpad);
4489 GST_DEBUG_OBJECT (decoder, "Failed to negotiate, output_buffer=NULL");
4490 goto failed_allocation;
4495 flow = gst_buffer_pool_acquire_buffer (decoder->priv->pool, &buffer, NULL);
4497 if (flow != GST_FLOW_OK) {
4498 GST_INFO_OBJECT (decoder, "couldn't allocate output buffer, flow %s",
4499 gst_flow_get_name (flow));
4500 if (decoder->priv->output_state && decoder->priv->output_state->info.size)
4503 goto failed_allocation;
4505 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4510 GST_INFO_OBJECT (decoder,
4511 "Fallback allocation, creating new buffer which doesn't belongs to any buffer pool");
4513 gst_buffer_new_allocate (NULL, decoder->priv->output_state->info.size,
4517 GST_ERROR_OBJECT (decoder, "Failed to allocate the buffer..");
4518 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4524 * gst_video_decoder_allocate_output_frame:
4525 * @decoder: a #GstVideoDecoder
4526 * @frame: a #GstVideoCodecFrame
4528 * Helper function that allocates a buffer to hold a video frame for @decoder's
4529 * current #GstVideoCodecState. Subclass should already have configured video
4530 * state and set src pad caps.
4532 * The buffer allocated here is owned by the frame and you should only
4533 * keep references to the frame, not the buffer.
4535 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4538 gst_video_decoder_allocate_output_frame (GstVideoDecoder *
4539 decoder, GstVideoCodecFrame * frame)
4541 return gst_video_decoder_allocate_output_frame_with_params (decoder, frame,
4546 * gst_video_decoder_allocate_output_frame_with_params:
4547 * @decoder: a #GstVideoDecoder
4548 * @frame: a #GstVideoCodecFrame
4549 * @params: a #GstBufferPoolAcquireParams
4551 * Same as #gst_video_decoder_allocate_output_frame except it allows passing
4552 * #GstBufferPoolAcquireParams to the sub call gst_buffer_pool_acquire_buffer.
4554 * Returns: %GST_FLOW_OK if an output buffer could be allocated
4559 gst_video_decoder_allocate_output_frame_with_params (GstVideoDecoder *
4560 decoder, GstVideoCodecFrame * frame, GstBufferPoolAcquireParams * params)
4562 GstFlowReturn flow_ret;
4563 GstVideoCodecState *state;
4565 gboolean needs_reconfigure = FALSE;
4567 g_return_val_if_fail (decoder->priv->output_state, GST_FLOW_NOT_NEGOTIATED);
4568 g_return_val_if_fail (frame->output_buffer == NULL, GST_FLOW_ERROR);
4570 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4572 state = decoder->priv->output_state;
4573 if (state == NULL) {
4574 g_warning ("Output state should be set before allocating frame");
4577 num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
4578 if (num_bytes == 0) {
4579 g_warning ("Frame size should not be 0");
4583 needs_reconfigure = gst_pad_check_reconfigure (decoder->srcpad);
4584 if (G_UNLIKELY (decoder->priv->output_state_changed || needs_reconfigure)) {
4585 if (!gst_video_decoder_negotiate_unlocked (decoder)) {
4586 gst_pad_mark_reconfigure (decoder->srcpad);
4587 if (GST_PAD_IS_FLUSHING (decoder->srcpad)) {
4588 GST_DEBUG_OBJECT (decoder,
4589 "Failed to negotiate a pool: pad is flushing");
4591 } else if (!decoder->priv->pool || decoder->priv->output_state_changed) {
4592 GST_DEBUG_OBJECT (decoder,
4593 "Failed to negotiate a pool and no previous pool to reuse");
4596 GST_DEBUG_OBJECT (decoder,
4597 "Failed to negotiate a pool, falling back to the previous pool");
4602 GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
4604 flow_ret = gst_buffer_pool_acquire_buffer (decoder->priv->pool,
4605 &frame->output_buffer, params);
4607 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4612 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4613 return GST_FLOW_FLUSHING;
4616 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4617 return GST_FLOW_ERROR;
4621 * gst_video_decoder_get_max_decode_time:
4622 * @decoder: a #GstVideoDecoder
4623 * @frame: a #GstVideoCodecFrame
4625 * Determines maximum possible decoding time for @frame that will
4626 * allow it to decode and arrive in time (as determined by QoS events).
4627 * In particular, a negative result means decoding in time is no longer possible
4628 * and should therefore occur as soon/skippy as possible.
4630 * Returns: max decoding time.
4633 gst_video_decoder_get_max_decode_time (GstVideoDecoder *
4634 decoder, GstVideoCodecFrame * frame)
4636 GstClockTimeDiff deadline;
4637 GstClockTime earliest_time;
4639 GST_OBJECT_LOCK (decoder);
4640 earliest_time = decoder->priv->earliest_time;
4641 if (GST_CLOCK_TIME_IS_VALID (earliest_time)
4642 && GST_CLOCK_TIME_IS_VALID (frame->deadline))
4643 deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
4645 deadline = G_MAXINT64;
4647 GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
4648 ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_STIME_FORMAT,
4649 GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
4650 GST_STIME_ARGS (deadline));
4652 GST_OBJECT_UNLOCK (decoder);
4658 * gst_video_decoder_get_qos_proportion:
4659 * @decoder: a #GstVideoDecoder
4660 * current QoS proportion, or %NULL
4662 * Returns: The current QoS proportion.
4667 gst_video_decoder_get_qos_proportion (GstVideoDecoder * decoder)
4671 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), 1.0);
4673 GST_OBJECT_LOCK (decoder);
4674 proportion = decoder->priv->proportion;
4675 GST_OBJECT_UNLOCK (decoder);
4681 _gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
4682 GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
4683 const gchar * function, gint line)
4686 GST_WARNING_OBJECT (dec, "error: %s", txt);
4688 GST_WARNING_OBJECT (dec, "error: %s", dbg);
4689 dec->priv->error_count += weight;
4690 dec->priv->discont = TRUE;
4691 if (dec->priv->max_errors >= 0 &&
4692 dec->priv->error_count > dec->priv->max_errors) {
4693 gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
4694 domain, code, txt, dbg, file, function, line);
4695 return GST_FLOW_ERROR;
4704 * gst_video_decoder_set_max_errors:
4705 * @dec: a #GstVideoDecoder
4706 * @num: max tolerated errors
4708 * Sets numbers of tolerated decoder errors, where a tolerated one is then only
4709 * warned about, but more than tolerated will lead to fatal error. You can set
4710 * -1 for never returning fatal errors. Default is set to
4711 * GST_VIDEO_DECODER_MAX_ERRORS.
4713 * The '-1' option was added in 1.4
4716 gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
4718 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4720 dec->priv->max_errors = num;
4724 * gst_video_decoder_get_max_errors:
4725 * @dec: a #GstVideoDecoder
4727 * Returns: currently configured decoder tolerated error count.
4730 gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
4732 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4734 return dec->priv->max_errors;
4738 * gst_video_decoder_set_needs_format:
4739 * @dec: a #GstVideoDecoder
4740 * @enabled: new state
4742 * Configures decoder format needs. If enabled, subclass needs to be
4743 * negotiated with format caps before it can process any data. It will then
4744 * never be handed any data before it has been configured.
4745 * Otherwise, it might be handed data without having been configured and
4746 * is then expected being able to do so either by default
4747 * or based on the input data.
4752 gst_video_decoder_set_needs_format (GstVideoDecoder * dec, gboolean enabled)
4754 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4756 dec->priv->needs_format = enabled;
4760 * gst_video_decoder_get_needs_format:
4761 * @dec: a #GstVideoDecoder
4763 * Queries decoder required format handling.
4765 * Returns: %TRUE if required format handling is enabled.
4770 gst_video_decoder_get_needs_format (GstVideoDecoder * dec)
4774 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
4776 result = dec->priv->needs_format;
4782 * gst_video_decoder_set_packetized:
4783 * @decoder: a #GstVideoDecoder
4784 * @packetized: whether the input data should be considered as packetized.
4786 * Allows baseclass to consider input data as packetized or not. If the
4787 * input is packetized, then the @parse method will not be called.
4790 gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
4791 gboolean packetized)
4793 decoder->priv->packetized = packetized;
4797 * gst_video_decoder_get_packetized:
4798 * @decoder: a #GstVideoDecoder
4800 * Queries whether input data is considered packetized or not by the
4803 * Returns: TRUE if input data is considered packetized.
4806 gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
4808 return decoder->priv->packetized;
4812 * gst_video_decoder_have_last_subframe:
4813 * @decoder: a #GstVideoDecoder
4814 * @frame: (transfer none): the #GstVideoCodecFrame to update
4816 * Indicates that the last subframe has been processed by the decoder
4817 * in @frame. This will release the current frame in video decoder
4818 * allowing to receive new frames from upstream elements. This method
4819 * must be called in the subclass @handle_frame callback.
4821 * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
4826 gst_video_decoder_have_last_subframe (GstVideoDecoder * decoder,
4827 GstVideoCodecFrame * frame)
4829 g_return_val_if_fail (gst_video_decoder_get_subframe_mode (decoder),
4831 /* unref once from the list */
4832 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
4833 if (decoder->priv->current_frame == frame) {
4834 gst_video_codec_frame_unref (decoder->priv->current_frame);
4835 decoder->priv->current_frame = NULL;
4837 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
4843 * gst_video_decoder_set_subframe_mode:
4844 * @decoder: a #GstVideoDecoder
4845 * @subframe_mode: whether the input data should be considered as subframes.
4847 * If this is set to TRUE, it informs the base class that the subclass
4848 * can receive the data at a granularity lower than one frame.
4850 * Note that in this mode, the subclass has two options. It can either
4851 * require the presence of a GST_VIDEO_BUFFER_FLAG_MARKER to mark the
4852 * end of a frame. Or it can operate in such a way that it will decode
4853 * a single frame at a time. In this second case, every buffer that
4854 * arrives to the element is considered part of the same frame until
4855 * gst_video_decoder_finish_frame() is called.
4857 * In either case, the same #GstVideoCodecFrame will be passed to the
4858 * GstVideoDecoderClass:handle_frame vmethod repeatedly with a
4859 * different GstVideoCodecFrame:input_buffer every time until the end of the
4860 * frame has been signaled using either method.
4861 * This method must be called during the decoder subclass @set_format call.
4866 gst_video_decoder_set_subframe_mode (GstVideoDecoder * decoder,
4867 gboolean subframe_mode)
4869 decoder->priv->subframe_mode = subframe_mode;
4873 * gst_video_decoder_get_subframe_mode:
4874 * @decoder: a #GstVideoDecoder
4876 * Queries whether input data is considered as subframes or not by the
4877 * base class. If FALSE, each input buffer will be considered as a full
4880 * Returns: TRUE if input data is considered as sub frames.
4885 gst_video_decoder_get_subframe_mode (GstVideoDecoder * decoder)
4887 return decoder->priv->subframe_mode;
4891 * gst_video_decoder_get_input_subframe_index:
4892 * @decoder: a #GstVideoDecoder
4893 * @frame: (transfer none): the #GstVideoCodecFrame to update
4895 * Queries the number of the last subframe received by
4896 * the decoder baseclass in the @frame.
4898 * Returns: the current subframe index received in subframe mode, 1 otherwise.
4903 gst_video_decoder_get_input_subframe_index (GstVideoDecoder * decoder,
4904 GstVideoCodecFrame * frame)
4906 return frame->abidata.ABI.num_subframes;
4910 * gst_video_decoder_get_processed_subframe_index:
4911 * @decoder: a #GstVideoDecoder
4912 * @frame: (transfer none): the #GstVideoCodecFrame to update
4914 * Queries the number of subframes in the frame processed by
4915 * the decoder baseclass.
4917 * Returns: the current subframe processed received in subframe mode.
4922 gst_video_decoder_get_processed_subframe_index (GstVideoDecoder * decoder,
4923 GstVideoCodecFrame * frame)
4925 return frame->abidata.ABI.subframes_processed;
4929 * gst_video_decoder_set_estimate_rate:
4930 * @dec: a #GstVideoDecoder
4931 * @enabled: whether to enable byte to time conversion
4933 * Allows baseclass to perform byte to time estimated conversion.
4936 gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
4938 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
4940 dec->priv->do_estimate_rate = enabled;
4944 * gst_video_decoder_get_estimate_rate:
4945 * @dec: a #GstVideoDecoder
4947 * Returns: currently configured byte to time conversion setting
4950 gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
4952 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
4954 return dec->priv->do_estimate_rate;
4958 * gst_video_decoder_set_latency:
4959 * @decoder: a #GstVideoDecoder
4960 * @min_latency: minimum latency
4961 * @max_latency: maximum latency
4963 * Lets #GstVideoDecoder sub-classes tell the baseclass what the decoder
4964 * latency is. Will also post a LATENCY message on the bus so the pipeline
4965 * can reconfigure its global latency.
4968 gst_video_decoder_set_latency (GstVideoDecoder * decoder,
4969 GstClockTime min_latency, GstClockTime max_latency)
4971 g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
4972 g_return_if_fail (max_latency >= min_latency);
4974 GST_OBJECT_LOCK (decoder);
4975 decoder->priv->min_latency = min_latency;
4976 decoder->priv->max_latency = max_latency;
4977 GST_OBJECT_UNLOCK (decoder);
4979 gst_element_post_message (GST_ELEMENT_CAST (decoder),
4980 gst_message_new_latency (GST_OBJECT_CAST (decoder)));
4984 * gst_video_decoder_get_latency:
4985 * @decoder: a #GstVideoDecoder
4986 * @min_latency: (out) (allow-none): address of variable in which to store the
4987 * configured minimum latency, or %NULL
4988 * @max_latency: (out) (allow-none): address of variable in which to store the
4989 * configured mximum latency, or %NULL
4991 * Query the configured decoder latency. Results will be returned via
4992 * @min_latency and @max_latency.
4995 gst_video_decoder_get_latency (GstVideoDecoder * decoder,
4996 GstClockTime * min_latency, GstClockTime * max_latency)
4998 GST_OBJECT_LOCK (decoder);
5000 *min_latency = decoder->priv->min_latency;
5002 *max_latency = decoder->priv->max_latency;
5003 GST_OBJECT_UNLOCK (decoder);
5007 * gst_video_decoder_merge_tags:
5008 * @decoder: a #GstVideoDecoder
5009 * @tags: (allow-none): a #GstTagList to merge, or NULL to unset
5010 * previously-set tags
5011 * @mode: the #GstTagMergeMode to use, usually #GST_TAG_MERGE_REPLACE
5013 * Sets the audio decoder tags and how they should be merged with any
5014 * upstream stream tags. This will override any tags previously-set
5015 * with gst_audio_decoder_merge_tags().
5017 * Note that this is provided for convenience, and the subclass is
5018 * not required to use this and can still do tag handling on its own.
5023 gst_video_decoder_merge_tags (GstVideoDecoder * decoder,
5024 const GstTagList * tags, GstTagMergeMode mode)
5026 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
5027 g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
5028 g_return_if_fail (tags == NULL || mode != GST_TAG_MERGE_UNDEFINED);
5030 GST_VIDEO_DECODER_STREAM_LOCK (decoder);
5031 if (decoder->priv->tags != tags) {
5032 if (decoder->priv->tags) {
5033 gst_tag_list_unref (decoder->priv->tags);
5034 decoder->priv->tags = NULL;
5035 decoder->priv->tags_merge_mode = GST_TAG_MERGE_APPEND;
5038 decoder->priv->tags = gst_tag_list_ref ((GstTagList *) tags);
5039 decoder->priv->tags_merge_mode = mode;
5042 GST_DEBUG_OBJECT (decoder, "set decoder tags to %" GST_PTR_FORMAT, tags);
5043 decoder->priv->tags_changed = TRUE;
5045 GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
5049 * gst_video_decoder_get_buffer_pool:
5050 * @decoder: a #GstVideoDecoder
5052 * Returns: (transfer full): the instance of the #GstBufferPool used
5053 * by the decoder; free it after use it
5056 gst_video_decoder_get_buffer_pool (GstVideoDecoder * decoder)
5058 g_return_val_if_fail (GST_IS_VIDEO_DECODER (decoder), NULL);
5060 if (decoder->priv->pool)
5061 return gst_object_ref (decoder->priv->pool);
5067 * gst_video_decoder_get_allocator:
5068 * @decoder: a #GstVideoDecoder
5069 * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
5071 * @params: (out) (allow-none) (transfer full): the
5072 * #GstAllocationParams of @allocator
5074 * Lets #GstVideoDecoder sub-classes to know the memory @allocator
5075 * used by the base class and its @params.
5077 * Unref the @allocator after use it.
5080 gst_video_decoder_get_allocator (GstVideoDecoder * decoder,
5081 GstAllocator ** allocator, GstAllocationParams * params)
5083 g_return_if_fail (GST_IS_VIDEO_DECODER (decoder));
5086 *allocator = decoder->priv->allocator ?
5087 gst_object_ref (decoder->priv->allocator) : NULL;
5090 *params = decoder->priv->params;
5094 * gst_video_decoder_set_use_default_pad_acceptcaps:
5095 * @decoder: a #GstVideoDecoder
5096 * @use: if the default pad accept-caps query handling should be used
5098 * Lets #GstVideoDecoder sub-classes decide if they want the sink pad
5099 * to use the default pad query handler to reply to accept-caps queries.
5101 * By setting this to true it is possible to further customize the default
5102 * handler with %GST_PAD_SET_ACCEPT_INTERSECT and
5103 * %GST_PAD_SET_ACCEPT_TEMPLATE
5108 gst_video_decoder_set_use_default_pad_acceptcaps (GstVideoDecoder * decoder,
5111 decoder->priv->use_default_pad_acceptcaps = use;
5115 * gst_video_decoder_request_sync_point:
5116 * @dec: a #GstVideoDecoder
5117 * @frame: a #GstVideoCodecFrame
5118 * @flags: #GstVideoDecoderRequestSyncPointFlags
5120 * Allows the #GstVideoDecoder subclass to request from the base class that
5121 * a new sync should be requested from upstream, and that @frame was the frame
5122 * when the subclass noticed that a new sync point is required. A reason for
5123 * the subclass to do this could be missing reference frames, for example.
5125 * The base class will then request a new sync point from upstream as long as
5126 * the time that passed since the last one is exceeding
5127 * #GstVideoDecoder:min-force-key-unit-interval.
5129 * The subclass can signal via @flags how the frames until the next sync point
5130 * should be handled:
5132 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_DISCARD_INPUT is selected then
5133 * all following input frames until the next sync point are discarded.
5134 * This can be useful if the lack of a sync point will prevent all further
5135 * decoding and the decoder implementation is not very robust in handling
5136 * missing references frames.
5137 * * If %GST_VIDEO_DECODER_REQUEST_SYNC_POINT_CORRUPT_OUTPUT is selected
5138 * then all output frames following @frame are marked as corrupted via
5139 * %GST_BUFFER_FLAG_CORRUPTED. Corrupted frames can be automatically
5140 * dropped by the base class, see #GstVideoDecoder:discard-corrupted-frames.
5141 * Subclasses can manually mark frames as corrupted via %GST_VIDEO_CODEC_FRAME_FLAG_CORRUPTED
5142 * before calling gst_video_decoder_finish_frame().
5147 gst_video_decoder_request_sync_point (GstVideoDecoder * dec,
5148 GstVideoCodecFrame * frame, GstVideoDecoderRequestSyncPointFlags flags)
5150 GstEvent *fku = NULL;
5151 GstVideoDecoderPrivate *priv;
5153 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
5154 g_return_if_fail (frame != NULL);
5158 GST_OBJECT_LOCK (dec);
5160 /* Check if we're allowed to send a new force-keyunit event.
5161 * frame->deadline is set to the running time of the PTS. */
5162 if (priv->min_force_key_unit_interval == 0 ||
5163 frame->deadline == GST_CLOCK_TIME_NONE ||
5164 (priv->min_force_key_unit_interval != GST_CLOCK_TIME_NONE &&
5165 (priv->last_force_key_unit_time == GST_CLOCK_TIME_NONE
5166 || (priv->last_force_key_unit_time +
5167 priv->min_force_key_unit_interval <= frame->deadline)))) {
5168 GST_DEBUG_OBJECT (dec,
5169 "Requesting a new key-unit for frame with deadline %" GST_TIME_FORMAT,
5170 GST_TIME_ARGS (frame->deadline));
5172 gst_video_event_new_upstream_force_key_unit (GST_CLOCK_TIME_NONE, FALSE,
5174 priv->last_force_key_unit_time = frame->deadline;
5176 GST_DEBUG_OBJECT (dec,
5177 "Can't request a new key-unit for frame with deadline %"
5178 GST_TIME_FORMAT, GST_TIME_ARGS (frame->deadline));
5180 priv->request_sync_point_flags |= flags;
5181 /* We don't know yet the frame number of the sync point so set it to a
5182 * frame number higher than any allowed frame number */
5183 priv->request_sync_point_frame_number = REQUEST_SYNC_POINT_PENDING;
5184 GST_OBJECT_UNLOCK (dec);
5187 gst_pad_push_event (dec->sinkpad, fku);
5191 * gst_video_decoder_set_needs_sync_point:
5192 * @dec: a #GstVideoDecoder
5193 * @enabled: new state
5195 * Configures whether the decoder requires a sync point before it starts
5196 * outputting data in the beginning. If enabled, the base class will discard
5197 * all non-sync point frames in the beginning and after a flush and does not
5198 * pass it to the subclass.
5200 * If the first frame is not a sync point, the base class will request a sync
5201 * point via the force-key-unit event.
5206 gst_video_decoder_set_needs_sync_point (GstVideoDecoder * dec, gboolean enabled)
5208 g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
5210 dec->priv->needs_sync_point = enabled;
5214 * gst_video_decoder_get_needs_sync_point:
5215 * @dec: a #GstVideoDecoder
5217 * Queries if the decoder requires a sync point before it starts outputting
5218 * data in the beginning.
5220 * Returns: %TRUE if a sync point is required in the beginning.
5225 gst_video_decoder_get_needs_sync_point (GstVideoDecoder * dec)
5229 g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), FALSE);
5231 result = dec->priv->needs_sync_point;