<xi:include href="xml/gstvideooverlaycomposition.xml" />
<xi:include href="xml/gstvideofilter.xml" />
<xi:include href="xml/gstvideosink.xml" />
+ <xi:include href="xml/gstvideodecoder.xml" />
+ <xi:include href="xml/gstvideoencoder.xml" />
+ <xi:include href="xml/gstvideoutils.xml" />
</chapter>
</part>
GST_VIDEO_FORMAT_INFO_IS_LE
GST_VIDEO_FORMAT_INFO_IS_RGB
GST_VIDEO_FORMAT_INFO_IS_YUV
+GST_VIDEO_FORMAT_INFO_IS_COMPLEX
GST_VIDEO_FORMAT_INFO_NAME
GST_VIDEO_FORMAT_INFO_N_COMPONENTS
GST_VIDEO_FORMAT_INFO_N_PLANES
</SECTION>
<SECTION>
+<FILE>gstvideodecoder</FILE>
+<TITLE>GstVideoDecoder</TITLE>
+GST_VIDEO_DECODER_ERROR
+GST_VIDEO_DECODER_FLOW_DROPPED
+GST_VIDEO_DECODER_FLOW_NEED_DATA
+GST_VIDEO_DECODER_MAX_ERRORS
+GST_VIDEO_DECODER_SINK_NAME
+GST_VIDEO_DECODER_SINK_PAD
+GST_VIDEO_DECODER_SRC_NAME
+GST_VIDEO_DECODER_SRC_PAD
+GST_VIDEO_DECODER_STREAM_LOCK
+GST_VIDEO_DECODER_STREAM_UNLOCK
+GST_VIDEO_DECODER_INPUT_SEGMENT
+GST_VIDEO_DECODER_OUTPUT_SEGMENT
+GstVideoDecoder
+GstVideoDecoderClass
+gst_video_decoder_add_to_frame
+gst_video_decoder_alloc_output_buffer
+gst_video_decoder_alloc_output_frame
+gst_video_decoder_drop_frame
+gst_video_decoder_finish_frame
+gst_video_decoder_get_frame
+gst_video_decoder_get_max_decode_time
+gst_video_decoder_get_max_errors
+gst_video_decoder_get_oldest_frame
+gst_video_decoder_get_packetized
+gst_video_decoder_have_frame
+gst_video_decoder_get_latency
+gst_video_decoder_set_latency
+gst_video_decoder_get_estimate_rate
+gst_video_decoder_get_output_state
+gst_video_decoder_set_estimate_rate
+gst_video_decoder_set_output_state
+gst_video_decoder_set_max_errors
+gst_video_decoder_set_packetized
+<SUBSECTION Standard>
+GST_IS_VIDEO_DECODER
+GST_IS_VIDEO_DECODER_CLASS
+GST_TYPE_VIDEO_DECODER
+GST_VIDEO_DECODER
+GST_VIDEO_DECODER_CLASS
+GST_VIDEO_DECODER_GET_CLASS
+GstVideoDecoderPrivate
+gst_video_decoder_get_type
+</SECTION>
+
+<SECTION>
+<FILE>gstvideoencoder</FILE>
+<TITLE>GstVideoEncoder</TITLE>
+GST_VIDEO_ENCODER_CAST
+GST_VIDEO_ENCODER_FLOW_DROPPED
+GST_VIDEO_ENCODER_FLOW_NEED_DATA
+GST_VIDEO_ENCODER_SINK_NAME
+GST_VIDEO_ENCODER_SINK_PAD
+GST_VIDEO_ENCODER_SRC_NAME
+GST_VIDEO_ENCODER_SRC_PAD
+GST_VIDEO_ENCODER_INPUT_SEGMENT
+GST_VIDEO_ENCODER_OUTPUT_SEGMENT
+GST_VIDEO_ENCODER_STREAM_LOCK
+GST_VIDEO_ENCODER_STREAM_UNLOCK
+GstVideoEncoder
+GstVideoEncoderClass
+gst_video_encoder_finish_frame
+gst_video_encoder_get_frame
+gst_video_encoder_get_oldest_frame
+gst_video_encoder_set_headers
+gst_video_encoder_get_latency
+gst_video_encoder_set_latency
+gst_video_encoder_get_discont
+gst_video_encoder_set_discont
+gst_video_encoder_set_output_state
+gst_video_encoder_get_output_state
+gst_video_encoder_proxy_getcaps
+<SUBSECTION Standard>
+GST_IS_VIDEO_ENCODER
+GST_IS_VIDEO_ENCODER_CLASS
+GST_TYPE_VIDEO_ENCODER
+GST_VIDEO_ENCODER
+GST_VIDEO_ENCODER_CLASS
+GST_VIDEO_ENCODER_GET_CLASS
+GstVideoEncoderPrivate
+gst_video_encoder_get_type
+</SECTION>
+
+<SECTION>
+<FILE>gstvideoutils</FILE>
+GstVideoCodecFrame
+GstVideoCodecFrameFlags
+GST_VIDEO_CODEC_FRAME_FLAGS
+GST_VIDEO_CODEC_FRAME_FLAG_IS_SET
+GST_VIDEO_CODEC_FRAME_FLAG_SET
+GST_VIDEO_CODEC_FRAME_FLAG_UNSET
+GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY
+GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME
+GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME_HEADERS
+GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT
+GST_VIDEO_CODEC_FRAME_SET_DECODE_ONLY
+GST_VIDEO_CODEC_FRAME_SET_FORCE_KEYFRAME
+GST_VIDEO_CODEC_FRAME_SET_FORCE_KEYFRAME_HEADERS
+GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT
+GST_VIDEO_CODEC_FRAME_UNSET_FORCE_KEYFRAME
+GST_VIDEO_CODEC_FRAME_UNSET_FORCE_KEYFRAME_HEADERS
+GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT
+gst_video_codec_frame_ref
+gst_video_codec_frame_unref
+gst_video_codec_frame_set_hook
+GstVideoCodecState
+gst_video_codec_state_ref
+gst_video_codec_state_unref
+<SUBSECTION Standard>
+GST_TYPE_VIDEO_CODEC_FRAME
+GST_TYPE_VIDEO_CODEC_STATE
+gst_video_codec_frame_get_type
+gst_video_codec_state_get_type
+</SECTION>
+
+<SECTION>
<FILE>gstdiscoverer</FILE>
<INCLUDE>gst/pbutils/pbutils.h</INCLUDE>
<SUBSECTION>
#include <gst/video/video-overlay-composition.h>
gst_video_overlay_rectangle_get_type
gst_video_overlay_composition_get_type
+#include <gst/video/gstvideodecoder.h>
+gst_video_decoder_get_type
+#include <gst/video/gstvideoencoder.h>
+gst_video_encoder_get_type
+#include <gst/video/gstvideoutils.h>
+gst_video_codec_frame_get_type
+gst_video_codec_state_get_type
#include <gst/pbutils/pbutils.h>
gst_discoverer_get_type
# video-blend.h should be disted but not installed into the includedir
libgstvideo_@GST_MAJORMINOR@_la_SOURCES = \
video.c gstvideosink.c gstvideofilter.c convertframe.c \
- video-blend.c video-blend.h video-overlay-composition.c
+ video-blend.c video-blend.h video-overlay-composition.c \
+ gstvideodecoder.c gstvideoencoder.c \
+ gstvideoutils.c
nodist_libgstvideo_@GST_MAJORMINOR@_la_SOURCES = \
$(built_sources) $(built_headers) \
$(ORC_NODIST_SOURCES)
libgstvideo_@GST_MAJORMINOR@includedir = $(includedir)/gstreamer-@GST_MAJORMINOR@/gst/video
libgstvideo_@GST_MAJORMINOR@include_HEADERS = \
- video.h gstvideosink.h gstvideofilter.h video-overlay-composition.h
+ video.h gstvideosink.h gstvideofilter.h video-overlay-composition.h \
+ gstvideodecoder.h gstvideoencoder.h \
+ gstvideoutils.h
nodist_libgstvideo_@GST_MAJORMINOR@include_HEADERS = $(built_headers)
libgstvideo_@GST_MAJORMINOR@_la_CFLAGS = \
$(INTROSPECTION_SCANNER) -v --namespace GstVideo \
--nsversion=@GST_MAJORMINOR@ \
--strip-prefix=Gst \
+ --warn-all \
$(gir_cincludes) \
-I$(top_srcdir)/gst-libs \
-I$(top_builddir)/gst-libs \
--- /dev/null
+Base Video Classes TODO
+
+Main goal:
+
+ Make the video encoder/decoder base classes more consistent with the
+other GStreamer API, especially with the audio encoder/decoder base
+classes.
+
+API:
+
+  The API should be named similarly, the base classes should be used
+similarly by the sub-classes, and the behaviour of the base classes
+should be similar.
+ Currently there are many, mostly small, differences between the audio
+and video base classes API. Things like GstVideoState should be merged
+with the stuff we have in 0.11 to make the API the same in 0.11 and
+0.10 as far as possible, things like GstVideoInfo might make sense to
+be backported (at least partially).
+
+
+Specifics:
+ * Use a GInstancePrivate for extensibility.
+
+ * Try to move more common video objects to video.[ch]
+
+Known bugs:
+ https://bugzilla.gnome.org/show_bug.cgi?id=664127
+ [basevideodecoder] Add separate drain vfunc and differentiate
+ between hard/soft reset in reset vfunc
+
+ https://bugzilla.gnome.org/show_bug.cgi?id=660770
+ Doesn't support handling of "partial" frames
+
+ https://bugzilla.gnome.org/show_bug.cgi?id=654294
+ Impossible to flush pending frames in ::set_format
+
+ https://bugzilla.gnome.org/show_bug.cgi?id=658241
+ add API to handle QoS events and dropping logic
+
+ https://bugzilla.gnome.org/show_bug.cgi?id=667653
+ Autodetect multicore/multithread processors
+
+ https://bugzilla.gnome.org/show_bug.cgi?id=617021
+ Add support for QoS messages to -bad decoders and other elements
+
+
+Peripheral issues
+ https://bugzilla.gnome.org/show_bug.cgi?id=663262
+ theoraenc: spurious encoder resets due to unstable upstream
+ timestamps cause quality issues
+ => Investigate how to specify/handle variable framerates.
--- /dev/null
+/* GStreamer
+ * Copyright (C) 2008 David Schleef <ds@schleef.org>
+ * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
+ * Copyright (C) 2011 Nokia Corporation. All rights reserved.
+ * Contact: Stefan Kost <stefan.kost@nokia.com>
+ * Copyright (C) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+/**
+ * SECTION:gstvideodecoder
+ * @short_description: Base class for video decoders
+ * @see_also:
+ *
+ * This base class is for video decoders turning encoded data into raw video
+ * frames.
+ *
+ * GstVideoDecoder and its subclass should cooperate as follows.
+ * <orderedlist>
+ * <listitem>
+ * <itemizedlist><title>Configuration</title>
+ * <listitem><para>
+ * Initially, GstVideoDecoder calls @start when the decoder element
+ * is activated, which allows subclass to perform any global setup.
+ * </para></listitem>
+ * <listitem><para>
+ * GstVideoDecoder calls @set_format to inform subclass of caps
+ * describing input video data that it is about to receive, including
+ * possibly configuration data.
+ * While unlikely, it might be called more than once, if changing input
+ * parameters require reconfiguration.
+ * </para></listitem>
+ * <listitem><para>
+ * GstVideoDecoder calls @stop at end of all processing.
+ * </para></listitem>
+ * </itemizedlist>
+ * </listitem>
+ * <listitem>
+ * <itemizedlist>
+ * <title>Data processing</title>
+ * <listitem><para>
+ * Base class gathers input data, and optionally allows subclass
+ * to parse this into subsequently manageable chunks, typically
+ * corresponding to and referred to as 'frames'.
+ * </para></listitem>
+ * <listitem><para>
+ * Input frame is provided to subclass' @handle_frame. The ownership of
+ * the frame is given to @handle_frame.
+ * </para></listitem>
+ * <listitem><para>
+ * If codec processing results in decoded data, subclass should call
+ * @gst_video_decoder_finish_frame to have decoded data pushed
+ * downstream.
+ * </para></listitem>
+ * </itemizedlist>
+ * </listitem>
+ * <listitem>
+ * <itemizedlist><title>Shutdown phase</title>
+ * <listitem><para>
+ * GstVideoDecoder class calls @stop to inform the subclass that data
+ * parsing will be stopped.
+ * </para></listitem>
+ * </itemizedlist>
+ * </listitem>
+ * </orderedlist>
+ *
+ * Subclass is responsible for providing pad template caps for
+ * source and sink pads. The pads need to be named "sink" and "src". It also
+ * needs to set the fixed caps on srcpad, when the format is ensured. This
+ * is typically when base class calls subclass' @set_format function, though
+ * it might be delayed until calling @gst_video_decoder_finish_frame.
+ *
+ * Subclass is also responsible for providing (presentation) timestamps
+ * (likely based on corresponding input ones). If that is not applicable
+ * or possible, baseclass provides limited framerate based interpolation.
+ *
+ * Similarly, the baseclass provides some limited (legacy) seeking support
+ * (upon explicit subclass request), as full-fledged support
+ * should rather be left to upstream demuxer, parser or alike. This simple
+ * approach caters for seeking and duration reporting using estimated input
+ * bitrates.
+ *
+ * Baseclass provides some support for reverse playback, in particular
+ * in case incoming data is not packetized or upstream does not provide
+ * fragments on keyframe boundaries. However, subclass should then be prepared
+ * for the parsing and frame processing stage to occur separately (rather
+ * than otherwise the latter immediately following the former),
+ * and should ensure the parsing stage properly marks keyframes or rely on
+ * upstream to do so properly for incoming data.
+ *
+ * Things that the subclass needs to take care of:
+ * <itemizedlist>
+ * <listitem><para>Provide pad templates</para></listitem>
+ * <listitem><para>
+ * Set source pad caps when appropriate
+ * </para></listitem>
+ * <listitem><para>
+ * Configure some baseclass behaviour parameters.
+ * </para></listitem>
+ * <listitem><para>
+ * Optionally parse input data, if it is not considered packetized.
+ * Data will be provided to @parse which should invoke @gst_video_decoder_add_to_frame and
+ * @gst_video_decoder_have_frame as appropriate.
+ * </para></listitem>
+ * <listitem><para>
+ * Accept data in @handle_frame and provide decoded results to
+ * @gst_video_decoder_finish_frame.
+ * </para></listitem>
+ * </itemizedlist>
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* TODO
+ *
+ * * Add a flag/boolean for I-frame-only/image decoders so we can do extra
+ * features, like applying QoS on input (as opposed to after the frame is
+ * decoded).
+ * * Add a flag/boolean for decoders that require keyframes, so the base
+ * class can automatically discard non-keyframes before one has arrived
+ * * Detect reordered frame/timestamps and fix the pts/dts
+ * * Support for GstIndex (or shall we not care ?)
+ * * Calculate actual latency based on input/output timestamp/frame_number
+ * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
+ * * Emit latency message when it changes
+ *
+ */
+
+/* FIXME 0.11: suppress warnings for deprecated API such as GStaticRecMutex
+ * with newer GLib versions (>= 2.31.0) */
+#define GLIB_DISABLE_DEPRECATION_WARNINGS
+
+#include "gstvideodecoder.h"
+#include "gstvideoutils.h"
+
+#include <string.h>
+
+GST_DEBUG_CATEGORY (videodecoder_debug);
+#define GST_CAT_DEFAULT videodecoder_debug
+
+#define GST_VIDEO_DECODER_GET_PRIVATE(obj) \
+ (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_VIDEO_DECODER, \
+ GstVideoDecoderPrivate))
+
+/* Instance-private state for GstVideoDecoder, obtained via
+ * GST_VIDEO_DECODER_GET_PRIVATE().  NOTE(review): except for the field
+ * explicitly marked OBJECT_LOCK below, fields appear to be guarded by the
+ * decoder stream lock -- confirm per field against the accessors. */
+struct _GstVideoDecoderPrivate
+{
+  /* FIXME introduce a context ? */
+
+  /* parse tracking */
+  /* input data */
+  GstAdapter *input_adapter;
+  /* assembles current frame */
+  GstAdapter *output_adapter;
+
+  /* Whether we attempt to convert newsegment from bytes to
+   * time using a bitrate estimation */
+  gboolean do_estimate_rate;
+
+  /* Whether input is considered packetized or not */
+  gboolean packetized;
+
+  /* Error handling */
+  gint max_errors;
+  gint error_count;
+
+  /* ... being tracked here;
+   * only available during parsing */
+  GstVideoCodecFrame *current_frame;
+  /* events that should apply to the current frame */
+  GList *current_frame_events;
+
+  /* relative offset of input data */
+  guint64 input_offset;
+  /* relative offset of frame */
+  guint64 frame_offset;
+  /* tracking ts and offsets */
+  GList *timestamps;
+
+  /* combine to yield (presentation) ts */
+  GstClockTime timestamp_offset;
+
+  /* last outgoing ts */
+  GstClockTime last_timestamp;
+
+  /* reverse playback */
+  /* collect input */
+  GList *gather;
+  /* to-be-parsed */
+  GList *parse;
+  /* collected parsed frames */
+  GList *parse_gather;
+  /* frames to be handled == decoded */
+  GList *decode;
+  /* collected output */
+  GList *queued;
+  /* Used internally for avoiding processing of frames to flush */
+  gboolean process;
+
+
+  /* FIXME : base_picture_number is never set */
+  guint64 base_picture_number;
+  /* FIXME : reorder_depth is never set */
+  int reorder_depth;
+  int distance_from_sync;
+
+  guint64 system_frame_number;
+  guint64 decode_frame_number;
+
+  GList *frames;                /* Protected with OBJECT_LOCK */
+  GstVideoCodecState *input_state;
+  GstVideoCodecState *output_state;
+  gboolean output_state_changed;
+
+  /* QoS properties */
+  gdouble proportion;
+  GstClockTime earliest_time;
+  gboolean discont;
+  /* qos messages: frames dropped/processed */
+  guint dropped;
+  guint processed;
+
+  /* Outgoing byte size ? */
+  gint64 bytes_out;
+  gint64 time;
+
+  gint64 min_latency;
+  gint64 max_latency;
+};
+
+static void gst_video_decoder_finalize (GObject * object);
+
+static gboolean gst_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps);
+static gboolean gst_video_decoder_sink_event (GstPad * pad, GstEvent * event);
+static gboolean gst_video_decoder_src_event (GstPad * pad, GstEvent * event);
+static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstBuffer * buf);
+static gboolean gst_video_decoder_sink_query (GstPad * pad, GstQuery * query);
+static GstStateChangeReturn
+gst_video_decoder_change_state (GstElement * element,
+ GstStateChange transition);
+static const GstQueryType *gst_video_decoder_get_query_types (GstPad * pad);
+static gboolean gst_video_decoder_src_query (GstPad * pad, GstQuery * query);
+static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full);
+
+static GstFlowReturn gst_video_decoder_have_frame_2 (GstVideoDecoder * decoder);
+static gboolean gst_video_decoder_set_src_caps (GstVideoDecoder * decoder);
+
+static guint64
+gst_video_decoder_get_timestamp (GstVideoDecoder * decoder, int picture_number);
+static guint64 gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame);
+static GstVideoCodecFrame *gst_video_decoder_new_frame (GstVideoDecoder *
+ decoder);
+
+static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
+
+GST_BOILERPLATE (GstVideoDecoder, gst_video_decoder,
+ GstElement, GST_TYPE_ELEMENT);
+
+/* base_init: registers the "videodecoder" debug category used by
+ * GST_CAT_DEFAULT throughout this file */
+static void
+gst_video_decoder_base_init (gpointer g_class)
+{
+  GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
+      "Base Video Decoder");
+}
+
+/* class_init: attaches the private struct and installs the GObject
+ * finalize and GstElement change_state vfunc overrides */
+static void
+gst_video_decoder_class_init (GstVideoDecoderClass * klass)
+{
+  GObjectClass *gobject_class;
+  GstElementClass *gstelement_class;
+
+  gobject_class = G_OBJECT_CLASS (klass);
+  gstelement_class = GST_ELEMENT_CLASS (klass);
+
+  g_type_class_add_private (klass, sizeof (GstVideoDecoderPrivate));
+
+  gobject_class->finalize = gst_video_decoder_finalize;
+
+  gstelement_class->change_state =
+      GST_DEBUG_FUNCPTR (gst_video_decoder_change_state);
+}
+
+/* instance init: creates sink/src pads from the subclass' pad templates
+ * (templates MUST be named "sink" and "src"), wires the pad functions,
+ * initializes segments, the stream lock and the parse adapters, and
+ * performs a full reset.  Input defaults to packetized. */
+static void
+gst_video_decoder_init (GstVideoDecoder * decoder, GstVideoDecoderClass * klass)
+{
+  GstPadTemplate *pad_template;
+  GstPad *pad;
+
+  GST_DEBUG_OBJECT (decoder, "gst_video_decoder_init");
+
+  decoder->priv = GST_VIDEO_DECODER_GET_PRIVATE (decoder);
+
+  /* sink pad: subclass must have installed a "sink" pad template */
+  pad_template =
+      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
+  g_return_if_fail (pad_template != NULL);
+
+  decoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");
+
+  gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
+  gst_pad_set_event_function (pad,
+      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
+  gst_pad_set_setcaps_function (pad,
+      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_setcaps));
+  gst_pad_set_query_function (pad,
+      GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
+  gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);
+
+  /* src pad: subclass must have installed a "src" pad template;
+   * fixed caps are set later once the output format is known */
+  pad_template =
+      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
+  g_return_if_fail (pad_template != NULL);
+
+  decoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
+
+  gst_pad_set_event_function (pad,
+      GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
+  gst_pad_set_query_type_function (pad,
+      GST_DEBUG_FUNCPTR (gst_video_decoder_get_query_types));
+  gst_pad_set_query_function (pad,
+      GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
+  gst_pad_use_fixed_caps (pad);
+  gst_element_add_pad (GST_ELEMENT (decoder), decoder->srcpad);
+
+  gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
+  gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);
+
+  g_static_rec_mutex_init (&decoder->stream_lock);
+
+  decoder->priv->input_adapter = gst_adapter_new ();
+  decoder->priv->output_adapter = gst_adapter_new ();
+  decoder->priv->packetized = TRUE;
+
+  gst_video_decoder_reset (decoder, TRUE);
+}
+
+/* Convert @src_value between BYTES, DEFAULT (frames) and TIME for raw
+ * video, using the frame size and framerate from @state->info.
+ * Returns FALSE when the needed metadata (size / fps) is zero or the
+ * format pair is unsupported.  0 and -1 (NONE) pass through unchanged. */
+static gboolean
+gst_video_rawvideo_convert (GstVideoCodecState * state,
+    GstFormat src_format, gint64 src_value,
+    GstFormat * dest_format, gint64 * dest_value)
+{
+  gboolean res = FALSE;
+  guint vidsize;
+  guint fps_n, fps_d;
+
+  g_return_val_if_fail (dest_format != NULL, FALSE);
+  g_return_val_if_fail (dest_value != NULL, FALSE);
+
+  /* identity conversion, or special values that are format-agnostic */
+  if (src_format == *dest_format || src_value == 0 || src_value == -1) {
+    *dest_value = src_value;
+    return TRUE;
+  }
+
+  vidsize = GST_VIDEO_INFO_SIZE (&state->info);
+  fps_n = GST_VIDEO_INFO_FPS_N (&state->info);
+  fps_d = GST_VIDEO_INFO_FPS_D (&state->info);
+
+  if (src_format == GST_FORMAT_BYTES &&
+      *dest_format == GST_FORMAT_DEFAULT && vidsize) {
+    /* convert bytes to frames */
+    *dest_value = gst_util_uint64_scale_int (src_value, 1, vidsize);
+    res = TRUE;
+  } else if (src_format == GST_FORMAT_DEFAULT &&
+      *dest_format == GST_FORMAT_BYTES && vidsize) {
+    /* convert frames to bytes */
+    *dest_value = src_value * vidsize;
+    res = TRUE;
+  } else if (src_format == GST_FORMAT_DEFAULT &&
+      *dest_format == GST_FORMAT_TIME && fps_n) {
+    /* convert frames to time */
+    /* FIXME add segment time? */
+    *dest_value = gst_util_uint64_scale (src_value, GST_SECOND * fps_d, fps_n);
+    res = TRUE;
+  } else if (src_format == GST_FORMAT_TIME &&
+      *dest_format == GST_FORMAT_DEFAULT && fps_d) {
+    /* convert time to frames */
+    /* FIXME subtract segment time? */
+    *dest_value = gst_util_uint64_scale (src_value, fps_n, GST_SECOND * fps_d);
+    res = TRUE;
+  } else if (src_format == GST_FORMAT_TIME &&
+      *dest_format == GST_FORMAT_BYTES && fps_d && vidsize) {
+    /* convert time to bytes (via frames) */
+    /* FIXME subtract segment time? */
+    *dest_value = gst_util_uint64_scale (src_value,
+        fps_n * vidsize, GST_SECOND * fps_d);
+    res = TRUE;
+  } else if (src_format == GST_FORMAT_BYTES &&
+      *dest_format == GST_FORMAT_TIME && fps_n && vidsize) {
+    /* convert bytes to time (via frames) */
+    /* FIXME add segment time? */
+    *dest_value = gst_util_uint64_scale (src_value,
+        GST_SECOND * fps_d, fps_n * vidsize);
+    res = TRUE;
+  }
+
+  return res;
+}
+
+/* Convert @src_value between BYTES and TIME for encoded video, using the
+ * observed totals @bytes / @time as a bitrate estimate.  Returns FALSE
+ * when there is not yet enough metadata or the conversion is unsupported. */
+static gboolean
+gst_video_encoded_video_convert (gint64 bytes, gint64 time,
+    GstFormat src_format, gint64 src_value, GstFormat * dest_format,
+    gint64 * dest_value)
+{
+  gboolean res = FALSE;
+
+  g_return_val_if_fail (dest_format != NULL, FALSE);
+  g_return_val_if_fail (dest_value != NULL, FALSE);
+
+  if (G_UNLIKELY (src_format == *dest_format || src_value == 0 ||
+          src_value == -1)) {
+    /* NOTE(review): the NULL check is redundant after the
+     * g_return_val_if_fail above */
+    if (dest_value)
+      *dest_value = src_value;
+    return TRUE;
+  }
+
+  if (bytes <= 0 || time <= 0) {
+    GST_DEBUG ("not enough metadata yet to convert");
+    goto exit;
+  }
+
+  /* scale linearly by the estimated average bitrate (time/bytes ratio) */
+  switch (src_format) {
+    case GST_FORMAT_BYTES:
+      switch (*dest_format) {
+        case GST_FORMAT_TIME:
+          *dest_value = gst_util_uint64_scale (src_value, time, bytes);
+          res = TRUE;
+          break;
+        default:
+          res = FALSE;
+      }
+      break;
+    case GST_FORMAT_TIME:
+      switch (*dest_format) {
+        case GST_FORMAT_BYTES:
+          *dest_value = gst_util_uint64_scale (src_value, bytes, time);
+          res = TRUE;
+          break;
+        default:
+          res = FALSE;
+      }
+      break;
+    default:
+      GST_DEBUG ("unhandled conversion from %d to %d", src_format,
+          *dest_format);
+      res = FALSE;
+  }
+
+exit:
+  return res;
+}
+
+/* Create a new input GstVideoCodecState (ref_count 1) from sink @caps.
+ * Keeps a ref on @caps and duplicates any "codec_data" buffer found in
+ * the caps.  Returns NULL if the caps cannot be parsed into video info. */
+static GstVideoCodecState *
+_new_input_state (GstCaps * caps)
+{
+  GstVideoCodecState *state;
+  GstStructure *structure;
+  const GValue *codec_data;
+
+  state = g_slice_new0 (GstVideoCodecState);
+  state->ref_count = 1;
+  gst_video_info_init (&state->info);
+  if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
+    goto parse_fail;
+  state->caps = gst_caps_ref (caps);
+
+  structure = gst_caps_get_structure (caps, 0);
+
+  codec_data = gst_structure_get_value (structure, "codec_data");
+  if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
+    state->codec_data = GST_BUFFER (gst_value_dup_mini_object (codec_data));
+
+  return state;
+
+parse_fail:
+  {
+    g_slice_free (GstVideoCodecState, state);
+    return NULL;
+  }
+}
+
+/* Create a new output GstVideoCodecState (ref_count 1) for @fmt at
+ * @width x @height.  When @reference is non-NULL, interlace mode, flags,
+ * chroma-site, colorimetry, PAR and framerate are copied over from it. */
+static GstVideoCodecState *
+_new_output_state (GstVideoFormat fmt, guint width, guint height,
+    GstVideoCodecState * reference)
+{
+  GstVideoCodecState *state;
+
+  state = g_slice_new0 (GstVideoCodecState);
+  state->ref_count = 1;
+  gst_video_info_init (&state->info);
+  gst_video_info_set_format (&state->info, fmt, width, height);
+
+  if (reference) {
+    GstVideoInfo *tgt, *ref;
+
+    tgt = &state->info;
+    ref = &reference->info;
+
+    /* Copy over extra fields from reference state */
+    tgt->interlace_mode = ref->interlace_mode;
+    tgt->flags = ref->flags;
+    tgt->chroma_site = ref->chroma_site;
+    tgt->colorimetry = ref->colorimetry;
+    GST_DEBUG ("reference par %d/%d fps %d/%d",
+        ref->par_n, ref->par_d, ref->fps_n, ref->fps_d);
+    tgt->par_n = ref->par_n;
+    tgt->par_d = ref->par_d;
+    tgt->fps_n = ref->fps_n;
+    tgt->fps_d = ref->fps_d;
+  }
+
+  /* NOTE(review): this message says "reference" but actually prints the
+   * values of the newly created state */
+  GST_DEBUG ("reference par %d/%d fps %d/%d",
+      state->info.par_n, state->info.par_d,
+      state->info.fps_n, state->info.fps_d);
+
+  return state;
+}
+
+/* Sink pad setcaps: builds an input codec state from @caps, offers it to
+ * the subclass via the set_format vfunc (under the stream lock), and on
+ * acceptance replaces the stored input_state.  Returns FALSE when the
+ * caps cannot be parsed or the subclass refuses them. */
+static gboolean
+gst_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+{
+  GstVideoDecoder *decoder;
+  GstVideoDecoderClass *decoder_class;
+  GstVideoCodecState *state;
+  gboolean ret = TRUE;
+
+  decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
+
+  GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);
+
+  state = _new_input_state (caps);
+
+  if (G_UNLIKELY (state == NULL))
+    goto parse_fail;
+
+  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+  if (decoder_class->set_format)
+    ret = decoder_class->set_format (decoder, state);
+
+  if (!ret)
+    goto refused_format;
+
+  /* ownership of the new state moves to priv->input_state */
+  if (decoder->priv->input_state)
+    gst_video_codec_state_unref (decoder->priv->input_state);
+  decoder->priv->input_state = state;
+
+  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+  gst_object_unref (decoder);
+
+  return ret;
+
+  /* ERRORS */
+
+parse_fail:
+  {
+    GST_WARNING_OBJECT (decoder, "Failed to parse caps");
+    gst_object_unref (decoder);
+    return FALSE;
+  }
+
+refused_format:
+  {
+    GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+    GST_WARNING_OBJECT (decoder, "Subclass refused caps");
+    gst_video_codec_state_unref (state);
+    /* NOTE(review): decoder ref from gst_pad_get_parent() is not released
+     * on this path -- looks like a leak; confirm against upstream */
+    return FALSE;
+  }
+}
+
+/* GObject finalize: frees the stream lock, drops the parse adapters and
+ * any remaining input/output codec states, then chains up. */
+static void
+gst_video_decoder_finalize (GObject * object)
+{
+  GstVideoDecoder *decoder;
+
+  decoder = GST_VIDEO_DECODER (object);
+
+  GST_DEBUG_OBJECT (object, "finalize");
+
+  g_static_rec_mutex_free (&decoder->stream_lock);
+
+  if (decoder->priv->input_adapter) {
+    g_object_unref (decoder->priv->input_adapter);
+    decoder->priv->input_adapter = NULL;
+  }
+  if (decoder->priv->output_adapter) {
+    g_object_unref (decoder->priv->output_adapter);
+    decoder->priv->output_adapter = NULL;
+  }
+
+  if (decoder->priv->input_state)
+    gst_video_codec_state_unref (decoder->priv->input_state);
+  if (decoder->priv->output_state)
+    gst_video_codec_state_unref (decoder->priv->output_state);
+
+  G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+/* Flush decoder state.  hard == FLUSH (full reset of segments, queues,
+ * error count and pending frame events), otherwise a soft reset for a
+ * discont.  The subclass is informed first via the reset vfunc, then the
+ * baseclass re-initializes itself with a non-full reset. */
+static GstFlowReturn
+gst_video_decoder_flush (GstVideoDecoder * dec, gboolean hard)
+{
+  GstVideoDecoderClass *klass;
+  GstVideoDecoderPrivate *priv = dec->priv;
+  GstFlowReturn ret = GST_FLOW_OK;
+
+  klass = GST_VIDEO_DECODER_GET_CLASS (dec);
+
+  GST_LOG_OBJECT (dec, "flush hard %d", hard);
+
+  /* Inform subclass */
+  if (klass->reset)
+    klass->reset (dec, hard);
+
+  /* FIXME make some more distinction between hard and soft,
+   * but subclass may not be prepared for that */
+  /* FIXME perhaps also clear pending frames ?,
+   * but again, subclass may still come up with one of those */
+  if (!hard) {
+    /* TODO ? finish/drain some stuff */
+  } else {
+    gst_segment_init (&dec->input_segment, GST_FORMAT_UNDEFINED);
+    gst_segment_init (&dec->output_segment, GST_FORMAT_UNDEFINED);
+    gst_video_decoder_clear_queues (dec);
+    priv->error_count = 0;
+    /* events queued for the current frame will never be sent; drop them */
+    g_list_foreach (priv->current_frame_events, (GFunc) gst_event_unref, NULL);
+    g_list_free (priv->current_frame_events);
+    priv->current_frame_events = NULL;
+  }
+  /* and get (re)set for the sequel */
+  gst_video_decoder_reset (dec, FALSE);
+
+  return ret;
+}
+
+/* Default sink-event handling, called when the subclass did not handle
+ * the event itself.  Returns TRUE when the event was fully handled here
+ * (i.e. must not be forwarded by the caller).  Handles EOS (drain parse
+ * loop, call finish vfunc), NEWSEGMENT (byte->time conversion for legacy
+ * seeking, segment bookkeeping) and FLUSH_STOP (hard flush). */
+static gboolean
+gst_video_decoder_sink_eventfunc (GstVideoDecoder * decoder, GstEvent * event)
+{
+  GstVideoDecoderClass *decoder_class;
+  GstVideoDecoderPrivate *priv;
+  gboolean handled = FALSE;
+
+  priv = decoder->priv;
+  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
+
+  switch (GST_EVENT_TYPE (event)) {
+    case GST_EVENT_EOS:
+    {
+      GstFlowReturn flow_ret = GST_FLOW_OK;
+
+      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+      /* for non-packetized input, drain whatever is left in the adapter
+       * through the subclass' parse vfunc (at_eos = TRUE) */
+      if (!priv->packetized)
+        while (flow_ret == GST_FLOW_OK &&
+            gst_adapter_available (priv->input_adapter))
+          flow_ret =
+              decoder_class->parse (decoder, priv->current_frame,
+              priv->input_adapter, TRUE);
+
+      if (decoder_class->finish) {
+        flow_ret = decoder_class->finish (decoder);
+      } else {
+        flow_ret = GST_FLOW_OK;
+      }
+
+      /* _DROPPED from finish() means the subclass sends EOS itself */
+      handled = (flow_ret == GST_VIDEO_DECODER_FLOW_DROPPED);
+      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+      break;
+    }
+    case GST_EVENT_NEWSEGMENT:
+    {
+      gboolean update;
+      double rate, arate;
+      GstFormat format;
+      gint64 start;
+      gint64 stop;
+      gint64 pos;
+      GstSegment *segment = &decoder->input_segment;
+
+      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+      gst_event_parse_new_segment_full (event, &update, &rate,
+          &arate, &format, &start, &stop, &pos);
+
+      if (format == GST_FORMAT_TIME) {
+        GST_DEBUG_OBJECT (decoder,
+            "received TIME NEW_SEGMENT %" GST_TIME_FORMAT
+            " -- %" GST_TIME_FORMAT ", pos %" GST_TIME_FORMAT
+            ", rate %g, applied_rate %g",
+            GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (pos),
+            rate, arate);
+      } else {
+        GstFormat dformat = GST_FORMAT_TIME;
+
+        GST_DEBUG_OBJECT (decoder,
+            "received NEW_SEGMENT %" G_GINT64_FORMAT
+            " -- %" G_GINT64_FORMAT ", time %" G_GINT64_FORMAT
+            ", rate %g, applied_rate %g", start, stop, pos, rate, arate);
+
+        /* handle newsegment as a result from our legacy simple seeking */
+        /* note that initial 0 should convert to 0 in any case */
+        if (priv->do_estimate_rate &&
+            gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES, start,
+                &dformat, &start)) {
+          /* best attempt convert */
+          /* as these are only estimates, stop is kept open-ended to avoid
+           * premature cutting */
+          GST_DEBUG_OBJECT (decoder,
+              "converted to TIME start %" GST_TIME_FORMAT,
+              GST_TIME_ARGS (start));
+          pos = start;
+          stop = GST_CLOCK_TIME_NONE;
+          /* replace event */
+          gst_event_unref (event);
+          event = gst_event_new_new_segment_full (update, rate, arate,
+              GST_FORMAT_TIME, start, stop, pos);
+        } else {
+          GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+          goto newseg_wrong_format;
+        }
+      }
+
+      /* a genuinely new segment implies a discont; soft-flush */
+      if (!update) {
+        gst_video_decoder_flush (decoder, FALSE);
+      }
+
+      priv->timestamp_offset = start;
+
+      gst_segment_set_newsegment_full (segment,
+          update, rate, arate, format, start, stop, pos);
+
+      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+      break;
+    }
+    case GST_EVENT_FLUSH_STOP:
+    {
+      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+      /* well, this is kind of worse than a DISCONT */
+      gst_video_decoder_flush (decoder, TRUE);
+      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+    }
+      /* NOTE(review): intentional-looking fall-through to default (no
+       * break); benign since default only breaks and handled stays FALSE,
+       * so the caller still forwards FLUSH_STOP downstream */
+    default:
+      break;
+  }
+
+  return handled;
+
+newseg_wrong_format:
+  {
+    GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
+    gst_event_unref (event);
+    /* SWALLOW EVENT */
+    /* FIXME : Ideally we'd like to return FALSE in the event handler */
+    return TRUE;
+  }
+}
+
+/* Push @event on the source pad, tracking TIME NEWSEGMENT events in
+ * decoder->output_segment on the way out.  Non-TIME newsegments are not
+ * tracked but are still pushed downstream. */
+static gboolean
+gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
+{
+  switch (GST_EVENT_TYPE (event)) {
+    case GST_EVENT_NEWSEGMENT:
+    {
+      gboolean update;
+      double rate;
+      double applied_rate;
+      GstFormat format;
+      gint64 start;
+      gint64 stop;
+      gint64 position;
+
+      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+      gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
+          &format, &start, &stop, &position);
+
+      GST_DEBUG_OBJECT (decoder, "newseg rate %g, applied rate %g, "
+          "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
+          ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
+          GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
+          GST_TIME_ARGS (position));
+
+      if (format != GST_FORMAT_TIME) {
+        GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
+        GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+        break;
+      }
+
+      gst_segment_set_newsegment_full (&decoder->output_segment, update, rate,
+          applied_rate, format, start, stop, position);
+      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+      break;
+    }
+    default:
+      break;
+  }
+
+  return gst_pad_push_event (decoder->srcpad, event);
+}
+
+/* GstPad sink event function.
+ * The subclass' ::sink_event vfunc gets first shot, then the base class
+ * handler.  Events nobody consumed are forwarded immediately when they are
+ * non-serialized or EOS/FLUSH_STOP, otherwise queued so they are pushed
+ * downstream together with the frame they precede.
+ *
+ * Fix: previously a consumed ("handled") event, as well as an event queued
+ * on current_frame_events, still made this function return FALSE, wrongly
+ * reporting failure to upstream.  Both cases now report TRUE. */
+static gboolean
+gst_video_decoder_sink_event (GstPad * pad, GstEvent * event)
+{
+  GstVideoDecoder *decoder;
+  GstVideoDecoderClass *decoder_class;
+  gboolean ret = FALSE;
+  gboolean handled = FALSE;
+
+  decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+  decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
+
+  GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
+      GST_EVENT_TYPE_NAME (event));
+
+  if (decoder_class->sink_event)
+    handled = decoder_class->sink_event (decoder, event);
+
+  if (!handled)
+    handled = gst_video_decoder_sink_eventfunc (decoder, event);
+
+  if (handled) {
+    /* event was consumed by the subclass or the base handler */
+    ret = TRUE;
+  } else {
+    /* Forward non-serialized events and EOS/FLUSH_STOP immediately.
+     * For EOS this is required because no buffer or serialized event
+     * will come after EOS and nothing could trigger another
+     * _finish_frame() call. *
+     * If the subclass handles sending of EOS manually it can return
+     * _DROPPED from ::finish() and all other subclasses should have
+     * decoded/flushed all remaining data before this
+     *
+     * For FLUSH_STOP this is required because it is expected
+     * to be forwarded immediately and no buffers are queued anyway.
+     */
+    if (!GST_EVENT_IS_SERIALIZED (event)
+        || GST_EVENT_TYPE (event) == GST_EVENT_EOS
+        || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) {
+      ret = gst_video_decoder_push_event (decoder, event);
+    } else {
+      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+      decoder->priv->current_frame_events =
+          g_list_prepend (decoder->priv->current_frame_events, event);
+      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+      /* event accepted; it will be pushed along with the next frame */
+      ret = TRUE;
+    }
+  }
+
+  gst_object_unref (decoder);
+  return ret;
+}
+
+/* perform upstream byte <-> time conversion (duration, seeking)
+ * if subclass allows and if enough data for moderately decent conversion */
+static inline gboolean
+gst_video_decoder_do_byte (GstVideoDecoder * dec)
+{
+  GstVideoDecoderPrivate *priv = dec->priv;
+
+  /* need rate estimation enabled, at least one output byte seen, and more
+   * than a second of stream time observed before the estimate is usable */
+  if (!priv->do_estimate_rate)
+    return FALSE;
+  if (priv->bytes_out <= 0)
+    return FALSE;
+
+  return priv->time > GST_SECOND;
+}
+
+/* Translate a plain (flushing, open-ended, rate 1.0) TIME seek into a BYTE
+ * seek upstream, using the subclass-enabled byte<->time rate estimate.
+ * Returns TRUE if the converted seek was sent and accepted upstream. */
+static gboolean
+gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
+{
+  GstSeekFlags flags;
+  GstSeekType start_type, end_type;
+  GstFormat format;
+  gdouble rate;
+  gint64 start, start_time, end_time;
+  GstSegment seek_segment;
+  guint32 seqnum;
+
+  gst_event_parse_seek (event, &rate, &format, &flags, &start_type,
+      &start_time, &end_type, &end_time);
+
+  /* we'll handle plain open-ended flushing seeks with the simple approach */
+  if (rate != 1.0) {
+    GST_DEBUG_OBJECT (dec, "unsupported seek: rate");
+    return FALSE;
+  }
+
+  if (start_type != GST_SEEK_TYPE_SET) {
+    GST_DEBUG_OBJECT (dec, "unsupported seek: start time");
+    return FALSE;
+  }
+
+  /* Only open-ended seeks are supported; GST_SEEK_TYPE_SET with an invalid
+   * (NONE) end time is open-ended too.
+   * Fix: the former condition ORed the two tests, which made the second
+   * clause unreachable and rejected every seek carrying a set end type,
+   * including open-ended ones. */
+  if (end_type != GST_SEEK_TYPE_NONE &&
+      (end_type != GST_SEEK_TYPE_SET || end_time != GST_CLOCK_TIME_NONE)) {
+    GST_DEBUG_OBJECT (dec, "unsupported seek: end time");
+    return FALSE;
+  }
+
+  if (!(flags & GST_SEEK_FLAG_FLUSH)) {
+    GST_DEBUG_OBJECT (dec, "unsupported seek: not flushing");
+    return FALSE;
+  }
+
+  /* resolve the requested start against the current output segment */
+  memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
+  gst_segment_set_seek (&seek_segment, rate, format, flags, start_type,
+      start_time, end_type, end_time, NULL);
+  start_time = seek_segment.last_stop;
+
+  /* map the target time to a byte offset via the conversion query */
+  format = GST_FORMAT_BYTES;
+  if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
+          &format, &start)) {
+    GST_DEBUG_OBJECT (dec, "conversion failed");
+    return FALSE;
+  }
+
+  seqnum = gst_event_get_seqnum (event);
+  event = gst_event_new_seek (1.0, GST_FORMAT_BYTES, flags,
+      GST_SEEK_TYPE_SET, start, GST_SEEK_TYPE_NONE, -1);
+  gst_event_set_seqnum (event, seqnum);
+
+  GST_DEBUG_OBJECT (dec, "seeking to %" GST_TIME_FORMAT " at byte offset %"
+      G_GINT64_FORMAT, GST_TIME_ARGS (start_time), start);
+
+  return gst_pad_push_event (dec->sinkpad, event);
+}
+
+/* GstPad src event function: handles SEEK and QOS events coming from
+ * downstream; everything else is forwarded upstream unmodified. */
+static gboolean
+gst_video_decoder_src_event (GstPad * pad, GstEvent * event)
+{
+  GstVideoDecoder *decoder;
+  GstVideoDecoderPrivate *priv;
+  gboolean res = FALSE;
+
+  decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+  priv = decoder->priv;
+
+  GST_DEBUG_OBJECT (decoder,
+      "received event %d, %s", GST_EVENT_TYPE (event),
+      GST_EVENT_TYPE_NAME (event));
+
+  switch (GST_EVENT_TYPE (event)) {
+    case GST_EVENT_SEEK:
+    {
+      GstFormat format, tformat;
+      gdouble rate;
+      GstSeekFlags flags;
+      GstSeekType cur_type, stop_type;
+      gint64 cur, stop;
+      gint64 tcur, tstop;
+      guint32 seqnum;
+
+      gst_event_parse_seek (event, &rate, &format, &flags, &cur_type, &cur,
+          &stop_type, &stop);
+      seqnum = gst_event_get_seqnum (event);
+
+      /* upstream gets a chance first.
+       * Fix: gst_pad_push_event() takes ownership of the event even when it
+       * returns FALSE, but the fallback paths below still use it; keep our
+       * own ref across the push. */
+      gst_event_ref (event);
+      if ((res = gst_pad_push_event (decoder->sinkpad, event))) {
+        gst_event_unref (event);
+        break;
+      }
+
+      /* if upstream fails for a time seek, maybe we can help if allowed */
+      if (format == GST_FORMAT_TIME) {
+        if (gst_video_decoder_do_byte (decoder))
+          res = gst_video_decoder_do_seek (decoder, event);
+        gst_event_unref (event);
+        break;
+      }
+
+      /* ... though a non-time seek can be aided as well */
+      /* First bring the requested format to time */
+      tformat = GST_FORMAT_TIME;
+      if (!(res = gst_pad_query_convert (pad, format, cur, &tformat, &tcur))) {
+        gst_event_unref (event);
+        goto convert_error;
+      }
+      if (!(res =
+              gst_pad_query_convert (pad, format, stop, &tformat, &tstop))) {
+        gst_event_unref (event);
+        goto convert_error;
+      }
+      gst_event_unref (event);
+
+      /* then seek with time on the peer */
+      event = gst_event_new_seek (rate, GST_FORMAT_TIME,
+          flags, cur_type, tcur, stop_type, tstop);
+      gst_event_set_seqnum (event, seqnum);
+
+      res = gst_pad_push_event (decoder->sinkpad, event);
+      break;
+    }
+    case GST_EVENT_QOS:
+    {
+      gdouble proportion;
+      GstClockTimeDiff diff;
+      GstClockTime timestamp;
+      GstClockTime duration;
+
+      /* Fix: third out-argument restored to '&timestamp'; the source had
+       * been garbled by an HTML entity ('&times;tamp'). */
+      gst_event_parse_qos (event, &proportion, &diff, &timestamp);
+
+      GST_OBJECT_LOCK (decoder);
+      priv->proportion = proportion;
+      if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (timestamp))) {
+        if (G_UNLIKELY (diff > 0)) {
+          /* we're late: aim one frame duration past timestamp + 2*jitter */
+          if (priv->output_state->info.fps_n > 0)
+            duration =
+                gst_util_uint64_scale (GST_SECOND,
+                priv->output_state->info.fps_d, priv->output_state->info.fps_n);
+          else
+            duration = 0;
+          priv->earliest_time = timestamp + 2 * diff + duration;
+        } else {
+          priv->earliest_time = timestamp + diff;
+        }
+      } else {
+        priv->earliest_time = GST_CLOCK_TIME_NONE;
+      }
+      GST_OBJECT_UNLOCK (decoder);
+
+      GST_DEBUG_OBJECT (decoder,
+          "got QoS %" GST_TIME_FORMAT ", %" G_GINT64_FORMAT ", %g",
+          GST_TIME_ARGS (timestamp), diff, proportion);
+
+      res = gst_pad_push_event (decoder->sinkpad, event);
+      break;
+    }
+    default:
+      res = gst_pad_push_event (decoder->sinkpad, event);
+      break;
+  }
+done:
+  gst_object_unref (decoder);
+  return res;
+
+convert_error:
+  GST_DEBUG_OBJECT (decoder, "could not convert format");
+  goto done;
+}
+
+/* Query types the source pad can answer (@pad itself is unused).
+ * The list is terminated by 0 as the GstPadQueryTypeFunction contract
+ * requires. */
+static const GstQueryType *
+gst_video_decoder_get_query_types (GstPad * pad)
+{
+  static const GstQueryType query_types[] = {
+    GST_QUERY_POSITION,
+    GST_QUERY_DURATION,
+    GST_QUERY_CONVERT,
+    GST_QUERY_LATENCY,
+    0
+  };
+
+  return query_types;
+}
+
+/* GstPad src query function: answers POSITION, DURATION, CONVERT and
+ * LATENCY queries, deferring to upstream where possible and falling back
+ * to local decoder state / byte<->time estimation otherwise. */
+static gboolean
+gst_video_decoder_src_query (GstPad * pad, GstQuery * query)
+{
+  GstVideoDecoder *dec;
+  gboolean res = TRUE;
+
+  dec = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+
+  GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
+
+  switch (GST_QUERY_TYPE (query)) {
+    case GST_QUERY_POSITION:
+    {
+      GstFormat format;
+      gint64 time, value;
+
+      /* upstream gets a chance first */
+      if ((res = gst_pad_peer_query (dec->sinkpad, query))) {
+        GST_LOG_OBJECT (dec, "returning peer response");
+        break;
+      }
+
+      /* we start from the last seen time */
+      time = dec->priv->last_timestamp;
+      /* correct for the segment values */
+      time = gst_segment_to_stream_time (&dec->output_segment,
+          GST_FORMAT_TIME, time);
+
+      GST_LOG_OBJECT (dec,
+          "query %p: our time: %" GST_TIME_FORMAT, query, GST_TIME_ARGS (time));
+
+      /* and convert to the final format */
+      gst_query_parse_position (query, &format, NULL);
+      if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
+                  &format, &value)))
+        break;
+
+      gst_query_set_position (query, format, value);
+
+      GST_LOG_OBJECT (dec,
+          "query %p: we return %" G_GINT64_FORMAT " (format %u)", query, value,
+          format);
+      break;
+    }
+    case GST_QUERY_DURATION:
+    {
+      GstFormat format;
+
+      /* upstream in any case */
+      if ((res = gst_pad_query_default (pad, query)))
+        break;
+
+      gst_query_parse_duration (query, &format, NULL);
+      /* try answering TIME by converting from BYTE if subclass allows */
+      if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
+        gint64 value;
+
+        /* ask upstream for the stream size in bytes, then map to time */
+        format = GST_FORMAT_BYTES;
+        if (gst_pad_query_peer_duration (dec->sinkpad, &format, &value)) {
+          GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
+          format = GST_FORMAT_TIME;
+          if (gst_pad_query_convert (dec->sinkpad,
+                  GST_FORMAT_BYTES, value, &format, &value)) {
+            gst_query_set_duration (query, GST_FORMAT_TIME, value);
+            res = TRUE;
+          }
+        }
+      }
+      break;
+    }
+    case GST_QUERY_CONVERT:
+    {
+      GstFormat src_fmt, dest_fmt;
+      gint64 src_val, dest_val;
+
+      GST_DEBUG_OBJECT (dec, "convert query");
+
+      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
+      /* raw-video conversions are based on the negotiated output state */
+      res = gst_video_rawvideo_convert (dec->priv->output_state,
+          src_fmt, src_val, &dest_fmt, &dest_val);
+      if (!res)
+        goto error;
+      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
+      break;
+    }
+    case GST_QUERY_LATENCY:
+    {
+      gboolean live;
+      GstClockTime min_latency, max_latency;
+
+      res = gst_pad_peer_query (dec->sinkpad, query);
+      if (res) {
+        gst_query_parse_latency (query, &live, &min_latency, &max_latency);
+        GST_DEBUG_OBJECT (dec, "Peer latency: live %d, min %"
+            GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
+            GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
+
+        /* add this decoder's own configured latency on top */
+        GST_OBJECT_LOCK (dec);
+        min_latency += dec->priv->min_latency;
+        if (max_latency != GST_CLOCK_TIME_NONE) {
+          max_latency += dec->priv->max_latency;
+        }
+        GST_OBJECT_UNLOCK (dec);
+
+        gst_query_set_latency (query, live, min_latency, max_latency);
+      }
+    }
+      break;
+    default:
+      res = gst_pad_query_default (pad, query);
+  }
+  gst_object_unref (dec);
+  return res;
+
+error:
+  GST_ERROR_OBJECT (dec, "query failed");
+  gst_object_unref (dec);
+  return res;
+}
+
+/* GstPad sink query function: answers CONVERT queries for the encoded
+ * stream using the accumulated bytes_out/time totals; everything else goes
+ * to the default handler. */
+static gboolean
+gst_video_decoder_sink_query (GstPad * pad, GstQuery * query)
+{
+  GstVideoDecoder *decoder;
+  GstVideoDecoderPrivate *priv;
+  gboolean res = FALSE;
+
+  decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+  priv = decoder->priv;
+
+  GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
+
+  switch (GST_QUERY_TYPE (query)) {
+    case GST_QUERY_CONVERT:
+    {
+      GstFormat src_fmt, dest_fmt;
+      gint64 src_val, dest_val;
+
+      gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
+      res =
+          gst_video_encoded_video_convert (priv->bytes_out, priv->time, src_fmt,
+          src_val, &dest_fmt, &dest_val);
+      if (!res)
+        goto error;
+      gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
+      break;
+    }
+    default:
+      res = gst_pad_query_default (pad, query);
+      break;
+  }
+done:
+  gst_object_unref (decoder);
+
+  return res;
+error:
+  GST_DEBUG_OBJECT (decoder, "query failed");
+  goto done;
+}
+
+typedef struct _Timestamp Timestamp;
+/* One remembered upstream timestamp: records the timestamp/duration a
+ * buffer carried together with the input byte offset it arrived at, so
+ * parsed frames can later be mapped back to upstream timestamps
+ * (see _add_timestamp / _get_timestamp_at_offset). */
+struct _Timestamp
+{
+  guint64 offset;               /* input byte offset the buffer started at */
+  GstClockTime timestamp;       /* upstream buffer timestamp */
+  GstClockTime duration;        /* upstream buffer duration */
+};
+
+/* Remember the timestamp/duration carried by @buffer together with the
+ * current input byte offset; consumed later by
+ * gst_video_decoder_get_timestamp_at_offset(). */
+static void
+gst_video_decoder_add_timestamp (GstVideoDecoder * decoder, GstBuffer * buffer)
+{
+  GstVideoDecoderPrivate *priv = decoder->priv;
+  Timestamp *entry;
+
+  entry = g_malloc (sizeof (Timestamp));
+  entry->offset = priv->input_offset;
+  entry->timestamp = GST_BUFFER_TIMESTAMP (buffer);
+  entry->duration = GST_BUFFER_DURATION (buffer);
+
+  GST_LOG_OBJECT (decoder,
+      "adding timestamp %" GST_TIME_FORMAT " (offset:%" G_GUINT64_FORMAT ")",
+      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buffer)), priv->input_offset);
+
+  priv->timestamps = g_list_append (priv->timestamps, entry);
+}
+
+/* Pop every recorded timestamp entry whose offset is <= @offset, returning
+ * the last (most recent) timestamp/duration found, or GST_CLOCK_TIME_NONE
+ * for both when no entry applies.  Consumed entries are freed. */
+static void
+gst_video_decoder_get_timestamp_at_offset (GstVideoDecoder *
+    decoder, guint64 offset, GstClockTime * timestamp, GstClockTime * duration)
+{
+  Timestamp *ts;
+  GList *g;
+
+  *timestamp = GST_CLOCK_TIME_NONE;
+  *duration = GST_CLOCK_TIME_NONE;
+
+  g = decoder->priv->timestamps;
+  while (g) {
+    GList *next = g->next;
+
+    ts = g->data;
+    if (ts->offset > offset)
+      break;
+
+    *timestamp = ts->timestamp;
+    *duration = ts->duration;
+    g_free (ts);
+    /* Fix: delete the link we already hold instead of g_list_remove(),
+     * which re-scanned the whole list comparing against the just-freed
+     * data pointer. */
+    decoder->priv->timestamps =
+        g_list_delete_link (decoder->priv->timestamps, g);
+    g = next;
+  }
+
+  GST_LOG_OBJECT (decoder,
+      "got timestamp %" GST_TIME_FORMAT " (offset:%" G_GUINT64_FORMAT ")",
+      GST_TIME_ARGS (*timestamp), offset);
+}
+
+/* Free everything on the reverse-playback bookkeeping queues.
+ * queued/gather/parse hold GstBuffers (unref'd as mini objects);
+ * decode/parse_gather/frames hold GstVideoCodecFrames.
+ * Caller (e.g. _reset) holds the stream lock. */
+static void
+gst_video_decoder_clear_queues (GstVideoDecoder * dec)
+{
+  GstVideoDecoderPrivate *priv = dec->priv;
+
+  g_list_foreach (priv->queued, (GFunc) gst_mini_object_unref, NULL);
+  g_list_free (priv->queued);
+  priv->queued = NULL;
+  g_list_foreach (priv->gather, (GFunc) gst_mini_object_unref, NULL);
+  g_list_free (priv->gather);
+  priv->gather = NULL;
+  g_list_foreach (priv->decode, (GFunc) gst_video_codec_frame_unref, NULL);
+  g_list_free (priv->decode);
+  priv->decode = NULL;
+  g_list_foreach (priv->parse, (GFunc) gst_mini_object_unref, NULL);
+  g_list_free (priv->parse);
+  priv->parse = NULL;
+  g_list_foreach (priv->parse_gather, (GFunc) gst_video_codec_frame_unref,
+      NULL);
+  g_list_free (priv->parse_gather);
+  priv->parse_gather = NULL;
+  g_list_foreach (priv->frames, (GFunc) gst_video_codec_frame_unref, NULL);
+  g_list_free (priv->frames);
+  priv->frames = NULL;
+}
+
+/* Reset decoder state.  @full = FALSE resets per-segment state only
+ * (flush); @full = TRUE additionally drops segments, all queued
+ * data/frames, the input/output codec states and the latency. */
+static void
+gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full)
+{
+  GstVideoDecoderPrivate *priv = decoder->priv;
+  GList *g;
+
+  GST_DEBUG_OBJECT (decoder, "reset full %d", full);
+
+  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+  if (full) {
+    gst_segment_init (&decoder->input_segment, GST_FORMAT_UNDEFINED);
+    gst_segment_init (&decoder->output_segment, GST_FORMAT_UNDEFINED);
+    gst_video_decoder_clear_queues (decoder);
+    priv->error_count = 0;
+    priv->max_errors = GST_VIDEO_DECODER_MAX_ERRORS;
+    if (priv->input_state)
+      gst_video_codec_state_unref (priv->input_state);
+    priv->input_state = NULL;
+    if (priv->output_state)
+      gst_video_codec_state_unref (priv->output_state);
+    priv->output_state = NULL;
+    priv->min_latency = 0;
+    priv->max_latency = 0;
+  }
+
+  priv->discont = TRUE;
+
+  priv->timestamp_offset = GST_CLOCK_TIME_NONE;
+  priv->last_timestamp = GST_CLOCK_TIME_NONE;
+
+  priv->input_offset = 0;
+  priv->frame_offset = 0;
+  gst_adapter_clear (priv->input_adapter);
+  gst_adapter_clear (priv->output_adapter);
+  /* drop all remembered upstream timestamps */
+  g_list_foreach (priv->timestamps, (GFunc) g_free, NULL);
+  g_list_free (priv->timestamps);
+  priv->timestamps = NULL;
+
+  if (priv->current_frame) {
+    gst_video_codec_frame_unref (priv->current_frame);
+    priv->current_frame = NULL;
+  }
+
+  priv->dropped = 0;
+  priv->processed = 0;
+
+  priv->decode_frame_number = 0;
+  priv->base_picture_number = 0;
+  /* release pending frames; when full==TRUE this is a no-op because
+   * clear_queues above already emptied priv->frames */
+  for (g = priv->frames; g; g = g->next) {
+    gst_video_codec_frame_unref ((GstVideoCodecFrame *) g->data);
+  }
+  g_list_free (priv->frames);
+  priv->frames = NULL;
+
+  priv->bytes_out = 0;
+  priv->time = 0;
+
+  priv->earliest_time = GST_CLOCK_TIME_NONE;
+  priv->proportion = 0.5;
+
+  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+}
+
+/* Feed one input buffer in forward-playback direction.  Takes ownership
+ * of @buf.  Packetized subclasses get exactly one frame per buffer;
+ * otherwise the data is accumulated in the input adapter and handed to
+ * the subclass ::parse vfunc until it stops consuming. */
+static GstFlowReturn
+gst_video_decoder_chain_forward (GstVideoDecoder * decoder, GstBuffer * buf)
+{
+  GstVideoDecoderPrivate *priv;
+  GstVideoDecoderClass *klass;
+  GstFlowReturn ret = GST_FLOW_OK;
+
+  klass = GST_VIDEO_DECODER_GET_CLASS (decoder);
+  priv = decoder->priv;
+
+  /* non-packetized subclasses must provide ::parse */
+  g_return_val_if_fail (priv->packetized || klass->parse, GST_FLOW_ERROR);
+
+  if (priv->current_frame == NULL) {
+    priv->current_frame = gst_video_decoder_new_frame (decoder);
+  }
+
+  /* remember upstream timestamps keyed by input byte offset */
+  if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
+    gst_video_decoder_add_timestamp (decoder, buf);
+  }
+  priv->input_offset += GST_BUFFER_SIZE (buf);
+
+  if (priv->packetized) {
+    priv->current_frame->input_buffer = buf;
+
+    /* a non-delta buffer is a sync point */
+    if (!GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DELTA_UNIT)) {
+      GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (priv->current_frame);
+    }
+
+    ret = gst_video_decoder_have_frame_2 (decoder);
+  } else {
+
+    gst_adapter_push (priv->input_adapter, buf);
+
+    if (G_UNLIKELY (!gst_adapter_available (priv->input_adapter)))
+      goto beach;
+
+    do {
+      ret =
+          klass->parse (decoder, priv->current_frame, priv->input_adapter,
+          FALSE);
+    } while (ret == GST_FLOW_OK && gst_adapter_available (priv->input_adapter));
+
+  }
+
+beach:
+  /* NEED_DATA just means the parser wants more input; not an error */
+  if (ret == GST_VIDEO_DECODER_FLOW_NEED_DATA)
+    return GST_FLOW_OK;
+
+  return ret;
+}
+
+/* Reverse playback: run the frames gathered on priv->decode through the
+ * subclass decoder; decoded output is prepended to the output queue by
+ * have_frame_2 (the priv->process flag signals that mode).
+ * NOTE(review): walked entries are left on priv->decode here — presumably
+ * the list is released via _flush/clear_queues afterwards; confirm before
+ * relying on that. */
+static GstFlowReturn
+gst_video_decoder_flush_decode (GstVideoDecoder * dec)
+{
+  GstVideoDecoderPrivate *priv = dec->priv;
+  GstFlowReturn res = GST_FLOW_OK;
+  GList *walk;
+
+  walk = priv->decode;
+
+  GST_DEBUG_OBJECT (dec, "flushing buffers to decode");
+
+  /* clear buffer and decoder state */
+  gst_video_decoder_flush (dec, FALSE);
+
+  /* signal have_frame it should not capture frames */
+  priv->process = TRUE;
+
+  while (walk) {
+    GList *next;
+    GstVideoCodecFrame *frame = (GstVideoCodecFrame *) (walk->data);
+    GstBuffer *buf = frame->input_buffer;
+
+    GST_DEBUG_OBJECT (dec, "decoding frame %p, ts %" GST_TIME_FORMAT,
+        buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
+
+    next = walk->next;
+    /* make the walked frame the current one, dropping the previous ref */
+    if (priv->current_frame)
+      gst_video_codec_frame_unref (priv->current_frame);
+    priv->current_frame = frame;
+    gst_video_codec_frame_ref (priv->current_frame);
+
+    /* decode buffer, resulting data prepended to queue */
+    res = gst_video_decoder_have_frame_2 (dec);
+
+    walk = next;
+  }
+
+  priv->process = FALSE;
+
+  return res;
+}
+
+/* Reverse playback: feed the buffers gathered on priv->parse through the
+ * forward chain (parsing them into frames on parse_gather), move the
+ * parsed frames to the decode queue, decode each keyframe-led group, and
+ * finally push the decoded output buffers downstream. */
+static GstFlowReturn
+gst_video_decoder_flush_parse (GstVideoDecoder * dec)
+{
+  GstVideoDecoderPrivate *priv = dec->priv;
+  GstFlowReturn res = GST_FLOW_OK;
+  GList *walk;
+
+  walk = priv->parse;
+
+  GST_DEBUG_OBJECT (dec, "flushing buffers to parsing");
+
+  /* clear buffer and decoder state */
+  gst_video_decoder_flush (dec, FALSE);
+
+  while (walk) {
+    GList *next;
+    GstBuffer *buf = GST_BUFFER_CAST (walk->data);
+
+    GST_DEBUG_OBJECT (dec, "parsing buffer %p, ts %" GST_TIME_FORMAT,
+        buf, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
+
+    next = walk->next;
+    /* parse buffer, resulting frames prepended to parse_gather queue */
+    gst_buffer_ref (buf);
+    res = gst_video_decoder_chain_forward (dec, buf);
+
+    /* if we generated output, we can discard the buffer, else we
+     * keep it in the queue */
+    if (priv->parse_gather) {
+      GST_DEBUG_OBJECT (dec, "parsed buffer to %p", priv->parse_gather->data);
+      priv->parse = g_list_delete_link (priv->parse, walk);
+      gst_buffer_unref (buf);
+    } else {
+      GST_DEBUG_OBJECT (dec, "buffer did not decode, keeping");
+    }
+    walk = next;
+  }
+
+  /* now we can process frames */
+  GST_DEBUG_OBJECT (dec, "checking frames");
+  while (priv->parse_gather) {
+    GstVideoCodecFrame *frame;
+
+    frame = (GstVideoCodecFrame *) (priv->parse_gather->data);
+    /* remove from the gather list */
+    priv->parse_gather =
+        g_list_delete_link (priv->parse_gather, priv->parse_gather);
+    /* copy to decode queue */
+    priv->decode = g_list_prepend (priv->decode, frame);
+
+    /* if we copied a keyframe, flush and decode the decode queue */
+    if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
+      GST_DEBUG_OBJECT (dec, "copied keyframe");
+      res = gst_video_decoder_flush_decode (dec);
+    }
+  }
+
+  /* now send queued data downstream */
+  while (priv->queued) {
+    GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data);
+
+    if (G_LIKELY (res == GST_FLOW_OK)) {
+      GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %u, "
+          "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
+          GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+          GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
+      /* should be already, but let's be sure */
+      buf = gst_buffer_make_metadata_writable (buf);
+      /* avoid stray DISCONT from forward processing,
+       * which have no meaning in reverse pushing */
+      GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
+      res = gst_pad_push (dec->srcpad, buf);
+    } else {
+      /* a previous push already failed; just drop the remaining buffers */
+      gst_buffer_unref (buf);
+    }
+
+    priv->queued = g_list_delete_link (priv->queued, priv->queued);
+  }
+
+  return res;
+}
+
+/* Reverse playback gathering: buffers accumulate on priv->gather until a
+ * DISCONT (or a NULL @buf, used as end-of-fragment) marks a fragment
+ * boundary; the gathered buffers are then moved to the parse queue and
+ * flushed through parse/decode.  Takes ownership of @buf. */
+static GstFlowReturn
+gst_video_decoder_chain_reverse (GstVideoDecoder * dec, GstBuffer * buf)
+{
+  GstVideoDecoderPrivate *priv = dec->priv;
+  GstFlowReturn result = GST_FLOW_OK;
+
+  /* if we have a discont, move buffers to the decode list */
+  if (!buf || GST_BUFFER_IS_DISCONT (buf)) {
+    GST_DEBUG_OBJECT (dec, "received discont");
+    while (priv->gather) {
+      GstBuffer *gbuf;
+
+      gbuf = GST_BUFFER_CAST (priv->gather->data);
+      /* remove from the gather list */
+      priv->gather = g_list_delete_link (priv->gather, priv->gather);
+      /* copy to parse queue */
+      priv->parse = g_list_prepend (priv->parse, gbuf);
+    }
+    /* parse and decode stuff in the parse queue */
+    gst_video_decoder_flush_parse (dec);
+  }
+
+  if (G_LIKELY (buf)) {
+    GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %u, "
+        "time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
+        GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
+
+    /* add buffer to gather queue */
+    priv->gather = g_list_prepend (priv->gather, buf);
+  }
+
+  return result;
+}
+
+/* GstPad chain function: synthesizes a default TIME newsegment if none was
+ * received yet, then dispatches the buffer to the forward or reverse
+ * (rate < 0) processing path under the stream lock. */
+static GstFlowReturn
+gst_video_decoder_chain (GstPad * pad, GstBuffer * buf)
+{
+  GstVideoDecoder *decoder;
+  GstVideoDecoderPrivate *priv;
+  GstFlowReturn ret = GST_FLOW_OK;
+
+  decoder = GST_VIDEO_DECODER (GST_PAD_PARENT (pad));
+  priv = decoder->priv;
+
+  GST_LOG_OBJECT (decoder,
+      "chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT " size %d",
+      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_BUFFER_SIZE (buf));
+
+  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+  /* NOTE:
+   * requiring the pad to be negotiated makes it impossible to use
+   * oggdemux or filesrc ! decoder */
+
+  if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
+    GstEvent *event;
+
+    GST_WARNING_OBJECT (decoder,
+        "Received buffer without a new-segment. "
+        "Assuming timestamps start from 0.");
+
+    gst_segment_set_newsegment_full (&decoder->input_segment, FALSE, 1.0, 1.0,
+        GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0);
+
+    /* queue the synthesized segment so it is pushed before the first frame */
+    event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0,
+        GST_CLOCK_TIME_NONE, 0);
+
+    decoder->priv->current_frame_events =
+        g_list_prepend (decoder->priv->current_frame_events, event);
+  }
+
+  if (G_UNLIKELY (GST_BUFFER_IS_DISCONT (buf))) {
+    gint64 ts;
+
+    GST_DEBUG_OBJECT (decoder, "received DISCONT buffer");
+
+    /* track present position */
+    ts = priv->timestamp_offset;
+
+    /* buffer may claim DISCONT loudly, if it can't tell us where we are now,
+     * we'll stick to where we were ...
+     * Particularly useful/needed for upstream BYTE based */
+    /* NOTE(review): as written the save/restore is a no-op — nothing between
+     * the read and the write below modifies timestamp_offset; presumably a
+     * flush/reset call belongs in between.  Confirm against upstream. */
+    if (decoder->input_segment.rate > 0.0
+        && !GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
+      GST_DEBUG_OBJECT (decoder, "... but restoring previous ts tracking");
+      priv->timestamp_offset = ts;
+    }
+  }
+
+  if (decoder->input_segment.rate > 0.0)
+    ret = gst_video_decoder_chain_forward (decoder, buf);
+  else
+    ret = gst_video_decoder_chain_reverse (decoder, buf);
+
+  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+  return ret;
+}
+
+/* GstElement state-change handler: maps NULL<->READY to the subclass
+ * ::open/::close vfuncs, READY<->PAUSED to ::start/::stop, and performs a
+ * full state reset when going back down to READY. */
+static GstStateChangeReturn
+gst_video_decoder_change_state (GstElement * element, GstStateChange transition)
+{
+  GstVideoDecoder *decoder;
+  GstVideoDecoderClass *decoder_class;
+  GstStateChangeReturn ret;
+
+  decoder = GST_VIDEO_DECODER (element);
+  decoder_class = GST_VIDEO_DECODER_GET_CLASS (element);
+
+  switch (transition) {
+    case GST_STATE_CHANGE_NULL_TO_READY:
+      /* open device/library if needed */
+      if (decoder_class->open && !decoder_class->open (decoder))
+        goto open_failed;
+      break;
+    case GST_STATE_CHANGE_READY_TO_PAUSED:
+      /* Initialize device/library if needed */
+      if (decoder_class->start && !decoder_class->start (decoder))
+        goto start_failed;
+      break;
+    default:
+      break;
+  }
+
+  ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+
+  switch (transition) {
+    case GST_STATE_CHANGE_PAUSED_TO_READY:
+      if (decoder_class->stop && !decoder_class->stop (decoder))
+        goto stop_failed;
+
+      /* drop all decoder state, including events queued for the next frame */
+      GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+      gst_video_decoder_reset (decoder, TRUE);
+      g_list_foreach (decoder->priv->current_frame_events,
+          (GFunc) gst_event_unref, NULL);
+      g_list_free (decoder->priv->current_frame_events);
+      decoder->priv->current_frame_events = NULL;
+      GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+      break;
+    case GST_STATE_CHANGE_READY_TO_NULL:
+      /* close device/library if needed */
+      if (decoder_class->close && !decoder_class->close (decoder))
+        goto close_failed;
+      break;
+    default:
+      break;
+  }
+
+  return ret;
+
+  /* Errors */
+open_failed:
+  {
+    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
+        ("Failed to open decoder"));
+    return GST_STATE_CHANGE_FAILURE;
+  }
+
+start_failed:
+  {
+    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
+        ("Failed to start decoder"));
+    return GST_STATE_CHANGE_FAILURE;
+  }
+
+stop_failed:
+  {
+    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
+        ("Failed to stop decoder"));
+    return GST_STATE_CHANGE_FAILURE;
+  }
+
+close_failed:
+  {
+    GST_ELEMENT_ERROR (decoder, LIBRARY, INIT, (NULL),
+        ("Failed to close decoder"));
+    return GST_STATE_CHANGE_FAILURE;
+  }
+}
+
+/* Allocate a new GstVideoCodecFrame, assign it the next system/decode
+ * frame numbers and attach the events gathered since the previous frame.
+ *
+ * Fix: the stream lock was released twice (once mid-function, once at the
+ * end) while priv->current_frame_events was read and cleared outside the
+ * lock; the critical section now covers all shared priv state and is
+ * released exactly once. */
+static GstVideoCodecFrame *
+gst_video_decoder_new_frame (GstVideoDecoder * decoder)
+{
+  GstVideoDecoderPrivate *priv = decoder->priv;
+  GstVideoCodecFrame *frame;
+
+  frame = g_slice_new0 (GstVideoCodecFrame);
+
+  frame->ref_count = 1;
+
+  GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+  frame->system_frame_number = priv->system_frame_number;
+  priv->system_frame_number++;
+  frame->decode_frame_number = priv->decode_frame_number;
+  priv->decode_frame_number++;
+
+  frame->dts = GST_CLOCK_TIME_NONE;
+  frame->pts = GST_CLOCK_TIME_NONE;
+  frame->duration = GST_CLOCK_TIME_NONE;
+  /* the new frame takes over the events queued since the previous frame */
+  frame->events = priv->current_frame_events;
+  priv->current_frame_events = NULL;
+  GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+  GST_LOG_OBJECT (decoder, "Created new frame %p (sfn:%d)",
+      frame, frame->system_frame_number);
+
+  return frame;
+}
+
+/* Common bookkeeping before a frame leaves the decoder (finish or drop):
+ * pushes the events queued ahead of @frame in decoding order, then
+ * derives/validates the frame's pts/duration and updates the decoder's
+ * timestamp tracking.  Decode-only frames (e.g. VP8 altref) skip all
+ * timestamp handling. */
+static void
+gst_video_decoder_prepare_finish_frame (GstVideoDecoder *
+    decoder, GstVideoCodecFrame * frame)
+{
+  GstVideoDecoderPrivate *priv = decoder->priv;
+  GList *l, *events = NULL;
+
+#ifndef GST_DISABLE_GST_DEBUG
+  GST_LOG_OBJECT (decoder, "n %d in %d out %d",
+      g_list_length (priv->frames),
+      gst_adapter_available (priv->input_adapter),
+      gst_adapter_available (priv->output_adapter));
+#endif
+
+  GST_LOG_OBJECT (decoder,
+      "finish frame sync=%d pts=%" GST_TIME_FORMAT,
+      GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame), GST_TIME_ARGS (frame->pts));
+
+  /* Push all pending events that arrived before this frame */
+  /* NOTE(review): each iteration overwrites 'events', so only the last
+   * non-empty event list up to @frame is pushed — presumably frames are
+   * finished in order so earlier lists are already empty; confirm. */
+  for (l = priv->frames; l; l = l->next) {
+    GstVideoCodecFrame *tmp = l->data;
+
+    if (tmp->events) {
+      events = tmp->events;
+      tmp->events = NULL;
+    }
+
+    if (tmp == frame)
+      break;
+  }
+
+  /* the list was built by prepending, so walk it backwards to preserve
+   * original event order */
+  for (l = g_list_last (events); l; l = l->prev) {
+    GST_LOG_OBJECT (decoder, "pushing %s event", GST_EVENT_TYPE_NAME (l->data));
+    gst_video_decoder_push_event (decoder, l->data);
+  }
+  g_list_free (events);
+
+  /* Check if the data should not be displayed. For example altref/invisible
+   * frame in vp8. In this case we should not update the timestamps. */
+  if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
+    return;
+
+  /* If the frame is meant to be outputted but we don't have an output buffer
+   * we have a problem :) */
+  if (G_UNLIKELY (frame->output_buffer == NULL))
+    goto no_output_buffer;
+
+  if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
+    if (frame->pts != priv->timestamp_offset) {
+      GST_DEBUG_OBJECT (decoder,
+          "sync timestamp %" GST_TIME_FORMAT " diff %" GST_TIME_FORMAT,
+          GST_TIME_ARGS (frame->pts),
+          GST_TIME_ARGS (frame->pts - decoder->output_segment.start));
+      priv->timestamp_offset = frame->pts;
+    } else {
+      /* This case is for one initial timestamp and no others, e.g.,
+       * filesrc ! decoder ! xvimagesink */
+      GST_WARNING_OBJECT (decoder, "sync timestamp didn't change, ignoring");
+      frame->pts = GST_CLOCK_TIME_NONE;
+    }
+  } else {
+    if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
+      GST_WARNING_OBJECT (decoder, "sync point doesn't have timestamp");
+      if (!GST_CLOCK_TIME_IS_VALID (priv->timestamp_offset)) {
+        GST_WARNING_OBJECT (decoder,
+            "No base timestamp. Assuming frames start at segment start");
+        priv->timestamp_offset = decoder->output_segment.start;
+      }
+    }
+  }
+  /* no usable pts: interpolate one from the frame number */
+  if (frame->pts == GST_CLOCK_TIME_NONE) {
+    frame->pts =
+        gst_video_decoder_get_timestamp (decoder, frame->decode_frame_number);
+    frame->duration = GST_CLOCK_TIME_NONE;
+  }
+  if (frame->duration == GST_CLOCK_TIME_NONE) {
+    frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
+  }
+
+  /* timestamps going backwards is only reported, not corrected */
+  if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp)) {
+    if (frame->pts < priv->last_timestamp) {
+      GST_WARNING_OBJECT (decoder,
+          "decreasing timestamp (%" GST_TIME_FORMAT " < %"
+          GST_TIME_FORMAT ")",
+          GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp));
+    }
+  }
+  priv->last_timestamp = frame->pts;
+
+  return;
+
+  /* ERRORS */
+no_output_buffer:
+  {
+    GST_ERROR_OBJECT (decoder, "No buffer to output !");
+  }
+}
+
+/* Remove @frame from the pending-frames list and drop the ref the list
+ * held on it; called once a frame has been finished or dropped. */
+static void
+gst_video_decoder_do_finish_frame (GstVideoDecoder * dec,
+    GstVideoCodecFrame * frame)
+{
+  GstVideoDecoderPrivate *priv = dec->priv;
+
+  priv->frames = g_list_remove (priv->frames, frame);
+  gst_video_codec_frame_unref (frame);
+}
+
+/**
+ * gst_video_decoder_drop_frame:
+ * @dec: a #GstVideoDecoder
+ * @frame: (transfer full): the #GstVideoCodecFrame to drop
+ *
+ * Similar to gst_video_decoder_finish_frame(), but drops @frame in any
+ * case and posts a QoS message with the frame's details on the bus.
+ * In any case, the frame is considered finished and released.
+ *
+ * Returns: a #GstFlowReturn, usually GST_FLOW_OK.
+ *
+ * Since: 0.10.36
+ */
+GstFlowReturn
+gst_video_decoder_drop_frame (GstVideoDecoder * dec, GstVideoCodecFrame * frame)
+{
+  GstClockTime stream_time, jitter, earliest_time, qostime, timestamp;
+  GstSegment *segment;
+  GstMessage *qos_msg;
+  gdouble proportion;
+
+  GST_LOG_OBJECT (dec, "drop frame");
+
+  GST_VIDEO_DECODER_STREAM_LOCK (dec);
+
+  /* push pending events and settle the frame's timestamps first */
+  gst_video_decoder_prepare_finish_frame (dec, frame);
+
+  GST_DEBUG_OBJECT (dec, "dropping frame %" GST_TIME_FORMAT,
+      GST_TIME_ARGS (frame->pts));
+
+  dec->priv->dropped++;
+
+  /* post QoS message */
+  timestamp = frame->pts;
+  proportion = dec->priv->proportion;
+  segment = &dec->output_segment;
+  stream_time =
+      gst_segment_to_stream_time (segment, GST_FORMAT_TIME, timestamp);
+  qostime = gst_segment_to_running_time (segment, GST_FORMAT_TIME, timestamp);
+  earliest_time = dec->priv->earliest_time;
+  /* jitter = how far behind the QoS deadline this frame was */
+  jitter = GST_CLOCK_DIFF (qostime, earliest_time);
+  qos_msg =
+      gst_message_new_qos (GST_OBJECT_CAST (dec), FALSE, qostime, stream_time,
+      timestamp, GST_CLOCK_TIME_NONE);
+  gst_message_set_qos_values (qos_msg, jitter, proportion, 1000000);
+  gst_message_set_qos_stats (qos_msg, GST_FORMAT_BUFFERS,
+      dec->priv->processed, dec->priv->dropped);
+  gst_element_post_message (GST_ELEMENT_CAST (dec), qos_msg);
+
+  /* now free the frame */
+  gst_video_decoder_do_finish_frame (dec, frame);
+
+  GST_VIDEO_DECODER_STREAM_UNLOCK (dec);
+
+  return GST_FLOW_OK;
+}
+
+/**
+ * gst_video_decoder_finish_frame:
+ * @decoder: a #GstVideoDecoder
+ * @frame: (transfer full): a decoded #GstVideoCodecFrame
+ *
+ * @frame should have a valid decoded data buffer, whose metadata fields
+ * are then appropriately set according to frame data and pushed downstream.
+ * If no output data is provided, @frame is considered skipped.
+ * In any case, the frame is considered finished and released.
+ *
+ * Returns: a #GstFlowReturn resulting from sending data downstream
+ *
+ * Since: 0.10.36
+ */
+GstFlowReturn
+gst_video_decoder_finish_frame (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame)
+{
+ GstVideoDecoderPrivate *priv = decoder->priv;
+ GstVideoCodecState *state = priv->output_state;
+ GstBuffer *output_buffer;
+ GstFlowReturn ret = GST_FLOW_OK;
+ gint64 start, stop;
+ GstSegment *segment;
+
+ GST_LOG_OBJECT (decoder, "finish frame");
+
+ /* renegotiate downstream caps first so the buffer we push reflects the
+ * latest output state (the stream lock is recursive, so the extra lock
+ * taken inside set_src_caps is harmless) */
+ if (G_UNLIKELY (priv->output_state_changed))
+ gst_video_decoder_set_src_caps (decoder);
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+ gst_video_decoder_prepare_finish_frame (decoder, frame);
+ priv->processed++;
+ /* no buffer data means this frame is skipped */
+ if (!frame->output_buffer || GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame)) {
+ GST_DEBUG_OBJECT (decoder, "skipping frame %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (frame->pts));
+ goto done;
+ }
+
+ /* take ownership of the buffer out of the frame; metadata must be
+ * writable since we are about to touch flags and timestamps */
+ output_buffer = gst_buffer_make_metadata_writable (frame->output_buffer);
+ frame->output_buffer = NULL;
+
+ /* mirror the frame's interlacing field flags onto the outgoing buffer */
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
+ if (GST_VIDEO_INFO_IS_INTERLACED (&state->info)) {
+ if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
+ GST_VIDEO_CODEC_FRAME_FLAG_TFF)) {
+ GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_TFF);
+ } else {
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_TFF);
+ }
+ if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
+ GST_VIDEO_CODEC_FRAME_FLAG_RFF)) {
+ GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_RFF);
+ } else {
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_RFF);
+ }
+ if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
+ GST_VIDEO_CODEC_FRAME_FLAG_ONEFIELD)) {
+ GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_ONEFIELD);
+ } else {
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_ONEFIELD);
+ }
+ }
+
+ /* first buffer after a discontinuity carries the DISCONT flag */
+ if (priv->discont) {
+ GST_BUFFER_FLAG_SET (output_buffer, GST_BUFFER_FLAG_DISCONT);
+ priv->discont = FALSE;
+ }
+
+ /* Check for clipping */
+ /* NOTE(review): stop is computed even when frame->duration is
+ * GST_CLOCK_TIME_NONE (all-ones), which wraps the addition — confirm
+ * gst_segment_clip copes with that before relying on it */
+ start = frame->pts;
+ stop = frame->pts + frame->duration;
+
+ segment = &decoder->output_segment;
+ if (gst_segment_clip (segment, GST_FORMAT_TIME, start, stop, &start, &stop)) {
+ GST_BUFFER_TIMESTAMP (output_buffer) = start;
+ GST_BUFFER_DURATION (output_buffer) = stop - start;
+ GST_LOG_OBJECT (decoder,
+ "accepting buffer inside segment: %" GST_TIME_FORMAT " %"
+ GST_TIME_FORMAT " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
+ " time %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (output_buffer)),
+ GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (output_buffer) +
+ GST_BUFFER_DURATION (output_buffer)),
+ GST_TIME_ARGS (segment->start), GST_TIME_ARGS (segment->stop),
+ GST_TIME_ARGS (segment->time));
+ } else {
+ /* entirely outside the configured segment: drop silently, this is
+ * not an error condition */
+ GST_LOG_OBJECT (decoder,
+ "dropping buffer outside segment: %" GST_TIME_FORMAT
+ " %" GST_TIME_FORMAT
+ " seg %" GST_TIME_FORMAT " to %" GST_TIME_FORMAT
+ " time %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (frame->pts),
+ GST_TIME_ARGS (frame->pts + frame->duration),
+ GST_TIME_ARGS (segment->start),
+ GST_TIME_ARGS (segment->stop), GST_TIME_ARGS (segment->time));
+ gst_buffer_unref (output_buffer);
+ ret = GST_FLOW_OK;
+ goto done;
+ }
+
+ GST_BUFFER_OFFSET (output_buffer) = GST_BUFFER_OFFSET_NONE;
+ GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
+
+ /* update rate estimate */
+ priv->bytes_out += GST_BUFFER_SIZE (output_buffer);
+ if (GST_CLOCK_TIME_IS_VALID (frame->duration)) {
+ priv->time += frame->duration;
+ } else {
+ /* FIXME : Use difference between current and previous outgoing
+ * timestamp, and relate to difference between current and previous
+ * bytes */
+ /* better none than nothing valid */
+ priv->time = GST_CLOCK_TIME_NONE;
+ }
+
+ gst_buffer_set_caps (output_buffer, GST_PAD_CAPS (decoder->srcpad));
+
+ GST_LOG_OBJECT (decoder, "pushing frame ts %" GST_TIME_FORMAT
+ ", duration %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (output_buffer)),
+ GST_TIME_ARGS (GST_BUFFER_DURATION (output_buffer)));
+
+
+
+ /* we got data, so note things are looking up again */
+ /* FIXME : Shouldn't we avoid going under zero ? */
+ if (G_UNLIKELY (priv->error_count))
+ priv->error_count--;
+ if (decoder->output_segment.rate < 0.0) {
+ /* reverse playback: buffers are gathered and pushed in reverse order
+ * later, so queue instead of pushing now */
+ GST_LOG_OBJECT (decoder, "queued buffer");
+ priv->queued = g_list_prepend (priv->queued, output_buffer);
+ } else {
+ ret = gst_pad_push (decoder->srcpad, output_buffer);
+ }
+
+done:
+
+ /* releases the frame (the (transfer full) reference from the caller) */
+ gst_video_decoder_do_finish_frame (decoder, frame);
+
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return ret;
+}
+
+/**
+ * gst_video_decoder_add_to_frame:
+ * @decoder: a #GstVideoDecoder
+ * @n_bytes: the number of input bytes to add to the current frame
+ *
+ * Removes next @n_bytes of input data and adds it to currently parsed frame.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_decoder_add_to_frame (GstVideoDecoder * decoder, int n_bytes)
+{
+ GstVideoDecoderPrivate *priv = decoder->priv;
+ GstBuffer *buf;
+
+ GST_LOG_OBJECT (decoder, "add %d bytes to frame", n_bytes);
+
+ if (n_bytes == 0)
+ return;
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+ if (gst_adapter_available (priv->output_adapter) == 0) {
+ /* first data for the frame being assembled: record where in the input
+ * stream this frame starts (total bytes received minus what is still
+ * sitting unconsumed in the input adapter) */
+ priv->frame_offset =
+ priv->input_offset - gst_adapter_available (priv->input_adapter);
+ }
+ /* move the next n_bytes from the input adapter into the frame */
+ buf = gst_adapter_take_buffer (priv->input_adapter, n_bytes);
+
+ gst_adapter_push (priv->output_adapter, buf);
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+}
+
+/* Extrapolates the timestamp of @picture_number from the anchor point
+ * (priv->timestamp_offset at priv->base_picture_number) using the
+ * configured output framerate. Pictures before the anchor yield earlier
+ * timestamps. Returns -1 (GST_CLOCK_TIME_NONE as a guint64) when no
+ * framerate is known. */
+static guint64
+gst_video_decoder_get_timestamp (GstVideoDecoder * decoder, int picture_number)
+{
+ GstVideoDecoderPrivate *priv = decoder->priv;
+ GstVideoCodecState *state = priv->output_state;
+
+ if (state->info.fps_d == 0 || state->info.fps_n == 0) {
+ return -1;
+ }
+ if (picture_number < priv->base_picture_number) {
+ /* anchor - (pictures_before * frame_duration) */
+ return priv->timestamp_offset -
+ (gint64) gst_util_uint64_scale (priv->base_picture_number
+ - picture_number, state->info.fps_d * GST_SECOND, state->info.fps_n);
+ } else {
+ /* anchor + (pictures_after * frame_duration) */
+ return priv->timestamp_offset +
+ gst_util_uint64_scale (picture_number -
+ priv->base_picture_number, state->info.fps_d * GST_SECOND,
+ state->info.fps_n);
+ }
+}
+
+/* Returns the duration of @frame derived from the output framerate and
+ * the frame's field flags: a normal frame spans 2 fields, ONEFIELD spans
+ * 1 and repeat-first-field (RFF) spans 3, so the duration is
+ * (fields / 2) frame periods. Returns GST_CLOCK_TIME_NONE when no
+ * framerate is configured. */
+static guint64
+gst_video_decoder_get_frame_duration (GstVideoDecoder * decoder,
+ GstVideoCodecFrame * frame)
+{
+ GstVideoCodecState *state = decoder->priv->output_state;
+ gint fields;
+
+ if (state->info.fps_d == 0 || state->info.fps_n == 0) {
+ return GST_CLOCK_TIME_NONE;
+ }
+
+ if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame, GST_VIDEO_CODEC_FRAME_FLAG_RFF))
+ fields = 3;
+ else if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
+ GST_VIDEO_CODEC_FRAME_FLAG_ONEFIELD))
+ fields = 1;
+ else
+ fields = 2;
+
+ /* fields are half a frame period each; the previous formula omitted the
+ * factor of 2 and reported double the real duration for every frame */
+ return gst_util_uint64_scale (fields * GST_SECOND, state->info.fps_d,
+ 2 * state->info.fps_n);
+}
+
+/**
+ * gst_video_decoder_have_frame:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Gathers all data collected for currently parsed frame, gathers corresponding
+ * metadata and passes it along for further processing, i.e. @handle_frame.
+ *
+ * Returns: a #GstFlowReturn
+ *
+ * Since: 0.10.36
+ */
+GstFlowReturn
+gst_video_decoder_have_frame (GstVideoDecoder * decoder)
+{
+ GstBuffer *buffer;
+ int n_available;
+ GstClockTime timestamp;
+ GstClockTime duration;
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ GST_LOG_OBJECT (decoder, "have_frame");
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+ n_available = gst_adapter_available (decoder->priv->output_adapter);
+ if (n_available) {
+ buffer =
+ gst_adapter_take_buffer (decoder->priv->output_adapter, n_available);
+ } else {
+ /* no data was gathered for this frame; hand over an empty buffer */
+ buffer = gst_buffer_new_and_alloc (0);
+ }
+
+ decoder->priv->current_frame->input_buffer = buffer;
+
+ /* look up the timestamp/duration recorded for the input offset at which
+ * this frame started (was "×tamp": mangled "&timestamp" HTML entity,
+ * which did not compile) */
+ gst_video_decoder_get_timestamp_at_offset (decoder,
+ decoder->priv->frame_offset, &timestamp, &duration);
+
+ GST_BUFFER_TIMESTAMP (buffer) = timestamp;
+ GST_BUFFER_DURATION (buffer) = duration;
+
+ GST_LOG_OBJECT (decoder, "collected frame size %d, "
+ "ts %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT,
+ n_available, GST_TIME_ARGS (timestamp), GST_TIME_ARGS (duration));
+
+ ret = gst_video_decoder_have_frame_2 (decoder);
+
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return ret;
+}
+
+/* Second half of have_frame: dispatches priv->current_frame either to the
+ * subclass handle_frame vfunc, or — during reverse playback while not yet
+ * processing — onto the parse_gather list for later replay. In both cases
+ * ownership of the frame is transferred and a fresh current_frame is
+ * created. Must be called with the stream lock held. */
+static GstFlowReturn
+gst_video_decoder_have_frame_2 (GstVideoDecoder * decoder)
+{
+ GstVideoDecoderPrivate *priv = decoder->priv;
+ GstVideoCodecFrame *frame = priv->current_frame;
+ GstVideoDecoderClass *decoder_class;
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
+
+ /* FIXME : This should only have to be checked once (either the subclass has an
+ * implementation, or it doesn't) */
+ g_return_val_if_fail (decoder_class->handle_frame != NULL, GST_FLOW_ERROR);
+
+ /* capture frames and queue for later processing */
+ if (decoder->output_segment.rate < 0.0 && !priv->process) {
+ priv->parse_gather = g_list_prepend (priv->parse_gather, frame);
+ goto exit;
+ }
+
+ /* distance_from_sync counts frames since the last sync point */
+ frame->distance_from_sync = priv->distance_from_sync;
+ priv->distance_from_sync++;
+ frame->pts = GST_BUFFER_TIMESTAMP (frame->input_buffer);
+ frame->duration = GST_BUFFER_DURATION (frame->input_buffer);
+
+ /* For keyframes, DTS = PTS */
+ if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame))
+ frame->dts = frame->pts;
+
+ GST_LOG_OBJECT (decoder, "pts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->pts));
+ GST_LOG_OBJECT (decoder, "dts %" GST_TIME_FORMAT, GST_TIME_ARGS (frame->dts));
+ GST_LOG_OBJECT (decoder, "dist %d", frame->distance_from_sync);
+ priv->frames = g_list_append (priv->frames, frame);
+ /* deadline in running time is used later for QoS decisions */
+ frame->deadline =
+ gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
+ frame->pts);
+
+ /* do something with frame */
+ ret = decoder_class->handle_frame (decoder, frame);
+ if (ret != GST_FLOW_OK)
+ GST_DEBUG_OBJECT (decoder, "flow error %s", gst_flow_get_name (ret));
+
+exit:
+ /* current frame has either been added to parse_gather or sent to
+ handle frame so there is no need to unref it */
+
+ /* create new frame */
+ priv->current_frame = gst_video_decoder_new_frame (decoder);
+ return ret;
+}
+
+
+/**
+ * gst_video_decoder_get_output_state:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Get the #GstVideoCodecState currently describing the output stream.
+ *
+ * Returns: (transfer full): #GstVideoCodecState describing format of video data.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecState *
+gst_video_decoder_get_output_state (GstVideoDecoder * decoder)
+{
+ GstVideoCodecState *state = NULL;
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+ if (decoder->priv->output_state)
+ state = gst_video_codec_state_ref (decoder->priv->output_state);
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return state;
+}
+
+/**
+ * gst_video_decoder_set_output_state:
+ * @decoder: a #GstVideoDecoder
+ * @fmt: a #GstVideoFormat
+ * @width: The width in pixels
+ * @height: The height in pixels
+ * @reference: (allow-none) (transfer none): An optional reference #GstVideoCodecState
+ *
+ * Creates a new #GstVideoCodecState with the specified @fmt, @width and @height
+ * as the output state for the decoder.
+ * Any previously set output state on @decoder will be replaced by the newly
+ * created one.
+ *
+ * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
+ * or framerate) from an existing #GstVideoCodecState, it can be provided as a
+ * @reference.
+ *
+ * If the subclass wishes to override some fields from the output state (like
+ * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
+ *
+ * The new output state will only take effect (set on pads and buffers) starting
+ * from the next call to #gst_video_decoder_finish_frame().
+ *
+ * Returns: (transfer full): the newly configured output state.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecState *
+gst_video_decoder_set_output_state (GstVideoDecoder * decoder,
+ GstVideoFormat fmt, guint width, guint height,
+ GstVideoCodecState * reference)
+{
+ GstVideoDecoderPrivate *priv = decoder->priv;
+ GstVideoCodecState *state;
+
+ GST_DEBUG_OBJECT (decoder, "fmt:%d, width:%d, height:%d, reference:%p",
+ fmt, width, height, reference);
+
+ /* Create the new output state */
+ state = _new_output_state (fmt, width, height, reference);
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+ /* Replace existing output state by new one */
+ if (priv->output_state)
+ gst_video_codec_state_unref (priv->output_state);
+ priv->output_state = gst_video_codec_state_ref (state);
+
+ priv->output_state_changed = TRUE;
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return state;
+}
+
+
+/**
+ * gst_video_decoder_get_oldest_frame:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Get the oldest pending unfinished #GstVideoCodecFrame
+ *
+ * Returns: (transfer none): oldest pending unfinished #GstVideoCodecFrame.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecFrame *
+gst_video_decoder_get_oldest_frame (GstVideoDecoder * decoder)
+{
+ GList *g;
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+ g = decoder->priv->frames;
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ if (g == NULL)
+ return NULL;
+ return (GstVideoCodecFrame *) (g->data);
+}
+
+/**
+ * gst_video_decoder_get_frame:
+ * @decoder: a #GstVideoDecoder
+ * @frame_number: system_frame_number of a frame
+ *
+ * Get a pending unfinished #GstVideoCodecFrame
+ *
+ * Returns: (transfer none): pending unfinished #GstVideoCodecFrame identified by @frame_number.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecFrame *
+gst_video_decoder_get_frame (GstVideoDecoder * decoder, int frame_number)
+{
+ GList *g;
+ GstVideoCodecFrame *frame = NULL;
+
+ GST_DEBUG_OBJECT (decoder, "frame_number : %d", frame_number);
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+ for (g = decoder->priv->frames; g; g = g->next) {
+ GstVideoCodecFrame *tmp = g->data;
+
+ if (tmp->system_frame_number == frame_number) {
+ frame = tmp;
+ break;
+ }
+ }
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return frame;
+}
+
+/**
+ * gst_video_decoder_set_src_caps:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Sets src pad caps according to currently configured #GstVideoCodecState.
+ *
+ * Returns: #TRUE if the caps were accepted downstream, else #FALSE.
+ *
+ * Since: 0.10.36
+ */
+static gboolean
+gst_video_decoder_set_src_caps (GstVideoDecoder * decoder)
+{
+ GstVideoCodecState *state = decoder->priv->output_state;
+ gboolean ret;
+
+ g_return_val_if_fail (GST_VIDEO_INFO_WIDTH (&state->info) != 0, FALSE);
+ g_return_val_if_fail (GST_VIDEO_INFO_HEIGHT (&state->info) != 0, FALSE);
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+ GST_DEBUG_OBJECT (decoder, "output_state par %d/%d fps %d/%d",
+ state->info.par_n, state->info.par_d,
+ state->info.fps_n, state->info.fps_d);
+
+ if (G_UNLIKELY (state->caps == NULL))
+ state->caps = gst_video_info_to_caps (&state->info);
+
+ GST_DEBUG_OBJECT (decoder, "setting caps %" GST_PTR_FORMAT, state->caps);
+
+ ret = gst_pad_set_caps (decoder->srcpad, state->caps);
+ decoder->priv->output_state_changed = FALSE;
+
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return ret;
+}
+
+/**
+ * gst_video_decoder_alloc_output_buffer:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Helper function that uses @gst_pad_alloc_buffer_and_set_caps()
+ * to allocate a buffer to hold a video frame for @decoder's
+ * current #GstVideoCodecState.
+ *
+ * Returns: (transfer full): allocated buffer
+ *
+ * Since: 0.10.36
+ */
+GstBuffer *
+gst_video_decoder_alloc_output_buffer (GstVideoDecoder * decoder)
+{
+ GstBuffer *buffer;
+ GstFlowReturn flow_ret;
+ GstVideoCodecState *state = decoder->priv->output_state;
+ int num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
+
+ GST_DEBUG ("alloc src buffer caps=%" GST_PTR_FORMAT,
+ GST_PAD_CAPS (decoder->srcpad));
+
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+ if (G_UNLIKELY (decoder->priv->output_state_changed))
+ gst_video_decoder_set_src_caps (decoder);
+
+ flow_ret =
+ gst_pad_alloc_buffer_and_set_caps (decoder->srcpad,
+ GST_BUFFER_OFFSET_NONE, num_bytes, GST_PAD_CAPS (decoder->srcpad),
+ &buffer);
+
+ if (flow_ret != GST_FLOW_OK) {
+ buffer = gst_buffer_new_and_alloc (num_bytes);
+ gst_buffer_set_caps (buffer, GST_PAD_CAPS (decoder->srcpad));
+ }
+
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return buffer;
+}
+
+/**
+ * gst_video_decoder_alloc_output_frame:
+ * @decoder: a #GstVideoDecoder
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Helper function that uses @gst_pad_alloc_buffer_and_set_caps()
+ * to allocate a buffer to hold a video frame for @decoder's
+ * current #GstVideoCodecState. Subclass should already have configured video state
+ * and set src pad caps.
+ *
+ * Returns: result from pad alloc call
+ *
+ * Since: 0.10.36
+ */
+GstFlowReturn
+gst_video_decoder_alloc_output_frame (GstVideoDecoder *
+ decoder, GstVideoCodecFrame * frame)
+{
+ GstFlowReturn flow_ret;
+ GstVideoCodecState *state = decoder->priv->output_state;
+ int num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
+
+ g_return_val_if_fail (num_bytes != 0, GST_FLOW_ERROR);
+
+ if (G_UNLIKELY (decoder->priv->output_state_changed))
+ gst_video_decoder_set_src_caps (decoder);
+
+ g_return_val_if_fail (GST_PAD_CAPS (decoder->srcpad) != NULL, GST_FLOW_ERROR);
+
+ GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
+ GST_VIDEO_DECODER_STREAM_LOCK (decoder);
+
+ flow_ret =
+ gst_pad_alloc_buffer_and_set_caps (decoder->srcpad,
+ GST_BUFFER_OFFSET_NONE, num_bytes, GST_PAD_CAPS (decoder->srcpad),
+ &frame->output_buffer);
+
+ if (flow_ret != GST_FLOW_OK) {
+ GST_WARNING_OBJECT (decoder, "failed to get buffer %s",
+ gst_flow_get_name (flow_ret));
+ }
+
+ GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
+
+ return flow_ret;
+}
+
+/**
+ * gst_video_decoder_get_max_decode_time:
+ * @decoder: a #GstVideoDecoder
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Determines maximum possible decoding time for @frame that will
+ * allow it to decode and arrive in time (as determined by QoS events).
+ * In particular, a negative result means decoding in time is no longer possible
+ * and decoding should therefore be skipped, or finished as soon as possible.
+ *
+ * Returns: max decoding time.
+ *
+ * Since: 0.10.36
+ */
+GstClockTimeDiff
+gst_video_decoder_get_max_decode_time (GstVideoDecoder *
+ decoder, GstVideoCodecFrame * frame)
+{
+ GstClockTimeDiff deadline;
+ GstClockTime earliest_time;
+
+ GST_OBJECT_LOCK (decoder);
+ earliest_time = decoder->priv->earliest_time;
+ if (GST_CLOCK_TIME_IS_VALID (earliest_time))
+ deadline = GST_CLOCK_DIFF (earliest_time, frame->deadline);
+ else
+ deadline = G_MAXINT64;
+
+ GST_LOG_OBJECT (decoder, "earliest %" GST_TIME_FORMAT
+ ", frame deadline %" GST_TIME_FORMAT ", deadline %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (earliest_time), GST_TIME_ARGS (frame->deadline),
+ GST_TIME_ARGS (deadline));
+
+ GST_OBJECT_UNLOCK (decoder);
+
+ return deadline;
+}
+
+/* Implementation behind the GST_VIDEO_DECODER_ERROR macro: logs the
+ * error, adds @weight to the decoder's error count and, once the count
+ * exceeds priv->max_errors, posts a fatal error message on the bus.
+ *
+ * Takes ownership of @txt and @dbg (heap strings produced by
+ * _gst_element_error_printf). gst_element_message_full consumes them in
+ * the fatal path only, so the tolerated path must free them itself.
+ *
+ * Returns: GST_FLOW_ERROR when the error budget is exhausted,
+ * GST_FLOW_OK for a tolerated ("glitch") error. */
+GstFlowReturn
+_gst_video_decoder_error (GstVideoDecoder * dec, gint weight,
+ GQuark domain, gint code, gchar * txt, gchar * dbg, const gchar * file,
+ const gchar * function, gint line)
+{
+ if (txt)
+ GST_WARNING_OBJECT (dec, "error: %s", txt);
+ if (dbg)
+ GST_WARNING_OBJECT (dec, "error: %s", dbg);
+ dec->priv->error_count += weight;
+ dec->priv->discont = TRUE;
+ if (dec->priv->max_errors < dec->priv->error_count) {
+ gst_element_message_full (GST_ELEMENT (dec), GST_MESSAGE_ERROR,
+ domain, code, txt, dbg, file, function, line);
+ return GST_FLOW_ERROR;
+ } else {
+ /* tolerated error: nothing consumed the strings, free them here so a
+ * recovered error does not leak txt/dbg on every occurrence */
+ g_free (txt);
+ g_free (dbg);
+ return GST_FLOW_OK;
+ }
+}
+
+/**
+ * gst_video_decoder_set_max_errors:
+ * @dec: a #GstVideoDecoder
+ * @num: max tolerated errors
+ *
+ * Sets numbers of tolerated decoder errors, where a tolerated one is then only
+ * warned about, but more than tolerated will lead to fatal error. Default
+ * is set to GST_VIDEO_DECODER_MAX_ERRORS.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_decoder_set_max_errors (GstVideoDecoder * dec, gint num)
+{
+ g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
+
+ dec->priv->max_errors = num;
+}
+
+/**
+ * gst_video_decoder_get_max_errors:
+ * @dec: a #GstVideoDecoder
+ *
+ * Returns: currently configured decoder tolerated error count.
+ *
+ * Since: 0.10.36
+ */
+gint
+gst_video_decoder_get_max_errors (GstVideoDecoder * dec)
+{
+ g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
+
+ return dec->priv->max_errors;
+}
+
+/**
+ * gst_video_decoder_set_packetized:
+ * @decoder: a #GstVideoDecoder
+ * @packetized: whether the input data should be considered as packetized.
+ *
+ * Allows baseclass to consider input data as packetized or not. If the
+ * input is packetized, then the @parse method will not be called.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
+ gboolean packetized)
+{
+ decoder->priv->packetized = packetized;
+}
+
+/**
+ * gst_video_decoder_get_packetized:
+ * @decoder: a #GstVideoDecoder
+ *
+ * Queries whether input data is considered packetized or not by the
+ * base class.
+ *
+ * Returns: TRUE if input data is considered packetized.
+ *
+ * Since: 0.10.36
+ */
+gboolean
+gst_video_decoder_get_packetized (GstVideoDecoder * decoder)
+{
+ return decoder->priv->packetized;
+}
+
+/**
+ * gst_video_decoder_set_estimate_rate:
+ * @dec: a #GstVideoDecoder
+ * @enabled: whether to enable byte to time conversion
+ *
+ * Allows baseclass to perform byte to time estimated conversion.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec, gboolean enabled)
+{
+ g_return_if_fail (GST_IS_VIDEO_DECODER (dec));
+
+ dec->priv->do_estimate_rate = enabled;
+}
+
+/**
+ * gst_video_decoder_get_estimate_rate:
+ * @dec: a #GstVideoDecoder
+ *
+ * Returns: currently configured byte to time conversion setting
+ *
+ * Since: 0.10.36
+ */
+gboolean
+gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec)
+{
+ g_return_val_if_fail (GST_IS_VIDEO_DECODER (dec), 0);
+
+ return dec->priv->do_estimate_rate;
+}
+
+/**
+ * gst_video_decoder_set_latency:
+ * @decoder: a #GstVideoDecoder
+ * @min_latency: minimum latency
+ * @max_latency: maximum latency
+ *
+ * Informs baseclass of the decoding latency.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_decoder_set_latency (GstVideoDecoder * decoder,
+ GstClockTime min_latency, GstClockTime max_latency)
+{
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
+ g_return_if_fail (max_latency >= min_latency);
+
+ GST_OBJECT_LOCK (decoder);
+ decoder->priv->min_latency = min_latency;
+ decoder->priv->max_latency = max_latency;
+ GST_OBJECT_UNLOCK (decoder);
+
+ gst_element_post_message (GST_ELEMENT_CAST (decoder),
+ gst_message_new_latency (GST_OBJECT_CAST (decoder)));
+}
+
+/**
+ * gst_video_decoder_get_latency:
+ * @decoder: a #GstVideoDecoder
+ * @min_latency: (out) (allow-none): the configured minimum latency
+ * @max_latency: (out) (allow-none): the configured maximum latency
+ *
+ * Returns the configured decoding latency.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_decoder_get_latency (GstVideoDecoder * decoder,
+ GstClockTime * min_latency, GstClockTime * max_latency)
+{
+ GST_OBJECT_LOCK (decoder);
+ if (min_latency)
+ *min_latency = decoder->priv->min_latency;
+ if (max_latency)
+ *max_latency = decoder->priv->max_latency;
+ GST_OBJECT_UNLOCK (decoder);
+}
--- /dev/null
+/* GStreamer
+ * Copyright (C) 2008 David Schleef <ds@schleef.org>
+ * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
+ * Copyright (C) 2011 Nokia Corporation. All rights reserved.
+ * Contact: Stefan Kost <stefan.kost@nokia.com>
+ * Copyright (C) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _GST_VIDEO_DECODER_H_
+#define _GST_VIDEO_DECODER_H_
+
+#include <gst/base/gstadapter.h>
+#include <gst/video/gstvideoutils.h>
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_VIDEO_DECODER \
+ (gst_video_decoder_get_type())
+#define GST_VIDEO_DECODER(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_DECODER,GstVideoDecoder))
+#define GST_VIDEO_DECODER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VIDEO_DECODER,GstVideoDecoderClass))
+#define GST_VIDEO_DECODER_GET_CLASS(obj) \
+ (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_VIDEO_DECODER,GstVideoDecoderClass))
+#define GST_IS_VIDEO_DECODER(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_DECODER))
+/* parameter must be named klass: the expansion references klass, so the
+ * previous (obj) parameter made every use of this macro fail to compile */
+#define GST_IS_VIDEO_DECODER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_DECODER))
+/* NOTE(review): GST_VIDEO_DECODER_CAST is used by the *_SEGMENT macros
+ * further down but is not defined in this header — confirm it is
+ * provided elsewhere before release */
+
+/**
+ * GST_VIDEO_DECODER_SINK_NAME:
+ *
+ * The name of the templates for the sink pad.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_SINK_NAME "sink"
+/**
+ * GST_VIDEO_DECODER_SRC_NAME:
+ *
+ * The name of the templates for the source pad.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_SRC_NAME "src"
+
+/**
+ * GST_VIDEO_DECODER_SRC_PAD:
+ * @obj: a #GstVideoDecoder
+ *
+ * Gives the pointer to the source #GstPad object of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_SRC_PAD(obj) (((GstVideoDecoder *) (obj))->srcpad)
+
+/**
+ * GST_VIDEO_DECODER_SINK_PAD:
+ * @obj: a #GstVideoDecoder
+ *
+ * Gives the pointer to the sink #GstPad object of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_SINK_PAD(obj) (((GstVideoDecoder *) (obj))->sinkpad)
+/**
+ * GST_VIDEO_DECODER_FLOW_NEED_DATA:
+ *
+ * Returned while parsing to indicate more data is needed.
+ *
+ * Since: 0.10.36
+ **/
+#define GST_VIDEO_DECODER_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS
+
+/**
+ * GST_VIDEO_DECODER_FLOW_DROPPED:
+ *
+ * Returned when the event/buffer should be dropped.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_FLOW_DROPPED GST_FLOW_CUSTOM_SUCCESS_1
+
+/**
+ * GST_VIDEO_DECODER_INPUT_SEGMENT:
+ * @obj: base decoder instance
+ *
+ * Gives the segment of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_INPUT_SEGMENT(obj) (GST_VIDEO_DECODER_CAST (obj)->input_segment)
+
+/**
+ * GST_VIDEO_DECODER_OUTPUT_SEGMENT:
+ * @obj: base decoder instance
+ *
+ * Gives the segment of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_OUTPUT_SEGMENT(obj) (GST_VIDEO_DECODER_CAST (obj)->output_segment)
+
+/**
+ * GST_VIDEO_DECODER_STREAM_LOCK:
+ * @decoder: video decoder instance
+ *
+ * Obtain a lock to protect the decoder function from concurrent access.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_STREAM_LOCK(decoder) g_static_rec_mutex_lock (&GST_VIDEO_DECODER (decoder)->stream_lock)
+
+/**
+ * GST_VIDEO_DECODER_STREAM_UNLOCK:
+ * @decoder: video decoder instance
+ *
+ * Release the lock that protects the decoder function from concurrent access.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_STREAM_UNLOCK(decoder) g_static_rec_mutex_unlock (&GST_VIDEO_DECODER (decoder)->stream_lock)
+
+typedef struct _GstVideoDecoder GstVideoDecoder;
+typedef struct _GstVideoDecoderClass GstVideoDecoderClass;
+typedef struct _GstVideoDecoderPrivate GstVideoDecoderPrivate;
+
+
+/* do not use this one, use macro below */
+GstFlowReturn _gst_video_decoder_error (GstVideoDecoder *dec, gint weight,
+ GQuark domain, gint code,
+ gchar *txt, gchar *debug,
+ const gchar *file, const gchar *function,
+ gint line);
+
+/**
+ * GST_VIDEO_DECODER_ERROR:
+ * @el: the base video decoder element that generates the error
+ * @weight: element defined weight of the error, added to error count
+ * @domain: like CORE, LIBRARY, RESOURCE or STREAM (see #gstreamer-GstGError)
+ * @code: error code defined for that domain (see #gstreamer-GstGError)
+ * @text: the message to display (format string and args enclosed in
+ * parentheses)
+ * @debug: debugging information for the message (format string and args
+ * enclosed in parentheses)
+ * @ret: variable to receive return value
+ *
+ * Utility function that video decoder elements can use in case they encountered
+ * a data processing error that may be fatal for the current "data unit" but
+ * need not prevent subsequent decoding. Such errors are counted and if there
+ * are too many, as configured in the context's max_errors, the pipeline will
+ * post an error message and the application will be requested to stop further
+ * media processing. Otherwise, it is considered a "glitch" and only a warning
+ * is logged. In either case, @ret is set to the proper value to
+ * return to upstream/caller (indicating either GST_FLOW_ERROR or GST_FLOW_OK).
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_ERROR(el, w, domain, code, text, debug, ret) \
+G_STMT_START { \
+ gchar *__txt = _gst_element_error_printf text; \
+ gchar *__dbg = _gst_element_error_printf debug; \
+ GstVideoDecoder *dec = GST_VIDEO_DECODER (el); \
+ ret = _gst_video_decoder_error (dec, w, GST_ ## domain ## _ERROR, \
+ GST_ ## domain ## _ERROR_ ## code, __txt, __dbg, __FILE__, \
+ GST_FUNCTION, __LINE__); \
+} G_STMT_END
+
+/**
+ * GST_VIDEO_DECODER_MAX_ERRORS:
+ *
+ * Default maximum number of errors tolerated before signaling error.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_DECODER_MAX_ERRORS 10
+
+
+/**
+ * GstVideoDecoder:
+ *
+ * The opaque #GstVideoDecoder data structure.
+ *
+ * Since: 0.10.36
+ */
+struct _GstVideoDecoder
+{
+ /*< private >*/
+ GstElement element;
+
+ /*< protected >*/
+ GstPad *sinkpad;
+ GstPad *srcpad;
+
+ /* protects all data processing, i.e. is locked
+ * in the chain function, finish_frame and when
+ * processing serialized events */
+ GStaticRecMutex stream_lock;
+
+ /* MT-protected (with STREAM_LOCK) */
+ GstSegment input_segment;
+ GstSegment output_segment;
+
+ GstVideoDecoderPrivate *priv;
+
+ /* FIXME before moving to base */
+ void *padding[GST_PADDING_LARGE];
+};
+
+/**
+ * GstVideoDecoderClass:
+ * @open: Optional.
+ * Called when the element changes to GST_STATE_READY.
+ * Allows opening external resources.
+ * @close: Optional.
+ * Called when the element changes to GST_STATE_NULL.
+ * Allows closing external resources.
+ * @start: Optional.
+ * Called when the element starts processing.
+ * Allows opening external resources.
+ * @stop: Optional.
+ * Called when the element stops processing.
+ * Allows closing external resources.
+ * @set_format: Notifies subclass of incoming data format (caps).
+ * @parse: Required for non-packetized input.
+ * Allows chopping incoming data into manageable units (frames)
+ * for subsequent decoding.
+ * @reset: Optional.
+ * Allows subclass (decoder) to perform post-seek semantics reset.
+ * @handle_frame: Provides input data frame to subclass.
+ * @finish: Optional.
+ * Called to request subclass to dispatch any pending remaining
+ * data (e.g. at EOS).
+ * @sink_event: Optional.
+ * Event handler on the sink pad. This function should return
+ * TRUE if the event was handled and should be discarded
+ * (i.e. not unref'ed).
+ * @src_event: Optional.
+ * Event handler on the source pad. This function should return
+ * TRUE if the event was handled and should be discarded
+ * (i.e. not unref'ed).
+ *
+ * Subclasses can override any of the available virtual methods or not, as
+ * needed. At minimum @handle_frame needs to be overridden, and @set_format
+ * likely needs to be overridden as well. If non-packetized input is
+ * supported or expected, @parse needs to be overridden as well.
+ *
+ * Since: 0.10.36
+ */
+struct _GstVideoDecoderClass
+{
+ /*< private >*/
+ GstElementClass element_class;
+
+ /*< public >*/
+ gboolean (*open) (GstVideoDecoder *decoder);
+
+ gboolean (*close) (GstVideoDecoder *decoder);
+
+ gboolean (*start) (GstVideoDecoder *decoder);
+
+ gboolean (*stop) (GstVideoDecoder *decoder);
+
+ GstFlowReturn (*parse) (GstVideoDecoder *decoder,
+ GstVideoCodecFrame *frame,
+ GstAdapter *adapter,
+ gboolean at_eos);
+
+ gboolean (*set_format) (GstVideoDecoder *decoder,
+ GstVideoCodecState * state);
+
+ gboolean (*reset) (GstVideoDecoder *decoder,
+ gboolean hard);
+
+ GstFlowReturn (*finish) (GstVideoDecoder *decoder);
+
+ GstFlowReturn (*handle_frame) (GstVideoDecoder *decoder,
+ GstVideoCodecFrame *frame);
+
+ gboolean (*sink_event) (GstVideoDecoder *decoder,
+ GstEvent *event);
+
+ gboolean (*src_event) (GstVideoDecoder *decoder,
+ GstEvent *event);
+
+
+ /*< private >*/
+ /* FIXME before moving to base */
+ void *padding[GST_PADDING_LARGE];
+};
+
+GType gst_video_decoder_get_type (void);
+
+/* Context parameters */
+void gst_video_decoder_set_packetized (GstVideoDecoder * decoder,
+ gboolean packetized);
+
+gboolean gst_video_decoder_get_packetized (GstVideoDecoder * decoder);
+
+void gst_video_decoder_set_estimate_rate (GstVideoDecoder * dec,
+ gboolean enabled);
+
+/* declared gboolean to match the definition in gstvideodecoder.c (it was
+ * inconsistently declared gint here) */
+gboolean gst_video_decoder_get_estimate_rate (GstVideoDecoder * dec);
+
+void gst_video_decoder_set_max_errors (GstVideoDecoder * dec,
+ gint num);
+
+gint gst_video_decoder_get_max_errors (GstVideoDecoder * dec);
+
+void gst_video_decoder_set_latency (GstVideoDecoder *decoder,
+ GstClockTime min_latency,
+ GstClockTime max_latency);
+void gst_video_decoder_get_latency (GstVideoDecoder *decoder,
+ GstClockTime *min_latency,
+ GstClockTime *max_latency);
+
+
+/* Object methods */
+
+GstVideoCodecFrame *gst_video_decoder_get_frame (GstVideoDecoder *decoder,
+ int frame_number);
+
+GstVideoCodecFrame *gst_video_decoder_get_oldest_frame (GstVideoDecoder *decoder);
+
+/* Parsing related methods */
+void gst_video_decoder_add_to_frame (GstVideoDecoder *decoder,
+ int n_bytes);
+GstFlowReturn gst_video_decoder_have_frame (GstVideoDecoder *decoder);
+
+GstBuffer *gst_video_decoder_alloc_output_buffer (GstVideoDecoder * decoder);
+
+GstFlowReturn gst_video_decoder_alloc_output_frame (GstVideoDecoder *decoder,
+ GstVideoCodecFrame *frame);
+
+GstVideoCodecState *gst_video_decoder_set_output_state (GstVideoDecoder *decoder,
+ GstVideoFormat fmt, guint width, guint height,
+ GstVideoCodecState *reference);
+
+GstVideoCodecState *gst_video_decoder_get_output_state (GstVideoDecoder *decoder);
+
+GstClockTimeDiff gst_video_decoder_get_max_decode_time (GstVideoDecoder *decoder,
+ GstVideoCodecFrame *frame);
+
+GstFlowReturn gst_video_decoder_finish_frame (GstVideoDecoder *decoder,
+ GstVideoCodecFrame *frame);
+
+GstFlowReturn gst_video_decoder_drop_frame (GstVideoDecoder *dec,
+ GstVideoCodecFrame *frame);
+
+G_END_DECLS
+
+#endif
+
--- /dev/null
+/* GStreamer
+ * Copyright (C) 2008 David Schleef <ds@schleef.org>
+ * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
+ * Copyright (C) 2011 Nokia Corporation. All rights reserved.
+ * Contact: Stefan Kost <stefan.kost@nokia.com>
+ * Copyright (C) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+/**
+ * SECTION:gstvideoencoder
+ * @short_description: Base class for video encoders
+ * @see_also:
+ *
+ * This base class is for video encoders turning raw video into
+ * encoded video data.
+ *
+ * GstVideoEncoder and subclass should cooperate as follows.
+ * <orderedlist>
+ * <listitem>
+ * <itemizedlist><title>Configuration</title>
+ * <listitem><para>
+ * Initially, GstVideoEncoder calls @start when the encoder element
+ * is activated, which allows subclass to perform any global setup.
+ * </para></listitem>
+ * <listitem><para>
+ * GstVideoEncoder calls @set_format to inform subclass of the format
+ * of input video data that it is about to receive. Subclass should
+ * setup for encoding and configure base class as appropriate
+ * (e.g. latency). While unlikely, it might be called more than once,
+ * if changing input parameters require reconfiguration. Baseclass
+ * will ensure that processing of current configuration is finished.
+ * </para></listitem>
+ * <listitem><para>
+ * GstVideoEncoder calls @stop at end of all processing.
+ * </para></listitem>
+ * </itemizedlist>
+ * </listitem>
+ * <listitem>
+ * <itemizedlist>
+ * <title>Data processing</title>
+ * <listitem><para>
+ * Base class collects input data and metadata into a frame and hands
+ * this to subclass' @handle_frame.
+ * </para></listitem>
+ * <listitem><para>
+ * If codec processing results in encoded data, subclass should call
+ * @gst_video_encoder_finish_frame to have encoded data pushed
+ * downstream.
+ * </para></listitem>
+ * <listitem><para>
+ * If implemented, baseclass calls subclass @pre_push just prior to
+ * pushing to allow subclasses to modify some metadata on the buffer.
+ * If it returns GST_FLOW_OK, the buffer is pushed downstream.
+ * </para></listitem>
+ * <listitem><para>
+ * GstVideoEncoderClass will handle both srcpad and sinkpad events.
+ * Sink events will be passed to subclass if @event callback has been
+ * provided.
+ * </para></listitem>
+ * </itemizedlist>
+ * </listitem>
+ * <listitem>
+ * <itemizedlist><title>Shutdown phase</title>
+ * <listitem><para>
+ * GstVideoEncoder class calls @stop to inform the subclass that data
+ * parsing will be stopped.
+ * </para></listitem>
+ * </itemizedlist>
+ * </listitem>
+ * </orderedlist>
+ *
+ * Subclass is responsible for providing pad template caps for
+ * source and sink pads. The pads need to be named "sink" and "src". It should
+ * also be able to provide fixed src pad caps in @getcaps by the time it calls
+ * @gst_video_encoder_finish_frame.
+ *
+ * Things that subclass need to take care of:
+ * <itemizedlist>
+ * <listitem><para>Provide pad templates</para></listitem>
+ * <listitem><para>
+ * Provide source pad caps before pushing the first buffer
+ * </para></listitem>
+ * <listitem><para>
+ * Accept data in @handle_frame and provide encoded results to
+ * @gst_video_encoder_finish_frame.
+ * </para></listitem>
+ * </itemizedlist>
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* TODO
+ *
+ * * Change _set_output_format() to steal the reference of the provided caps
+ * * Calculate actual latency based on input/output timestamp/frame_number
+ * and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
+ */
+
+/* FIXME 0.11: suppress warnings for deprecated API such as GStaticRecMutex
+ * with newer GLib versions (>= 2.31.0) */
+#define GLIB_DISABLE_DEPRECATION_WARNINGS
+
+#include "gstvideoencoder.h"
+#include "gstvideoutils.h"
+
+#include <string.h>
+
+GST_DEBUG_CATEGORY (videoencoder_debug);
+#define GST_CAT_DEFAULT videoencoder_debug
+
+#define GST_VIDEO_ENCODER_GET_PRIVATE(obj) \
+ (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_VIDEO_ENCODER, \
+ GstVideoEncoderPrivate))
+
+/* Instance-private encoder state.  Unless noted otherwise, fields are
+ * accessed while holding the encoder stream lock. */
+struct _GstVideoEncoderPrivate
+{
+ guint64 presentation_frame_number;
+ int distance_from_sync;
+
+ /* FIXME : (and introduce a context ?) */
+ gboolean drained; /* no frames pending in the subclass */
+ gboolean at_eos; /* EOS was received on the sink pad */
+
+ /* reported via the LATENCY query; protected by the object lock */
+ gint64 min_latency;
+ gint64 max_latency;
+
+ /* serialized events received before the next frame; handed to that frame */
+ GList *current_frame_events;
+
+ /* list of GstBuffer codec headers (see gst_video_encoder_set_headers) */
+ GList *headers;
+ gboolean new_headers; /* Whether new headers were just set */
+
+ GList *force_key_unit; /* List of pending forced keyunits */
+
+ guint64 system_frame_number;
+
+ GList *frames; /* Protected with OBJECT_LOCK */
+ GstVideoCodecState *input_state;
+ GstVideoCodecState *output_state;
+ gboolean output_state_changed;
+
+ /* accumulated totals used for BYTES<->TIME conversion */
+ gint64 bytes;
+ gint64 time;
+};
+
+/* Bookkeeping for a pending force-key-unit request (from a downstream
+ * custom event or an upstream request).  running_time == GST_CLOCK_TIME_NONE
+ * means "key unit as soon as possible". */
+typedef struct _ForcedKeyUnitEvent ForcedKeyUnitEvent;
+struct _ForcedKeyUnitEvent
+{
+ GstClockTime running_time;
+ gboolean pending; /* TRUE if this was requested already */
+ gboolean all_headers;
+ guint count;
+};
+
+/* Release a ForcedKeyUnitEvent allocated with forced_key_unit_event_new(). */
+static void
+forced_key_unit_event_free (ForcedKeyUnitEvent * evt)
+{
+ g_slice_free (ForcedKeyUnitEvent, evt);
+}
+
+/* Allocate and populate a new ForcedKeyUnitEvent (pending starts FALSE). */
+static ForcedKeyUnitEvent *
+forced_key_unit_event_new (GstClockTime running_time, gboolean all_headers,
+ guint count)
+{
+ ForcedKeyUnitEvent *evt = g_slice_new0 (ForcedKeyUnitEvent);
+
+ evt->running_time = running_time;
+ evt->all_headers = all_headers;
+ evt->count = count;
+
+ return evt;
+}
+
+/* Forward declarations for the pad/element callbacks implemented below. */
+static void gst_video_encoder_finalize (GObject * object);
+
+static gboolean gst_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps);
+static GstCaps *gst_video_encoder_sink_getcaps (GstPad * pad);
+static gboolean gst_video_encoder_src_event (GstPad * pad, GstEvent * event);
+static gboolean gst_video_encoder_sink_event (GstPad * pad, GstEvent * event);
+static GstFlowReturn gst_video_encoder_chain (GstPad * pad, GstBuffer * buf);
+static GstStateChangeReturn gst_video_encoder_change_state (GstElement *
+ element, GstStateChange transition);
+static const GstQueryType *gst_video_encoder_get_query_types (GstPad * pad);
+static gboolean gst_video_encoder_src_query (GstPad * pad, GstQuery * query);
+static GstVideoCodecFrame *gst_video_encoder_new_frame (GstVideoEncoder *
+ encoder, GstBuffer * buf, GstClockTime timestamp, GstClockTime duration);
+
+/* GType init hook: adds the GstPreset interface to the encoder type so
+ * subclasses can expose encoding presets. */
+static void
+_do_init (GType object_type)
+{
+ const GInterfaceInfo preset_interface_info = {
+ NULL, /* interface_init */
+ NULL, /* interface_finalize */
+ NULL /* interface_data */
+ };
+
+ g_type_add_interface_static (object_type, GST_TYPE_PRESET,
+ &preset_interface_info);
+}
+
+/* Registers GstVideoEncoder as a GstElement subclass (0.10 boilerplate). */
+GST_BOILERPLATE_FULL (GstVideoEncoder, gst_video_encoder,
+ GstElement, GST_TYPE_ELEMENT, _do_init);
+
+/* Class base-init: set up the debug category used by this base class. */
+static void
+gst_video_encoder_base_init (gpointer g_class)
+{
+ GST_DEBUG_CATEGORY_INIT (videoencoder_debug, "videoencoder", 0,
+ "Base Video Encoder");
+}
+
+/* Class init: register the private struct and hook up GObject finalize
+ * and GstElement state-change handling. */
+static void
+gst_video_encoder_class_init (GstVideoEncoderClass * klass)
+{
+ GObjectClass *gobject_class;
+ GstElementClass *gstelement_class;
+
+ gobject_class = G_OBJECT_CLASS (klass);
+ gstelement_class = GST_ELEMENT_CLASS (klass);
+
+ g_type_class_add_private (klass, sizeof (GstVideoEncoderPrivate));
+
+ gobject_class->finalize = gst_video_encoder_finalize;
+
+ gstelement_class->change_state =
+ GST_DEBUG_FUNCPTR (gst_video_encoder_change_state);
+}
+
+/* Drop all per-stream state: pending frames, queued serialized events,
+ * force-key-unit requests, cached codec headers, latency values and the
+ * input/output codec states.  Takes the stream lock. */
+static void
+gst_video_encoder_reset (GstVideoEncoder * encoder)
+{
+ GstVideoEncoderPrivate *priv = encoder->priv;
+ GList *g;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+
+ priv->presentation_frame_number = 0;
+ priv->distance_from_sync = 0;
+
+ g_list_foreach (priv->force_key_unit, (GFunc) forced_key_unit_event_free,
+ NULL);
+ g_list_free (priv->force_key_unit);
+ priv->force_key_unit = NULL;
+
+ priv->drained = TRUE;
+ priv->min_latency = 0;
+ priv->max_latency = 0;
+
+ /* priv->headers is a list of GstBuffer (see gst_video_encoder_set_headers
+ * and _finalize), so it must be released with gst_buffer_unref; the
+ * previous (GFunc) gst_event_unref cast unref'ed buffers as events. */
+ g_list_foreach (priv->headers, (GFunc) gst_buffer_unref, NULL);
+ g_list_free (priv->headers);
+ priv->headers = NULL;
+ priv->new_headers = FALSE;
+
+ g_list_foreach (priv->current_frame_events, (GFunc) gst_event_unref, NULL);
+ g_list_free (priv->current_frame_events);
+ priv->current_frame_events = NULL;
+
+ for (g = priv->frames; g; g = g->next) {
+ gst_video_codec_frame_unref ((GstVideoCodecFrame *) g->data);
+ }
+ g_list_free (priv->frames);
+ priv->frames = NULL;
+
+ priv->bytes = 0;
+ priv->time = 0;
+
+ if (priv->input_state)
+ gst_video_codec_state_unref (priv->input_state);
+ priv->input_state = NULL;
+ if (priv->output_state)
+ gst_video_codec_state_unref (priv->output_state);
+ priv->output_state = NULL;
+
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+}
+
+/* Instance init: create sink/src pads from the subclass pad templates
+ * (templates named "sink" and "src" are mandatory), install the pad
+ * callbacks, initialize the segments and stream lock, and reset state. */
+static void
+gst_video_encoder_init (GstVideoEncoder * encoder, GstVideoEncoderClass * klass)
+{
+ GstVideoEncoderPrivate *priv;
+ GstPadTemplate *pad_template;
+ GstPad *pad;
+
+ GST_DEBUG_OBJECT (encoder, "gst_video_encoder_init");
+
+ priv = encoder->priv = GST_VIDEO_ENCODER_GET_PRIVATE (encoder);
+
+ /* subclass must have provided a "sink" pad template */
+ pad_template =
+ gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
+ g_return_if_fail (pad_template != NULL);
+
+ encoder->sinkpad = pad = gst_pad_new_from_template (pad_template, "sink");
+
+ gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_encoder_chain));
+ gst_pad_set_event_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_sink_event));
+ gst_pad_set_setcaps_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_sink_setcaps));
+ gst_pad_set_getcaps_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_sink_getcaps));
+ gst_element_add_pad (GST_ELEMENT (encoder), encoder->sinkpad);
+
+ /* subclass must have provided a "src" pad template */
+ pad_template =
+ gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
+ g_return_if_fail (pad_template != NULL);
+
+ encoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
+
+ gst_pad_set_query_type_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_get_query_types));
+ gst_pad_set_query_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_src_query));
+ gst_pad_set_event_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_src_event));
+ gst_element_add_pad (GST_ELEMENT (encoder), encoder->srcpad);
+
+ gst_segment_init (&encoder->input_segment, GST_FORMAT_TIME);
+ gst_segment_init (&encoder->output_segment, GST_FORMAT_TIME);
+
+ g_static_rec_mutex_init (&encoder->stream_lock);
+
+ priv->at_eos = FALSE;
+ priv->headers = NULL;
+ priv->new_headers = FALSE;
+
+ gst_video_encoder_reset (encoder);
+}
+
+/* Convert @src_value between GST_FORMAT_BYTES and GST_FORMAT_TIME using
+ * the running byte/time totals as an average bitrate estimate.
+ * Identity conversions and the special values 0 and -1 pass through
+ * unchanged.  Returns FALSE when no conversion is possible (no metadata
+ * accumulated yet, or an unsupported format pair). */
+static gboolean
+gst_video_encoded_video_convert (gint64 bytes, gint64 time,
+ GstFormat src_format, gint64 src_value, GstFormat * dest_format,
+ gint64 * dest_value)
+{
+ gboolean res = FALSE;
+
+ g_return_val_if_fail (dest_format != NULL, FALSE);
+ g_return_val_if_fail (dest_value != NULL, FALSE);
+
+ /* same format, or values (0 / -1) that mean the same in any format */
+ if (G_UNLIKELY (src_format == *dest_format || src_value == 0 ||
+ src_value == -1)) {
+ if (dest_value)
+ *dest_value = src_value;
+ return TRUE;
+ }
+
+ /* need observed data in both dimensions for a meaningful average rate */
+ if (bytes <= 0 || time <= 0) {
+ GST_DEBUG ("not enough metadata yet to convert");
+ goto exit;
+ }
+
+ switch (src_format) {
+ case GST_FORMAT_BYTES:
+ switch (*dest_format) {
+ case GST_FORMAT_TIME:
+ *dest_value = gst_util_uint64_scale (src_value, time, bytes);
+ res = TRUE;
+ break;
+ default:
+ res = FALSE;
+ }
+ break;
+ case GST_FORMAT_TIME:
+ switch (*dest_format) {
+ case GST_FORMAT_BYTES:
+ *dest_value = gst_util_uint64_scale (src_value, bytes, time);
+ res = TRUE;
+ break;
+ default:
+ res = FALSE;
+ }
+ break;
+ default:
+ GST_DEBUG ("unhandled conversion from %d to %d", src_format,
+ *dest_format);
+ res = FALSE;
+ }
+
+exit:
+ return res;
+}
+
+/**
+ * gst_video_encoder_set_headers:
+ * @video_encoder: a #GstVideoEncoder
+ * @headers: (transfer full) (element-type GstBuffer): a list of #GstBuffer containing the codec header
+ *
+ * Set the codec headers to be sent downstream whenever requested.
+ * Any previously set headers are released.  Takes the stream lock.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_encoder_set_headers (GstVideoEncoder * video_encoder, GList * headers)
+{
+ GST_VIDEO_ENCODER_STREAM_LOCK (video_encoder);
+
+ GST_DEBUG_OBJECT (video_encoder, "new headers %p", headers);
+ if (video_encoder->priv->headers) {
+ g_list_foreach (video_encoder->priv->headers, (GFunc) gst_buffer_unref,
+ NULL);
+ g_list_free (video_encoder->priv->headers);
+ }
+ /* (transfer full): we take ownership of the list and the buffers in it */
+ video_encoder->priv->headers = headers;
+ video_encoder->priv->new_headers = TRUE;
+
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (video_encoder);
+}
+
+/* Make sure no frames are pending in the subclass before reconfiguration.
+ * NOTE(review): despite the "finish" debug message this invokes the
+ * subclass ::reset vmethod with hard=TRUE — confirm this is the intended
+ * vmethod here.  Any frames still queued afterwards are released.
+ * Caller must hold the stream lock. */
+static gboolean
+gst_video_encoder_drain (GstVideoEncoder * enc)
+{
+ GstVideoEncoderPrivate *priv;
+ GstVideoEncoderClass *enc_class;
+ gboolean ret = TRUE;
+
+ enc_class = GST_VIDEO_ENCODER_GET_CLASS (enc);
+ priv = enc->priv;
+
+ GST_DEBUG_OBJECT (enc, "draining");
+
+ if (priv->drained) {
+ GST_DEBUG_OBJECT (enc, "already drained");
+ return TRUE;
+ }
+
+ if (enc_class->reset) {
+ GST_DEBUG_OBJECT (enc, "requesting subclass to finish");
+ ret = enc_class->reset (enc, TRUE);
+ }
+ /* everything should be away now */
+ if (priv->frames) {
+ /* not fatal/impossible though if subclass/enc eats stuff */
+ g_list_foreach (priv->frames, (GFunc) gst_video_codec_frame_unref, NULL);
+ g_list_free (priv->frames);
+ priv->frames = NULL;
+ }
+
+ return ret;
+}
+
+/* Build a new output GstVideoCodecState around @caps (ownership of @caps
+ * is taken — no extra ref).  Video parameters (dimensions, framerate, PAR,
+ * interlacing, colorimetry, ...) are copied over from @reference when
+ * provided, since encoded output caps do not carry a raw format. */
+static GstVideoCodecState *
+_new_output_state (GstCaps * caps, GstVideoCodecState * reference)
+{
+ GstVideoCodecState *state;
+
+ state = g_slice_new0 (GstVideoCodecState);
+ state->ref_count = 1;
+ gst_video_info_init (&state->info);
+ gst_video_info_set_format (&state->info, GST_VIDEO_FORMAT_ENCODED, 0, 0);
+
+ state->caps = caps;
+
+ if (reference) {
+ GstVideoInfo *tgt, *ref;
+
+ tgt = &state->info;
+ ref = &reference->info;
+
+ /* Copy over extra fields from reference state */
+ tgt->interlace_mode = ref->interlace_mode;
+ tgt->flags = ref->flags;
+ tgt->width = ref->width;
+ tgt->height = ref->height;
+ tgt->chroma_site = ref->chroma_site;
+ tgt->colorimetry = ref->colorimetry;
+ tgt->par_n = ref->par_n;
+ tgt->par_d = ref->par_d;
+ tgt->fps_n = ref->fps_n;
+ tgt->fps_d = ref->fps_d;
+ }
+
+ return state;
+}
+
+/* Build a new input GstVideoCodecState by parsing @caps into a
+ * GstVideoInfo.  Takes an extra ref on @caps.  Returns NULL if the caps
+ * cannot be parsed as raw video. */
+static GstVideoCodecState *
+_new_input_state (GstCaps * caps)
+{
+ GstVideoCodecState *state;
+
+ state = g_slice_new0 (GstVideoCodecState);
+ state->ref_count = 1;
+ gst_video_info_init (&state->info);
+ if (G_UNLIKELY (!gst_video_info_from_caps (&state->info, caps)))
+ goto parse_fail;
+ state->caps = gst_caps_ref (caps);
+
+ return state;
+
+parse_fail:
+ {
+ g_slice_free (GstVideoCodecState, state);
+ return NULL;
+ }
+}
+
+/* Setcaps handler for the sink pad.  Parses the new caps into an input
+ * state; if they differ from the current configuration, drains pending
+ * frames and asks the subclass to reconfigure via ::set_format (which is
+ * mandatory).  Identical caps are accepted without touching the subclass. */
+static gboolean
+gst_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderClass *encoder_class;
+ GstVideoCodecState *state;
+ gboolean ret;
+ gboolean samecaps = FALSE;
+
+ encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ /* subclass should do something here ... */
+ g_return_val_if_fail (encoder_class->set_format != NULL, FALSE);
+
+ GST_DEBUG_OBJECT (encoder, "setcaps %" GST_PTR_FORMAT, caps);
+
+ state = _new_input_state (caps);
+ if (G_UNLIKELY (!state))
+ goto parse_fail;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+
+ if (encoder->priv->input_state)
+ samecaps =
+ gst_video_info_is_equal (&state->info,
+ &encoder->priv->input_state->info);
+
+ if (!samecaps) {
+ /* arrange draining pending frames */
+ gst_video_encoder_drain (encoder);
+
+ /* and subclass should be ready to configure format at any time around */
+ ret = encoder_class->set_format (encoder, state);
+ if (ret) {
+ if (encoder->priv->input_state)
+ gst_video_codec_state_unref (encoder->priv->input_state);
+ encoder->priv->input_state = state;
+ } else
+ gst_video_codec_state_unref (state);
+ } else {
+ /* no need to stir things up */
+ GST_DEBUG_OBJECT (encoder,
+ "new video format identical to configured format");
+ gst_video_codec_state_unref (state);
+ ret = TRUE;
+ }
+
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ if (!ret)
+ GST_WARNING_OBJECT (encoder, "rejected caps %" GST_PTR_FORMAT, caps);
+
+ gst_object_unref (encoder);
+
+ return ret;
+
+parse_fail:
+ {
+ GST_WARNING_OBJECT (encoder, "Failed to parse caps");
+ gst_object_unref (encoder);
+ return FALSE;
+ }
+}
+
+/**
+ * gst_video_encoder_proxy_getcaps:
+ * @enc: a #GstVideoEncoder
+ * @caps: initial caps
+ *
+ * Returns caps that express @caps (or sink template caps if @caps == NULL)
+ * restricted to resolution/format/... combinations supported by downstream
+ * elements (e.g. muxers).
+ *
+ * Returns: a #GstCaps owned by caller
+ *
+ * Since: 0.10.36
+ */
+GstCaps *
+gst_video_encoder_proxy_getcaps (GstVideoEncoder * encoder, GstCaps * caps)
+{
+ const GstCaps *templ_caps;
+ GstCaps *allowed;
+ GstCaps *fcaps, *filter_caps;
+ gint i, j;
+
+ /* Allow downstream to specify width/height/framerate/PAR constraints
+ * and forward them upstream for video converters to handle
+ */
+ templ_caps = caps ? caps : gst_pad_get_pad_template_caps (encoder->sinkpad);
+ allowed = gst_pad_get_allowed_caps (encoder->srcpad);
+
+ /* nothing (useful) negotiated downstream yet: just proxy the template */
+ if (!allowed || gst_caps_is_empty (allowed) || gst_caps_is_any (allowed)) {
+ fcaps = gst_caps_copy (templ_caps);
+ goto done;
+ }
+
+ GST_LOG_OBJECT (encoder, "template caps %" GST_PTR_FORMAT, templ_caps);
+ GST_LOG_OBJECT (encoder, "allowed caps %" GST_PTR_FORMAT, allowed);
+
+ filter_caps = gst_caps_new_empty ();
+
+ /* for each template structure, build a filter keeping only the
+ * size/rate/PAR constraints from every allowed downstream structure */
+ for (i = 0; i < gst_caps_get_size (templ_caps); i++) {
+ GQuark q_name =
+ gst_structure_get_name_id (gst_caps_get_structure (templ_caps, i));
+
+ for (j = 0; j < gst_caps_get_size (allowed); j++) {
+ const GstStructure *allowed_s = gst_caps_get_structure (allowed, j);
+ const GValue *val;
+ GstStructure *s;
+
+ s = gst_structure_id_empty_new (q_name);
+ if ((val = gst_structure_get_value (allowed_s, "width")))
+ gst_structure_set_value (s, "width", val);
+ if ((val = gst_structure_get_value (allowed_s, "height")))
+ gst_structure_set_value (s, "height", val);
+ if ((val = gst_structure_get_value (allowed_s, "framerate")))
+ gst_structure_set_value (s, "framerate", val);
+ if ((val = gst_structure_get_value (allowed_s, "pixel-aspect-ratio")))
+ gst_structure_set_value (s, "pixel-aspect-ratio", val);
+
+ gst_caps_merge_structure (filter_caps, s);
+ }
+ }
+
+ fcaps = gst_caps_intersect (filter_caps, templ_caps);
+ gst_caps_unref (filter_caps);
+
+done:
+ gst_caps_replace (&allowed, NULL);
+
+ GST_LOG_OBJECT (encoder, "proxy caps %" GST_PTR_FORMAT, fcaps);
+
+ return fcaps;
+}
+
+/* Getcaps handler for the sink pad.  Defers to the subclass ::getcaps
+ * vmethod when provided, otherwise proxies the srcpad's allowed caps via
+ * gst_video_encoder_proxy_getcaps().  Returns caps owned by the caller. */
+static GstCaps *
+gst_video_encoder_sink_getcaps (GstPad * pad)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderClass *klass;
+ GstCaps *caps;
+
+ encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ if (klass->getcaps)
+ caps = klass->getcaps (encoder);
+ else
+ caps = gst_video_encoder_proxy_getcaps (encoder, NULL);
+
+ GST_LOG_OBJECT (encoder, "Returning caps %" GST_PTR_FORMAT, caps);
+
+ /* drop our ref only after the last use of encoder; previously the unref
+ * preceded GST_LOG_OBJECT, using the object after its ref was released */
+ gst_object_unref (encoder);
+
+ return caps;
+}
+
+/* GObject finalize: release the cached codec header buffers and the
+ * stream lock, then chain up. */
+static void
+gst_video_encoder_finalize (GObject * object)
+{
+ GstVideoEncoder *encoder;
+
+ GST_DEBUG_OBJECT (object, "finalize");
+
+ encoder = GST_VIDEO_ENCODER (object);
+ if (encoder->priv->headers) {
+ g_list_foreach (encoder->priv->headers, (GFunc) gst_buffer_unref, NULL);
+ g_list_free (encoder->priv->headers);
+ }
+ g_static_rec_mutex_free (&encoder->stream_lock);
+
+ G_OBJECT_CLASS (parent_class)->finalize (object);
+}
+
+/* Push @event on the source pad, first tracking TIME newsegments in
+ * encoder->output_segment (non-TIME newsegments are forwarded untouched). */
+static gboolean
+gst_video_encoder_push_event (GstVideoEncoder * encoder, GstEvent * event)
+{
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_NEWSEGMENT:
+ {
+ gboolean update;
+ double rate;
+ double applied_rate;
+ GstFormat format;
+ gint64 start;
+ gint64 stop;
+ gint64 position;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
+ &format, &start, &stop, &position);
+
+ GST_DEBUG_OBJECT (encoder, "newseg rate %g, applied rate %g, "
+ "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
+ ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
+ GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
+ GST_TIME_ARGS (position));
+
+ if (format != GST_FORMAT_TIME) {
+ GST_DEBUG_OBJECT (encoder, "received non TIME newsegment");
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
+
+ gst_segment_set_newsegment_full (&encoder->output_segment, update, rate,
+ applied_rate, format, start, stop, position);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return gst_pad_push_event (encoder->srcpad, event);
+}
+
+/* Default handling of sink-pad events.  Returns TRUE when the event was
+ * consumed here (and must not be forwarded by the caller):
+ * - EOS: drains the subclass via ::finish; consumed when ::finish returned
+ *   _FLOW_DROPPED (subclass pushed EOS itself).
+ *   NOTE(review): in that consumed case the EOS event is neither forwarded
+ *   nor unref'ed here — verify against the caller for a possible leak.
+ * - NEWSEGMENT: TIME segments update input_segment; event still forwarded.
+ * - CUSTOM_DOWNSTREAM force-key-unit: queued on priv->force_key_unit and
+ *   consumed (unref'ed here). */
+static gboolean
+gst_video_encoder_sink_eventfunc (GstVideoEncoder * encoder, GstEvent * event)
+{
+ GstVideoEncoderClass *encoder_class;
+ gboolean ret = FALSE;
+
+ encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_EOS:
+ {
+ GstFlowReturn flow_ret;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ encoder->priv->at_eos = TRUE;
+
+ if (encoder_class->finish) {
+ flow_ret = encoder_class->finish (encoder);
+ } else {
+ flow_ret = GST_FLOW_OK;
+ }
+
+ ret = (flow_ret == GST_VIDEO_ENCODER_FLOW_DROPPED);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
+ case GST_EVENT_NEWSEGMENT:
+ {
+ gboolean update;
+ double rate;
+ double applied_rate;
+ GstFormat format;
+ gint64 start;
+ gint64 stop;
+ gint64 position;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
+ &format, &start, &stop, &position);
+
+ GST_DEBUG_OBJECT (encoder, "newseg rate %g, applied rate %g, "
+ "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
+ ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
+ GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
+ GST_TIME_ARGS (position));
+
+ if (format != GST_FORMAT_TIME) {
+ GST_DEBUG_OBJECT (encoder, "received non TIME newsegment");
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
+
+ /* a new segment ends any previous EOS condition */
+ encoder->priv->at_eos = FALSE;
+
+ gst_segment_set_newsegment_full (&encoder->input_segment, update, rate,
+ applied_rate, format, start, stop, position);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ break;
+ }
+ case GST_EVENT_CUSTOM_DOWNSTREAM:
+ {
+ if (gst_video_event_is_force_key_unit (event)) {
+ GstClockTime running_time;
+ gboolean all_headers;
+ guint count;
+
+ if (gst_video_event_parse_downstream_force_key_unit (event,
+ NULL, NULL, &running_time, &all_headers, &count)) {
+ ForcedKeyUnitEvent *fevt;
+
+ GST_OBJECT_LOCK (encoder);
+ fevt = forced_key_unit_event_new (running_time, all_headers, count);
+ encoder->priv->force_key_unit =
+ g_list_append (encoder->priv->force_key_unit, fevt);
+ GST_OBJECT_UNLOCK (encoder);
+
+ GST_DEBUG_OBJECT (encoder,
+ "force-key-unit event: running-time %" GST_TIME_FORMAT
+ ", all_headers %d, count %u",
+ GST_TIME_ARGS (running_time), all_headers, count);
+ }
+ gst_event_unref (event);
+ ret = TRUE;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Sink pad event function.  Offers the event to the subclass ::sink_event
+ * first, then to the default handler; unhandled serialized events are
+ * queued for the next frame, other events are forwarded immediately. */
+static gboolean
+gst_video_encoder_sink_event (GstPad * pad, GstEvent * event)
+{
+ GstVideoEncoder *enc;
+ GstVideoEncoderClass *klass;
+ gboolean handled = FALSE;
+ gboolean ret = TRUE;
+
+ enc = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ klass = GST_VIDEO_ENCODER_GET_CLASS (enc);
+
+ GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event),
+ GST_EVENT_TYPE_NAME (event));
+
+ if (klass->sink_event)
+ handled = klass->sink_event (enc, event);
+
+ if (!handled)
+ handled = gst_video_encoder_sink_eventfunc (enc, event);
+
+ if (!handled) {
+ /* Forward non-serialized events and EOS/FLUSH_STOP immediately.
+ * For EOS this is required because no buffer or serialized event
+ * will come after EOS and nothing could trigger another
+ * _finish_frame() call. *
+ * If the subclass handles sending of EOS manually it can return
+ * _DROPPED from ::finish() and all other subclasses should have
+ * encoded/flushed all remaining data before this
+ *
+ * For FLUSH_STOP this is required because it is expected
+ * to be forwarded immediately and no buffers are queued anyway.
+ */
+ if (!GST_EVENT_IS_SERIALIZED (event)
+ || GST_EVENT_TYPE (event) == GST_EVENT_EOS
+ || GST_EVENT_TYPE (event) == GST_EVENT_FLUSH_STOP) {
+ ret = gst_video_encoder_push_event (enc, event);
+ } else {
+ /* hold the event; it will travel with the next frame (prepended,
+ * list order is reversed when attached) */
+ GST_VIDEO_ENCODER_STREAM_LOCK (enc);
+ enc->priv->current_frame_events =
+ g_list_prepend (enc->priv->current_frame_events, event);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (enc);
+ }
+ }
+
+ GST_DEBUG_OBJECT (enc, "event handled");
+
+ gst_object_unref (enc);
+ return ret;
+}
+
+/* Default handling of source-pad events: upstream force-key-unit requests
+ * are queued on priv->force_key_unit and consumed (unref'ed).  Returns
+ * TRUE when the event was consumed here. */
+static gboolean
+gst_video_encoder_src_eventfunc (GstVideoEncoder * encoder, GstEvent * event)
+{
+ gboolean handled = FALSE;
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CUSTOM_UPSTREAM:
+ {
+ if (gst_video_event_is_force_key_unit (event)) {
+ GstClockTime running_time;
+ gboolean all_headers;
+ guint count;
+
+ if (gst_video_event_parse_upstream_force_key_unit (event,
+ &running_time, &all_headers, &count)) {
+ ForcedKeyUnitEvent *fevt;
+
+ GST_OBJECT_LOCK (encoder);
+ fevt = forced_key_unit_event_new (running_time, all_headers, count);
+ encoder->priv->force_key_unit =
+ g_list_append (encoder->priv->force_key_unit, fevt);
+ GST_OBJECT_UNLOCK (encoder);
+
+ GST_DEBUG_OBJECT (encoder,
+ "force-key-unit event: running-time %" GST_TIME_FORMAT
+ ", all_headers %d, count %u",
+ GST_TIME_ARGS (running_time), all_headers, count);
+ }
+ gst_event_unref (event);
+ handled = TRUE;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ return handled;
+}
+
+/* Source pad event function: subclass ::src_event first, then the default
+ * handler, then gst_pad_event_default for anything left unhandled.
+ * NOTE(review): for events consumed by the handlers, ret stays FALSE and
+ * is returned to the caller — confirm this matches the intended pad-event
+ * return semantics. */
+static gboolean
+gst_video_encoder_src_event (GstPad * pad, GstEvent * event)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderClass *klass;
+ gboolean ret = FALSE;
+ gboolean handled = FALSE;
+
+ encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ GST_LOG_OBJECT (encoder, "handling event: %" GST_PTR_FORMAT, event);
+
+ if (klass->src_event)
+ handled = klass->src_event (encoder, event);
+
+ if (!handled)
+ handled = gst_video_encoder_src_eventfunc (encoder, event);
+
+ if (!handled)
+ ret = gst_pad_event_default (pad, event);
+
+ gst_object_unref (encoder);
+
+ return ret;
+}
+
+/* Advertise the query types answered by the src pad (CONVERT, LATENCY). */
+static const GstQueryType *
+gst_video_encoder_get_query_types (GstPad * pad)
+{
+ static const GstQueryType query_types[] = {
+ GST_QUERY_CONVERT,
+ GST_QUERY_LATENCY,
+ 0
+ };
+
+ return query_types;
+}
+
+/* Source pad query handler.
+ * CONVERT: BYTES<->TIME using accumulated totals (see
+ * gst_video_encoded_video_convert).  LATENCY: peer latency plus our own
+ * configured min/max latency (read under the object lock). */
+static gboolean
+gst_video_encoder_src_query (GstPad * pad, GstQuery * query)
+{
+ GstVideoEncoderPrivate *priv;
+ GstVideoEncoder *enc;
+ gboolean res;
+ GstPad *peerpad;
+
+ enc = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ priv = enc->priv;
+ peerpad = gst_pad_get_peer (enc->sinkpad);
+
+ GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query);
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_CONVERT:
+ {
+ GstFormat src_fmt, dest_fmt;
+ gint64 src_val, dest_val;
+
+ gst_query_parse_convert (query, &src_fmt, &src_val, &dest_fmt, &dest_val);
+ res =
+ gst_video_encoded_video_convert (priv->bytes, priv->time, src_fmt,
+ src_val, &dest_fmt, &dest_val);
+ if (!res)
+ goto error;
+ gst_query_set_convert (query, src_fmt, src_val, dest_fmt, dest_val);
+ break;
+ }
+ case GST_QUERY_LATENCY:
+ {
+ gboolean live;
+ GstClockTime min_latency, max_latency;
+
+ res = gst_pad_query (peerpad, query);
+ if (res) {
+ gst_query_parse_latency (query, &live, &min_latency, &max_latency);
+ GST_DEBUG_OBJECT (enc, "Peer latency: live %d, min %"
+ GST_TIME_FORMAT " max %" GST_TIME_FORMAT, live,
+ GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
+
+ GST_OBJECT_LOCK (enc);
+ min_latency += priv->min_latency;
+ if (max_latency != GST_CLOCK_TIME_NONE) {
+ max_latency += priv->max_latency;
+ }
+ GST_OBJECT_UNLOCK (enc);
+
+ gst_query_set_latency (query, live, min_latency, max_latency);
+ }
+ }
+ break;
+ default:
+ res = gst_pad_query_default (pad, query);
+ }
+ gst_object_unref (peerpad);
+ gst_object_unref (enc);
+ return res;
+
+error:
+ GST_DEBUG_OBJECT (enc, "query failed");
+ gst_object_unref (peerpad);
+ gst_object_unref (enc);
+ return res;
+}
+
+/* Allocate a new GstVideoCodecFrame for input @buf (ownership of @buf is
+ * taken).  Frame numbers are assigned under the stream lock; any events
+ * queued since the previous frame are attached to this one. */
+static GstVideoCodecFrame *
+gst_video_encoder_new_frame (GstVideoEncoder * encoder, GstBuffer * buf,
+ GstClockTime timestamp, GstClockTime duration)
+{
+ GstVideoEncoderPrivate *priv = encoder->priv;
+ GstVideoCodecFrame *frame;
+
+ frame = g_slice_new0 (GstVideoCodecFrame);
+
+ frame->ref_count = 1;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ frame->system_frame_number = priv->system_frame_number;
+ priv->system_frame_number++;
+
+ frame->presentation_frame_number = priv->presentation_frame_number;
+ priv->presentation_frame_number++;
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ frame->events = priv->current_frame_events;
+ priv->current_frame_events = NULL;
+ frame->input_buffer = buf;
+ frame->pts = timestamp;
+ frame->duration = duration;
+
+ return frame;
+}
+
+
+/* Sink pad chain function.  Takes ownership of @buf, clips it to the
+ * output segment, wraps it in a GstVideoCodecFrame (which takes over the
+ * buffer and any queued serialized events), applies a pending
+ * force-key-unit request if due, and hands the frame to the subclass
+ * ::handle_frame.  Every path that does not store the buffer in a frame
+ * must unref it, since a chain function owns the incoming buffer. */
+static GstFlowReturn
+gst_video_encoder_chain (GstPad * pad, GstBuffer * buf)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderPrivate *priv;
+ GstVideoEncoderClass *klass;
+ GstVideoCodecFrame *frame;
+ GstFlowReturn ret = GST_FLOW_OK;
+ gint64 start, stop = GST_CLOCK_TIME_NONE, cstart, cstop;
+
+ encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ priv = encoder->priv;
+ klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ g_return_val_if_fail (klass->handle_frame != NULL, GST_FLOW_ERROR);
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+
+ /* no input caps configured: cannot hand raw video to the subclass */
+ if (!GST_PAD_CAPS (pad)) {
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ gst_buffer_unref (buf); /* we own buf; don't leak it on error */
+ goto done;
+ }
+
+ start = GST_BUFFER_TIMESTAMP (buf);
+ if (GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DURATION (buf)))
+ stop = start + GST_BUFFER_DURATION (buf);
+
+ GST_LOG_OBJECT (encoder,
+ "received buffer of size %d with ts %" GST_TIME_FORMAT
+ ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf),
+ GST_TIME_ARGS (start), GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
+
+ if (priv->at_eos) {
+ ret = GST_FLOW_UNEXPECTED;
+ gst_buffer_unref (buf); /* we own buf; don't leak it on error */
+ goto done;
+ }
+
+ /* Drop buffers outside of segment */
+ if (!gst_segment_clip (&encoder->output_segment,
+ GST_FORMAT_TIME, start, stop, &cstart, &cstop)) {
+ GST_DEBUG_OBJECT (encoder, "clipping to segment dropped frame");
+ gst_buffer_unref (buf);
+ goto done;
+ }
+
+ /* the frame takes ownership of buf and of the queued events */
+ frame = gst_video_encoder_new_frame (encoder, buf, cstart, cstop - cstart);
+
+ GST_OBJECT_LOCK (encoder);
+ if (priv->force_key_unit) {
+ ForcedKeyUnitEvent *fevt = NULL;
+ GstClockTime running_time;
+ GList *l;
+
+ running_time =
+ gst_segment_to_running_time (&encoder->output_segment, GST_FORMAT_TIME,
+ GST_BUFFER_TIMESTAMP (buf));
+
+ for (l = priv->force_key_unit; l; l = l->next) {
+ ForcedKeyUnitEvent *tmp = l->data;
+
+ /* Skip pending keyunits */
+ if (tmp->pending)
+ continue;
+
+ /* Simple case, keyunit ASAP */
+ if (tmp->running_time == GST_CLOCK_TIME_NONE) {
+ fevt = tmp;
+ break;
+ }
+
+ /* Event for before this frame */
+ if (tmp->running_time <= running_time) {
+ fevt = tmp;
+ break;
+ }
+ }
+
+ if (fevt) {
+ GST_DEBUG_OBJECT (encoder,
+ "Forcing a key unit at running time %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (running_time));
+ GST_VIDEO_CODEC_FRAME_SET_FORCE_KEYFRAME (frame);
+ if (fevt->all_headers)
+ GST_VIDEO_CODEC_FRAME_SET_FORCE_KEYFRAME_HEADERS (frame);
+ fevt->pending = TRUE;
+ }
+ }
+ GST_OBJECT_UNLOCK (encoder);
+
+ priv->frames = g_list_append (priv->frames, frame);
+
+ /* new data, more finish needed */
+ priv->drained = FALSE;
+
+ GST_LOG_OBJECT (encoder, "passing frame pfn %d to subclass",
+ frame->presentation_frame_number);
+
+ ret = klass->handle_frame (encoder, frame);
+
+done:
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ gst_object_unref (encoder);
+
+ return ret;
+}
+
+/* GstElement state-change handler.
+ * Upward transitions call the optional subclass open()/start() vmethods
+ * before chaining to the parent class; downward transitions reset the
+ * encoder and call stop()/close() after the parent handled the change.
+ * Any vmethod returning FALSE aborts with GST_STATE_CHANGE_FAILURE. */
+static GstStateChangeReturn
+gst_video_encoder_change_state (GstElement * element, GstStateChange transition)
+{
+ GstVideoEncoder *encoder;
+ GstVideoEncoderClass *encoder_class;
+ GstStateChangeReturn ret;
+
+ encoder = GST_VIDEO_ENCODER (element);
+ encoder_class = GST_VIDEO_ENCODER_GET_CLASS (element);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_NULL_TO_READY:
+ /* open device/library if needed */
+ if (encoder_class->open && !encoder_class->open (encoder))
+ goto open_failed;
+ break;
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ /* Initialize device/library if needed */
+ if (encoder_class->start && !encoder_class->start (encoder))
+ goto start_failed;
+ break;
+ default:
+ break;
+ }
+
+ ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ gst_video_encoder_reset (encoder);
+ if (encoder_class->stop && !encoder_class->stop (encoder))
+ goto stop_failed;
+ break;
+ case GST_STATE_CHANGE_READY_TO_NULL:
+ /* close device/library if needed */
+ if (encoder_class->close && !encoder_class->close (encoder))
+ goto close_failed;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+
+ /* Errors */
+
+open_failed:
+ {
+ GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, (NULL),
+ ("Failed to open encoder"));
+ return GST_STATE_CHANGE_FAILURE;
+ }
+
+start_failed:
+ {
+ GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, (NULL),
+ ("Failed to start encoder"));
+ return GST_STATE_CHANGE_FAILURE;
+ }
+
+stop_failed:
+ {
+ GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, (NULL),
+ ("Failed to stop encoder"));
+ return GST_STATE_CHANGE_FAILURE;
+ }
+
+close_failed:
+ {
+ GST_ELEMENT_ERROR (encoder, LIBRARY, INIT, (NULL),
+ ("Failed to close encoder"));
+ return GST_STATE_CHANGE_FAILURE;
+ }
+}
+
+/* Push the current output state's caps onto the source pad.
+ * If the output state changed since the last call, the caps are first
+ * (re)filled from the GstVideoInfo (width/height/PAR/framerate) and the
+ * optional codec_data buffer. Caller must hold the stream lock. */
+static gboolean
+gst_video_encoder_set_src_caps (GstVideoEncoder * encoder)
+{
+ gboolean ret;
+ GstVideoCodecState *state = encoder->priv->output_state;
+ GstVideoInfo *info = &state->info;
+
+ g_return_val_if_fail (state->caps != NULL, FALSE);
+
+ if (encoder->priv->output_state_changed) {
+ state->caps = gst_caps_make_writable (state->caps);
+
+ /* Fill caps */
+ gst_caps_set_simple (state->caps, "width", G_TYPE_INT, info->width,
+ "height", G_TYPE_INT, info->height,
+ "pixel-aspect-ratio", GST_TYPE_FRACTION,
+ info->par_n, info->par_d, NULL);
+ if (info->flags & GST_VIDEO_FLAG_VARIABLE_FPS && info->fps_n != 0) {
+ /* variable fps with a max-framerate */
+ gst_caps_set_simple (state->caps, "framerate", GST_TYPE_FRACTION, 0, 1,
+ "max-framerate", GST_TYPE_FRACTION, info->fps_n, info->fps_d, NULL);
+ } else {
+ /* no variable fps or no max-framerate */
+ gst_caps_set_simple (state->caps, "framerate", GST_TYPE_FRACTION,
+ info->fps_n, info->fps_d, NULL);
+ }
+ if (state->codec_data)
+ gst_caps_set_simple (state->caps, "codec_data", GST_TYPE_BUFFER,
+ state->codec_data, NULL);
+ encoder->priv->output_state_changed = FALSE;
+ }
+
+ ret = gst_pad_set_caps (encoder->srcpad, state->caps);
+
+ return ret;
+}
+
+/**
+ * gst_video_encoder_finish_frame:
+ * @encoder: a #GstVideoEncoder
+ * @frame: (transfer full): an encoded #GstVideoCodecFrame
+ *
+ * @frame must have a valid encoded data buffer, whose metadata fields
+ * are then appropriately set according to frame data or no buffer at
+ * all if the frame should be dropped.
+ * It is subsequently pushed downstream or provided to @pre_push.
+ * In any case, the frame is considered finished and released.
+ *
+ * Returns: a #GstFlowReturn resulting from sending data downstream
+ *
+ * Since: 0.10.36
+ */
+GstFlowReturn
+gst_video_encoder_finish_frame (GstVideoEncoder * encoder,
+ GstVideoCodecFrame * frame)
+{
+ GstVideoEncoderPrivate *priv = encoder->priv;
+ GstFlowReturn ret = GST_FLOW_OK;
+ GstVideoEncoderClass *encoder_class;
+ GList *l;
+ gboolean send_headers = FALSE;
+ /* the very first presented frame marks the start of the stream */
+ gboolean discont = (frame->presentation_frame_number == 0);
+
+ encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+
+ GST_LOG_OBJECT (encoder,
+ "finish frame fpn %d", frame->presentation_frame_number);
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+
+ /* renegotiate src caps lazily, on the first finished frame after a
+ * state change */
+ if (G_UNLIKELY (priv->output_state_changed))
+ gst_video_encoder_set_src_caps (encoder);
+
+ if (G_UNLIKELY (priv->output_state == NULL))
+ goto no_output_state;
+
+ /* Push all pending events that arrived before this frame */
+ for (l = priv->frames; l; l = l->next) {
+ GstVideoCodecFrame *tmp = l->data;
+
+ if (tmp->events) {
+ GList *k;
+
+ /* events were collected newest-first; replay oldest-first */
+ for (k = g_list_last (tmp->events); k; k = k->prev)
+ gst_video_encoder_push_event (encoder, k->data);
+ g_list_free (tmp->events);
+ tmp->events = NULL;
+ }
+
+ if (tmp == frame)
+ break;
+ }
+
+ /* no buffer data means this frame is skipped/dropped */
+ if (!frame->output_buffer) {
+ GST_DEBUG_OBJECT (encoder, "skipping frame %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (frame->pts));
+ goto done;
+ }
+
+ /* a keyframe answers any pending force-key-unit request: emit the
+ * downstream force-key-unit event and drop the request */
+ if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame) && priv->force_key_unit) {
+ GstClockTime stream_time, running_time;
+ GstEvent *ev;
+ ForcedKeyUnitEvent *fevt = NULL;
+ GList *l;
+
+ running_time =
+ gst_segment_to_running_time (&encoder->output_segment, GST_FORMAT_TIME,
+ frame->pts);
+
+ GST_OBJECT_LOCK (encoder);
+ for (l = priv->force_key_unit; l; l = l->next) {
+ ForcedKeyUnitEvent *tmp = l->data;
+
+ /* Skip non-pending keyunits */
+ if (!tmp->pending)
+ continue;
+
+ /* Simple case, keyunit ASAP */
+ if (tmp->running_time == GST_CLOCK_TIME_NONE) {
+ fevt = tmp;
+ break;
+ }
+
+ /* Event for before this frame */
+ if (tmp->running_time <= running_time) {
+ fevt = tmp;
+ break;
+ }
+ }
+
+ if (fevt) {
+ priv->force_key_unit = g_list_remove (priv->force_key_unit, fevt);
+ }
+ GST_OBJECT_UNLOCK (encoder);
+
+ if (fevt) {
+ stream_time =
+ gst_segment_to_stream_time (&encoder->output_segment, GST_FORMAT_TIME,
+ frame->pts);
+
+ ev = gst_video_event_new_downstream_force_key_unit
+ (frame->pts, stream_time, running_time,
+ fevt->all_headers, fevt->count);
+
+ gst_video_encoder_push_event (encoder, ev);
+
+ if (fevt->all_headers)
+ send_headers = TRUE;
+
+ GST_DEBUG_OBJECT (encoder,
+ "Forced key unit: running-time %" GST_TIME_FORMAT
+ ", all_headers %d, count %u",
+ GST_TIME_ARGS (running_time), fevt->all_headers, fevt->count);
+ forced_key_unit_event_free (fevt);
+ }
+ }
+
+ if (GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame)) {
+ priv->distance_from_sync = 0;
+ GST_BUFFER_FLAG_UNSET (frame->output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
+ /* For keyframes, DTS = PTS */
+ frame->dts = frame->pts;
+ } else {
+ GST_BUFFER_FLAG_SET (frame->output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
+ }
+
+ frame->distance_from_sync = priv->distance_from_sync;
+ priv->distance_from_sync++;
+
+ GST_BUFFER_TIMESTAMP (frame->output_buffer) = frame->pts;
+ GST_BUFFER_DURATION (frame->output_buffer) = frame->duration;
+
+ /* update rate estimate */
+ priv->bytes += GST_BUFFER_SIZE (frame->output_buffer);
+ if (GST_CLOCK_TIME_IS_VALID (frame->duration)) {
+ priv->time += frame->duration;
+ } else {
+ /* better none than nothing valid */
+ priv->time = GST_CLOCK_TIME_NONE;
+ }
+
+ /* (re)send stream headers before this buffer if requested */
+ if (G_UNLIKELY (send_headers || priv->new_headers)) {
+ GList *tmp, *copy = NULL;
+
+ GST_DEBUG_OBJECT (encoder, "Sending headers");
+
+ /* First make all buffers metadata-writable */
+ for (tmp = priv->headers; tmp; tmp = tmp->next) {
+ GstBuffer *tmpbuf = GST_BUFFER (tmp->data);
+
+ copy = g_list_append (copy, gst_buffer_make_metadata_writable (tmpbuf));
+ }
+ g_list_free (priv->headers);
+ priv->headers = copy;
+
+ for (tmp = priv->headers; tmp; tmp = tmp->next) {
+ GstBuffer *tmpbuf = GST_BUFFER (tmp->data);
+
+ gst_buffer_set_caps (tmpbuf, GST_PAD_CAPS (encoder->srcpad));
+ gst_buffer_ref (tmpbuf);
+ priv->bytes += GST_BUFFER_SIZE (tmpbuf);
+ if (G_UNLIKELY (discont)) {
+ GST_LOG_OBJECT (encoder, "marking discont");
+ GST_BUFFER_FLAG_SET (tmpbuf, GST_BUFFER_FLAG_DISCONT);
+ discont = FALSE;
+ }
+
+ gst_pad_push (encoder->srcpad, tmpbuf);
+ }
+ priv->new_headers = FALSE;
+ }
+
+ if (G_UNLIKELY (discont)) {
+ GST_LOG_OBJECT (encoder, "marking discont");
+ GST_BUFFER_FLAG_SET (frame->output_buffer, GST_BUFFER_FLAG_DISCONT);
+ }
+
+ gst_buffer_set_caps (GST_BUFFER (frame->output_buffer),
+ GST_PAD_CAPS (encoder->srcpad));
+
+ /* subclass may take over pushing (or veto with a flow return) */
+ if (encoder_class->pre_push)
+ ret = encoder_class->pre_push (encoder, frame);
+
+ if (ret == GST_FLOW_OK)
+ ret = gst_pad_push (encoder->srcpad, frame->output_buffer);
+
+ /* ownership passed downstream (or dropped by pre_push) */
+ frame->output_buffer = NULL;
+
+done:
+ /* handed out */
+ priv->frames = g_list_remove (priv->frames, frame);
+
+ gst_video_codec_frame_unref (frame);
+
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ return ret;
+
+ /* ERRORS */
+no_output_state:
+ {
+ /* NOTE(review): this path returns without removing/unreffing @frame,
+ * although the function is (transfer full) — confirm intended. */
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+ GST_ERROR_OBJECT (encoder, "Output state was not configured");
+ return GST_FLOW_ERROR;
+ }
+}
+
+/**
+ * gst_video_encoder_get_output_state:
+ * @encoder: a #GstVideoEncoder
+ *
+ * Get the current #GstVideoCodecState
+ *
+ * Returns: (transfer full): #GstVideoCodecState describing format of video data.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecState *
+gst_video_encoder_get_output_state (GstVideoEncoder * encoder)
+{
+ GstVideoCodecState *state;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ /* NOTE(review): if no output state was set yet this refs NULL;
+ * gst_video_codec_state_ref() guards with g_return_val_if_fail, so it
+ * warns and returns NULL rather than crashing. */
+ state = gst_video_codec_state_ref (encoder->priv->output_state);
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ return state;
+}
+
+/**
+ * gst_video_encoder_set_output_state:
+ * @encoder: a #GstVideoEncoder
+ * @caps: (transfer full): the #GstCaps to use for the output
+ * @reference: (allow-none) (transfer none): An optional reference @GstVideoCodecState
+ *
+ * Creates a new #GstVideoCodecState with the specified caps as the output state
+ * for the encoder.
+ * Any previously set output state on @encoder will be replaced by the newly
+ * created one.
+ *
+ * The specified @caps should not contain any resolution, pixel-aspect-ratio,
+ * framerate, codec-data, .... Those should be specified instead in the returned
+ * #GstVideoCodecState.
+ *
+ * If the subclass wishes to copy over existing fields (like pixel aspect ratio,
+ * or framerate) from an existing #GstVideoCodecState, it can be provided as a
+ * @reference.
+ *
+ * If the subclass wishes to override some fields from the output state (like
+ * pixel-aspect-ratio or framerate) it can do so on the returned #GstVideoCodecState.
+ *
+ * The new output state will only take effect (set on pads and buffers) starting
+ * from the next call to #gst_video_encoder_finish_frame().
+ *
+ * Returns: (transfer full): the newly configured output state.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecState *
+gst_video_encoder_set_output_state (GstVideoEncoder * encoder, GstCaps * caps,
+ GstVideoCodecState * reference)
+{
+ GstVideoEncoderPrivate *priv = encoder->priv;
+ GstVideoCodecState *state;
+
+ g_return_val_if_fail (caps != NULL, NULL);
+
+ state = _new_output_state (caps, reference);
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ /* replace any previous output state */
+ if (priv->output_state)
+ gst_video_codec_state_unref (priv->output_state);
+ priv->output_state = gst_video_codec_state_ref (state);
+
+ /* caps will be (re)pushed on the next finish_frame */
+ priv->output_state_changed = TRUE;
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ return state;
+}
+
+/**
+ * gst_video_encoder_set_latency:
+ * @encoder: a #GstVideoEncoder
+ * @min_latency: minimum latency
+ * @max_latency: maximum latency
+ *
+ * Informs baseclass of encoding latency.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_encoder_set_latency (GstVideoEncoder * encoder,
+ GstClockTime min_latency, GstClockTime max_latency)
+{
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
+ g_return_if_fail (max_latency >= min_latency);
+
+ GST_OBJECT_LOCK (encoder);
+ encoder->priv->min_latency = min_latency;
+ encoder->priv->max_latency = max_latency;
+ GST_OBJECT_UNLOCK (encoder);
+
+ /* tell the pipeline to redistribute latency */
+ gst_element_post_message (GST_ELEMENT_CAST (encoder),
+ gst_message_new_latency (GST_OBJECT_CAST (encoder)));
+}
+
+/**
+ * gst_video_encoder_get_latency:
+ * @encoder: a #GstVideoEncoder
+ * @min_latency: (out) (allow-none): the configured minimum latency
+ * @max_latency: (out) (allow-none): the configured maximum latency
+ *
+ * Returns the configured encoding latency.
+ *
+ * Since: 0.10.36
+ */
+void
+gst_video_encoder_get_latency (GstVideoEncoder * encoder,
+ GstClockTime * min_latency, GstClockTime * max_latency)
+{
+ /* object lock protects the latency fields (see set_latency) */
+ GST_OBJECT_LOCK (encoder);
+ if (min_latency)
+ *min_latency = encoder->priv->min_latency;
+ if (max_latency)
+ *max_latency = encoder->priv->max_latency;
+ GST_OBJECT_UNLOCK (encoder);
+}
+
+/**
+ * gst_video_encoder_get_oldest_frame:
+ * @encoder: a #GstVideoEncoder
+ *
+ * Get the oldest unfinished pending #GstVideoCodecFrame
+ *
+ * Returns: (transfer none): oldest unfinished pending #GstVideoCodecFrame
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecFrame *
+gst_video_encoder_get_oldest_frame (GstVideoEncoder * encoder)
+{
+ GList *g;
+
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ g = encoder->priv->frames;
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ /* NOTE(review): the returned frame is not reffed and the list node is
+ * read after unlocking — caller must hold the stream lock or otherwise
+ * guarantee the frame stays alive. */
+ if (g == NULL)
+ return NULL;
+ return (GstVideoCodecFrame *) (g->data);
+}
+
+/**
+ * gst_video_encoder_get_frame:
+ * @encoder: a #GstVideoEncoder
+ * @frame_number: system_frame_number of a frame
+ *
+ * Get a pending unfinished #GstVideoCodecFrame
+ *
+ * Returns: (transfer none): pending unfinished #GstVideoCodecFrame identified by @frame_number.
+ *
+ * Since: 0.10.36
+ */
+GstVideoCodecFrame *
+gst_video_encoder_get_frame (GstVideoEncoder * encoder, int frame_number)
+{
+ GList *g;
+ GstVideoCodecFrame *frame = NULL;
+
+ GST_DEBUG_OBJECT (encoder, "frame_number : %d", frame_number);
+
+ /* linear scan of pending frames under the stream lock */
+ GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
+ for (g = encoder->priv->frames; g; g = g->next) {
+ GstVideoCodecFrame *tmp = g->data;
+
+ if (tmp->system_frame_number == frame_number) {
+ frame = tmp;
+ break;
+ }
+ }
+ GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
+
+ return frame;
+}
--- /dev/null
+/* GStreamer
+ * Copyright (C) 2008 David Schleef <ds@schleef.org>
+ * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
+ * Copyright (C) 2011 Nokia Corporation. All rights reserved.
+ * Contact: Stefan Kost <stefan.kost@nokia.com>
+ * Copyright (C) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _GST_VIDEO_ENCODER_H_
+#define _GST_VIDEO_ENCODER_H_
+
+#include <gst/video/gstvideoutils.h>
+
+G_BEGIN_DECLS
+
+#define GST_TYPE_VIDEO_ENCODER \
+ (gst_video_encoder_get_type())
+#define GST_VIDEO_ENCODER(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_ENCODER,GstVideoEncoder))
+#define GST_VIDEO_ENCODER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VIDEO_ENCODER,GstVideoEncoderClass))
+#define GST_VIDEO_ENCODER_GET_CLASS(obj) \
+ (G_TYPE_INSTANCE_GET_CLASS((obj),GST_TYPE_VIDEO_ENCODER,GstVideoEncoderClass))
+#define GST_IS_VIDEO_ENCODER(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_ENCODER))
+/* Fixed: the macro parameter was named 'obj' while the expansion used
+ * 'klass', so every use of GST_IS_VIDEO_ENCODER_CLASS failed to compile. */
+#define GST_IS_VIDEO_ENCODER_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_ENCODER))
+#define GST_VIDEO_ENCODER_CAST(enc) ((GstVideoEncoder*)enc)
+
+/**
+ * GST_VIDEO_ENCODER_SINK_NAME:
+ *
+ * The name of the templates for the sink pad.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_SINK_NAME "sink"
+/**
+ * GST_VIDEO_ENCODER_SRC_NAME:
+ *
+ * The name of the templates for the source pad.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_SRC_NAME "src"
+
+/**
+ * GST_VIDEO_ENCODER_FLOW_DROPPED:
+ *
+ * Returned when the event/buffer should be dropped.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_FLOW_DROPPED GST_FLOW_CUSTOM_SUCCESS_1
+
+/**
+ * GST_VIDEO_ENCODER_SRC_PAD:
+ * @obj: a #GstVideoEncoder
+ *
+ * Gives the pointer to the source #GstPad object of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_SRC_PAD(obj) (((GstVideoEncoder *) (obj))->srcpad)
+
+/**
+ * GST_VIDEO_ENCODER_SINK_PAD:
+ * @obj: a #GstVideoEncoder
+ *
+ * Gives the pointer to the sink #GstPad object of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_SINK_PAD(obj) (((GstVideoEncoder *) (obj))->sinkpad)
+
+/**
+ * GST_VIDEO_ENCODER_FLOW_NEED_DATA:
+ *
+ * Returned while parsing to indicate more data is needed.
+ *
+ * Since: 0.10.36
+ **/
+#define GST_VIDEO_ENCODER_FLOW_NEED_DATA GST_FLOW_CUSTOM_SUCCESS
+
+/* Fixed: a second, identical definition of GST_VIDEO_ENCODER_FLOW_DROPPED
+ * (already defined earlier in this header) was removed; the duplicate was
+ * redundant and would confuse documentation tooling. */
+
+/**
+ * GST_VIDEO_ENCODER_INPUT_SEGMENT:
+ * @obj: base parse instance
+ *
+ * Gives the segment of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_INPUT_SEGMENT(obj) (GST_VIDEO_ENCODER_CAST (obj)->input_segment)
+
+/**
+ * GST_VIDEO_ENCODER_OUTPUT_SEGMENT:
+ * @obj: base parse instance
+ *
+ * Gives the segment of the element.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_OUTPUT_SEGMENT(obj) (GST_VIDEO_ENCODER_CAST (obj)->output_segment)
+
+/**
+ * GST_VIDEO_ENCODER_STREAM_LOCK:
+ * @encoder: video encoder instance
+ *
+ * Obtain a lock to protect the encoder function from concurrent access.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_STREAM_LOCK(encoder) g_static_rec_mutex_lock (&GST_VIDEO_ENCODER (encoder)->stream_lock)
+
+/**
+ * GST_VIDEO_ENCODER_STREAM_UNLOCK:
+ * @encoder: video encoder instance
+ *
+ * Release the lock that protects the encoder function from concurrent access.
+ *
+ * Since: 0.10.36
+ */
+#define GST_VIDEO_ENCODER_STREAM_UNLOCK(encoder) g_static_rec_mutex_unlock (&GST_VIDEO_ENCODER (encoder)->stream_lock)
+
+typedef struct _GstVideoEncoder GstVideoEncoder;
+typedef struct _GstVideoEncoderPrivate GstVideoEncoderPrivate;
+typedef struct _GstVideoEncoderClass GstVideoEncoderClass;
+
+/**
+ * GstVideoEncoder:
+ *
+ * The opaque #GstVideoEncoder data structure.
+ *
+ * Since: 0.10.36
+ */
+struct _GstVideoEncoder
+{
+ /*< private >*/
+ GstElement element;
+
+ /*< protected >*/
+ GstPad *sinkpad;
+ GstPad *srcpad;
+
+ /* protects all data processing, i.e. is locked
+ * in the chain function, finish_frame and when
+ * processing serialized events */
+ GStaticRecMutex stream_lock;
+
+ /* MT-protected (with STREAM_LOCK) */
+ GstSegment input_segment;
+ GstSegment output_segment;
+
+ /* instance-private state, defined in the .c file */
+ GstVideoEncoderPrivate *priv;
+ /* FIXME before moving to base */
+ void *padding[GST_PADDING_LARGE];
+};
+
+/**
+ * GstVideoEncoderClass:
+ * @open: Optional.
+ * Called when the element changes to GST_STATE_READY.
+ * Allows opening external resources. Since: 0.10.37.
+ * @close: Optional.
+ * Called when the element changes to GST_STATE_NULL.
+ * Allows closing external resources. Since: 0.10.37.
+ * @start: Optional.
+ * Called when the element starts processing.
+ * Allows opening external resources.
+ * @stop: Optional.
+ * Called when the element stops processing.
+ * Allows closing external resources.
+ * @set_format: Optional.
+ * Notifies subclass of incoming data format.
+ * GstVideoCodecState fields have already been
+ * set according to provided caps.
+ * @handle_frame: Provides input frame to subclass.
+ * @reset: Optional.
+ * Allows subclass (encoder) to perform post-seek semantics reset.
+ * @finish: Optional.
+ * Called to request subclass to dispatch any pending remaining
+ * data (e.g. at EOS).
+ * @pre_push: Optional.
+ * Allows subclass to push frame downstream in whatever
+ * shape or form it deems appropriate. If not provided,
+ * provided encoded frame data is simply pushed downstream.
+ * @getcaps: Optional.
+ * Allows for a custom sink getcaps implementation (e.g.
+ * for multichannel input specification). If not implemented,
+ * default returns gst_video_encoder_proxy_getcaps
+ * applied to sink template caps.
+ * @sink_event: Optional.
+ * Event handler on the sink pad. This function should return
+ * TRUE if the event was handled and should be discarded
+ * (i.e. not unref'ed).
+ * @src_event: Optional.
+ * Event handler on the source pad. This function should return
+ * TRUE if the event was handled and should be discarded
+ * (i.e. not unref'ed).
+ *
+ * Subclasses can override any of the available virtual methods or not, as
+ * needed. At minimum @handle_frame needs to be overridden, and @set_format
+ * and @getcaps are likely needed as well.
+ *
+ * Since: 0.10.36
+ */
+struct _GstVideoEncoderClass
+{
+ /*< private >*/
+ GstElementClass element_class;
+
+ /*< public >*/
+ /* virtual methods for subclasses */
+ gboolean (*open) (GstVideoEncoder *encoder);
+
+ gboolean (*close) (GstVideoEncoder *encoder);
+
+ gboolean (*start) (GstVideoEncoder *encoder);
+
+ gboolean (*stop) (GstVideoEncoder *encoder);
+
+ gboolean (*set_format) (GstVideoEncoder *encoder,
+ GstVideoCodecState *state);
+
+ GstFlowReturn (*handle_frame) (GstVideoEncoder *encoder,
+ GstVideoCodecFrame *frame);
+
+ gboolean (*reset) (GstVideoEncoder *encoder,
+ gboolean hard);
+
+ GstFlowReturn (*finish) (GstVideoEncoder *encoder);
+
+ GstFlowReturn (*pre_push) (GstVideoEncoder *encoder,
+ GstVideoCodecFrame *frame);
+
+ GstCaps * (*getcaps) (GstVideoEncoder *enc);
+
+ gboolean (*sink_event) (GstVideoEncoder *encoder,
+ GstEvent *event);
+
+ gboolean (*src_event) (GstVideoEncoder *encoder,
+ GstEvent *event);
+
+ /*< private >*/
+ /* FIXME before moving to base */
+ gpointer _gst_reserved[GST_PADDING_LARGE];
+};
+
+GType gst_video_encoder_get_type (void);
+
+/* output-state management */
+GstVideoCodecState* gst_video_encoder_get_output_state (GstVideoEncoder *encoder);
+
+GstVideoCodecState* gst_video_encoder_set_output_state (GstVideoEncoder * encoder,
+ GstCaps * caps,
+ GstVideoCodecState * reference);
+
+/* pending-frame lookup */
+GstVideoCodecFrame* gst_video_encoder_get_frame (GstVideoEncoder *encoder,
+ int frame_number);
+GstVideoCodecFrame* gst_video_encoder_get_oldest_frame (GstVideoEncoder *encoder);
+
+GstFlowReturn gst_video_encoder_finish_frame (GstVideoEncoder *encoder,
+ GstVideoCodecFrame *frame);
+
+GstCaps * gst_video_encoder_proxy_getcaps (GstVideoEncoder * enc,
+ GstCaps * caps);
+void gst_video_encoder_set_discont (GstVideoEncoder *encoder);
+gboolean gst_video_encoder_get_discont (GstVideoEncoder *encoder);
+
+void gst_video_encoder_set_latency (GstVideoEncoder *encoder,
+ GstClockTime min_latency,
+ GstClockTime max_latency);
+void gst_video_encoder_get_latency (GstVideoEncoder *encoder,
+ GstClockTime *min_latency,
+ GstClockTime *max_latency);
+
+void gst_video_encoder_set_headers (GstVideoEncoder *encoder,
+ GList *headers);
+G_END_DECLS
+
+#endif /* _GST_VIDEO_ENCODER_H_ */
+
--- /dev/null
+/* GStreamer
+ * Copyright (C) 2008 David Schleef <ds@schleef.org>
+ * Copyright (C) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "gstvideoutils.h"
+
+#include <string.h>
+
+/* Register GstVideoCodecFrame as a boxed GType (thread-safe, one-shot). */
+GType
+gst_video_codec_frame_get_type (void)
+{
+ static volatile gsize type = 0;
+
+ if (g_once_init_enter (&type)) {
+ GType _type;
+
+ _type = g_boxed_type_register_static ("GstVideoCodecFrame",
+ (GBoxedCopyFunc) gst_video_codec_frame_ref,
+ (GBoxedFreeFunc) gst_video_codec_frame_unref);
+ g_once_init_leave (&type, _type);
+ }
+ return (GType) type;
+}
+
+
+
+/* Release everything owned by @frame (buffers, queued events, the coder
+ * hook via its destroy notify) and free the slice-allocated struct.
+ * Called from gst_video_codec_frame_unref when the refcount drops to 0. */
+static void
+_gst_video_codec_frame_free (GstVideoCodecFrame * frame)
+{
+ g_return_if_fail (frame != NULL);
+
+ if (frame->input_buffer) {
+ gst_buffer_unref (frame->input_buffer);
+ }
+
+ if (frame->output_buffer) {
+ gst_buffer_unref (frame->output_buffer);
+ }
+
+ g_list_foreach (frame->events, (GFunc) gst_event_unref, NULL);
+ g_list_free (frame->events);
+
+ if (frame->coder_hook_destroy_notify && frame->coder_hook)
+ frame->coder_hook_destroy_notify (frame->coder_hook);
+
+ g_slice_free (GstVideoCodecFrame, frame);
+}
+
+/**
+ * gst_video_codec_frame_set_hook:
+ * @frame: a #GstVideoCodecFrame
+ * @hook: private data
+ * @notify: (closure hook): a #GDestroyNotify
+ *
+ * Sets the #GDestroyNotify that will be called (along with the @hook) when
+ * the frame is freed.
+ *
+ * If a @hook was previously set, then the previous set @notify will be called
+ * before the @hook is replaced.
+ */
+void
+gst_video_codec_frame_set_hook (GstVideoCodecFrame * frame, void *hook,
+ GDestroyNotify notify)
+{
+ /* dispose of any previous hook before overwriting it */
+ if (frame->coder_hook_destroy_notify && frame->coder_hook)
+ frame->coder_hook_destroy_notify (frame->coder_hook);
+
+ frame->coder_hook = hook;
+ frame->coder_hook_destroy_notify = notify;
+}
+
+/**
+ * gst_video_codec_frame_ref:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Increases the refcount of the given frame by one.
+ *
+ * Returns: @frame
+ */
+GstVideoCodecFrame *
+gst_video_codec_frame_ref (GstVideoCodecFrame * frame)
+{
+ g_return_val_if_fail (frame != NULL, NULL);
+
+ /* atomic: frames may be shared between streaming threads */
+ g_atomic_int_inc (&frame->ref_count);
+
+ return frame;
+}
+
+/**
+ * gst_video_codec_frame_unref:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Decreases the refcount of the frame. If the refcount reaches 0, the frame
+ * will be freed.
+ */
+void
+gst_video_codec_frame_unref (GstVideoCodecFrame * frame)
+{
+ g_return_if_fail (frame != NULL);
+ g_return_if_fail (frame->ref_count > 0);
+
+ if (g_atomic_int_dec_and_test (&frame->ref_count)) {
+ _gst_video_codec_frame_free (frame);
+ }
+}
+
+
+/**
+ * gst_video_codec_state_ref:
+ * @state: a #GstVideoCodecState
+ *
+ * Increases the refcount of the given state by one.
+ *
+ * Returns: @state
+ */
+GstVideoCodecState *
+gst_video_codec_state_ref (GstVideoCodecState * state)
+{
+ g_return_val_if_fail (state != NULL, NULL);
+
+ /* atomic: states may be shared between streaming threads */
+ g_atomic_int_inc (&state->ref_count);
+
+ return state;
+}
+
+/* Release the members of @state and the struct itself.
+ * Called from gst_video_codec_state_unref when the refcount drops to 0. */
+static void
+_gst_video_codec_state_free (GstVideoCodecState * state)
+{
+ if (state->caps)
+ gst_caps_unref (state->caps);
+ if (state->codec_data)
+ gst_buffer_unref (state->codec_data);
+ /* Fixed leak: the struct itself was never released. This assumes states
+ * are slice-allocated (mirroring _gst_video_codec_frame_free); the
+ * allocator (_new_output_state) is outside this hunk — confirm it uses
+ * g_slice_new0. */
+ g_slice_free (GstVideoCodecState, state);
+}
+
+/**
+ * gst_video_codec_state_unref:
+ * @state: a #GstVideoCodecState
+ *
+ * Decreases the refcount of the state. If the refcount reaches 0, the state
+ * will be freed.
+ */
+void
+gst_video_codec_state_unref (GstVideoCodecState * state)
+{
+ g_return_if_fail (state != NULL);
+ g_return_if_fail (state->ref_count > 0);
+
+ if (g_atomic_int_dec_and_test (&state->ref_count)) {
+ _gst_video_codec_state_free (state);
+ }
+}
+
+/* Register GstVideoCodecState as a boxed GType (thread-safe, one-shot). */
+GType
+gst_video_codec_state_get_type (void)
+{
+ static volatile gsize type = 0;
+
+ if (g_once_init_enter (&type)) {
+ GType _type;
+
+ _type = g_boxed_type_register_static ("GstVideoCodecState",
+ (GBoxedCopyFunc) gst_video_codec_state_ref,
+ (GBoxedFreeFunc) gst_video_codec_state_unref);
+ g_once_init_leave (&type, _type);
+ }
+ return (GType) type;
+}
--- /dev/null
+/* GStreamer
+ * Copyright (C) 2008 David Schleef <ds@schleef.org>
+ * Copyright (C) 2012 Collabora Ltd.
+ * Author : Edward Hervey <edward@collabora.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _GST_VIDEO_UTILS_H_
+#define _GST_VIDEO_UTILS_H_
+
+#include <gst/gst.h>
+#include <gst/video/video.h>
+
+G_BEGIN_DECLS
+#define GST_TYPE_VIDEO_CODEC_STATE \
+ (gst_video_codec_state_get_type())
+
+#define GST_TYPE_VIDEO_CODEC_FRAME \
+ (gst_video_codec_frame_get_type())
+
+typedef struct _GstVideoCodecState GstVideoCodecState;
+typedef struct _GstVideoCodecFrame GstVideoCodecFrame;
+
+/**
+ * GstVideoCodecState:
+ * @info: The #GstVideoInfo describing the stream
+ * @caps: The #GstCaps
+ * @codec_data: (optional) a #GstBuffer corresponding to the
+ * 'codec_data' field of a stream.
+ *
+ * Structure representing the state of an incoming or outgoing video
+ * stream for encoders and decoders.
+ *
+ * Decoders and encoders will receive such a state through their
+ * respective @set_format vmethods.
+ *
+ * Decoders and encoders can set the downstream state, by using the
+ * @gst_video_decoder_set_output_state() or
+ * @gst_video_encoder_set_output_state() methods.
+ */
+struct _GstVideoCodecState
+{
+ /*< private >*/
+ /* atomic; use gst_video_codec_state_ref/unref */
+ gint ref_count;
+
+ /*< public >*/
+ GstVideoInfo info;
+
+ GstCaps *caps;
+
+ GstBuffer *codec_data;
+
+ /*< private >*/
+ void *padding[GST_PADDING_LARGE];
+};
+
+/**
+ * GstVideoCodecFrameFlags:
+ * @GST_VIDEO_CODEC_FRAME_FLAG_DECODE_ONLY: is the frame only meant to be decoded
+ * @GST_VIDEO_CODEC_FRAME_FLAG_SYNC_POINT: is the frame a synchronization point (keyframe)
+ * @GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME: should the output frame be made a keyframe
+ * @GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME_HEADERS: should the encoder output stream headers
+ * @GST_VIDEO_CODEC_FRAME_FLAG_TFF: top-field first
+ * @GST_VIDEO_CODEC_FRAME_FLAG_RFF: the field is repeated
+ * @GST_VIDEO_CODEC_FRAME_FLAG_ONEFIELD: only one field is present
+ *
+ * Flags for #GstVideoCodecFrame
+ */
+typedef enum
+{
+ GST_VIDEO_CODEC_FRAME_FLAG_DECODE_ONLY = (1<<0),
+ GST_VIDEO_CODEC_FRAME_FLAG_SYNC_POINT = (1<<1),
+ GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME = (1<<2),
+ GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME_HEADERS = (1<<3),
+ GST_VIDEO_CODEC_FRAME_FLAG_TFF = (1<<4),
+ GST_VIDEO_CODEC_FRAME_FLAG_RFF = (1<<5),
+ GST_VIDEO_CODEC_FRAME_FLAG_ONEFIELD = (1<<6)
+} GstVideoCodecFrameFlags;
+
+/**
+ * GST_VIDEO_CODEC_FRAME_FLAGS:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * The entire set of flags for the @frame
+ */
+#define GST_VIDEO_CODEC_FRAME_FLAGS(frame) ((frame)->flags)
+
+/**
+ * GST_VIDEO_CODEC_FRAME_FLAG_IS_SET:
+ * @frame: a #GstVideoCodecFrame
+ * @flag: a flag to check for
+ *
+ * Checks whether the given @flag is set
+ */
+#define GST_VIDEO_CODEC_FRAME_FLAG_IS_SET(frame,flag) !!(GST_VIDEO_CODEC_FRAME_FLAGS(frame) & (flag))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_FLAG_SET:
+ * @frame: a #GstVideoCodecFrame
+ * @flag: Flag to set, can be any number of bits in guint32.
+ *
+ * This macro sets the given bits
+ */
+#define GST_VIDEO_CODEC_FRAME_FLAG_SET(frame,flag) (GST_VIDEO_CODEC_FRAME_FLAGS(frame) |= (flag))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_FLAG_UNSET:
+ * @frame: a #GstVideoCodecFrame
+ * @flag: Flag to unset
+ *
+ * This macro unsets the given bits.
+ */
+#define GST_VIDEO_CODEC_FRAME_FLAG_UNSET(frame,flag) (GST_VIDEO_CODEC_FRAME_FLAGS(frame) &= ~(flag))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Tests if the buffer should only be decoded but not sent downstream.
+ */
+#define GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY(frame) (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_DECODE_ONLY))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_SET_DECODE_ONLY:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Sets the buffer to not be sent downstream.
+ *
+ * Decoder implementations can use this if they have frames that
+ * are not meant to be displayed.
+ *
+ * Encoder implementations can safely ignore this field.
+ */
+#define GST_VIDEO_CODEC_FRAME_SET_DECODE_ONLY(frame) (GST_VIDEO_CODEC_FRAME_FLAG_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_DECODE_ONLY))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Tests if the frame is a synchronization point (like a keyframe).
+ *
+ * Decoder implementations can use this to detect keyframes.
+ */
+#define GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT(frame) (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_SYNC_POINT))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Sets the frame to be a synchronization point (like a keyframe).
+ *
+ * Encoder implementations should set this accordingly.
+ *
+ * Decoders implementing parsing features should set this when they
+ * detect such a synchronization point.
+ */
+#define GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT(frame) (GST_VIDEO_CODEC_FRAME_FLAG_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_SYNC_POINT))
+#define GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT(frame) (GST_VIDEO_CODEC_FRAME_FLAG_UNSET(frame, GST_VIDEO_CODEC_FRAME_FLAG_SYNC_POINT))
+
+
+/**
+ * GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Tests if the frame must be encoded as a keyframe. Applies only to
+ * frames provided to encoders. Decoders can safely ignore this field.
+ */
+#define GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME(frame) (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME))
+#define GST_VIDEO_CODEC_FRAME_SET_FORCE_KEYFRAME(frame) (GST_VIDEO_CODEC_FRAME_FLAG_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME))
+#define GST_VIDEO_CODEC_FRAME_UNSET_FORCE_KEYFRAME(frame) (GST_VIDEO_CODEC_FRAME_FLAG_UNSET(frame, GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME))
+
+/**
+ * GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME_HEADERS:
+ * @frame: a #GstVideoCodecFrame
+ *
+ * Tests if encoder should output stream headers before outputting the
+ * resulting encoded buffer for the given frame.
+ *
+ * Applies only to frames provided to encoders. Decoders can safely
+ * ignore this field.
+ */
+#define GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME_HEADERS(frame) (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME_HEADERS))
+#define GST_VIDEO_CODEC_FRAME_SET_FORCE_KEYFRAME_HEADERS(frame) (GST_VIDEO_CODEC_FRAME_FLAG_SET(frame, GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME_HEADERS))
+#define GST_VIDEO_CODEC_FRAME_UNSET_FORCE_KEYFRAME_HEADERS(frame) (GST_VIDEO_CODEC_FRAME_FLAG_UNSET(frame, GST_VIDEO_CODEC_FRAME_FLAG_FORCE_KEYFRAME_HEADERS))
+
+/**
+ * GstVideoCodecFrame:
+ * @pts: Presentation timestamp
+ * @dts: Decoding timestamp
+ * @duration: Duration of the frame
+ * @system_frame_number: Unique identifier for the frame. Use this if you need
+ * to get hold of the frame later (like when data is being decoded).
+ * Typical usage in decoders is to set this on the opaque value provided
+ * to the library and get back the frame using gst_video_decoder_get_frame()
+ * @distance_from_sync: Distance in frames from the last synchronization point.
+ * @input_buffer: the input #GstBuffer that created this frame.
+ * @output_buffer: the output #GstBuffer. Implementations should set this either
+ * directly, or by using the gst_video_decoder_alloc_output_frame() or
+ * gst_video_decoder_alloc_output_buffer() methods.
+ * @field_index: index of the frame's first field (FIXME: description missing
+ *     upstream — confirm semantics before relying on this)
+ * @n_fields: number of fields in the frame (default 2). Decoders can change
+ * this if the frame contains a different number of fields.
+ * @deadline: Running time when the frame will be used.
+ * @events: Events that will be pushed downstream before this frame is pushed.
+ *
+ * A #GstVideoCodecFrame represents a video frame both in raw and
+ * encoded form.
+ */
+struct _GstVideoCodecFrame
+{
+ /*< private >*/
+ gint ref_count;
+
+ guint32 flags;
+
+ /*< public >*/
+ gint system_frame_number; /* ED */
+ gint decode_frame_number; /* ED */
+ gint presentation_frame_number; /* ED */
+
+ GstClockTime dts; /* ED */
+ GstClockTime pts; /* ED */
+ GstClockTime duration; /* ED */
+
+ int distance_from_sync; /* ED */
+
+ GstBuffer *input_buffer; /* ED */
+ GstBuffer *output_buffer; /* ED */
+
+ GstClockTime deadline; /* D */
+
+ /* Events that should be pushed downstream *before*
+ * the next output_buffer */
+ GList *events; /* ED */
+
+ /*< private >*/
+
+ void *coder_hook;
+ GDestroyNotify coder_hook_destroy_notify;
+
+ void *padding[GST_PADDING_LARGE];
+};
+
+/* GstVideoCodecState */
+GType gst_video_codec_state_get_type (void);
+
+GstVideoCodecState *gst_video_codec_state_ref (GstVideoCodecState * state);
+
+void gst_video_codec_state_unref (GstVideoCodecState * state);
+
+
+/* GstVideoCodecFrame */
+GType gst_video_codec_frame_get_type (void);
+
+GstVideoCodecFrame *gst_video_codec_frame_ref (GstVideoCodecFrame * frame);
+void gst_video_codec_frame_unref (GstVideoCodecFrame * frame);
+void gst_video_codec_frame_set_hook (GstVideoCodecFrame *frame,
+ void *hook,
+ GDestroyNotify notify);
+
+G_END_DECLS
+
+#endif
EXPORTS
+ _gst_video_decoder_error
gst_video_buffer_get_overlay_composition
gst_video_buffer_set_overlay_composition
gst_video_calculate_display_ratio
+ gst_video_codec_frame_get_type
+ gst_video_codec_frame_ref
+ gst_video_codec_frame_set_hook
+ gst_video_codec_frame_unref
+ gst_video_codec_state_get_type
+ gst_video_codec_state_ref
+ gst_video_codec_state_unref
gst_video_convert_frame
gst_video_convert_frame_async
+ gst_video_decoder_add_to_frame
+ gst_video_decoder_alloc_output_buffer
+ gst_video_decoder_alloc_output_frame
+ gst_video_decoder_drop_frame
+ gst_video_decoder_finish_frame
+ gst_video_decoder_get_estimate_rate
+ gst_video_decoder_get_frame
+ gst_video_decoder_get_latency
+ gst_video_decoder_get_max_decode_time
+ gst_video_decoder_get_max_errors
+ gst_video_decoder_get_oldest_frame
+ gst_video_decoder_get_output_state
+ gst_video_decoder_get_packetized
+ gst_video_decoder_get_type
+ gst_video_decoder_have_frame
+ gst_video_decoder_set_estimate_rate
+ gst_video_decoder_set_latency
+ gst_video_decoder_set_max_errors
+ gst_video_decoder_set_output_state
+ gst_video_decoder_set_packetized
+ gst_video_decoder_set_src_caps
+ gst_video_encoder_finish_frame
+ gst_video_encoder_get_frame
+ gst_video_encoder_get_latency
+ gst_video_encoder_get_oldest_frame
+ gst_video_encoder_get_output_state
+ gst_video_encoder_get_type
+ gst_video_encoder_proxy_getcaps
+ gst_video_encoder_set_headers
+ gst_video_encoder_set_latency
+ gst_video_encoder_set_output_state
gst_video_event_is_force_key_unit
gst_video_event_new_downstream_force_key_unit
gst_video_event_new_still_frame