* decoded).
* * Add a flag/boolean for decoders that require keyframes, so the base
* class can automatically discard non-keyframes before one has arrived
+ * * Detect reordered frame/timestamps and fix the pts/dts
* * Support for GstIndex (or shall we not care ?)
* * Calculate actual latency based on input/output timestamp/frame_number
* and if it exceeds the recorded one, save it and emit a GST_MESSAGE_LATENCY
*
*/
-/* FIXME 0.11: suppress warnings for deprecated API such as GStaticRecMutex
- * with newer GLib versions (>= 2.31.0) */
-#define GLIB_DISABLE_DEPRECATION_WARNINGS
-
#include "gstvideodecoder.h"
#include "gstvideoutils.h"
(G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_VIDEO_DECODER, \
GstVideoDecoderPrivate))
-/* FIXME : I really hope we never see streams that go over this */
-#define MAX_DTS_PTS_REORDER_DEPTH 36
-
struct _GstVideoDecoderPrivate
{
/* FIXME introduce a context ? */
/* combine to yield (presentation) ts */
GstClockTime timestamp_offset;
- /* last incoming and outgoing ts */
- GstClockTime last_timestamp_in;
- GstClockTime last_timestamp_out;
-
- /* last outgoing system frame number (used to detect reordering) */
- guint last_out_frame_number;
-
- /* TRUE if input timestamp is not monotonically increasing */
- gboolean reordered_input;
-
- /* TRUE if frames come out in a different order than they were inputted */
- gboolean reordered_output;
+ /* last outgoing ts */
+ GstClockTime last_timestamp;
/* reverse playback */
/* collect input */
gint64 min_latency;
gint64 max_latency;
-
- /* Handle incoming buffers with DTS instead of PTS as timestamps */
- GstClockTime incoming_timestamps[MAX_DTS_PTS_REORDER_DEPTH];
- guint reorder_idx_in;
- guint reorder_idx_out;
};
+static GstElementClass *parent_class = NULL;
+static void gst_video_decoder_class_init (GstVideoDecoderClass * klass);
+static void gst_video_decoder_init (GstVideoDecoder * dec,
+ GstVideoDecoderClass * klass);
+
static void gst_video_decoder_finalize (GObject * object);
-static gboolean gst_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps);
-static gboolean gst_video_decoder_sink_event (GstPad * pad, GstEvent * event);
-static gboolean gst_video_decoder_src_event (GstPad * pad, GstEvent * event);
-static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstBuffer * buf);
-static gboolean gst_video_decoder_sink_query (GstPad * pad, GstQuery * query);
-static GstStateChangeReturn
-gst_video_decoder_change_state (GstElement * element,
- GstStateChange transition);
-static const GstQueryType *gst_video_decoder_get_query_types (GstPad * pad);
-static gboolean gst_video_decoder_src_query (GstPad * pad, GstQuery * query);
+static gboolean gst_video_decoder_setcaps (GstVideoDecoder * dec,
+ GstCaps * caps);
+static gboolean gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event);
+static gboolean gst_video_decoder_src_event (GstPad * pad, GstObject * parent,
+ GstEvent * event);
+static GstFlowReturn gst_video_decoder_chain (GstPad * pad, GstObject * parent,
+ GstBuffer * buf);
+static gboolean gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query);
+static GstStateChangeReturn gst_video_decoder_change_state (GstElement *
+ element, GstStateChange transition);
+static gboolean gst_video_decoder_src_query (GstPad * pad, GstObject * parent,
+ GstQuery * query);
static void gst_video_decoder_reset (GstVideoDecoder * decoder, gboolean full);
static GstFlowReturn gst_video_decoder_have_frame_2 (GstVideoDecoder * decoder);
static void gst_video_decoder_clear_queues (GstVideoDecoder * dec);
-GST_BOILERPLATE (GstVideoDecoder, gst_video_decoder,
- GstElement, GST_TYPE_ELEMENT);
-
-static void
-gst_video_decoder_base_init (gpointer g_class)
+/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
+ * method to get to the padtemplates */
+GType
+gst_video_decoder_get_type (void)
{
- GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
- "Base Video Decoder");
+ static volatile gsize type = 0;
+
+ if (g_once_init_enter (&type)) {
+ GType _type;
+ static const GTypeInfo info = {
+ sizeof (GstVideoDecoderClass),
+ NULL,
+ NULL,
+ (GClassInitFunc) gst_video_decoder_class_init,
+ NULL,
+ NULL,
+ sizeof (GstVideoDecoder),
+ 0,
+ (GInstanceInitFunc) gst_video_decoder_init,
+ };
+
+ _type = g_type_register_static (GST_TYPE_ELEMENT,
+ "GstVideoDecoder", &info, G_TYPE_FLAG_ABSTRACT);
+ g_once_init_leave (&type, _type);
+ }
+ return type;
}
static void
gobject_class = G_OBJECT_CLASS (klass);
gstelement_class = GST_ELEMENT_CLASS (klass);
+ GST_DEBUG_CATEGORY_INIT (videodecoder_debug, "videodecoder", 0,
+ "Base Video Decoder");
+
+ parent_class = g_type_class_peek_parent (klass);
g_type_class_add_private (klass, sizeof (GstVideoDecoderPrivate));
gobject_class->finalize = gst_video_decoder_finalize;
gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_decoder_chain));
gst_pad_set_event_function (pad,
GST_DEBUG_FUNCPTR (gst_video_decoder_sink_event));
- gst_pad_set_setcaps_function (pad,
- GST_DEBUG_FUNCPTR (gst_video_decoder_sink_setcaps));
gst_pad_set_query_function (pad,
GST_DEBUG_FUNCPTR (gst_video_decoder_sink_query));
gst_element_add_pad (GST_ELEMENT (decoder), decoder->sinkpad);
gst_pad_set_event_function (pad,
GST_DEBUG_FUNCPTR (gst_video_decoder_src_event));
- gst_pad_set_query_type_function (pad,
- GST_DEBUG_FUNCPTR (gst_video_decoder_get_query_types));
gst_pad_set_query_function (pad,
GST_DEBUG_FUNCPTR (gst_video_decoder_src_query));
gst_pad_use_fixed_caps (pad);
gst_segment_init (&decoder->input_segment, GST_FORMAT_TIME);
gst_segment_init (&decoder->output_segment, GST_FORMAT_TIME);
- g_static_rec_mutex_init (&decoder->stream_lock);
+ g_rec_mutex_init (&decoder->stream_lock);
decoder->priv->input_adapter = gst_adapter_new ();
decoder->priv->output_adapter = gst_adapter_new ();
codec_data = gst_structure_get_value (structure, "codec_data");
if (codec_data && G_VALUE_TYPE (codec_data) == GST_TYPE_BUFFER)
- state->codec_data = GST_BUFFER (gst_value_dup_mini_object (codec_data));
+ state->codec_data = GST_BUFFER (g_value_dup_boxed (codec_data));
return state;
}
static gboolean
-gst_video_decoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_video_decoder_setcaps (GstVideoDecoder * decoder, GstCaps * caps)
{
- GstVideoDecoder *decoder;
GstVideoDecoderClass *decoder_class;
GstVideoCodecState *state;
gboolean ret = TRUE;
- decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
GST_DEBUG_OBJECT (decoder, "setcaps %" GST_PTR_FORMAT, caps);
decoder->priv->input_state = state;
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
- gst_object_unref (decoder);
return ret;
parse_fail:
{
GST_WARNING_OBJECT (decoder, "Failed to parse caps");
- gst_object_unref (decoder);
return FALSE;
}
GST_DEBUG_OBJECT (object, "finalize");
- g_static_rec_mutex_free (&decoder->stream_lock);
+ g_rec_mutex_clear (&decoder->stream_lock);
if (decoder->priv->input_adapter) {
g_object_unref (decoder->priv->input_adapter);
decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ handled = gst_video_decoder_setcaps (decoder, caps);
+ gst_event_unref (event);
+ break;
+ }
case GST_EVENT_EOS:
{
GstFlowReturn flow_ret = GST_FLOW_OK;
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
break;
}
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- gboolean update;
- double rate, arate;
- GstFormat format;
- gint64 start;
- gint64 stop;
- gint64 pos;
- GstSegment *segment = &decoder->input_segment;
+ GstSegment segment;
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
- gst_event_parse_new_segment_full (event, &update, &rate,
- &arate, &format, &start, &stop, &pos);
- if (format == GST_FORMAT_TIME) {
+ gst_event_copy_segment (event, &segment);
+
+ if (segment.format == GST_FORMAT_TIME) {
GST_DEBUG_OBJECT (decoder,
- "received TIME NEW_SEGMENT %" GST_TIME_FORMAT
- " -- %" GST_TIME_FORMAT ", pos %" GST_TIME_FORMAT
- ", rate %g, applied_rate %g",
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop), GST_TIME_ARGS (pos),
- rate, arate);
+ "received TIME SEGMENT %" GST_SEGMENT_FORMAT, &segment);
} else {
- GstFormat dformat = GST_FORMAT_TIME;
+ gint64 start;
GST_DEBUG_OBJECT (decoder,
- "received NEW_SEGMENT %" G_GINT64_FORMAT
- " -- %" G_GINT64_FORMAT ", time %" G_GINT64_FORMAT
- ", rate %g, applied_rate %g", start, stop, pos, rate, arate);
+ "received SEGMENT %" GST_SEGMENT_FORMAT, &segment);
/* handle newsegment as a result from our legacy simple seeking */
/* note that initial 0 should convert to 0 in any case */
if (priv->do_estimate_rate &&
- gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES, start,
- &dformat, &start)) {
+ gst_pad_query_convert (decoder->sinkpad, GST_FORMAT_BYTES,
+ segment.start, GST_FORMAT_TIME, &start)) {
/* best attempt convert */
/* as these are only estimates, stop is kept open-ended to avoid
* premature cutting */
GST_DEBUG_OBJECT (decoder,
"converted to TIME start %" GST_TIME_FORMAT,
GST_TIME_ARGS (start));
- pos = start;
- stop = GST_CLOCK_TIME_NONE;
+ segment.start = start;
+ segment.stop = GST_CLOCK_TIME_NONE;
+ segment.time = start;
/* replace event */
gst_event_unref (event);
- event = gst_event_new_new_segment_full (update, rate, arate,
- GST_FORMAT_TIME, start, stop, pos);
+ event = gst_event_new_segment (&segment);
} else {
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
goto newseg_wrong_format;
}
}
- if (!update) {
- gst_video_decoder_flush (decoder, FALSE);
- }
+ gst_video_decoder_flush (decoder, FALSE);
- priv->timestamp_offset = start;
+ priv->timestamp_offset = segment.start;
- gst_segment_set_newsegment_full (segment,
- update, rate, arate, format, start, stop, pos);
+ decoder->input_segment = segment;
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
break;
gst_video_decoder_push_event (GstVideoDecoder * decoder, GstEvent * event)
{
switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- gboolean update;
- double rate;
- double applied_rate;
- GstFormat format;
- gint64 start;
- gint64 stop;
- gint64 position;
+ GstSegment segment;
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &position);
- GST_DEBUG_OBJECT (decoder, "newseg rate %g, applied rate %g, "
- "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
- ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
- GST_TIME_ARGS (position));
+ gst_event_copy_segment (event, &segment);
+
+ GST_DEBUG_OBJECT (decoder, "segment %" GST_SEGMENT_FORMAT, &segment);
- if (format != GST_FORMAT_TIME) {
+ if (segment.format != GST_FORMAT_TIME) {
GST_DEBUG_OBJECT (decoder, "received non TIME newsegment");
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
break;
}
- gst_segment_set_newsegment_full (&decoder->output_segment, update, rate,
- applied_rate, format, start, stop, position);
+ decoder->output_segment = segment;
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
break;
}
}
static gboolean
-gst_video_decoder_sink_event (GstPad * pad, GstEvent * event)
+gst_video_decoder_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event)
{
GstVideoDecoder *decoder;
GstVideoDecoderClass *decoder_class;
gboolean ret = FALSE;
gboolean handled = FALSE;
- decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+ decoder = GST_VIDEO_DECODER (parent);
decoder_class = GST_VIDEO_DECODER_GET_CLASS (decoder);
GST_DEBUG_OBJECT (decoder, "received event %d, %s", GST_EVENT_TYPE (event),
}
}
- gst_object_unref (decoder);
return ret;
-
}
/* perform upstream byte <-> time conversion (duration, seeking)
static gboolean
gst_video_decoder_do_seek (GstVideoDecoder * dec, GstEvent * event)
{
+ GstFormat format;
GstSeekFlags flags;
GstSeekType start_type, end_type;
- GstFormat format;
gdouble rate;
gint64 start, start_time, end_time;
GstSegment seek_segment;
}
memcpy (&seek_segment, &dec->output_segment, sizeof (seek_segment));
- gst_segment_set_seek (&seek_segment, rate, format, flags, start_type,
+ gst_segment_do_seek (&seek_segment, rate, format, flags, start_type,
start_time, end_type, end_time, NULL);
- start_time = seek_segment.last_stop;
+ start_time = seek_segment.position;
- format = GST_FORMAT_BYTES;
if (!gst_pad_query_convert (dec->sinkpad, GST_FORMAT_TIME, start_time,
- &format, &start)) {
+ GST_FORMAT_BYTES, &start)) {
GST_DEBUG_OBJECT (dec, "conversion failed");
return FALSE;
}
}
static gboolean
-gst_video_decoder_src_event (GstPad * pad, GstEvent * event)
+gst_video_decoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
GstVideoDecoder *decoder;
GstVideoDecoderPrivate *priv;
gboolean res = FALSE;
- decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+ decoder = GST_VIDEO_DECODER (parent);
priv = decoder->priv;
GST_DEBUG_OBJECT (decoder,
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_SEEK:
{
- GstFormat format, tformat;
+ GstFormat format;
gdouble rate;
GstSeekFlags flags;
GstSeekType cur_type, stop_type;
/* ... though a non-time seek can be aided as well */
/* First bring the requested format to time */
- tformat = GST_FORMAT_TIME;
- if (!(res = gst_pad_query_convert (pad, format, cur, &tformat, &tcur)))
+ if (!(res =
+ gst_pad_query_convert (pad, format, cur, GST_FORMAT_TIME, &tcur)))
goto convert_error;
- if (!(res = gst_pad_query_convert (pad, format, stop, &tformat, &tstop)))
+ if (!(res =
+ gst_pad_query_convert (pad, format, stop, GST_FORMAT_TIME,
+ &tstop)))
goto convert_error;
/* then seek with time on the peer */
}
case GST_EVENT_QOS:
{
+ GstQOSType type;
gdouble proportion;
GstClockTimeDiff diff;
GstClockTime timestamp;
GstClockTime duration;
- gst_event_parse_qos (event, &proportion, &diff, ×tamp);
+ gst_event_parse_qos (event, &type, &proportion, &diff, ×tamp);
GST_OBJECT_LOCK (decoder);
priv->proportion = proportion;
break;
}
done:
- gst_object_unref (decoder);
return res;
convert_error:
goto done;
}
-static const GstQueryType *
-gst_video_decoder_get_query_types (GstPad * pad)
-{
- static const GstQueryType query_types[] = {
- GST_QUERY_POSITION,
- GST_QUERY_DURATION,
- GST_QUERY_CONVERT,
- GST_QUERY_LATENCY,
- 0
- };
-
- return query_types;
-}
-
static gboolean
-gst_video_decoder_src_query (GstPad * pad, GstQuery * query)
+gst_video_decoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
GstVideoDecoder *dec;
gboolean res = TRUE;
- dec = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+ dec = GST_VIDEO_DECODER (parent);
GST_LOG_OBJECT (dec, "handling query: %" GST_PTR_FORMAT, query);
}
/* we start from the last seen time */
- time = dec->priv->last_timestamp_out;
+ time = dec->priv->last_timestamp;
/* correct for the segment values */
time = gst_segment_to_stream_time (&dec->output_segment,
GST_FORMAT_TIME, time);
/* and convert to the final format */
gst_query_parse_position (query, &format, NULL);
if (!(res = gst_pad_query_convert (pad, GST_FORMAT_TIME, time,
- &format, &value)))
+ format, &value)))
break;
gst_query_set_position (query, format, value);
GstFormat format;
/* upstream in any case */
- if ((res = gst_pad_query_default (pad, query)))
+ if ((res = gst_pad_query_default (pad, parent, query)))
break;
gst_query_parse_duration (query, &format, NULL);
if (format == GST_FORMAT_TIME && gst_video_decoder_do_byte (dec)) {
gint64 value;
- format = GST_FORMAT_BYTES;
- if (gst_pad_query_peer_duration (dec->sinkpad, &format, &value)) {
+ if (gst_pad_peer_query_duration (dec->sinkpad, GST_FORMAT_BYTES,
+ &value)) {
GST_LOG_OBJECT (dec, "upstream size %" G_GINT64_FORMAT, value);
- format = GST_FORMAT_TIME;
if (gst_pad_query_convert (dec->sinkpad,
- GST_FORMAT_BYTES, value, &format, &value)) {
+ GST_FORMAT_BYTES, value, GST_FORMAT_TIME, &value)) {
gst_query_set_duration (query, GST_FORMAT_TIME, value);
res = TRUE;
}
}
break;
default:
- res = gst_pad_query_default (pad, query);
+ res = gst_pad_query_default (pad, parent, query);
}
- gst_object_unref (dec);
return res;
error:
GST_ERROR_OBJECT (dec, "query failed");
- gst_object_unref (dec);
return res;
}
static gboolean
-gst_video_decoder_sink_query (GstPad * pad, GstQuery * query)
+gst_video_decoder_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query)
{
GstVideoDecoder *decoder;
GstVideoDecoderPrivate *priv;
gboolean res = FALSE;
- decoder = GST_VIDEO_DECODER (gst_pad_get_parent (pad));
+ decoder = GST_VIDEO_DECODER (parent);
priv = decoder->priv;
GST_LOG_OBJECT (decoder, "handling query: %" GST_PTR_FORMAT, query);
break;
}
default:
- res = gst_pad_query_default (pad, query);
+ res = gst_pad_query_default (pad, parent, query);
break;
}
done:
- gst_object_unref (decoder);
return res;
error:
priv->discont = TRUE;
priv->timestamp_offset = GST_CLOCK_TIME_NONE;
- priv->last_timestamp_in = GST_CLOCK_TIME_NONE;
- priv->last_timestamp_out = GST_CLOCK_TIME_NONE;
- priv->last_out_frame_number = 0;
- priv->reordered_output = FALSE;
- priv->reordered_input = FALSE;
+ priv->last_timestamp = GST_CLOCK_TIME_NONE;
priv->input_offset = 0;
priv->frame_offset = 0;
priv->earliest_time = GST_CLOCK_TIME_NONE;
priv->proportion = 0.5;
- priv->reorder_idx_out = priv->reorder_idx_in = 0;
-
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
}
if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
gst_video_decoder_add_timestamp (decoder, buf);
}
- priv->input_offset += GST_BUFFER_SIZE (buf);
+ priv->input_offset += gst_buffer_get_size (buf);
if (priv->packetized) {
priv->current_frame->input_buffer = buf;
GstBuffer *buf = GST_BUFFER_CAST (priv->queued->data);
if (G_LIKELY (res == GST_FLOW_OK)) {
- GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %u, "
+ GST_DEBUG_OBJECT (dec, "pushing buffer %p of size %" G_GSIZE_FORMAT ", "
"time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
- GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+ gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
/* should be already, but let's be sure */
- buf = gst_buffer_make_metadata_writable (buf);
+ buf = gst_buffer_make_writable (buf);
/* avoid stray DISCONT from forward processing,
* which have no meaning in reverse pushing */
GST_BUFFER_FLAG_UNSET (buf, GST_BUFFER_FLAG_DISCONT);
}
if (G_LIKELY (buf)) {
- GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %u, "
+ GST_DEBUG_OBJECT (dec, "gathering buffer %p of size %" G_GSIZE_FORMAT ", "
"time %" GST_TIME_FORMAT ", dur %" GST_TIME_FORMAT, buf,
- GST_BUFFER_SIZE (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+ gst_buffer_get_size (buf), GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
/* add buffer to gather queue */
}
static GstFlowReturn
-gst_video_decoder_chain (GstPad * pad, GstBuffer * buf)
+gst_video_decoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
GstVideoDecoder *decoder;
GstVideoDecoderPrivate *priv;
GstFlowReturn ret = GST_FLOW_OK;
- decoder = GST_VIDEO_DECODER (GST_PAD_PARENT (pad));
+ decoder = GST_VIDEO_DECODER (parent);
priv = decoder->priv;
GST_LOG_OBJECT (decoder,
- "chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT " size %d",
- GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
- GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), GST_BUFFER_SIZE (buf));
+ "chain %" GST_TIME_FORMAT " duration %" GST_TIME_FORMAT " size %"
+ G_GSIZE_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
+ GST_TIME_ARGS (GST_BUFFER_DURATION (buf)), gst_buffer_get_size (buf));
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
if (decoder->input_segment.format == GST_FORMAT_UNDEFINED) {
GstEvent *event;
+ GstSegment *segment = &decoder->input_segment;
GST_WARNING_OBJECT (decoder,
"Received buffer without a new-segment. "
"Assuming timestamps start from 0.");
- gst_segment_set_newsegment_full (&decoder->input_segment, FALSE, 1.0, 1.0,
- GST_FORMAT_TIME, 0, GST_CLOCK_TIME_NONE, 0);
+ gst_segment_init (segment, GST_FORMAT_TIME);
- event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0,
- GST_CLOCK_TIME_NONE, 0);
+ event = gst_event_new_segment (segment);
decoder->priv->current_frame_events =
g_list_prepend (decoder->priv->current_frame_events, event);
{
GstVideoDecoderPrivate *priv = decoder->priv;
GList *l, *events = NULL;
- GstClockTime reorder_pts;
#ifndef GST_DISABLE_GST_DEBUG
GST_LOG_OBJECT (decoder, "n %d in %d out %d",
gst_adapter_available (priv->output_adapter));
#endif
- reorder_pts = priv->incoming_timestamps[priv->reorder_idx_out];
- priv->reorder_idx_out =
- (priv->reorder_idx_out + 1) % MAX_DTS_PTS_REORDER_DEPTH;
-
- if (!priv->reordered_output && frame->system_frame_number &&
- frame->system_frame_number != (priv->last_out_frame_number + 1)) {
- GST_DEBUG_OBJECT (decoder, "Detected reordered output");
- priv->reordered_output = TRUE;
- }
-
GST_LOG_OBJECT (decoder,
- "finish frame (#%d) sync:%d pts:%" GST_TIME_FORMAT " dts:%"
- GST_TIME_FORMAT " reorder_pts:%" GST_TIME_FORMAT,
- frame->system_frame_number, GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame),
- GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (frame->dts),
- GST_TIME_ARGS (reorder_pts));
+ "finish frame sync=%d pts=%" GST_TIME_FORMAT,
+ GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (frame), GST_TIME_ARGS (frame->pts));
/* Push all pending events that arrived before this frame */
for (l = priv->frames; l; l = l->next) {
/* Check if the data should not be displayed. For example altref/invisible
* frame in vp8. In this case we should not update the timestamps. */
- if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame) || !frame->output_buffer)
+ if (GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (frame))
return;
+ /* If the frame is meant to be output but we don't have an output buffer
+ * we have a problem :) */
+ if (G_UNLIKELY (frame->output_buffer == NULL))
+ goto no_output_buffer;
+
if (GST_CLOCK_TIME_IS_VALID (frame->pts)) {
if (frame->pts != priv->timestamp_offset) {
GST_DEBUG_OBJECT (decoder,
}
}
}
-
if (frame->pts == GST_CLOCK_TIME_NONE) {
frame->pts =
gst_video_decoder_get_timestamp (decoder, frame->decode_frame_number);
frame->duration = GST_CLOCK_TIME_NONE;
}
-
if (frame->duration == GST_CLOCK_TIME_NONE) {
frame->duration = gst_video_decoder_get_frame_duration (decoder, frame);
}
- /* Fix buffers that came in with DTS and were reordered */
- if (!priv->reordered_input && priv->reordered_output) {
- GST_DEBUG_OBJECT (decoder,
- "Correcting PTS, input buffers had DTS on their timestamps");
- frame->pts = reorder_pts;
- }
-
- if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_out)) {
- if (frame->pts < priv->last_timestamp_out) {
+ if (GST_CLOCK_TIME_IS_VALID (priv->last_timestamp)) {
+ if (frame->pts < priv->last_timestamp) {
GST_WARNING_OBJECT (decoder,
"decreasing timestamp (%" GST_TIME_FORMAT " < %"
GST_TIME_FORMAT ")",
- GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp_out));
- frame->pts = reorder_pts;
+ GST_TIME_ARGS (frame->pts), GST_TIME_ARGS (priv->last_timestamp));
}
}
-
- priv->last_timestamp_out = frame->pts;
- priv->last_out_frame_number = frame->system_frame_number;
+ priv->last_timestamp = frame->pts;
return;
+
+ /* ERRORS */
+no_output_buffer:
+ {
+ GST_ERROR_OBJECT (decoder, "No buffer to output !");
+ }
}
static void
GstVideoCodecState *state = priv->output_state;
GstBuffer *output_buffer;
GstFlowReturn ret = GST_FLOW_OK;
- gint64 start, stop;
+ guint64 start, stop;
GstSegment *segment;
GST_LOG_OBJECT (decoder, "finish frame");
goto done;
}
- output_buffer = gst_buffer_make_metadata_writable (frame->output_buffer);
+ output_buffer = gst_buffer_make_writable (frame->output_buffer);
frame->output_buffer = NULL;
GST_BUFFER_FLAG_UNSET (output_buffer, GST_BUFFER_FLAG_DELTA_UNIT);
if (GST_VIDEO_INFO_IS_INTERLACED (&state->info)) {
if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
GST_VIDEO_CODEC_FRAME_FLAG_TFF)) {
- GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_TFF);
+ GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
} else {
- GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_TFF);
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
}
if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
GST_VIDEO_CODEC_FRAME_FLAG_RFF)) {
- GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_RFF);
+ GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_FLAG_RFF);
} else {
- GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_RFF);
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_FLAG_RFF);
}
if (GST_VIDEO_CODEC_FRAME_FLAG_IS_SET (frame,
GST_VIDEO_CODEC_FRAME_FLAG_ONEFIELD)) {
- GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_ONEFIELD);
+ GST_BUFFER_FLAG_SET (output_buffer, GST_VIDEO_BUFFER_FLAG_ONEFIELD);
} else {
- GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_ONEFIELD);
+ GST_BUFFER_FLAG_UNSET (output_buffer, GST_VIDEO_BUFFER_FLAG_ONEFIELD);
}
}
GST_BUFFER_OFFSET_END (output_buffer) = GST_BUFFER_OFFSET_NONE;
/* update rate estimate */
- priv->bytes_out += GST_BUFFER_SIZE (output_buffer);
+ priv->bytes_out += gst_buffer_get_size (output_buffer);
if (GST_CLOCK_TIME_IS_VALID (frame->duration)) {
priv->time += frame->duration;
} else {
priv->time = GST_CLOCK_TIME_NONE;
}
- gst_buffer_set_caps (output_buffer, GST_PAD_CAPS (decoder->srcpad));
-
GST_LOG_OBJECT (decoder, "pushing frame ts %" GST_TIME_FORMAT
", duration %" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (output_buffer)),
GST_TIME_ARGS (GST_BUFFER_DURATION (output_buffer)));
-
-
/* we got data, so note things are looking up again */
/* FIXME : Shouldn't we avoid going under zero ? */
if (G_UNLIKELY (priv->error_count))
gst_segment_to_running_time (&decoder->input_segment, GST_FORMAT_TIME,
frame->pts);
- /* Store pts */
- if (GST_CLOCK_TIME_IS_VALID (frame->pts)
- && GST_CLOCK_TIME_IS_VALID (priv->last_timestamp_in)
- && frame->pts < priv->last_timestamp_in) {
- GST_DEBUG_OBJECT (decoder, "Incoming timestamps are out of order");
- priv->reordered_input = TRUE;
- }
- priv->last_timestamp_in = frame->pts;
- priv->incoming_timestamps[priv->reorder_idx_in] = frame->pts;
- priv->reorder_idx_in = (priv->reorder_idx_in + 1) % MAX_DTS_PTS_REORDER_DEPTH;
-
/* do something with frame */
ret = decoder_class->handle_frame (decoder, frame);
if (ret != GST_FLOW_OK)
GstVideoCodecState *state = decoder->priv->output_state;
int num_bytes = GST_VIDEO_INFO_SIZE (&state->info);
- GST_DEBUG ("alloc src buffer caps=%" GST_PTR_FORMAT,
- GST_PAD_CAPS (decoder->srcpad));
+ GST_DEBUG ("alloc src buffer");
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
if (G_UNLIKELY (decoder->priv->output_state_changed))
gst_video_decoder_set_src_caps (decoder);
- flow_ret =
- gst_pad_alloc_buffer_and_set_caps (decoder->srcpad,
- GST_BUFFER_OFFSET_NONE, num_bytes, GST_PAD_CAPS (decoder->srcpad),
- &buffer);
-
- if (flow_ret != GST_FLOW_OK) {
- buffer = gst_buffer_new_and_alloc (num_bytes);
- gst_buffer_set_caps (buffer, GST_PAD_CAPS (decoder->srcpad));
- }
+ flow_ret = GST_FLOW_OK;
+ buffer = gst_buffer_new_allocate (NULL, num_bytes, NULL);
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
if (G_UNLIKELY (decoder->priv->output_state_changed))
gst_video_decoder_set_src_caps (decoder);
- g_return_val_if_fail (GST_PAD_CAPS (decoder->srcpad) != NULL, GST_FLOW_ERROR);
-
GST_LOG_OBJECT (decoder, "alloc buffer size %d", num_bytes);
GST_VIDEO_DECODER_STREAM_LOCK (decoder);
- flow_ret =
- gst_pad_alloc_buffer_and_set_caps (decoder->srcpad,
- GST_BUFFER_OFFSET_NONE, num_bytes, GST_PAD_CAPS (decoder->srcpad),
- &frame->output_buffer);
-
- if (flow_ret != GST_FLOW_OK) {
- GST_WARNING_OBJECT (decoder, "failed to get buffer %s",
- gst_flow_get_name (flow_ret));
- }
+ flow_ret = GST_FLOW_OK;
+ frame->output_buffer = gst_buffer_new_allocate (NULL, num_bytes, NULL);
GST_VIDEO_DECODER_STREAM_UNLOCK (decoder);
return evt;
}
+static GstElementClass *parent_class = NULL;
+static void gst_video_encoder_class_init (GstVideoEncoderClass * klass);
+static void gst_video_encoder_init (GstVideoEncoder * enc,
+ GstVideoEncoderClass * klass);
+
static void gst_video_encoder_finalize (GObject * object);
-static gboolean gst_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps);
-static GstCaps *gst_video_encoder_sink_getcaps (GstPad * pad);
-static gboolean gst_video_encoder_src_event (GstPad * pad, GstEvent * event);
-static gboolean gst_video_encoder_sink_event (GstPad * pad, GstEvent * event);
-static GstFlowReturn gst_video_encoder_chain (GstPad * pad, GstBuffer * buf);
+static gboolean gst_video_encoder_setcaps (GstVideoEncoder * enc,
+ GstCaps * caps);
+static GstCaps *gst_video_encoder_sink_getcaps (GstVideoEncoder * encoder,
+ GstCaps * filter);
+static gboolean gst_video_encoder_src_event (GstPad * pad, GstObject * parent,
+ GstEvent * event);
+static gboolean gst_video_encoder_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event);
+static GstFlowReturn gst_video_encoder_chain (GstPad * pad, GstObject * parent,
+ GstBuffer * buf);
static GstStateChangeReturn gst_video_encoder_change_state (GstElement *
element, GstStateChange transition);
-static const GstQueryType *gst_video_encoder_get_query_types (GstPad * pad);
-static gboolean gst_video_encoder_src_query (GstPad * pad, GstQuery * query);
+static gboolean gst_video_encoder_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query);
+static gboolean gst_video_encoder_src_query (GstPad * pad, GstObject * parent,
+ GstQuery * query);
static GstVideoCodecFrame *gst_video_encoder_new_frame (GstVideoEncoder *
encoder, GstBuffer * buf, GstClockTime timestamp, GstClockTime duration);
-static void
-_do_init (GType object_type)
-{
- const GInterfaceInfo preset_interface_info = {
- NULL, /* interface_init */
- NULL, /* interface_finalize */
- NULL /* interface_data */
- };
-
- g_type_add_interface_static (object_type, GST_TYPE_PRESET,
- &preset_interface_info);
-}
-
-GST_BOILERPLATE_FULL (GstVideoEncoder, gst_video_encoder,
- GstElement, GST_TYPE_ELEMENT, _do_init);
-
-static void
-gst_video_encoder_base_init (gpointer g_class)
+/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
+ * method to get to the padtemplates */
+GType
+gst_video_encoder_get_type (void)
{
- GST_DEBUG_CATEGORY_INIT (videoencoder_debug, "videoencoder", 0,
- "Base Video Encoder");
+ static volatile gsize type = 0;
+
+ if (g_once_init_enter (&type)) {
+ GType _type;
+ static const GTypeInfo info = {
+ sizeof (GstVideoEncoderClass),
+ NULL,
+ NULL,
+ (GClassInitFunc) gst_video_encoder_class_init,
+ NULL,
+ NULL,
+ sizeof (GstVideoEncoder),
+ 0,
+ (GInstanceInitFunc) gst_video_encoder_init,
+ };
+ const GInterfaceInfo preset_interface_info = {
+ NULL, /* interface_init */
+ NULL, /* interface_finalize */
+ NULL /* interface_data */
+ };
+
+ _type = g_type_register_static (GST_TYPE_ELEMENT,
+ "GstVideoEncoder", &info, G_TYPE_FLAG_ABSTRACT);
+ g_type_add_interface_static (_type, GST_TYPE_PRESET,
+ &preset_interface_info);
+ g_once_init_leave (&type, _type);
+ }
+ return type;
}
static void
gobject_class = G_OBJECT_CLASS (klass);
gstelement_class = GST_ELEMENT_CLASS (klass);
+ GST_DEBUG_CATEGORY_INIT (videoencoder_debug, "videoencoder", 0,
+ "Base Video Encoder");
+
+ parent_class = g_type_class_peek_parent (klass);
+
g_type_class_add_private (klass, sizeof (GstVideoEncoderPrivate));
gobject_class->finalize = gst_video_encoder_finalize;
gst_pad_set_chain_function (pad, GST_DEBUG_FUNCPTR (gst_video_encoder_chain));
gst_pad_set_event_function (pad,
GST_DEBUG_FUNCPTR (gst_video_encoder_sink_event));
- gst_pad_set_setcaps_function (pad,
- GST_DEBUG_FUNCPTR (gst_video_encoder_sink_setcaps));
- gst_pad_set_getcaps_function (pad,
- GST_DEBUG_FUNCPTR (gst_video_encoder_sink_getcaps));
+ gst_pad_set_query_function (pad,
+ GST_DEBUG_FUNCPTR (gst_video_encoder_sink_query));
gst_element_add_pad (GST_ELEMENT (encoder), encoder->sinkpad);
pad_template =
encoder->srcpad = pad = gst_pad_new_from_template (pad_template, "src");
- gst_pad_set_query_type_function (pad,
- GST_DEBUG_FUNCPTR (gst_video_encoder_get_query_types));
gst_pad_set_query_function (pad,
GST_DEBUG_FUNCPTR (gst_video_encoder_src_query));
gst_pad_set_event_function (pad,
gst_segment_init (&encoder->input_segment, GST_FORMAT_TIME);
gst_segment_init (&encoder->output_segment, GST_FORMAT_TIME);
- g_static_rec_mutex_init (&encoder->stream_lock);
+ g_rec_mutex_init (&encoder->stream_lock);
priv->at_eos = FALSE;
priv->headers = NULL;
}
static gboolean
-gst_video_encoder_sink_setcaps (GstPad * pad, GstCaps * caps)
+gst_video_encoder_setcaps (GstVideoEncoder * encoder, GstCaps * caps)
{
- GstVideoEncoder *encoder;
GstVideoEncoderClass *encoder_class;
GstVideoCodecState *state;
gboolean ret;
gboolean samecaps = FALSE;
- encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
/* subclass should do something here ... */
* Since: 0.10.36
*/
GstCaps *
-gst_video_encoder_proxy_getcaps (GstVideoEncoder * encoder, GstCaps * caps)
+gst_video_encoder_proxy_getcaps (GstVideoEncoder * encoder, GstCaps * caps,
+ GstCaps * filter)
{
- const GstCaps *templ_caps;
+ GstCaps *templ_caps;
GstCaps *allowed;
GstCaps *fcaps, *filter_caps;
gint i, j;
allowed = gst_pad_get_allowed_caps (encoder->srcpad);
if (!allowed || gst_caps_is_empty (allowed) || gst_caps_is_any (allowed)) {
- fcaps = gst_caps_copy (templ_caps);
+ fcaps = templ_caps;
goto done;
}
const GValue *val;
GstStructure *s;
- s = gst_structure_id_empty_new (q_name);
+ s = gst_structure_new_id_empty (q_name);
if ((val = gst_structure_get_value (allowed_s, "width")))
gst_structure_set_value (s, "width", val);
if ((val = gst_structure_get_value (allowed_s, "height")))
if ((val = gst_structure_get_value (allowed_s, "pixel-aspect-ratio")))
gst_structure_set_value (s, "pixel-aspect-ratio", val);
- gst_caps_merge_structure (filter_caps, s);
+ filter_caps = gst_caps_merge_structure (filter_caps, s);
}
}
fcaps = gst_caps_intersect (filter_caps, templ_caps);
gst_caps_unref (filter_caps);
+ gst_caps_unref (templ_caps);
+
+ if (filter) {
+ GST_LOG_OBJECT (encoder, "intersecting with %" GST_PTR_FORMAT, filter);
+ filter_caps = gst_caps_intersect (fcaps, filter);
+ gst_caps_unref (fcaps);
+ fcaps = filter_caps;
+ }
done:
gst_caps_replace (&allowed, NULL);
}
static GstCaps *
-gst_video_encoder_sink_getcaps (GstPad * pad)
+/* 1.0-style sink getcaps: the encoder and the optional filter caps arrive
+ * directly from the CAPS query handler, so the 0.10 pad-parent ref/unref
+ * dance is gone.  Returns the (owned) caps the sink pad can accept. */
+gst_video_encoder_sink_getcaps (GstVideoEncoder * encoder, GstCaps * filter)
{
- GstVideoEncoder *encoder;
 GstVideoEncoderClass *klass;
 GstCaps *caps;
- encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
 klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
+ /* Prefer the subclass ::getcaps vfunc (which now receives the filter);
+  * otherwise fall back to the template/allowed-caps proxy, forwarding
+  * the filter so downstream constraints are still applied. */
 if (klass->getcaps)
- caps = klass->getcaps (encoder);
+ caps = klass->getcaps (encoder, filter);
 else
- caps = gst_video_encoder_proxy_getcaps (encoder, NULL);
- gst_object_unref (encoder);
+ caps = gst_video_encoder_proxy_getcaps (encoder, NULL, filter);
 GST_LOG_OBJECT (encoder, "Returning caps %" GST_PTR_FORMAT, caps);
 return caps;
}
+
+/* Sink pad query handler, new in the 1.0 API: it replaces the 0.10
+ * gst_pad_set_getcaps_function() mechanism.  CAPS queries are answered
+ * through the encoder's getcaps path (honouring the query's filter);
+ * all other queries go through the default pad handling. */
+static gboolean
+gst_video_encoder_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query)
+{
+ GstVideoEncoder *encoder;
+ gboolean res = FALSE;
+
+ encoder = GST_VIDEO_ENCODER (parent);
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_CAPS:
+ {
+ GstCaps *filter, *caps;
+
+ /* filter is borrowed from the query (may be NULL); the caps we
+ * compute are owned, so unref after storing the query result. */
+ gst_query_parse_caps (query, &filter);
+ caps = gst_video_encoder_sink_getcaps (encoder, filter);
+ gst_query_set_caps_result (query, caps);
+ gst_caps_unref (caps);
+ res = TRUE;
+ break;
+ }
+ default:
+ res = gst_pad_query_default (pad, parent, query);
+ break;
+ }
+ return res;
+}
+
static void
gst_video_encoder_finalize (GObject * object)
{
g_list_foreach (encoder->priv->headers, (GFunc) gst_buffer_unref, NULL);
g_list_free (encoder->priv->headers);
}
- g_static_rec_mutex_free (&encoder->stream_lock);
+ g_rec_mutex_clear (&encoder->stream_lock);
G_OBJECT_CLASS (parent_class)->finalize (object);
}
gst_video_encoder_push_event (GstVideoEncoder * encoder, GstEvent * event)
{
switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- gboolean update;
- double rate;
- double applied_rate;
- GstFormat format;
- gint64 start;
- gint64 stop;
- gint64 position;
+ GstSegment segment;
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &position);
- GST_DEBUG_OBJECT (encoder, "newseg rate %g, applied rate %g, "
- "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
- ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
- GST_TIME_ARGS (position));
+ gst_event_copy_segment (event, &segment);
- if (format != GST_FORMAT_TIME) {
- GST_DEBUG_OBJECT (encoder, "received non TIME newsegment");
+ GST_DEBUG_OBJECT (encoder, "segment %" GST_SEGMENT_FORMAT, &segment);
+
+ if (segment.format != GST_FORMAT_TIME) {
+ GST_DEBUG_OBJECT (encoder, "received non TIME segment");
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
break;
}
- gst_segment_set_newsegment_full (&encoder->output_segment, update, rate,
- applied_rate, format, start, stop, position);
+ encoder->output_segment = segment;
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
break;
}
encoder_class = GST_VIDEO_ENCODER_GET_CLASS (encoder);
switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ ret = gst_video_encoder_setcaps (encoder, caps);
+ gst_event_unref (event);
+ break;
+ }
case GST_EVENT_EOS:
{
GstFlowReturn flow_ret;
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
break;
}
- case GST_EVENT_NEWSEGMENT:
+ case GST_EVENT_SEGMENT:
{
- gboolean update;
- double rate;
- double applied_rate;
- GstFormat format;
- gint64 start;
- gint64 stop;
- gint64 position;
+ GstSegment segment;
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
- gst_event_parse_new_segment_full (event, &update, &rate, &applied_rate,
- &format, &start, &stop, &position);
- GST_DEBUG_OBJECT (encoder, "newseg rate %g, applied rate %g, "
- "format %d, start = %" GST_TIME_FORMAT ", stop = %" GST_TIME_FORMAT
- ", pos = %" GST_TIME_FORMAT, rate, applied_rate, format,
- GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
- GST_TIME_ARGS (position));
+ gst_event_copy_segment (event, &segment);
+
+ GST_DEBUG_OBJECT (encoder, "segment %" GST_SEGMENT_FORMAT, &segment);
- if (format != GST_FORMAT_TIME) {
+ if (segment.format != GST_FORMAT_TIME) {
GST_DEBUG_OBJECT (encoder, "received non TIME newsegment");
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
break;
encoder->priv->at_eos = FALSE;
- gst_segment_set_newsegment_full (&encoder->input_segment, update, rate,
- applied_rate, format, start, stop, position);
+ encoder->input_segment = segment;
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
break;
}
}
static gboolean
-gst_video_encoder_sink_event (GstPad * pad, GstEvent * event)
+gst_video_encoder_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event)
{
GstVideoEncoder *enc;
GstVideoEncoderClass *klass;
gboolean handled = FALSE;
gboolean ret = TRUE;
- enc = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ enc = GST_VIDEO_ENCODER (parent);
klass = GST_VIDEO_ENCODER_GET_CLASS (enc);
GST_DEBUG_OBJECT (enc, "received event %d, %s", GST_EVENT_TYPE (event),
GST_DEBUG_OBJECT (enc, "event handled");
- gst_object_unref (enc);
return ret;
}
}
static gboolean
-gst_video_encoder_src_event (GstPad * pad, GstEvent * event)
+gst_video_encoder_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
{
GstVideoEncoder *encoder;
GstVideoEncoderClass *klass;
gboolean ret = FALSE;
gboolean handled = FALSE;
- encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ encoder = GST_VIDEO_ENCODER (parent);
klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
GST_LOG_OBJECT (encoder, "handling event: %" GST_PTR_FORMAT, event);
handled = gst_video_encoder_src_eventfunc (encoder, event);
if (!handled)
- ret = gst_pad_event_default (pad, event);
-
- gst_object_unref (encoder);
+ ret = gst_pad_event_default (pad, parent, event);
return ret;
}
-static const GstQueryType *
-gst_video_encoder_get_query_types (GstPad * pad)
-{
- static const GstQueryType query_types[] = {
- GST_QUERY_CONVERT,
- GST_QUERY_LATENCY,
- 0
- };
-
- return query_types;
-}
-
static gboolean
-gst_video_encoder_src_query (GstPad * pad, GstQuery * query)
+gst_video_encoder_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
{
GstVideoEncoderPrivate *priv;
GstVideoEncoder *enc;
gboolean res;
- GstPad *peerpad;
- enc = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ enc = GST_VIDEO_ENCODER (parent);
priv = enc->priv;
- peerpad = gst_pad_get_peer (enc->sinkpad);
GST_LOG_OBJECT (enc, "handling query: %" GST_PTR_FORMAT, query);
gboolean live;
GstClockTime min_latency, max_latency;
- res = gst_pad_query (peerpad, query);
+ res = gst_pad_peer_query (enc->sinkpad, query);
if (res) {
gst_query_parse_latency (query, &live, &min_latency, &max_latency);
GST_DEBUG_OBJECT (enc, "Peer latency: live %d, min %"
}
break;
default:
- res = gst_pad_query_default (pad, query);
+ res = gst_pad_query_default (pad, parent, query);
}
- gst_object_unref (peerpad);
- gst_object_unref (enc);
return res;
error:
GST_DEBUG_OBJECT (enc, "query failed");
- gst_object_unref (peerpad);
- gst_object_unref (enc);
return res;
}
static GstFlowReturn
-gst_video_encoder_chain (GstPad * pad, GstBuffer * buf)
+gst_video_encoder_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
GstVideoEncoder *encoder;
GstVideoEncoderPrivate *priv;
GstVideoEncoderClass *klass;
GstVideoCodecFrame *frame;
GstFlowReturn ret = GST_FLOW_OK;
- gint64 start, stop = GST_CLOCK_TIME_NONE, cstart, cstop;
+ guint64 start, stop = GST_CLOCK_TIME_NONE, cstart, cstop;
- encoder = GST_VIDEO_ENCODER (gst_pad_get_parent (pad));
+ encoder = GST_VIDEO_ENCODER (parent);
priv = encoder->priv;
klass = GST_VIDEO_ENCODER_GET_CLASS (encoder);
GST_VIDEO_ENCODER_STREAM_LOCK (encoder);
- /* .... ?? */
- if (!GST_PAD_CAPS (pad)) {
- ret = GST_FLOW_NOT_NEGOTIATED;
- goto done;
- }
-
start = GST_BUFFER_TIMESTAMP (buf);
if (GST_CLOCK_TIME_IS_VALID (GST_BUFFER_DURATION (buf)))
stop = start + GST_BUFFER_DURATION (buf);
GST_LOG_OBJECT (encoder,
"received buffer of size %d with ts %" GST_TIME_FORMAT
- ", duration %" GST_TIME_FORMAT, GST_BUFFER_SIZE (buf),
+ ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
GST_TIME_ARGS (start), GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
if (priv->at_eos) {
- ret = GST_FLOW_UNEXPECTED;
+ ret = GST_FLOW_EOS;
goto done;
}
done:
GST_VIDEO_ENCODER_STREAM_UNLOCK (encoder);
- gst_object_unref (encoder);
-
return ret;
}
GST_BUFFER_DURATION (frame->output_buffer) = frame->duration;
/* update rate estimate */
- priv->bytes += GST_BUFFER_SIZE (frame->output_buffer);
+ priv->bytes += gst_buffer_get_size (frame->output_buffer);
if (GST_CLOCK_TIME_IS_VALID (frame->duration)) {
priv->time += frame->duration;
} else {
for (tmp = priv->headers; tmp; tmp = tmp->next) {
GstBuffer *tmpbuf = GST_BUFFER (tmp->data);
- copy = g_list_append (copy, gst_buffer_make_metadata_writable (tmpbuf));
+ copy = g_list_append (copy, gst_buffer_make_writable (tmpbuf));
}
g_list_free (priv->headers);
priv->headers = copy;
for (tmp = priv->headers; tmp; tmp = tmp->next) {
GstBuffer *tmpbuf = GST_BUFFER (tmp->data);
- gst_buffer_set_caps (tmpbuf, GST_PAD_CAPS (encoder->srcpad));
gst_buffer_ref (tmpbuf);
- priv->bytes += GST_BUFFER_SIZE (tmpbuf);
+ priv->bytes += gst_buffer_get_size (tmpbuf);
if (G_UNLIKELY (discont)) {
GST_LOG_OBJECT (encoder, "marking discont");
GST_BUFFER_FLAG_SET (tmpbuf, GST_BUFFER_FLAG_DISCONT);
GST_BUFFER_FLAG_SET (frame->output_buffer, GST_BUFFER_FLAG_DISCONT);
}
- gst_buffer_set_caps (GST_BUFFER (frame->output_buffer),
- GST_PAD_CAPS (encoder->srcpad));
-
if (encoder_class->pre_push)
ret = encoder_class->pre_push (encoder, frame);