* Boston, MA 02111-1307, USA.
*/
+/**
+ * SECTION:element-videorate
+ *
+ * This element takes an incoming stream of timestamped video frames.
+ * It will produce a perfect stream that matches the source pad's framerate.
+ *
+ * The correction is performed by dropping and duplicating frames, no fancy
+ * algorithm is used to interpolate frames (yet).
+ *
+ * By default the element will simply negotiate the same framerate on its
+ * source and sink pad.
+ *
+ * This operation is useful to link to elements that require a perfect stream.
+ * Typical examples are formats that do not store timestamps for video frames,
+ * but only store a framerate, like Ogg and AVI.
+ *
+ * A conversion to a specific framerate can be forced by using filtered caps on
+ * the source pad.
+ *
+ * The properties #GstVideoRate:in, #GstVideoRate:out, #GstVideoRate:duplicate
+ * and #GstVideoRate:drop can be read to obtain information about the number of
+ * input frames, output frames, dropped frames (i.e. the number of unused input
+ * frames) and duplicated frames (i.e. the number of times an input frame was
+ * duplicated, besides being used normally).
+ *
+ * An input stream that needs no adjustments will thus never have dropped or
+ * duplicated frames.
+ *
+ * When the #GstVideoRate:silent property is set to FALSE, a GObject property
+ * notification will be emitted whenever one of the #GstVideoRate:duplicate or
+ * #GstVideoRate:drop values changes.
+ * This can potentially cause performance degradation.
+ * Note that property notification will happen from the streaming thread, so
+ * applications should be prepared for this.
+ *
+ * <refsect2>
+ * <title>Example pipelines</title>
+ * |[
+ * gst-launch -v filesrc location=videotestsrc.ogg ! oggdemux ! theoradec ! videorate ! video/x-raw-yuv,framerate=15/1 ! xvimagesink
+ * ]| Decode an Ogg/Theora file and adjust the framerate to 15 fps before playing.
+ * To create the test Ogg/Theora file, refer to the documentation of theoraenc.
+ * |[
+ * gst-launch -v v4lsrc ! videorate ! video/x-raw-yuv,framerate=25/2 ! theoraenc ! oggmux ! filesink location=v4l.ogg
+ * ]| Capture video from a V4L device, and adjust the stream to 12.5 fps before
+ * encoding to Ogg/Theora.
+ * </refsect2>
+ *
+ * Last reviewed on 2006-09-02 (0.10.11)
+ */
+
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
-#include <gst/gst.h>
+#include "gstvideorate.h"
-GST_DEBUG_CATEGORY (video_rate_debug);
+GST_DEBUG_CATEGORY_STATIC (video_rate_debug);
#define GST_CAT_DEFAULT video_rate_debug
-#define GST_TYPE_VIDEO_RATE \
- (gst_video_rate_get_type())
-#define GST_VIDEO_RATE(obj) \
- (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_VIDEO_RATE,GstVideoRate))
-#define GST_VIDEO_RATE_CLASS(klass) \
- (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_VIDEO_RATE,GstVideoRate))
-#define GST_IS_VIDEO_RATE(obj) \
- (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_VIDEO_RATE))
-#define GST_IS_VIDEO_RATE_CLASS(obj) \
- (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_VIDEO_RATE))
-
-typedef struct _GstVideoRate GstVideoRate;
-typedef struct _GstVideoRateClass GstVideoRateClass;
-
-struct _GstVideoRate
-{
- GstElement element;
-
- GstPad *sinkpad, *srcpad;
-
- /* video state */
- gint from_rate_numerator, from_rate_denominator;
- gint to_rate_numerator, to_rate_denominator;
- guint64 next_ts; /* Timestamp of next buffer to output */
- GstBuffer *prevbuf;
- guint64 prev_ts; /* Previous buffer timestamp */
- guint64 in, out, dup, drop;
-
- /* segment handling */
- gint64 segment_start;
- gint64 segment_stop;
- gint64 segment_accum;
-
- gboolean silent;
- gdouble new_pref;
-};
-
-struct _GstVideoRateClass
-{
- GstElementClass parent_class;
-};
-
-/* elementfactory information */
-static GstElementDetails video_rate_details =
-GST_ELEMENT_DETAILS ("Video rate adjuster",
- "Filter/Effect/Video",
- "Drops/duplicates/adjusts timestamps on video frames to make a perfect stream",
- "Wim Taymans <wim@fluendo.com>");
-
/* GstVideoRate signals and args */
enum
{
#define DEFAULT_SILENT TRUE
#define DEFAULT_NEW_PREF 1.0
+#define DEFAULT_SKIP_TO_FIRST FALSE
enum
{
ARG_DROP,
ARG_SILENT,
ARG_NEW_PREF,
- /* FILL ME */
+ ARG_SKIP_TO_FIRST
+ /* FILL ME */
};
static GstStaticPadTemplate gst_video_rate_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS ("video/x-raw-yuv; video/x-raw-rgb")
+ GST_STATIC_CAPS ("video/x-raw-yuv;"
+ "video/x-raw-rgb;" "video/x-raw-gray;" "image/jpeg;" "image/png")
);
static GstStaticPadTemplate gst_video_rate_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
GST_PAD_SINK,
GST_PAD_ALWAYS,
- GST_STATIC_CAPS ("video/x-raw-yuv; video/x-raw-rgb")
+ GST_STATIC_CAPS ("video/x-raw-yuv;"
+ "video/x-raw-rgb;" "video/x-raw-gray;" "image/jpeg;" "image/png")
);
-static void gst_video_rate_base_init (gpointer g_class);
-static void gst_video_rate_class_init (GstVideoRateClass * klass);
-static void gst_video_rate_init (GstVideoRate * videorate);
+static void gst_video_rate_swap_prev (GstVideoRate * videorate,
+ GstBuffer * buffer, gint64 time);
static gboolean gst_video_rate_event (GstPad * pad, GstEvent * event);
+static gboolean gst_video_rate_query (GstPad * pad, GstQuery * query);
static GstFlowReturn gst_video_rate_chain (GstPad * pad, GstBuffer * buffer);
static void gst_video_rate_set_property (GObject * object,
static GstStateChangeReturn gst_video_rate_change_state (GstElement * element,
GstStateChange transition);
-static GstElementClass *parent_class = NULL;
-
/*static guint gst_video_rate_signals[LAST_SIGNAL] = { 0 }; */
-static GType
-gst_video_rate_get_type (void)
-{
- static GType video_rate_type = 0;
-
- if (!video_rate_type) {
- static const GTypeInfo video_rate_info = {
- sizeof (GstVideoRateClass),
- gst_video_rate_base_init,
- NULL,
- (GClassInitFunc) gst_video_rate_class_init,
- NULL,
- NULL,
- sizeof (GstVideoRate),
- 0,
- (GInstanceInitFunc) gst_video_rate_init,
- };
-
- video_rate_type = g_type_register_static (GST_TYPE_ELEMENT,
- "GstVideoRate", &video_rate_info, 0);
- }
+static GParamSpec *pspec_drop = NULL;
+static GParamSpec *pspec_duplicate = NULL;
- return video_rate_type;
-}
+GST_BOILERPLATE (GstVideoRate, gst_video_rate, GstElement, GST_TYPE_ELEMENT);
static void
gst_video_rate_base_init (gpointer g_class)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
- gst_element_class_set_details (element_class, &video_rate_details);
+ gst_element_class_set_details_simple (element_class,
+ "Video rate adjuster", "Filter/Effect/Video",
+ "Drops/duplicates/adjusts timestamps on video frames to make a perfect stream",
+ "Wim Taymans <wim@fluendo.com>");
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_video_rate_sink_template));
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_video_rate_src_template));
}
+
static void
gst_video_rate_class_init (GstVideoRateClass * klass)
{
g_object_class_install_property (object_class, ARG_IN,
g_param_spec_uint64 ("in", "In",
- "Number of input frames", 0, G_MAXUINT64, 0, G_PARAM_READABLE));
+ "Number of input frames", 0, G_MAXUINT64, 0,
+ G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (object_class, ARG_OUT,
- g_param_spec_uint64 ("out", "Out",
- "Number of output frames", 0, G_MAXUINT64, 0, G_PARAM_READABLE));
- g_object_class_install_property (object_class, ARG_DUP,
- g_param_spec_uint64 ("duplicate", "Duplicate",
- "Number of duplicated frames", 0, G_MAXUINT64, 0, G_PARAM_READABLE));
- g_object_class_install_property (object_class, ARG_DROP,
- g_param_spec_uint64 ("drop", "Drop",
- "Number of dropped frames", 0, G_MAXUINT64, 0, G_PARAM_READABLE));
+ g_param_spec_uint64 ("out", "Out", "Number of output frames", 0,
+ G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+ pspec_duplicate = g_param_spec_uint64 ("duplicate", "Duplicate",
+ "Number of duplicated frames", 0, G_MAXUINT64, 0,
+ G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
+ g_object_class_install_property (object_class, ARG_DUP, pspec_duplicate);
+ pspec_drop = g_param_spec_uint64 ("drop", "Drop", "Number of dropped frames",
+ 0, G_MAXUINT64, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
+ g_object_class_install_property (object_class, ARG_DROP, pspec_drop);
g_object_class_install_property (object_class, ARG_SILENT,
g_param_spec_boolean ("silent", "silent",
- "Don't emit notify for dropped and duplicated frames",
- DEFAULT_SILENT, G_PARAM_READWRITE));
+ "Don't emit notify for dropped and duplicated frames", DEFAULT_SILENT,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (object_class, ARG_NEW_PREF,
- g_param_spec_double ("new_pref", "New Pref",
- "Value indicating how much to prefer new frames",
- 0.0, 1.0, DEFAULT_NEW_PREF, G_PARAM_READWRITE));
-
- element_class->change_state = gst_video_rate_change_state;
+ g_param_spec_double ("new-pref", "New Pref",
+ "Value indicating how much to prefer new frames (unused)", 0.0, 1.0,
+ DEFAULT_NEW_PREF, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ /**
+ * GstVideoRate:skip-to-first:
+ *
+ * Don't produce buffers before the first one we receive.
+ *
+ * Since: 0.10.25
+ */
+ g_object_class_install_property (object_class, ARG_SKIP_TO_FIRST,
+ g_param_spec_boolean ("skip-to-first", "Skip to first buffer",
+ "Don't produce buffers before the first one we receive",
+ DEFAULT_SKIP_TO_FIRST, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ element_class->change_state = GST_DEBUG_FUNCPTR (gst_video_rate_change_state);
}
/* return the caps that can be used on out_pad given in_caps on in_pad */
GstCaps *intersect;
const GstCaps *in_templ;
gint i;
+ GSList *extra_structures = NULL;
+ GSList *iter;
in_templ = gst_pad_get_pad_template_caps (in_pad);
intersect = gst_caps_intersect (in_caps, in_templ);
structure = gst_caps_get_structure (intersect, i);
- gst_structure_set (structure,
- "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+ if (gst_structure_has_field (structure, "framerate")) {
+ GstStructure *copy_structure;
+
+ copy_structure = gst_structure_copy (structure);
+ gst_structure_set (copy_structure,
+ "framerate", GST_TYPE_FRACTION_RANGE, 0, 1, G_MAXINT, 1, NULL);
+ extra_structures = g_slist_append (extra_structures, copy_structure);
+ }
}
+
+ /* append the extra structures */
+ for (iter = extra_structures; iter != NULL; iter = g_slist_next (iter)) {
+ gst_caps_append_structure (intersect, (GstStructure *) iter->data);
+ }
+ g_slist_free (extra_structures);
+
*out_caps = intersect;
return TRUE;
GstPad *otherpad, *opeer;
gint rate_numerator, rate_denominator;
- videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad));
+ videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad));
+
+ GST_DEBUG_OBJECT (pad, "setcaps called %" GST_PTR_FORMAT, caps);
structure = gst_caps_get_structure (caps, 0);
if (!gst_structure_get_fraction (structure, "framerate",
&rate_numerator, &rate_denominator))
- goto done;
+ goto no_framerate;
if (pad == videorate->srcpad) {
+ /* out_frame_count is scaled by the frame rate caps when calculating next_ts.
+ * when the frame rate caps change, we must update base_ts and reset
+ * out_frame_count */
+ if (videorate->to_rate_numerator) {
+ videorate->base_ts +=
+ gst_util_uint64_scale (videorate->out_frame_count,
+ videorate->to_rate_denominator * GST_SECOND,
+ videorate->to_rate_numerator);
+ }
+ videorate->out_frame_count = 0;
videorate->to_rate_numerator = rate_numerator;
videorate->to_rate_denominator = rate_denominator;
otherpad = videorate->sinkpad;
videorate->from_rate_denominator = rate_denominator;
otherpad = videorate->srcpad;
}
+
/* now try to find something for the peer */
opeer = gst_pad_get_peer (otherpad);
if (opeer) {
ret = TRUE;
} else {
GstCaps *peercaps;
- GstCaps *intersect;
GstCaps *transform = NULL;
ret = FALSE;
/* see how we can transform the input caps */
if (!gst_video_rate_transformcaps (pad, caps, otherpad, &transform))
- goto done;
+ goto no_transform;
/* see what the peer can do */
peercaps = gst_pad_get_caps (opeer);
- GST_DEBUG ("icaps %" GST_PTR_FORMAT, peercaps);
- GST_DEBUG ("transform %" GST_PTR_FORMAT, transform);
+ GST_DEBUG_OBJECT (opeer, "icaps %" GST_PTR_FORMAT, peercaps);
+ GST_DEBUG_OBJECT (videorate, "transform %" GST_PTR_FORMAT, transform);
/* filter against our possibilities */
- intersect = gst_caps_intersect (peercaps, transform);
+ caps = gst_caps_intersect (peercaps, transform);
gst_caps_unref (peercaps);
gst_caps_unref (transform);
- GST_DEBUG ("intersect %" GST_PTR_FORMAT, intersect);
+ GST_DEBUG_OBJECT (videorate, "intersect %" GST_PTR_FORMAT, caps);
+
+ /* could turn up empty, due to e.g. colorspace etc */
+ if (gst_caps_get_size (caps) == 0) {
+ gst_caps_unref (caps);
+ goto no_transform;
+ }
/* take first possibility */
- caps = gst_caps_copy_nth (intersect, 0);
- gst_caps_unref (intersect);
+ gst_caps_truncate (caps);
structure = gst_caps_get_structure (caps, 0);
/* and fixate */
videorate->from_rate_numerator = rate_numerator;
videorate->from_rate_denominator = rate_denominator;
}
+
+ if (gst_structure_has_field (structure, "interlaced"))
+ gst_structure_fixate_field_boolean (structure, "interlaced", FALSE);
+ if (gst_structure_has_field (structure, "color-matrix"))
+ gst_structure_fixate_field_string (structure, "color-matrix", "sdtv");
+ if (gst_structure_has_field (structure, "chroma-site"))
+ gst_structure_fixate_field_string (structure, "chroma-site", "mpeg2");
+ if (gst_structure_has_field (structure, "pixel-aspect-ratio"))
+ gst_structure_fixate_field_nearest_fraction (structure,
+ "pixel-aspect-ratio", 1, 1);
+
gst_pad_set_caps (otherpad, caps);
+ gst_caps_unref (caps);
ret = TRUE;
}
gst_object_unref (opeer);
}
done:
+ /* After a setcaps, our caps may have changed. In that case, we can't use
+ * the old buffer, if there was one (it might have different dimensions) */
+ GST_DEBUG_OBJECT (videorate, "swapping old buffers");
+ gst_video_rate_swap_prev (videorate, NULL, GST_CLOCK_TIME_NONE);
+
+ gst_object_unref (videorate);
return ret;
+
+no_framerate:
+ {
+ GST_DEBUG_OBJECT (videorate, "no framerate specified");
+ goto done;
+ }
+no_transform:
+ {
+ GST_DEBUG_OBJECT (videorate, "no framerate transform possible");
+ ret = FALSE;
+ goto done;
+ }
}
static void
-gst_video_rate_blank_data (GstVideoRate * videorate)
+gst_video_rate_reset (GstVideoRate * videorate)
{
- GST_DEBUG ("resetting data");
- if (videorate->prevbuf)
- gst_buffer_unref (videorate->prevbuf);
- videorate->prevbuf = NULL;
+ GST_DEBUG_OBJECT (videorate, "resetting internal variables");
- videorate->from_rate_numerator = 0;
- videorate->from_rate_denominator = 0;
- videorate->to_rate_numerator = 0;
- videorate->to_rate_denominator = 0;
videorate->in = 0;
videorate->out = 0;
+ videorate->base_ts = 0;
+ videorate->out_frame_count = 0;
videorate->drop = 0;
videorate->dup = 0;
- videorate->next_ts = 0LL;
- videorate->prev_ts = 0LL;
+ videorate->next_ts = GST_CLOCK_TIME_NONE;
+ videorate->last_ts = GST_CLOCK_TIME_NONE;
+ videorate->discont = TRUE;
+ gst_video_rate_swap_prev (videorate, NULL, 0);
- videorate->segment_start = 0;
- videorate->segment_stop = 0;
- videorate->segment_accum = 0;
+ gst_segment_init (&videorate->segment, GST_FORMAT_TIME);
}
static void
-gst_video_rate_init (GstVideoRate * videorate)
+gst_video_rate_init (GstVideoRate * videorate, GstVideoRateClass * klass)
{
- GST_DEBUG ("gst_video_rate_init");
videorate->sinkpad =
gst_pad_new_from_static_template (&gst_video_rate_sink_template, "sink");
+ gst_pad_set_event_function (videorate->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_event));
+ gst_pad_set_chain_function (videorate->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_chain));
+ gst_pad_set_getcaps_function (videorate->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_getcaps));
+ gst_pad_set_setcaps_function (videorate->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_setcaps));
gst_element_add_pad (GST_ELEMENT (videorate), videorate->sinkpad);
- gst_pad_set_event_function (videorate->sinkpad, gst_video_rate_event);
- gst_pad_set_chain_function (videorate->sinkpad, gst_video_rate_chain);
- gst_pad_set_getcaps_function (videorate->sinkpad, gst_video_rate_getcaps);
- gst_pad_set_setcaps_function (videorate->sinkpad, gst_video_rate_setcaps);
videorate->srcpad =
gst_pad_new_from_static_template (&gst_video_rate_src_template, "src");
+ gst_pad_set_query_function (videorate->srcpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_query));
+ gst_pad_set_getcaps_function (videorate->srcpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_getcaps));
+ gst_pad_set_setcaps_function (videorate->srcpad,
+ GST_DEBUG_FUNCPTR (gst_video_rate_setcaps));
gst_element_add_pad (GST_ELEMENT (videorate), videorate->srcpad);
- gst_pad_set_getcaps_function (videorate->srcpad, gst_video_rate_getcaps);
- gst_pad_set_setcaps_function (videorate->srcpad, gst_video_rate_setcaps);
- gst_video_rate_blank_data (videorate);
+ gst_video_rate_reset (videorate);
videorate->silent = DEFAULT_SILENT;
videorate->new_pref = DEFAULT_NEW_PREF;
+
+ videorate->from_rate_numerator = 0;
+ videorate->from_rate_denominator = 0;
+ videorate->to_rate_numerator = 0;
+ videorate->to_rate_denominator = 0;
}
+/* flush the oldest buffer */
static GstFlowReturn
+gst_video_rate_flush_prev (GstVideoRate * videorate, gboolean duplicate)
+{
+ GstFlowReturn res;
+ GstBuffer *outbuf;
+ GstClockTime push_ts;
+
+ if (!videorate->prevbuf)
+ goto eos_before_buffers;
+
+ /* make sure we can write to the metadata */
+ outbuf = gst_buffer_make_writable (gst_buffer_ref (videorate->prevbuf));
+
+ GST_BUFFER_OFFSET (outbuf) = videorate->out;
+ GST_BUFFER_OFFSET_END (outbuf) = videorate->out + 1;
+
+ if (videorate->discont) {
+ GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
+ videorate->discont = FALSE;
+ } else
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_DISCONT);
+
+ if (duplicate)
+ GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
+ else
+ GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);
+
+ /* this is the timestamp we put on the buffer */
+ push_ts = videorate->next_ts;
+
+ videorate->out++;
+ videorate->out_frame_count++;
+ if (videorate->to_rate_numerator) {
+ /* interpolate next expected timestamp in the segment */
+ videorate->next_ts =
+ videorate->segment.accum + videorate->segment.start +
+ videorate->base_ts + gst_util_uint64_scale (videorate->out_frame_count,
+ videorate->to_rate_denominator * GST_SECOND,
+ videorate->to_rate_numerator);
+ GST_BUFFER_DURATION (outbuf) = videorate->next_ts - push_ts;
+ }
+
+ /* adapt for looping, bring back to time in current segment. */
+ GST_BUFFER_TIMESTAMP (outbuf) = push_ts - videorate->segment.accum;
+
+ gst_buffer_set_caps (outbuf, GST_PAD_CAPS (videorate->srcpad));
+
+ GST_LOG_OBJECT (videorate,
+ "old is best, dup, pushing buffer outgoing ts %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (push_ts));
+
+ res = gst_pad_push (videorate->srcpad, outbuf);
+
+ return res;
+
+ /* WARNINGS */
+eos_before_buffers:
+ {
+ GST_INFO_OBJECT (videorate, "got EOS before any buffer was received");
+ return GST_FLOW_OK;
+ }
+}
+
+static void
+gst_video_rate_swap_prev (GstVideoRate * videorate, GstBuffer * buffer,
+ gint64 time)
+{
+ GST_LOG_OBJECT (videorate, "swap_prev: storing buffer %p in prev", buffer);
+ if (videorate->prevbuf)
+ gst_buffer_unref (videorate->prevbuf);
+ videorate->prevbuf = buffer;
+ videorate->prev_ts = time;
+}
+
+static void
+gst_video_rate_notify_drop (GstVideoRate * videorate)
+{
+#if !GLIB_CHECK_VERSION(2,26,0)
+ g_object_notify ((GObject *) videorate, "drop");
+#else
+ g_object_notify_by_pspec ((GObject *) videorate, pspec_drop);
+#endif
+}
+
+static void
+gst_video_rate_notify_duplicate (GstVideoRate * videorate)
+{
+#if !GLIB_CHECK_VERSION(2,26,0)
+ g_object_notify ((GObject *) videorate, "duplicate");
+#else
+ g_object_notify_by_pspec ((GObject *) videorate, pspec_duplicate);
+#endif
+}
+
+#define MAGIC_LIMIT 25
+static gboolean
gst_video_rate_event (GstPad * pad, GstEvent * event)
{
GstVideoRate *videorate;
+ gboolean ret;
- videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad));
+ videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad));
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_NEWSEGMENT:
{
- gint64 start, stop, base;
- gdouble rate;
+ gint64 start, stop, time;
+ gdouble rate, arate;
gboolean update;
GstFormat format;
- gst_event_parse_new_segment (event, &update, &rate, &format, &start,
- &stop, &base);
+ gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format,
+ &start, &stop, &time);
+
+ if (format != GST_FORMAT_TIME)
+ goto format_error;
+
+ GST_DEBUG_OBJECT (videorate, "handle NEWSEGMENT");
+
+ /* close up the previous segment, if appropriate */
+ if (!update && videorate->prevbuf) {
+ gint count = 0;
+ GstFlowReturn res;
+
+ res = GST_FLOW_OK;
+ /* fill up to the end of current segment,
+ * or only send out the stored buffer if there is no specific stop.
+ * regardless, prevent going loopy in strange cases */
+ while (res == GST_FLOW_OK && count <= MAGIC_LIMIT &&
+ ((GST_CLOCK_TIME_IS_VALID (videorate->segment.stop) &&
+ videorate->next_ts - videorate->segment.accum
+ < videorate->segment.stop)
+ || count < 1)) {
+ res = gst_video_rate_flush_prev (videorate, count > 0);
+ count++;
+ }
+ if (count > 1) {
+ videorate->dup += count - 1;
+ if (!videorate->silent)
+ gst_video_rate_notify_duplicate (videorate);
+ } else if (count == 0) {
+ videorate->drop++;
+ if (!videorate->silent)
+ gst_video_rate_notify_drop (videorate);
+ }
+ /* clean up for the new one; _chain will resume from the new start */
+ videorate->base_ts = 0;
+ videorate->out_frame_count = 0;
+ gst_video_rate_swap_prev (videorate, NULL, 0);
+ videorate->next_ts = GST_CLOCK_TIME_NONE;
+ }
+
+ /* We just want to update the accumulated stream_time */
+ gst_segment_set_newsegment_full (&videorate->segment, update, rate, arate,
+ format, start, stop, time);
- if (format != GST_FORMAT_TIME) {
- GST_WARNING ("Got discont but doesn't have GST_FORMAT_TIME value");
- } else {
- /*
- We just want to update the accumulated stream_time.
- */
- videorate->segment_accum +=
- videorate->segment_stop - videorate->segment_start;
- videorate->segment_start = start;
- videorate->segment_stop = stop;
- GST_DEBUG_OBJECT (videorate, "Updated segment_accum:%" GST_TIME_FORMAT
- " segment_start:%" GST_TIME_FORMAT " segment_stop:%"
- GST_TIME_FORMAT, GST_TIME_ARGS (videorate->segment_accum),
- GST_TIME_ARGS (videorate->segment_start),
- GST_TIME_ARGS (videorate->segment_stop));
+ GST_DEBUG_OBJECT (videorate, "updated segment: %" GST_SEGMENT_FORMAT,
+ &videorate->segment);
+ break;
+ }
+ case GST_EVENT_EOS:{
+ gint count = 0;
+ GstFlowReturn res = GST_FLOW_OK;
+
+ GST_DEBUG_OBJECT (videorate, "Got EOS");
+
+ /* If the segment has a stop position, fill the segment */
+ if (GST_CLOCK_TIME_IS_VALID (videorate->segment.stop)) {
+ /* fill up to the end of current segment,
+ * or only send out the stored buffer if there is no specific stop.
+ * regardless, prevent going loopy in strange cases */
+ while (res == GST_FLOW_OK && count <= MAGIC_LIMIT &&
+ ((videorate->next_ts - videorate->segment.accum <
+ videorate->segment.stop)
+ || count < 1)) {
+ res = gst_video_rate_flush_prev (videorate, count > 0);
+ count++;
+ }
+ } else if (videorate->prevbuf) {
+ /* Output at least one frame but if the buffer duration is valid, output
+ * enough frames to use the complete buffer duration */
+ if (GST_BUFFER_DURATION_IS_VALID (videorate->prevbuf)) {
+ GstClockTime end_ts =
+ videorate->next_ts + GST_BUFFER_DURATION (videorate->prevbuf);
+
+ while (res == GST_FLOW_OK && count <= MAGIC_LIMIT &&
+ ((videorate->next_ts - videorate->segment.accum < end_ts)
+ || count < 1)) {
+ res = gst_video_rate_flush_prev (videorate, count > 0);
+ count++;
+ }
+ } else {
+ res = gst_video_rate_flush_prev (videorate, FALSE);
+ count = 1;
+ }
+ }
+
+ if (count > 1) {
+ videorate->dup += count - 1;
+ if (!videorate->silent)
+ gst_video_rate_notify_duplicate (videorate);
+ } else if (count == 0) {
+ videorate->drop++;
+ if (!videorate->silent)
+ gst_video_rate_notify_drop (videorate);
}
break;
}
case GST_EVENT_FLUSH_STOP:
+ /* also resets the segment */
+ GST_DEBUG_OBJECT (videorate, "Got FLUSH_STOP");
+ gst_video_rate_reset (videorate);
+ break;
+ default:
+ break;
+ }
+
+ ret = gst_pad_push_event (videorate->srcpad, event);
+
+done:
+ gst_object_unref (videorate);
+
+ return ret;
+
+ /* ERRORS */
+format_error:
+ {
+ GST_WARNING_OBJECT (videorate,
+ "Got segment but doesn't have GST_FORMAT_TIME value");
+ gst_event_unref (event);
+ ret = FALSE;
+ goto done;
+ }
+}
+
+static gboolean
+gst_video_rate_query (GstPad * pad, GstQuery * query)
+{
+ GstVideoRate *videorate;
+ gboolean res = FALSE;
+
+ videorate = GST_VIDEO_RATE (gst_pad_get_parent (pad));
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_LATENCY:
{
- gst_video_rate_blank_data (videorate);
+ GstClockTime min, max;
+ gboolean live;
+ guint64 latency;
+ GstPad *peer;
+
+ if ((peer = gst_pad_get_peer (videorate->sinkpad))) {
+ if ((res = gst_pad_query (peer, query))) {
+ gst_query_parse_latency (query, &live, &min, &max);
+
+ GST_DEBUG_OBJECT (videorate, "Peer latency: min %"
+ GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (min), GST_TIME_ARGS (max));
+
+ if (videorate->from_rate_numerator != 0) {
+ /* add latency. We don't really know since we hold on to the frames
+ * until we get a next frame, which can be anything. We assume
+ * however that this will take from_rate time. */
+ latency = gst_util_uint64_scale (GST_SECOND,
+ videorate->from_rate_denominator,
+ videorate->from_rate_numerator);
+ } else {
+ /* no input framerate, we don't know */
+ latency = 0;
+ }
+
+ GST_DEBUG_OBJECT (videorate, "Our latency: %"
+ GST_TIME_FORMAT, GST_TIME_ARGS (latency));
+
+ min += latency;
+ if (max != -1)
+ max += latency;
+
+ GST_DEBUG_OBJECT (videorate, "Calculated total latency : min %"
+ GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (min), GST_TIME_ARGS (max));
+
+ gst_query_set_latency (query, live, min, max);
+ }
+ gst_object_unref (peer);
+ }
+ break;
}
default:
+ res = gst_pad_query_default (pad, query);
break;
}
+ gst_object_unref (videorate);
- return gst_pad_event_default (pad, event);
+ return res;
}
static GstFlowReturn
{
GstVideoRate *videorate;
GstFlowReturn res = GST_FLOW_OK;
+ GstClockTime intime, in_ts, in_dur;
videorate = GST_VIDEO_RATE (GST_PAD_PARENT (pad));
- if (videorate->from_rate_numerator == 0 ||
- videorate->from_rate_denominator == 0 ||
- videorate->to_rate_denominator == 0 || videorate->to_rate_numerator == 0)
- return GST_FLOW_NOT_NEGOTIATED;
+ /* make sure the denominators are not 0 */
+ if (videorate->from_rate_denominator == 0 ||
+ videorate->to_rate_denominator == 0)
+ goto not_negotiated;
+
+ in_ts = GST_BUFFER_TIMESTAMP (buffer);
+ in_dur = GST_BUFFER_DURATION (buffer);
+
+ if (G_UNLIKELY (in_ts == GST_CLOCK_TIME_NONE)) {
+ in_ts = videorate->last_ts;
+ if (G_UNLIKELY (in_ts == GST_CLOCK_TIME_NONE))
+ goto invalid_buffer;
+ }
+
+ /* get the time of the next expected buffer timestamp, we use this when the
+ * next buffer has -1 as a timestamp */
+ videorate->last_ts = in_ts;
+ if (in_dur != GST_CLOCK_TIME_NONE)
+ videorate->last_ts += in_dur;
+
+ GST_DEBUG_OBJECT (videorate, "got buffer with timestamp %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (in_ts));
+
+ /* the input time is the time in the segment + all previously accumulated
+ * segments */
+ intime = in_ts + videorate->segment.accum;
- /* pull in 2 buffers */
+ /* we need to have two buffers to compare */
if (videorate->prevbuf == NULL) {
- /* We're sure it's a GstBuffer here */
- videorate->prevbuf = buffer;
- videorate->prev_ts =
- GST_BUFFER_TIMESTAMP (buffer) - videorate->segment_start +
- videorate->segment_accum;
- videorate->next_ts = 0;
+ gst_video_rate_swap_prev (videorate, buffer, intime);
+ videorate->in++;
+ if (!GST_CLOCK_TIME_IS_VALID (videorate->next_ts)) {
+ /* new buffer, we expect to output a buffer that matches the first
+ * timestamp in the segment */
+ if (videorate->skip_to_first) {
+ videorate->next_ts = intime;
+ videorate->base_ts = in_ts - videorate->segment.start;
+ videorate->out_frame_count = 0;
+ } else {
+ videorate->next_ts =
+ videorate->segment.start + videorate->segment.accum;
+ }
+ }
} else {
- GstClockTime prevtime, intime;
+ GstClockTime prevtime;
gint count = 0;
gint64 diff1, diff2;
prevtime = videorate->prev_ts;
- intime =
- GST_BUFFER_TIMESTAMP (buffer) - videorate->segment_start +
- videorate->segment_accum;
GST_LOG_OBJECT (videorate,
"BEGINNING prev buf %" GST_TIME_FORMAT " new buf %" GST_TIME_FORMAT
videorate->in++;
+ /* drop new buffer if it's before previous one */
+ if (intime < prevtime) {
+ GST_DEBUG_OBJECT (videorate,
+ "The new buffer (%" GST_TIME_FORMAT
+ ") is before the previous buffer (%"
+ GST_TIME_FORMAT "). Dropping new buffer.",
+ GST_TIME_ARGS (intime), GST_TIME_ARGS (prevtime));
+ videorate->drop++;
+ if (!videorate->silent)
+ gst_video_rate_notify_drop (videorate);
+ gst_buffer_unref (buffer);
+ goto done;
+ }
+
/* got 2 buffers, see which one is the best */
do {
+
diff1 = prevtime - videorate->next_ts;
diff2 = intime - videorate->next_ts;
GST_TIME_ARGS (videorate->next_ts));
/* output first one when its the best */
- if (diff1 < diff2) {
- GstBuffer *outbuf;
- GstClockTime push_ts;
-
+ if (diff1 <= diff2) {
count++;
- outbuf =
- gst_buffer_create_sub (videorate->prevbuf, 0,
- GST_BUFFER_SIZE (videorate->prevbuf));
- GST_BUFFER_TIMESTAMP (outbuf) = videorate->next_ts;
- push_ts = GST_BUFFER_TIMESTAMP (outbuf);
- videorate->out++;
- if (videorate->to_rate_numerator) {
- videorate->next_ts =
- gst_util_uint64_scale_int (videorate->out * GST_SECOND,
- videorate->to_rate_denominator, videorate->to_rate_numerator);
- GST_BUFFER_DURATION (outbuf) =
- videorate->next_ts - GST_BUFFER_TIMESTAMP (outbuf);
- }
- /* adapt for looping */
- GST_BUFFER_TIMESTAMP (outbuf) -= videorate->segment_accum;
- gst_buffer_set_caps (outbuf, GST_PAD_CAPS (videorate->srcpad));
-
- GST_LOG_OBJECT (videorate,
- "old is best, dup, pushing buffer outgoing ts %" GST_TIME_FORMAT,
- GST_TIME_ARGS (push_ts));
- if ((res = gst_pad_push (videorate->srcpad, outbuf)) != GST_FLOW_OK) {
- GST_WARNING_OBJECT (videorate, "couldn't push buffer on srcpad:%d",
- res);
+ /* on error the _flush function posted a warning already */
+ if ((res =
+ gst_video_rate_flush_prev (videorate,
+ count > 1)) != GST_FLOW_OK) {
+ gst_buffer_unref (buffer);
goto done;
}
-
- GST_LOG_OBJECT (videorate,
- "old is best, dup, pushed buffer outgoing ts %" GST_TIME_FORMAT,
- GST_TIME_ARGS (push_ts));
}
- /* continue while the first one was the best */
+ /* continue while the first one was the best, if they were equal avoid
+ * going into an infinite loop */
}
while (diff1 < diff2);
if (count > 1) {
videorate->dup += count - 1;
if (!videorate->silent)
- g_object_notify (G_OBJECT (videorate), "duplicate");
+ gst_video_rate_notify_duplicate (videorate);
}
/* if we didn't output the first buffer, we have a drop */
else if (count == 0) {
videorate->drop++;
+
if (!videorate->silent)
- g_object_notify (G_OBJECT (videorate), "drop");
+ gst_video_rate_notify_drop (videorate);
+
GST_LOG_OBJECT (videorate,
"new is best, old never used, drop, outgoing ts %"
GST_TIME_FORMAT, GST_TIME_ARGS (videorate->next_ts));
GST_LOG_OBJECT (videorate,
"END, putting new in old, diff1 %" GST_TIME_FORMAT
", diff2 %" GST_TIME_FORMAT ", next_ts %" GST_TIME_FORMAT
- ", in %lld, out %lld, drop %lld, dup %lld", GST_TIME_ARGS (diff1),
+ ", in %" G_GUINT64_FORMAT ", out %" G_GUINT64_FORMAT ", drop %"
+ G_GUINT64_FORMAT ", dup %" G_GUINT64_FORMAT, GST_TIME_ARGS (diff1),
GST_TIME_ARGS (diff2), GST_TIME_ARGS (videorate->next_ts),
videorate->in, videorate->out, videorate->drop, videorate->dup);
/* swap in new one when it's the best */
- gst_buffer_unref (videorate->prevbuf);
- videorate->prevbuf = buffer;
- videorate->prev_ts =
- GST_BUFFER_TIMESTAMP (buffer) - videorate->segment_start +
- videorate->segment_accum;
+ gst_video_rate_swap_prev (videorate, buffer, intime);
}
done:
-
return res;
+
+ /* ERRORS */
+not_negotiated:
+ {
+ GST_WARNING_OBJECT (videorate, "no framerate negotiated");
+ gst_buffer_unref (buffer);
+ res = GST_FLOW_NOT_NEGOTIATED;
+ goto done;
+ }
+
+invalid_buffer:
+ {
+ GST_WARNING_OBJECT (videorate,
+ "Got buffer with GST_CLOCK_TIME_NONE timestamp, discarding it");
+ gst_buffer_unref (buffer);
+ goto done;
+ }
}
static void
case ARG_NEW_PREF:
videorate->new_pref = g_value_get_double (value);
break;
+ case ARG_SKIP_TO_FIRST:
+ videorate->skip_to_first = g_value_get_boolean (value);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
case ARG_NEW_PREF:
g_value_set_double (value, videorate->new_pref);
break;
+ case ARG_SKIP_TO_FIRST:
+ g_value_set_boolean (value, videorate->skip_to_first);
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
break;
videorate = GST_VIDEO_RATE (element);
switch (transition) {
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ videorate->discont = TRUE;
+ videorate->last_ts = -1;
+ break;
default:
break;
}
switch (transition) {
case GST_STATE_CHANGE_PAUSED_TO_READY:
- gst_video_rate_blank_data (videorate);
+ gst_video_rate_reset (videorate);
break;
default:
break;