/* FILL ME */
};
+typedef struct _GstPadPushCache GstPadPushCache;
+
+struct _GstPadPushCache
+{
+ GstPad *peer; /* reffed peer pad */
+ GstCaps *caps; /* caps for this link */
+};
+
+static GstPadPushCache _pad_cache_invalid = { NULL, };
+
+#define PAD_CACHE_INVALID (&_pad_cache_invalid)
+
#define GST_PAD_GET_PRIVATE(obj) \
(G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_PAD, GstPadPrivate))
struct _GstPadPrivate
{
GstPadChainListFunction chainlistfunc;
+
+ GstPadPushCache *cache_ptr;
};
static void gst_pad_dispose (GObject * object);
static gboolean gst_pad_activate_default (GstPad * pad);
static gboolean gst_pad_acceptcaps_default (GstPad * pad, GstCaps * caps);
-#ifndef GST_DISABLE_LOADSAVE
+#if !defined(GST_DISABLE_LOADSAVE) && !defined(GST_REMOVE_DEPRECATED)
+#ifdef GST_DISABLE_DEPRECATED
+#include <libxml/parser.h>
+#endif
static xmlNodePtr gst_pad_save_thyself (GstObject * object, xmlNodePtr parent);
+void gst_pad_load_and_link (xmlNodePtr self, GstObject * parent);
#endif
/* Some deprecated stuff that we need inside here for
static GstObjectClass *parent_class = NULL;
static guint gst_pad_signals[LAST_SIGNAL] = { 0 };
+static GParamSpec *pspec_caps = NULL;
+
/* quarks for probe signals */
static GQuark buffer_quark;
static GQuark event_quark;
*
* Returns: a static string with the name of the flow return.
*/
-G_CONST_RETURN gchar *
+const gchar *
gst_flow_get_name (GstFlowReturn ret)
{
gint i;
NULL, gst_marshal_BOOLEAN__POINTER, G_TYPE_BOOLEAN, 1,
GST_TYPE_MINI_OBJECT);
- g_object_class_install_property (gobject_class, PAD_PROP_CAPS,
- g_param_spec_boxed ("caps", "Caps", "The capabilities of the pad",
- GST_TYPE_CAPS, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+ pspec_caps = g_param_spec_boxed ("caps", "Caps",
+ "The capabilities of the pad", GST_TYPE_CAPS,
+ G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
+ g_object_class_install_property (gobject_class, PAD_PROP_CAPS, pspec_caps);
+
g_object_class_install_property (gobject_class, PAD_PROP_DIRECTION,
g_param_spec_enum ("direction", "Direction", "The direction of the pad",
GST_TYPE_PAD_DIRECTION, GST_PAD_UNKNOWN,
"The GstPadTemplate of this pad", GST_TYPE_PAD_TEMPLATE,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
-#ifndef GST_DISABLE_LOADSAVE
- gstobject_class->save_thyself = GST_DEBUG_FUNCPTR (gst_pad_save_thyself);
+#if !defined(GST_DISABLE_LOADSAVE) && !defined(GST_REMOVE_DEPRECATED)
+ gstobject_class->save_thyself =
+ ((gpointer (*)(GstObject * object,
+ gpointer self)) * GST_DEBUG_FUNCPTR (gst_pad_save_thyself));
#endif
gstobject_class->path_string_separator = ".";
GST_DEBUG_REGISTER_FUNCPTR (gst_pad_iterate_internal_links_default);
GST_DEBUG_REGISTER_FUNCPTR (gst_pad_acceptcaps_default);
+ /* from gstutils.c */
+ GST_DEBUG_REGISTER_FUNCPTR (gst_pad_get_fixed_caps_func);
+
klass->have_data = default_have_data;
}
switch (prop_id) {
case PAD_PROP_DIRECTION:
- GST_PAD_DIRECTION (object) = g_value_get_enum (value);
+ GST_PAD_DIRECTION (object) = (GstPadDirection) g_value_get_enum (value);
break;
case PAD_PROP_TEMPLATE:
gst_pad_set_pad_template (GST_PAD_CAST (object),
* will be assigned.
* This function makes a copy of the name so you can safely free the name.
*
- * Returns: a new #GstPad, or NULL in case of an error.
+ * Returns: (transfer full): a new #GstPad, or NULL in case of an error.
*
* MT safe.
*/
* will be assigned.
* This function makes a copy of the name so you can safely free the name.
*
- * Returns: a new #GstPad, or NULL in case of an error.
+ * Returns: (transfer full): a new #GstPad, or NULL in case of an error.
*/
GstPad *
gst_pad_new_from_template (GstPadTemplate * templ, const gchar * name)
* will be assigned.
* This function makes a copy of the name so you can safely free the name.
*
- * Returns: a new #GstPad, or NULL in case of an error.
+ * Returns: (transfer full): a new #GstPad, or NULL in case of an error.
*/
GstPad *
gst_pad_new_from_static_template (GstStaticPadTemplate * templ,
case GST_ACTIVATE_NONE:
GST_OBJECT_LOCK (pad);
GST_DEBUG_OBJECT (pad, "setting ACTIVATE_MODE NONE, set flushing");
+ _priv_gst_pad_invalidate_cache (pad);
GST_PAD_SET_FLUSHING (pad);
GST_PAD_ACTIVATE_MODE (pad) = new_mode;
/* unlock blocked pads so element can resume and stop */
GST_OBJECT_LOCK (pad);
GST_CAT_INFO_OBJECT (GST_CAT_PADS, pad, "failed to %s in pull mode",
active ? "activate" : "deactivate");
+ _priv_gst_pad_invalidate_cache (pad);
GST_PAD_SET_FLUSHING (pad);
GST_PAD_ACTIVATE_MODE (pad) = old;
GST_OBJECT_UNLOCK (pad);
GST_OBJECT_LOCK (pad);
GST_CAT_INFO_OBJECT (GST_CAT_PADS, pad, "failed to %s in push mode",
active ? "activate" : "deactivate");
+ _priv_gst_pad_invalidate_cache (pad);
GST_PAD_SET_FLUSHING (pad);
GST_PAD_ACTIVATE_MODE (pad) = old;
GST_OBJECT_UNLOCK (pad);
* @blocked: boolean indicating whether the pad should be blocked or unblocked
* @callback: #GstPadBlockCallback that will be called when the
* operation succeeds
- * @user_data: user data passed to the callback
+ * @user_data: (closure): user data passed to the callback
* @destroy_data: #GDestroyNotify for user_data
*
* Blocks or unblocks the dataflow on a pad. The provided callback
* You can pass NULL as the callback to make this call block. Be careful with
* this blocking call as it might not return for reasons stated above.
*
- * Returns: TRUE if the pad could be blocked. This function can fail if the
+ * <note>
+ * Pad block handlers are only called for source pads in push mode
+ * and sink pads in pull mode.
+ * </note>
+ *
+ * Returns: %TRUE if the pad could be blocked. This function can fail if the
* wrong parameters were passed or the pad was already in the requested state.
*
* MT safe.
if (G_UNLIKELY (was_blocked == blocked))
goto had_right_state;
+ if (G_UNLIKELY (
+ (GST_PAD_ACTIVATE_MODE (pad) == GST_ACTIVATE_PUSH) &&
+ (GST_PAD_DIRECTION (pad) != GST_PAD_SRC)))
+ goto wrong_direction;
+ if (G_UNLIKELY (
+ (GST_PAD_ACTIVATE_MODE (pad) == GST_ACTIVATE_PULL) &&
+ (GST_PAD_DIRECTION (pad) != GST_PAD_SINK)))
+ goto wrong_direction;
+
if (blocked) {
GST_CAT_LOG_OBJECT (GST_CAT_SCHEDULING, pad, "blocking pad");
+ _priv_gst_pad_invalidate_cache (pad);
GST_OBJECT_FLAG_SET (pad, GST_PAD_BLOCKED);
if (pad->block_destroy_data && pad->block_data)
return TRUE;
+/* Errors */
+
had_right_state:
{
GST_CAT_LOG_OBJECT (GST_CAT_SCHEDULING, pad,
return FALSE;
}
+wrong_direction:
+ {
+ GST_CAT_INFO_OBJECT (GST_CAT_SCHEDULING, pad, "pad block on the wrong pad, "
+ "block src pads in push mode and sink pads in pull mode.");
+ GST_OBJECT_UNLOCK (pad);
+
+ return FALSE;
+ }
}
/**
* @blocked: boolean indicating whether the pad should be blocked or unblocked
* @callback: #GstPadBlockCallback that will be called when the
* operation succeeds
- * @user_data: user data passed to the callback
+ * @user_data: (closure): user data passed to the callback
*
* Blocks or unblocks the dataflow on a pad. The provided callback
* is called when the operation succeeds; this happens right before the next
* You can pass NULL as the callback to make this call block. Be careful with
* this blocking call as it might not return for reasons stated above.
*
+ * <note>
+ * Pad block handlers are only called for source pads in push mode
+ * and sink pads in pull mode.
+ * </note>
+ *
* Returns: TRUE if the pad could be blocked. This function can fail if the
* wrong parameters were passed or the pad was already in the requested state.
*
* a shortcut for gst_pad_set_blocked_async() with a NULL
* callback.
*
+ * <note>
+ * Pad blocks are only possible for source pads in push mode
+ * and sink pads in pull mode.
+ * </note>
+ *
* Returns: TRUE if the pad could be blocked. This function can fail if the
* wrong parameters were passed or the pad was already in the requested state.
*
* Get an array of supported queries that can be performed
* on this pad.
*
- * Returns: a zero-terminated array of #GstQueryType.
+ * Returns: (transfer none) (array zero-terminated=1): a zero-terminated array
+ * of #GstQueryType.
*/
const GstQueryType *
gst_pad_get_query_types (GstPad * pad)
* Invoke the default dispatcher for the query types on
* the pad.
*
- * Returns: an zero-terminated array of #GstQueryType, or NULL if none of the
- * internally-linked pads has a query types function.
+ * Returns: (transfer none) (array zero-terminated=1): a zero-terminated array
+ * of #GstQueryType, or NULL if none of the internally-linked pads has a
+ * query types function.
*/
const GstQueryType *
gst_pad_get_query_types_default (GstPad * pad)
* Sets the given acceptcaps function for the pad. The acceptcaps function
* will be called to check if the pad can accept the given caps. Setting the
* acceptcaps function to NULL restores the default behaviour of allowing
- * any caps that matches the caps from gst_pad_get_caps.
+ * any caps that matches the caps from gst_pad_get_caps().
*/
void
gst_pad_set_acceptcaps_function (GstPad * pad,
GST_PAD_UNLINKFUNC (sinkpad) (sinkpad);
}
+ _priv_gst_pad_invalidate_cache (srcpad);
+
/* first clear peers */
GST_PAD_PEER (srcpad) = NULL;
GST_PAD_PEER (sinkpad) = NULL;
* pads
*/
static gboolean
-gst_pad_link_check_compatible_unlocked (GstPad * src, GstPad * sink)
+gst_pad_link_check_compatible_unlocked (GstPad * src, GstPad * sink,
+ GstPadLinkCheck flags)
{
- GstCaps *srccaps;
- GstCaps *sinkcaps;
+ GstCaps *srccaps = NULL;
+ GstCaps *sinkcaps = NULL;
gboolean compatible = FALSE;
- srccaps = gst_pad_get_caps_unlocked (src);
- sinkcaps = gst_pad_get_caps_unlocked (sink);
+ if (!(flags & (GST_PAD_LINK_CHECK_CAPS | GST_PAD_LINK_CHECK_TEMPLATE_CAPS)))
+ return TRUE;
+
+ /* Doing the expensive caps checking takes priority over only checking the template caps */
+ if (flags & GST_PAD_LINK_CHECK_CAPS) {
+ srccaps = gst_pad_get_caps_unlocked (src);
+ sinkcaps = gst_pad_get_caps_unlocked (sink);
+ } else {
+ /* If one of the two pads doesn't have a template, consider the intersection
+ * as valid. */
+ if (G_UNLIKELY ((GST_PAD_PAD_TEMPLATE (src) == NULL)
+ || (GST_PAD_PAD_TEMPLATE (sink) == NULL))) {
+ compatible = TRUE;
+ goto done;
+ }
+ srccaps = gst_caps_ref (GST_PAD_TEMPLATE_CAPS (GST_PAD_PAD_TEMPLATE (src)));
+ sinkcaps =
+ gst_caps_ref (GST_PAD_TEMPLATE_CAPS (GST_PAD_PAD_TEMPLATE (sink)));
+ }
- GST_CAT_DEBUG (GST_CAT_CAPS, "src caps %" GST_PTR_FORMAT, srccaps);
- GST_CAT_DEBUG (GST_CAT_CAPS, "sink caps %" GST_PTR_FORMAT, sinkcaps);
+ GST_CAT_DEBUG_OBJECT (GST_CAT_CAPS, src, "src caps %" GST_PTR_FORMAT,
+ srccaps);
+ GST_CAT_DEBUG_OBJECT (GST_CAT_CAPS, sink, "sink caps %" GST_PTR_FORMAT,
+ sinkcaps);
/* if we have caps on both pads we can check the intersection. If one
* of the caps is NULL, we return TRUE. */
/* call with the two pads unlocked, when this function returns GST_PAD_LINK_OK,
* the two pads will be locked in the srcpad, sinkpad order. */
static GstPadLinkReturn
-gst_pad_link_prepare (GstPad * srcpad, GstPad * sinkpad)
+gst_pad_link_prepare (GstPad * srcpad, GstPad * sinkpad, GstPadLinkCheck flags)
{
GST_CAT_INFO (GST_CAT_PADS, "trying to link %s:%s and %s:%s",
GST_DEBUG_PAD_NAME (srcpad), GST_DEBUG_PAD_NAME (sinkpad));
/* check hierarchy, pads can only be linked if the grandparents
* are the same. */
- if (!gst_pad_link_check_hierarchy (srcpad, sinkpad))
+ if ((flags & GST_PAD_LINK_CHECK_HIERARCHY)
+ && !gst_pad_link_check_hierarchy (srcpad, sinkpad))
goto wrong_hierarchy;
/* check pad caps for non-empty intersection */
- if (!gst_pad_link_check_compatible_unlocked (srcpad, sinkpad))
+ if (!gst_pad_link_check_compatible_unlocked (srcpad, sinkpad, flags))
goto no_format;
/* FIXME check pad scheduling for non-empty intersection */
* @sinkpad: the sink #GstPad.
*
* Checks if the source pad and the sink pad are compatible so they can be
- * linked.
+ * linked.
*
* Returns: TRUE if the pads can be linked.
*/
/* gst_pad_link_prepare does everything for us, we only release the locks
* on the pads that it gets us. If this function returns !OK the locks are not
* taken anymore. */
- result = gst_pad_link_prepare (srcpad, sinkpad);
+ result = gst_pad_link_prepare (srcpad, sinkpad, GST_PAD_LINK_CHECK_DEFAULT);
if (result != GST_PAD_LINK_OK)
goto done;
}
/**
- * gst_pad_link:
+ * gst_pad_link_full:
* @srcpad: the source #GstPad to link.
* @sinkpad: the sink #GstPad to link.
+ * @flags: the checks to validate when linking
*
* Links the source pad and the sink pad.
*
+ * This variant of gst_pad_link() provides a more granular control on the
+ * checks being done when linking. While providing some considerable speedups
+ * the caller of this method must be aware that wrong usage of those flags
+ * can cause severe issues. Refer to the documentation of #GstPadLinkCheck
+ * for more information.
+ *
+ * MT Safe.
+ *
* Returns: A result code indicating if the connection worked or
* what went wrong.
*
- * MT Safe.
+ * Since: 0.10.30
*/
GstPadLinkReturn
-gst_pad_link (GstPad * srcpad, GstPad * sinkpad)
+gst_pad_link_full (GstPad * srcpad, GstPad * sinkpad, GstPadLinkCheck flags)
{
GstPadLinkReturn result;
GstElement *parent;
}
/* prepare will also lock the two pads */
- result = gst_pad_link_prepare (srcpad, sinkpad);
+ result = gst_pad_link_prepare (srcpad, sinkpad, flags);
if (result != GST_PAD_LINK_OK)
goto done;
return result;
}
+/**
+ * gst_pad_link:
+ * @srcpad: the source #GstPad to link.
+ * @sinkpad: the sink #GstPad to link.
+ *
+ * Links the source pad and the sink pad.
+ *
+ * Returns: A result code indicating if the connection worked or
+ * what went wrong.
+ *
+ * MT Safe.
+ */
+GstPadLinkReturn
+gst_pad_link (GstPad * srcpad, GstPad * sinkpad)
+{
+ return gst_pad_link_full (srcpad, sinkpad, GST_PAD_LINK_CHECK_DEFAULT);
+}
+
static void
gst_pad_set_pad_template (GstPad * pad, GstPadTemplate * templ)
{
*
* Gets the template for @pad.
*
- * Returns: the #GstPadTemplate from which this pad was instantiated, or %NULL
- * if this pad has no template.
+ * Returns: (transfer none): the #GstPadTemplate from which this pad was
+ * instantiated, or %NULL if this pad has no template.
*
* FIXME: currently returns an unrefcounted padtemplate.
*/
}
/* FIXME-0.11: what about making this the default and using
- * gst_caps_make_writable() explicitely where needed
+ * gst_caps_make_writable() explicitly where needed
*/
/**
* gst_pad_get_caps_reffed:
* Gets the capabilities this pad can produce or consume. Preferred function if
* one only wants to read or intersect the caps.
*
- * Returns: the caps of the pad with incremented ref-count.
+ * Returns: (transfer full): the caps of the pad with incremented ref-count.
*
* Since: 0.10.26
*/
* the pad's get_caps function;
* this returns the pad template caps if not explicitly set.
*
- * Returns: a newly allocated copy of the #GstCaps of this pad.
+ * Returns: (transfer full): a newly allocated copy of the #GstCaps of this pad
*
* MT safe.
*/
}
/* FIXME-0.11: what about making this the default and using
- * gst_caps_make_writable() explicitely where needed
+ * gst_caps_make_writable() explicitly where needed
*/
/**
* gst_pad_peer_get_caps_reffed:
* Gets the capabilities of the peer connected to this pad. Preferred function
* if one only wants to read or intersect the caps.
*
- * Returns: the caps of the pad with incremented ref-count.
+ * Returns: (transfer full): the caps of the pad with incremented ref-count
*
* Since: 0.10.26
*/
* Gets the capabilities of the peer connected to this pad. Similar to
* gst_pad_get_caps().
*
- * Returns: a newly allocated copy of the #GstCaps of the peer pad. Use
- * gst_caps_unref() to get rid of it. This function returns %NULL if there is
- * no peer pad.
+ * Returns: (transfer full): a newly allocated copy of the #GstCaps of the
+ * peer pad. Use gst_caps_unref() to get rid of it. This function
+ * returns %NULL if there is no peer pad.
*/
GstCaps *
gst_pad_peer_get_caps (GstPad * pad)
gst_pad_fixate_caps (GstPad * pad, GstCaps * caps)
{
GstPadFixateCapsFunction fixatefunc;
- guint len;
+ GstStructure *s;
g_return_if_fail (GST_IS_PAD (pad));
g_return_if_fail (caps != NULL);
g_return_if_fail (!gst_caps_is_empty (caps));
+ /* FIXME-0.11: do not allow fixating any-caps
+ * g_return_if_fail (!gst_caps_is_any (caps));
+ */
- if (gst_caps_is_fixed (caps))
+ if (gst_caps_is_fixed (caps) || gst_caps_is_any (caps))
return;
fixatefunc = GST_PAD_FIXATECAPSFUNC (pad);
}
/* default fixation */
- len = gst_caps_get_size (caps);
- if (len > 0) {
- GstStructure *s = gst_caps_get_structure (caps, 0);
-
- gst_structure_foreach (s, gst_pad_default_fixate, s);
- }
-
- if (len > 1) {
- gst_caps_truncate (caps);
- }
+ gst_caps_truncate (caps);
+ s = gst_caps_get_structure (caps, 0);
+ gst_structure_foreach (s, gst_pad_default_fixate, s);
}
/* Default accept caps implementation just checks against
result = gst_pad_acceptcaps_default (pad, caps);
GST_DEBUG_OBJECT (pad, "default acceptcaps returned %d", result);
}
+
return result;
is_same_caps:
/**
* gst_pad_set_caps:
* @pad: a #GstPad to set the capabilities of.
- * @caps: a #GstCaps to set.
+ * @caps: (transfer none): a #GstCaps to set.
*
* Sets the capabilities of this pad. The caps must be fixed. Any previous
* caps on the pad will be unreffed. This function refs the caps so you should
caps);
GST_OBJECT_UNLOCK (pad);
- g_object_notify (G_OBJECT (pad), "caps");
+#if GLIB_CHECK_VERSION(2,26,0)
+ g_object_notify_by_pspec ((GObject *) pad, pspec_caps);
+#else
+ g_object_notify ((GObject *) pad, "caps");
+#endif
return TRUE;
gboolean res;
/* See if pad accepts the caps */
- if (!gst_pad_accept_caps (pad, caps))
+ if (!gst_caps_can_intersect (caps, gst_pad_get_pad_template_caps (pad)))
goto not_accepted;
/* set caps on pad if call succeeds */
*
* Gets the capabilities for @pad's template.
*
- * Returns: the #GstCaps of this pad template. If you intend to keep a
- * reference on the caps, make a copy (see gst_caps_copy ()).
+ * Returns: (transfer none): the #GstCaps of this pad template. If you intend
+ * to keep a reference on the caps, make a copy (see gst_caps_copy ()).
*/
const GstCaps *
gst_pad_get_pad_template_caps (GstPad * pad)
* Gets the peer of @pad. This function refs the peer pad so
* you need to unref it after use.
*
- * Returns: the peer #GstPad. Unref after usage.
+ * Returns: (transfer full): the peer #GstPad. Unref after usage.
*
* MT safe.
*/
* calling gst_pad_get_caps() on @pad and its peer. The caller owns a reference
* on the resulting caps.
*
- * Returns: the allowed #GstCaps of the pad link. Unref the caps when you no
- * longer need it. This function returns NULL when @pad has no peer.
+ * Returns: (transfer full): the allowed #GstCaps of the pad link. Unref the
+ * caps when you no longer need it. This function returns NULL when @pad
+ * has no peer.
*
* MT safe.
*/
* always negotiated before sinkpads so it is possible that the negotiated caps
* on the srcpad do not match the negotiated caps of the peer.
*
- * Returns: the negotiated #GstCaps of the pad link. Unref the caps when
- * you no longer need it. This function returns NULL when the @pad has no
- * peer or is not negotiated yet.
+ * Returns: (transfer full): the negotiated #GstCaps of the pad link. Unref
+ * the caps when you no longer need it. This function returns NULL when
+ * the @pad has no peer or is not negotiated yet.
*
* MT safe.
*/
* @pad: a source #GstPad
* @offset: the offset of the new buffer in the stream
* @size: the size of the new buffer
- * @caps: the caps of the new buffer
- * @buf: a newly allocated buffer
+ * @caps: (transfer none): the caps of the new buffer
+ * @buf: (out callee-allocates): a newly allocated buffer
*
* In addition to the function gst_pad_alloc_buffer(), this function
* automatically calls gst_pad_set_caps() when the caps of the
* two concurrent iterators were used and the last iterator would still be
* thread-unsafe. Just don't use this method anymore. */
data = g_slice_new (IntLinkIterData);
- data->list = GST_PAD_INTLINKFUNC (pad) (pad);
+ data->list = ((GstPadIntLinkFunction) GST_PAD_INTLINKFUNC (pad)) (pad);
data->cookie = 0;
GST_WARNING_OBJECT (pad, "Making unsafe iterator");
* Each #GstPad element yielded by the iterator will have its refcount increased,
* so unref after use.
*
- * Returns: a new #GstIterator of #GstPad or %NULL when the pad does not have an
- * iterator function configured. Use gst_iterator_free() after usage.
+ * Free-function: gst_iterator_free
+ *
+ * Returns: (transfer full): a new #GstIterator of #GstPad or %NULL when the
+ * pad does not have an iterator function configured. Use
+ * gst_iterator_free() after usage.
*
* Since: 0.10.21
*/
#ifndef GST_REMOVE_DEPRECATED
static void
-add_unref_pad_to_list (GstPad * pad, GList * list)
+add_unref_pad_to_list (GstPad * pad, GList ** list)
{
- list = g_list_prepend (list, pad);
+ *list = g_list_prepend (*list, pad);
gst_object_unref (pad);
}
#endif
*
* The caller must free this list after use with g_list_free().
*
- * Returns: a newly allocated #GList of pads, or NULL if the pad has no parent.
+ * Returns: (transfer full) (element-type Gst.Pad): a newly allocated #GList
+ * of pads, or NULL if the pad has no parent.
*
* Not MT safe.
*
* Deprecated: This function does not ref the pads in the list so that they
* could become invalid by the time the application accesses them. It's also
* possible that the list changes while handling the pads, which the caller of
- * this function is unable to know. Use the thread-safe
+ * this function is unable to know. Use the thread-safe
* gst_pad_iterate_internal_links_default() instead.
*/
#ifndef GST_REMOVE_DEPRECATED
it = gst_pad_iterate_internal_links (pad);
/* loop over the iterator and put all elements into a list, we also
- * immediatly unref them, which is bad. */
+ * immediately unref them, which is bad. */
do {
- ires = gst_iterator_foreach (it, (GFunc) add_unref_pad_to_list, res);
+ ires = gst_iterator_foreach (it, (GFunc) add_unref_pad_to_list, &res);
switch (ires) {
case GST_ITERATOR_OK:
case GST_ITERATOR_DONE:
*
* Not MT safe.
*
- * Returns: a newly allocated #GList of pads, free with g_list_free().
- *
+ * Returns: (transfer full) (element-type Gst.Pad): a newly allocated #GList
+ * of pads, free with g_list_free().
+ *
* Deprecated: This function does not ref the pads in the list so that they
* could become invalid by the time the application accesses them. It's also
* possible that the list changes while handling the pads, which the caller of
- * this function is unable to know. Use the thread-safe
+ * this function is unable to know. Use the thread-safe
* gst_pad_iterate_internal_links() instead.
*/
#ifndef GST_REMOVE_DEPRECATED
GST_WARNING_OBJECT (pad, "Calling unsafe internal links");
if (GST_PAD_INTLINKFUNC (pad))
- res = GST_PAD_INTLINKFUNC (pad) (pad);
+ res = ((GstPadIntLinkFunction) GST_PAD_INTLINKFUNC (pad)) (pad);
return res;
}
gst_object_unref (item);
break;
case GST_ITERATOR_RESYNC:
- /* FIXME, if we want to reset the result value we need to remember which
- * pads pushed with which result */
+ /* We don't reset the result here because we don't push the event
+ * again on pads that got the event already and because we need
+ * to consider the result of the previous pushes */
gst_iterator_resync (iter);
break;
case GST_ITERATOR_ERROR:
/**
* gst_pad_event_default:
* @pad: a #GstPad to call the default event handler on.
- * @event: the #GstEvent to handle.
+ * @event: (transfer full): the #GstEvent to handle.
*
* Invokes the default event handler for the given pad. End-of-stream and
* discontinuity events are handled specially, and then the event is sent to all
* pads that are internally linked to @pad, only one will be sent an event.
* Multi-sinkpad elements should implement custom event handlers.
*
- * Returns: TRUE if the event was sent succesfully.
+ * Returns: TRUE if the event was sent successfully.
*/
gboolean
gst_pad_event_default (GstPad * pad, GstEvent * event)
* gst_pad_dispatcher:
* @pad: a #GstPad to dispatch.
* @dispatch: the #GstPadDispatcherFunction to call.
- * @data: gpointer user data passed to the dispatcher function.
+ * @data: (closure): gpointer user data passed to the dispatcher function.
*
* Invokes the given dispatcher function on each respective peer of
* all pads that are internally linked to the given pad.
/**
* gst_pad_query:
* @pad: a #GstPad to invoke the default query on.
- * @query: the #GstQuery to perform.
+ * @query: (transfer none): the #GstQuery to perform.
*
* Dispatches a query to a pad. The query should have been allocated by the
- * caller via one of the type-specific allocation functions in gstquery.h. The
- * element is responsible for filling the query with an appropriate response,
- * which should then be parsed with a type-specific query parsing function.
+ * caller via one of the type-specific allocation functions. The element that
+ * the pad belongs to is responsible for filling the query with an appropriate
+ * response, which should then be parsed with a type-specific query parsing
+ * function.
*
* Again, the caller is responsible for both the allocation and deallocation of
* the query structure.
*
+ * Please also note that some queries might need a running pipeline to work.
+ *
* Returns: TRUE if the query could be performed.
*/
gboolean
/**
* gst_pad_peer_query:
* @pad: a #GstPad to invoke the peer query on.
- * @query: the #GstQuery to perform.
+ * @query: (transfer none): the #GstQuery to perform.
*
* Performs gst_pad_query() on the peer of @pad.
*
/**
* gst_pad_query_default:
* @pad: a #GstPad to call the default query handler on.
- * @query: the #GstQuery to handle.
+ * @query: (transfer none): the #GstQuery to handle.
*
* Invokes the default query handler for the given pad.
* The query is sent to all pads internally linked to @pad. Note that
* @pad, only one will be sent the query.
* Multi-sinkpad elements should implement custom query handlers.
*
- * Returns: TRUE if the query was performed succesfully.
+ * Returns: TRUE if the query was performed successfully.
*/
gboolean
gst_pad_query_default (GstPad * pad, GstQuery * query)
}
}
-#ifndef GST_DISABLE_LOADSAVE
+#if !defined(GST_DISABLE_LOADSAVE) && !defined(GST_REMOVE_DEPRECATED)
/* FIXME: why isn't this on a GstElement ? */
/**
* gst_pad_load_and_link:
pad->abidata.ABI.block_callback_called = TRUE;
if (callback) {
/* there is a callback installed, call it. We release the
- * lock so that the callback can do something usefull with the
+ * lock so that the callback can do something useful with the
* pad */
user_data = pad->block_data;
GST_OBJECT_UNLOCK (pad);
* checking for that little extra speed.
*/
static inline GstFlowReturn
-gst_pad_chain_data_unchecked (GstPad * pad, gboolean is_buffer, void *data)
+gst_pad_chain_data_unchecked (GstPad * pad, gboolean is_buffer, void *data,
+ GstPadPushCache * cache)
{
GstCaps *caps;
gboolean caps_changed;
/* see if the signal should be emited, we emit before caps nego as
* we might drop the buffer and do capsnego for nothing. */
if (G_UNLIKELY (emit_signal)) {
+ cache = NULL;
if (G_LIKELY (is_buffer)) {
if (!gst_pad_emit_have_data_signal (pad, GST_MINI_OBJECT (data)))
goto dropping;
goto no_function;
GST_CAT_LOG_OBJECT (GST_CAT_SCHEDULING, pad,
- "calling chainfunction &%s", GST_DEBUG_FUNCPTR_NAME (chainfunc));
+ "calling chainfunction &%s with buffer %" GST_PTR_FORMAT,
+ GST_DEBUG_FUNCPTR_NAME (chainfunc), GST_BUFFER (data));
+
+ if (cache) {
+ cache->peer = gst_object_ref (pad);
+ cache->caps = caps ? gst_caps_ref (caps) : NULL;
+ }
ret = chainfunc (pad, GST_BUFFER_CAST (data));
GST_CAT_LOG_OBJECT (GST_CAT_SCHEDULING, pad,
- "called chainfunction &%s, returned %s",
- GST_DEBUG_FUNCPTR_NAME (chainfunc), gst_flow_get_name (ret));
+ "called chainfunction &%s with buffer %p, returned %s",
+ GST_DEBUG_FUNCPTR_NAME (chainfunc), data, gst_flow_get_name (ret));
} else {
GstPadChainListFunction chainlistfunc;
} else {
GST_CAT_INFO_OBJECT (GST_CAT_SCHEDULING, pad, "chaining group");
}
- ret = gst_pad_chain_data_unchecked (pad, TRUE, group);
+ ret = gst_pad_chain_data_unchecked (pad, TRUE, group, NULL);
} while (ret == GST_FLOW_OK && gst_buffer_list_iterator_next_group (it));
} else {
GST_CAT_INFO_OBJECT (GST_CAT_SCHEDULING, pad, "chaining empty group");
- ret = gst_pad_chain_data_unchecked (pad, TRUE, gst_buffer_new ());
+ ret = gst_pad_chain_data_unchecked (pad, TRUE, gst_buffer_new (), NULL);
}
gst_buffer_list_iterator_free (it);
/**
* gst_pad_chain:
* @pad: a sink #GstPad, returns GST_FLOW_ERROR if not.
- * @buffer: the #GstBuffer to send, return GST_FLOW_ERROR if not.
+ * @buffer: (transfer full): the #GstBuffer to send, return GST_FLOW_ERROR
+ * if not.
*
* Chain a buffer to @pad.
*
g_return_val_if_fail (GST_PAD_IS_SINK (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_IS_BUFFER (buffer), GST_FLOW_ERROR);
- return gst_pad_chain_data_unchecked (pad, TRUE, buffer);
+ return gst_pad_chain_data_unchecked (pad, TRUE, buffer, NULL);
}
/**
* gst_pad_chain_list:
* @pad: a sink #GstPad, returns GST_FLOW_ERROR if not.
- * @list: the #GstBufferList to send, return GST_FLOW_ERROR if not.
+ * @list: (transfer full): the #GstBufferList to send, return GST_FLOW_ERROR
+ * if not.
*
* Chain a bufferlist to @pad.
*
g_return_val_if_fail (GST_PAD_IS_SINK (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_IS_BUFFER_LIST (list), GST_FLOW_ERROR);
- return gst_pad_chain_data_unchecked (pad, FALSE, list);
+ return gst_pad_chain_data_unchecked (pad, FALSE, list, NULL);
}
static GstFlowReturn
-gst_pad_push_data (GstPad * pad, gboolean is_buffer, void *data)
+gst_pad_push_data (GstPad * pad, gboolean is_buffer, void *data,
+ GstPadPushCache * cache)
{
GstPad *peer;
GstFlowReturn ret;
/* we emit signals on the pad arg, the peer will have a chance to
* emit in the _chain() function */
if (G_UNLIKELY (GST_PAD_DO_BUFFER_SIGNALS (pad) > 0)) {
+ cache = NULL;
/* unlock before emitting */
GST_OBJECT_UNLOCK (pad);
GST_OBJECT_LOCK (pad);
}
- if (G_UNLIKELY ((peer = GST_PAD_PEER (pad)) == NULL))
- goto not_linked;
-
/* Before pushing the buffer to the peer pad, ensure that caps
* are set on this pad */
caps = gst_pad_data_get_caps (is_buffer, data);
caps_changed = caps && caps != GST_PAD_CAPS (pad);
- /* take ref to peer pad before releasing the lock */
- gst_object_ref (peer);
-
- GST_OBJECT_UNLOCK (pad);
-
/* we got a new datatype from the pad, it had better handle it */
if (G_UNLIKELY (caps_changed)) {
+ /* unlock before setting */
+ GST_OBJECT_UNLOCK (pad);
GST_DEBUG_OBJECT (pad,
"caps changed from %" GST_PTR_FORMAT " to %p %" GST_PTR_FORMAT,
GST_PAD_CAPS (pad), caps, caps);
if (G_UNLIKELY (!gst_pad_set_caps (pad, caps)))
goto not_negotiated;
+ GST_OBJECT_LOCK (pad);
}
- ret = gst_pad_chain_data_unchecked (peer, is_buffer, data);
+ if (G_UNLIKELY ((peer = GST_PAD_PEER (pad)) == NULL))
+ goto not_linked;
+
+ /* take ref to peer pad before releasing the lock */
+ gst_object_ref (peer);
+ GST_OBJECT_UNLOCK (pad);
+
+ ret = gst_pad_chain_data_unchecked (peer, is_buffer, data, cache);
gst_object_unref (peer);
} else {
GST_CAT_INFO_OBJECT (GST_CAT_SCHEDULING, pad, "pushing group");
}
- ret = gst_pad_push_data (pad, TRUE, group);
+ ret = gst_pad_push_data (pad, TRUE, group, NULL);
} while (ret == GST_FLOW_OK && gst_buffer_list_iterator_next_group (it));
} else {
GST_CAT_INFO_OBJECT (GST_CAT_SCHEDULING, pad, "pushing empty group");
- ret = gst_pad_push_data (pad, TRUE, gst_buffer_new ());
+ ret = gst_pad_push_data (pad, TRUE, gst_buffer_new (), NULL);
}
gst_buffer_list_iterator_free (it);
not_negotiated:
{
gst_pad_data_unref (is_buffer, data);
- gst_object_unref (peer);
GST_CAT_DEBUG_OBJECT (GST_CAT_SCHEDULING, pad,
"element pushed data then refused to accept the caps");
return GST_FLOW_NOT_NEGOTIATED;
}
}
+/* Atomically take ownership of the push cache stored at @cache_ptr.
+ * The slot is swapped to NULL so that concurrent pushers see it as busy
+ * and an invalidator that runs meanwhile will leave PAD_CACHE_INVALID
+ * behind instead of freeing under us.
+ * Returns the cache (caller now owns it and must pad_put_cache() or
+ * pad_free_cache() it) or NULL when there is no usable cache. */
+static inline GstPadPushCache *
+pad_take_cache (GstPad * pad, gpointer * cache_ptr)
+{
+ GstPadPushCache *cache;
+
+ /* try to get the cached data */
+ do {
+ cache = g_atomic_pointer_get (cache_ptr);
+ /* now try to replace the pointer with NULL to mark that we are busy
+ * with it */
+ } while (!g_atomic_pointer_compare_and_exchange (cache_ptr, cache, NULL));
+
+ /* we could have a leftover invalid entry */
+ if (G_UNLIKELY (cache == PAD_CACHE_INVALID))
+ cache = NULL;
+
+ return cache;
+}
+
+/* Drop the peer and caps references held by @cache and free the structure.
+ * Must not be called with the static PAD_CACHE_INVALID sentinel (callers
+ * check for it first); @cache->peer is always non-NULL for a real cache. */
+static inline void
+pad_free_cache (GstPadPushCache * cache)
+{
+ gst_object_unref (cache->peer);
+ if (cache->caps)
+ gst_caps_unref (cache->caps);
+ g_slice_free (GstPadPushCache, cache);
+}
+
+/* Try to store @cache back into the pad's cache slot. Succeeds only when
+ * the slot is still NULL (nothing invalidated or repopulated it while we
+ * were using the cache); on failure — e.g. the slot now holds
+ * PAD_CACHE_INVALID — ownership of @cache is consumed by freeing it. */
+static inline void
+pad_put_cache (GstPad * pad, GstPadPushCache * cache, gpointer * cache_ptr)
+{
+ /* put it back */
+ if (!g_atomic_pointer_compare_and_exchange (cache_ptr, NULL, cache)) {
+ /* something changed, clean up our cache */
+ pad_free_cache (cache);
+ }
+}
+
+/* Invalidate the push cache associated with @pad. The cache always lives
+ * on the source side, so when called on a sink pad we redirect to its peer.
+ * Safe against a concurrent gst_pad_push(): if the cache is currently in
+ * use (slot is NULL) we leave PAD_CACHE_INVALID behind so the pusher's
+ * put-back/re-check fails and it cleans up itself.
+ *
+ * must be called with the pad lock */
+void
+_priv_gst_pad_invalidate_cache (GstPad * pad)
+{
+ GstPadPushCache *cache;
+ gpointer *cache_ptr;
+
+ GST_LOG_OBJECT (pad, "Invalidating pad cache");
+
+ /* we hold the pad lock here so we can get the peer and it stays
+ * alive during this call */
+ if (GST_PAD_IS_SINK (pad)) {
+ if (!(pad = GST_PAD_PEER (pad)))
+ return;
+ }
+
+ cache_ptr = (gpointer *) & pad->abidata.ABI.priv->cache_ptr;
+
+ /* try to get the cached data */
+ do {
+ cache = g_atomic_pointer_get (cache_ptr);
+ /* now try to replace the pointer with INVALID. If nothing is busy with this
+ * caps, we get the cache and clean it up. If something is busy, we replace
+ * with INVALID so that when the function finishes and tries to put the
+ * cache back, it'll fail and cleanup */
+ } while (!g_atomic_pointer_compare_and_exchange (cache_ptr, cache,
+ PAD_CACHE_INVALID));
+
+ if (G_LIKELY (cache && cache != PAD_CACHE_INVALID))
+ pad_free_cache (cache);
+}
+
/**
* gst_pad_push:
* @pad: a source #GstPad, returns #GST_FLOW_ERROR if not.
- * @buffer: the #GstBuffer to push returns GST_FLOW_ERROR if not.
+ * @buffer: (transfer full): the #GstBuffer to push returns GST_FLOW_ERROR
+ * if not.
*
* Pushes a buffer to the peer of @pad.
*
GstFlowReturn
gst_pad_push (GstPad * pad, GstBuffer * buffer)
{
+ GstPadPushCache *cache;
+ GstFlowReturn ret;
+ gpointer *cache_ptr;
+ GstPad *peer;
+ GstCaps *caps;
+
g_return_val_if_fail (GST_IS_PAD (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_PAD_IS_SRC (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_IS_BUFFER (buffer), GST_FLOW_ERROR);
- return gst_pad_push_data (pad, TRUE, buffer);
+ /* fast path: reuse the cached (peer, caps) pair so we can call the peer's
+ * chainfunc directly, skipping the per-push locking and caps negotiation
+ * done in gst_pad_push_data() */
+ cache_ptr = (gpointer *) & pad->abidata.ABI.priv->cache_ptr;
+
+ cache = pad_take_cache (pad, cache_ptr);
+
+ if (G_UNLIKELY (cache == NULL))
+ goto slow_path;
+
+ /* check caps: a caps pointer different from the cached one must take the
+ * slow path, which renegotiates; a NULL buffer caps is accepted as-is */
+ caps = GST_BUFFER_CAPS (buffer);
+ if (G_UNLIKELY (caps && caps != cache->caps)) {
+ pad_free_cache (cache);
+ goto slow_path;
+ }
+
+ peer = cache->peer;
+
+ GST_PAD_STREAM_LOCK (peer);
+ /* re-check under the peer's stream lock: an invalidation may have raced
+ * with us after pad_take_cache() succeeded */
+ if (G_UNLIKELY (g_atomic_pointer_get (cache_ptr) == PAD_CACHE_INVALID))
+ goto invalid;
+
+ GST_CAT_LOG_OBJECT (GST_CAT_SCHEDULING, pad,
+ "calling chainfunction &%s with buffer %" GST_PTR_FORMAT,
+ GST_DEBUG_FUNCPTR_NAME (GST_PAD_CHAINFUNC (peer)), buffer);
+
+ ret = GST_PAD_CHAINFUNC (peer) (peer, buffer);
+
+ GST_CAT_LOG_OBJECT (GST_CAT_SCHEDULING, pad,
+ "called chainfunction &%s with buffer %p, returned %s",
+ GST_DEBUG_FUNCPTR_NAME (GST_PAD_CHAINFUNC (peer)), buffer,
+ gst_flow_get_name (ret));
+
+ GST_PAD_STREAM_UNLOCK (peer);
+
+ pad_put_cache (pad, cache, cache_ptr);
+
+ return ret;
+
+ /* slow path */
+slow_path:
+ {
+ GstPadPushCache scache = { NULL, };
+
+ GST_LOG_OBJECT (pad, "Taking slow path");
+
+ /* the full push fills scache with a peer/caps pair when the push
+ * succeeded in a cacheable way */
+ ret = gst_pad_push_data (pad, TRUE, buffer, &scache);
+
+ if (scache.peer) {
+ GstPadPushCache *ncache;
+
+ GST_LOG_OBJECT (pad, "Caching push data");
+
+ /* make cache structure */
+ ncache = g_slice_new (GstPadPushCache);
+ *ncache = scache;
+
+ pad_put_cache (pad, ncache, cache_ptr);
+ }
+ return ret;
+ }
+invalid:
+ {
+ /* cache was invalidated while we held it; drop it and push normally */
+ GST_PAD_STREAM_UNLOCK (peer);
+ pad_free_cache (cache);
+ goto slow_path;
+ }
}
/**
* gst_pad_push_list:
* @pad: a source #GstPad, returns #GST_FLOW_ERROR if not.
- * @list: the #GstBufferList to push returns GST_FLOW_ERROR if not.
+ * @list: (transfer full): the #GstBufferList to push returns GST_FLOW_ERROR
+ * if not.
*
* Pushes a buffer list to the peer of @pad.
*
GstFlowReturn
gst_pad_push_list (GstPad * pad, GstBufferList * list)
{
+ GstBuffer *buf;
+ GstPadPushCache *cache;
+ GstFlowReturn ret;
+ gpointer *cache_ptr;
+ GstPad *peer;
+ GstCaps *caps;
+
g_return_val_if_fail (GST_IS_PAD (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_PAD_IS_SRC (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_IS_BUFFER_LIST (list), GST_FLOW_ERROR);
- return gst_pad_push_data (pad, FALSE, list);
+ /* fast path: same peer/caps cache protocol as gst_pad_push(), but the
+ * peer's chainlist function is invoked instead of the chain function */
+ cache_ptr = (gpointer *) & pad->abidata.ABI.priv->cache_ptr;
+
+ cache = pad_take_cache (pad, cache_ptr);
+
+ if (G_UNLIKELY (cache == NULL))
+ goto slow_path;
+
+ /* check caps: only the first buffer of the first group is inspected here;
+ * an empty list (NULL buf) yields NULL caps and stays on the fast path */
+ if ((buf = gst_buffer_list_get (list, 0, 0)))
+ caps = GST_BUFFER_CAPS (buf);
+ else
+ caps = NULL;
+
+ if (G_UNLIKELY (caps && caps != cache->caps)) {
+ pad_free_cache (cache);
+ goto slow_path;
+ }
+
+ peer = cache->peer;
+
+ GST_PAD_STREAM_LOCK (peer);
+ /* re-check under the peer's stream lock: an invalidation may have raced
+ * with us after pad_take_cache() succeeded */
+ if (G_UNLIKELY (g_atomic_pointer_get (cache_ptr) == PAD_CACHE_INVALID))
+ goto invalid;
+
+ ret = GST_PAD_CHAINLISTFUNC (peer) (peer, list);
+
+ GST_PAD_STREAM_UNLOCK (peer);
+
+ pad_put_cache (pad, cache, cache_ptr);
+
+ return ret;
+
+ /* slow path */
+slow_path:
+ {
+ GstPadPushCache scache = { NULL, };
+
+ GST_LOG_OBJECT (pad, "Taking slow path");
+
+ /* the full push fills scache with a peer/caps pair when the push
+ * succeeded in a cacheable way */
+ ret = gst_pad_push_data (pad, FALSE, list, &scache);
+
+ if (scache.peer) {
+ GstPadPushCache *ncache;
+
+ GST_LOG_OBJECT (pad, "Caching push data");
+
+ /* make cache structure */
+ ncache = g_slice_new (GstPadPushCache);
+ *ncache = scache;
+
+ pad_put_cache (pad, ncache, cache_ptr);
+ }
+ return ret;
+ }
+invalid:
+ {
+ /* cache was invalidated while we held it; drop it and push normally */
+ GST_PAD_STREAM_UNLOCK (peer);
+ pad_free_cache (cache);
+ goto slow_path;
+ }
}
/**
}
}
-/**
- * gst_pad_get_range:
- * @pad: a src #GstPad, returns #GST_FLOW_ERROR if not.
- * @offset: The start offset of the buffer
- * @size: The length of the buffer
- * @buffer: a pointer to hold the #GstBuffer, returns #GST_FLOW_ERROR if %NULL.
- *
- * When @pad is flushing this function returns #GST_FLOW_WRONG_STATE
- * immediatly and @buffer is %NULL.
- *
- * Calls the getrange function of @pad, see #GstPadGetRangeFunction for a
- * description of a getrange function. If @pad has no getrange function
- * installed (see gst_pad_set_getrange_function()) this function returns
- * #GST_FLOW_NOT_SUPPORTED.
- *
- * This is a lowlevel function. Usualy gst_pad_pull_range() is used.
- *
- * Returns: a #GstFlowReturn from the pad.
- *
- * MT safe.
- */
-GstFlowReturn
-gst_pad_get_range (GstPad * pad, guint64 offset, guint size,
+static GstFlowReturn
+gst_pad_get_range_unchecked (GstPad * pad, guint64 offset, guint size,
GstBuffer ** buffer)
{
GstFlowReturn ret;
GstCaps *caps;
gboolean caps_changed;
- g_return_val_if_fail (GST_IS_PAD (pad), GST_FLOW_ERROR);
- g_return_val_if_fail (GST_PAD_IS_SRC (pad), GST_FLOW_ERROR);
- g_return_val_if_fail (buffer != NULL, GST_FLOW_ERROR);
-
GST_PAD_STREAM_LOCK (pad);
GST_OBJECT_LOCK (pad);
get_range_failed:
{
*buffer = NULL;
- GST_CAT_WARNING_OBJECT (GST_CAT_SCHEDULING, pad,
- "getrange failed %s", gst_flow_get_name (ret));
+ GST_CAT_LEVEL_LOG (GST_CAT_SCHEDULING,
+ (ret >= GST_FLOW_UNEXPECTED) ? GST_LEVEL_INFO : GST_LEVEL_WARNING,
+ pad, "getrange failed, flow: %s", gst_flow_get_name (ret));
return ret;
}
not_negotiated:
}
}
+/**
+ * gst_pad_get_range:
+ * @pad: a src #GstPad, returns #GST_FLOW_ERROR if not.
+ * @offset: The start offset of the buffer
+ * @size: The length of the buffer
+ * @buffer: (out callee-allocates): a pointer to hold the #GstBuffer,
+ * returns #GST_FLOW_ERROR if %NULL.
+ *
+ * When @pad is flushing this function returns #GST_FLOW_WRONG_STATE
+ * immediately and @buffer is %NULL.
+ *
+ * Calls the getrange function of @pad, see #GstPadGetRangeFunction for a
+ * description of a getrange function. If @pad has no getrange function
+ * installed (see gst_pad_set_getrange_function()) this function returns
+ * #GST_FLOW_NOT_SUPPORTED.
+ *
+ * This is a lowlevel function. Usually gst_pad_pull_range() is used.
+ *
+ * Returns: a #GstFlowReturn from the pad.
+ *
+ * MT safe.
+ */
+GstFlowReturn
+gst_pad_get_range (GstPad * pad, guint64 offset, guint size,
+ GstBuffer ** buffer)
+{
+ g_return_val_if_fail (GST_IS_PAD (pad), GST_FLOW_ERROR);
+ g_return_val_if_fail (GST_PAD_IS_SRC (pad), GST_FLOW_ERROR);
+ g_return_val_if_fail (buffer != NULL, GST_FLOW_ERROR);
+
+ /* argument checks done; the actual work is shared with
+ * gst_pad_pull_range() through the unchecked variant */
+ return gst_pad_get_range_unchecked (pad, offset, size, buffer);
+}
/**
* gst_pad_pull_range:
* @pad: a sink #GstPad, returns GST_FLOW_ERROR if not.
* @offset: The start offset of the buffer
* @size: The length of the buffer
- * @buffer: a pointer to hold the #GstBuffer, returns GST_FLOW_ERROR if %NULL.
+ * @buffer: (out callee-allocates): a pointer to hold the #GstBuffer, returns
+ * GST_FLOW_ERROR if %NULL.
*
* Pulls a @buffer from the peer pad.
*
gst_object_ref (peer);
GST_OBJECT_UNLOCK (pad);
- ret = gst_pad_get_range (peer, offset, size, buffer);
+ ret = gst_pad_get_range_unchecked (peer, offset, size, buffer);
gst_object_unref (peer);
/**
* gst_pad_push_event:
* @pad: a #GstPad to push the event to.
- * @event: the #GstEvent to send to the pad.
+ * @event: (transfer full): the #GstEvent to send to the pad.
*
* Sends the event to the peer of the given pad. This function is
* mainly used by elements to send events to their peer
* . handle pad blocking */
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_FLUSH_START:
+ _priv_gst_pad_invalidate_cache (pad);
GST_PAD_SET_FLUSHING (pad);
if (G_UNLIKELY (GST_PAD_IS_BLOCKED (pad))) {
break;
}
+ if (G_UNLIKELY (GST_EVENT_SRC (event) == NULL)) {
+ GST_LOG_OBJECT (pad, "event had no source, setting pad as event source");
+ GST_EVENT_SRC (event) = gst_object_ref (pad);
+ }
+
if (G_UNLIKELY (GST_PAD_DO_EVENT_SIGNALS (pad) > 0)) {
GST_OBJECT_UNLOCK (pad);
if (peerpad == NULL)
goto not_linked;
- GST_LOG_OBJECT (pad, "sending event %s to peerpad %" GST_PTR_FORMAT,
- GST_EVENT_TYPE_NAME (event), peerpad);
+ GST_LOG_OBJECT (pad,
+ "sending event %s (%" GST_PTR_FORMAT ") to peerpad %" GST_PTR_FORMAT,
+ GST_EVENT_TYPE_NAME (event), event, peerpad);
gst_object_ref (peerpad);
GST_OBJECT_UNLOCK (pad);
/**
* gst_pad_send_event:
* @pad: a #GstPad to send the event to.
- * @event: the #GstEvent to send to the pad.
+ * @event: (transfer full): the #GstEvent to send to the pad.
*
* Sends the event to the pad. This function can be used
* by applications to send events in the pipeline.
/* can't even accept a flush begin event when flushing */
if (GST_PAD_IS_FLUSHING (pad))
goto flushing;
+
+ _priv_gst_pad_invalidate_cache (pad);
GST_PAD_SET_FLUSHING (pad);
GST_CAT_DEBUG_OBJECT (GST_CAT_EVENT, pad, "set flush flag");
break;
case GST_EVENT_FLUSH_STOP:
- GST_PAD_UNSET_FLUSHING (pad);
- GST_CAT_DEBUG_OBJECT (GST_CAT_EVENT, pad, "cleared flush flag");
+ if (G_LIKELY (GST_PAD_ACTIVATE_MODE (pad) != GST_ACTIVATE_NONE)) {
+ GST_PAD_UNSET_FLUSHING (pad);
+ GST_CAT_DEBUG_OBJECT (GST_CAT_EVENT, pad, "cleared flush flag");
+ }
GST_OBJECT_UNLOCK (pad);
/* grab stream lock */
GST_PAD_STREAM_LOCK (pad);
GST_EVENT_TYPE_NAME (event));
/* make this a little faster, no point in grabbing the lock
- * if the pad is allready flushing. */
+ * if the pad is already flushing. */
if (G_UNLIKELY (GST_PAD_IS_FLUSHING (pad)))
goto flushing;
* Gets the private data of a pad.
* No locking is performed in this function.
*
- * Returns: a #gpointer to the private data.
+ * Returns: (transfer none): a #gpointer to the private data.
*/
gpointer
gst_pad_get_element_private (GstPad * pad)