g_object_class_install_property (gobject_class, PAD_PROP_CAPS,
g_param_spec_boxed ("caps", "Caps", "The capabilities of the pad",
- GST_TYPE_CAPS, G_PARAM_READABLE));
+ GST_TYPE_CAPS, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
g_object_class_install_property (gobject_class, PAD_PROP_DIRECTION,
g_param_spec_enum ("direction", "Direction", "The direction of the pad",
GST_TYPE_PAD_DIRECTION, GST_PAD_UNKNOWN,
- G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY));
+ G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY | G_PARAM_STATIC_STRINGS));
/* FIXME, Make G_PARAM_CONSTRUCT_ONLY when we fix ghostpads. */
g_object_class_install_property (gobject_class, PAD_PROP_TEMPLATE,
g_param_spec_object ("template", "Template",
"The GstPadTemplate of this pad", GST_TYPE_PAD_TEMPLATE,
- G_PARAM_READWRITE));
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
#ifndef GST_DISABLE_LOADSAVE
gstobject_class->save_thyself = GST_DEBUG_FUNCPTR (gst_pad_save_thyself);
gst_pad_dispose (GObject * object)
{
GstPad *pad = GST_PAD (object);
+ GstPad *peer;
GST_CAT_DEBUG_OBJECT (GST_CAT_REFCOUNTING, pad, "dispose");
- /* we don't hold a ref to the peer so we can just set the
- * peer to NULL. */
- GST_PAD_PEER (pad) = NULL;
+ /* unlink the peer pad */
+ if ((peer = gst_pad_get_peer (pad))) {
+ /* window for MT unsafeness, someone else could unlink here
+ * and then we call unlink with wrong pads. The unlink
+ * function would catch this and safely return failed. */
+ if (GST_PAD_IS_SRC (pad))
+ gst_pad_unlink (pad, peer);
+ else
+ gst_pad_unlink (peer, pad);
+
+ gst_object_unref (peer);
+ }
/* clear the caps */
gst_caps_replace (&GST_PAD_CAPS (pad), NULL);
* @srcpad: the source #GstPad to unlink.
* @sinkpad: the sink #GstPad to unlink.
*
- * Unlinks the source pad from the sink pad. Will emit the "unlinked" signal on
- * both pads.
+ * Unlinks the source pad from the sink pad. Will emit the #GstPad::unlinked
+ * signal on both pads.
*
* Returns: TRUE if the pads were unlinked. This function returns FALSE if
* the pads were not linked together.
*
* Gets the capabilities this pad can produce or consume.
* Note that this method doesn't necessarily return the caps set by
- * gst_pad_set_caps() - use #GST_PAD_CAPS for that instead.
+ * gst_pad_set_caps() - use GST_PAD_CAPS() for that instead.
* gst_pad_get_caps returns all possible caps a pad can operate with, using
* the pad's get_caps function;
* this returns the pad template caps if not explicitly set.
GST_CAT_DEBUG_OBJECT (GST_CAT_CAPS, pad, "get pad caps");
result = gst_pad_get_caps_unlocked (pad);
+
+ /* be sure that we have a copy */
+ if (result)
+ result = gst_caps_make_writable (result);
+
GST_OBJECT_UNLOCK (pad);
return result;
} else if (G_VALUE_TYPE (src) == GST_TYPE_LIST) {
GValue temp = { 0 };
+ /* list could be empty */
+ if (gst_value_list_get_size (src) <= 0)
+ return FALSE;
+
gst_value_init_and_copy (&temp, gst_value_list_get_value (src, 0));
+
if (!fixate_value (dest, &temp))
gst_value_init_and_copy (dest, &temp);
g_value_unset (&temp);
if (G_UNLIKELY (peerpad == NULL))
goto no_peer;
- result = gst_pad_accept_caps (peerpad, caps);
+ gst_object_ref (peerpad);
+ /* release lock before calling external methods but keep ref to pad */
GST_OBJECT_UNLOCK (pad);
+ result = gst_pad_accept_caps (peerpad, caps);
+
+ gst_object_unref (peerpad);
+
return result;
no_peer:
goto fallback;
ret = bufferallocfunc (pad, offset, size, caps, buf);
+
if (G_UNLIKELY (ret != GST_FLOW_OK))
goto error;
+
/* no error, but NULL buffer means fallback to the default */
if (G_UNLIKELY (*buf == NULL))
goto fallback;
/* fallback case, allocate a buffer of our own, add pad caps. */
GST_CAT_DEBUG_OBJECT (GST_CAT_PADS, pad, "fallback buffer alloc");
- *buf = gst_buffer_new_and_alloc (size);
- GST_BUFFER_OFFSET (*buf) = offset;
- gst_buffer_set_caps (*buf, caps);
-
- return GST_FLOW_OK;
+ if ((*buf = gst_buffer_try_new_and_alloc (size))) {
+ GST_BUFFER_OFFSET (*buf) = offset;
+ gst_buffer_set_caps (*buf, caps);
+ return GST_FLOW_OK;
+ } else {
+ GST_CAT_DEBUG_OBJECT (GST_CAT_PADS, pad,
+ "out of memory allocating %d bytes", size);
+ return GST_FLOW_ERROR;
+ }
}
}
+/* FIXME 0.11: size should be unsigned */
static GstFlowReturn
gst_pad_alloc_buffer_full (GstPad * pad, guint64 offset, gint size,
GstCaps * caps, GstBuffer ** buf, gboolean setcaps)
{
GstPad *peer;
GstFlowReturn ret;
+ GstCaps *newcaps;
gboolean caps_changed;
g_return_val_if_fail (GST_IS_PAD (pad), GST_FLOW_ERROR);
g_return_val_if_fail (GST_PAD_IS_SRC (pad), GST_FLOW_ERROR);
g_return_val_if_fail (buf != NULL, GST_FLOW_ERROR);
+ g_return_val_if_fail (size >= 0, GST_FLOW_ERROR);
GST_DEBUG_OBJECT (pad, "offset %" G_GUINT64_FORMAT ", size %d", offset, size);
goto peer_error;
/* FIXME, move capnego this into a base class? */
- caps = GST_BUFFER_CAPS (*buf);
+ newcaps = GST_BUFFER_CAPS (*buf);
/* Lock for checking caps, pretty pointless as the _pad_push() function might
* change it concurrently, one of the problems with automatic caps setting in
* pad_alloc_and_set_caps. Worst case, if does a check too much, but only
* when there is heavy renegotiation going on in both directions. */
GST_OBJECT_LOCK (pad);
- caps_changed = caps && caps != GST_PAD_CAPS (pad);
+ caps_changed = newcaps && newcaps != GST_PAD_CAPS (pad);
GST_OBJECT_UNLOCK (pad);
/* we got a new datatype on the pad, see if it can handle it */
if (G_UNLIKELY (caps_changed)) {
GST_DEBUG_OBJECT (pad,
"caps changed from %" GST_PTR_FORMAT " to %p %" GST_PTR_FORMAT,
- GST_PAD_CAPS (pad), caps, caps);
- if (G_UNLIKELY (!gst_pad_configure_src (pad, caps, setcaps)))
+ GST_PAD_CAPS (pad), newcaps, newcaps);
+ if (G_UNLIKELY (!gst_pad_configure_src (pad, newcaps, setcaps)))
goto not_negotiated;
}
+
+ /* sanity check (only if caps are the same) */
+ if (G_LIKELY (newcaps == caps) && G_UNLIKELY (GST_BUFFER_SIZE (*buf) < size))
+ goto wrong_size_fallback;
+
return ret;
flushed:
"alloc function returned unacceptable buffer");
return GST_FLOW_NOT_NEGOTIATED;
}
+wrong_size_fallback:
+ {
+ GST_CAT_ERROR_OBJECT (GST_CAT_PADS, pad, "buffer returned by alloc "
+ "function is too small (%u < %d), doing fallback buffer alloc",
+ GST_BUFFER_SIZE (*buf), size);
+
+ gst_buffer_unref (*buf);
+
+ if ((*buf = gst_buffer_try_new_and_alloc (size))) {
+ GST_BUFFER_OFFSET (*buf) = offset;
+ gst_buffer_set_caps (*buf, caps);
+ return GST_FLOW_OK;
+ } else {
+ GST_CAT_DEBUG_OBJECT (GST_CAT_PADS, pad,
+ "out of memory allocating %d bytes", size);
+ return GST_FLOW_ERROR;
+ }
+ }
}
/**
*
* MT safe.
*/
+
+/* FIXME 0.11: size should be unsigned */
GstFlowReturn
gst_pad_alloc_buffer (GstPad * pad, guint64 offset, gint size, GstCaps * caps,
GstBuffer ** buf)
*
* MT safe.
*/
+
+/* FIXME 0.11: size should be unsigned */
GstFlowReturn
gst_pad_alloc_buffer_and_set_caps (GstPad * pad, guint64 offset, gint size,
GstCaps * caps, GstBuffer ** buf)
GST_INFO_OBJECT (pad, "Sending event %p (%s) to all internally linked pads",
event, GST_EVENT_TYPE_NAME (event));
- result = (GST_PAD_DIRECTION (pad) == GST_PAD_SINK);
-
orig = pads = gst_pad_get_internal_links (pad);
+ if (!pads) {
+ /* If this is a sinkpad and we don't have pads to send the event to, we
+ * return TRUE. This is so that when using the default handler on a sink
+ * element, we don't fail to push it. */
+ result = (GST_PAD_DIRECTION (pad) == GST_PAD_SINK);
+ } else {
+ /* we have pads, the result will be TRUE if one of the pads handled the
+ * event in the code below. */
+ result = FALSE;
+ }
+
while (pads) {
GstPad *eventpad = GST_PAD_CAST (pads->data);
GST_LOG_OBJECT (pad, "Reffing and sending event %p (%s) to %s:%s",
event, GST_EVENT_TYPE_NAME (event), GST_DEBUG_PAD_NAME (eventpad));
gst_event_ref (event);
- gst_pad_push_event (eventpad, event);
+ result |= gst_pad_push_event (eventpad, event);
} else {
/* we only send the event on one pad, multi-sinkpad elements
* should implement a handler */
}
/**
+ * gst_pad_peer_query:
+ * @pad: a #GstPad to invoke the peer query on.
+ * @query: the #GstQuery to perform.
+ *
+ * Performs gst_pad_query() on the peer of @pad.
+ *
+ * The caller is responsible for both the allocation and deallocation of
+ * the query structure.
+ *
+ * Returns: TRUE if the query could be performed. This function returns %FALSE
+ * if @pad has no peer.
+ *
+ * Since: 0.10.15
+ */
+gboolean
+gst_pad_peer_query (GstPad * pad, GstQuery * query)
+{
+ GstPad *peerpad;
+ gboolean result;
+
+ g_return_val_if_fail (GST_IS_PAD (pad), FALSE);
+ g_return_val_if_fail (GST_IS_QUERY (query), FALSE);
+
+ GST_OBJECT_LOCK (pad);
+
+ GST_DEBUG_OBJECT (pad, "peer query");
+
+ peerpad = GST_PAD_PEER (pad);
+ if (G_UNLIKELY (peerpad == NULL))
+ goto no_peer;
+
+ gst_object_ref (peerpad);
+ GST_OBJECT_UNLOCK (pad);
+
+ result = gst_pad_query (peerpad, query);
+
+ gst_object_unref (peerpad);
+
+ return result;
+
+ /* ERRORS */
+no_peer:
+ {
+ GST_WARNING_OBJECT (pad, "pad has no peer");
+ GST_OBJECT_UNLOCK (pad);
+ return FALSE;
+ }
+}
+
+/**
* gst_pad_query_default:
* @pad: a #GstPad to call the default query handler on.
* @query: the #GstQuery to handle.
while (field) {
if (!strcmp ((char *) field->name, "name")) {
name = (gchar *) xmlNodeGetContent (field);
- pad = gst_element_get_pad (GST_ELEMENT (parent), name);
+ pad = gst_element_get_static_pad (GST_ELEMENT (parent), name);
+ if (!pad)
+ pad = gst_element_get_request_pad (GST_ELEMENT (parent), name);
g_free (name);
} else if (!strcmp ((char *) field->name, "peer")) {
peer = (gchar *) xmlNodeGetContent (field);
if (target == NULL)
goto cleanup;
- targetpad = gst_element_get_pad (target, split[1]);
+ targetpad = gst_element_get_static_pad (target, split[1]);
+ if (!targetpad)
+ targetpad = gst_element_get_request_pad (target, split[1]);
if (targetpad == NULL)
goto cleanup;
gst_task_pause (task);
GST_OBJECT_UNLOCK (pad);
+ /* wait for task function to finish, this lock is recursive so it does nothing
+ * when the pause is called from the task itself */
GST_PAD_STREAM_LOCK (pad);
GST_PAD_STREAM_UNLOCK (pad);