to cleanup the memory.
+Sharing
+-------
+
+GstMemory objects can be shared between multiple GstBuffer objects. It is
+important that when one thread writes to the shared memory, the other
+buffers don't see the changes.
+
+We add a separate shared counter that counts the number of objects that share
+this GstMemory object. The counter is initially 0, meaning that the object is
+not shared with any object. When a GstBuffer (or other object) adds a ref to
+the GstMemory, it will also increase the shared count.
+
+When the GstMemory is removed from the buffer, the ref count and the shared
+counter will be decreased.
+
+We don't want to use the refcount for this purpose because language bindings
+might keep arbitrary references to the object.
+
+A GstMemory object with a shared counter > 1 is not writable. Any attempt to
+map with WRITE access or resize will fail. _make_mapped() with WRITE access
+will make a copy.
+
+
+
+
Memory layout
~~~~~~~~~~~~~
/* our predefined allocators */
static GstAllocator *_default_mem_impl;
+#define SHARE_ONE (1 << 16)
+#define LOCK_ONE (GST_LOCK_FLAG_LAST)
+#define FLAG_MASK (GST_LOCK_FLAG_LAST - 1)
+#define LOCK_MASK ((SHARE_ONE - 1) - FLAG_MASK)
+#define LOCK_FLAG_MASK (SHARE_ONE - 1)
+
static GstMemory *
_gst_memory_copy (GstMemory * mem)
{
mem->mem.allocator = _default_mem_impl;
mem->mem.parent = parent ? gst_memory_ref (parent) : NULL;
- mem->mem.state = (flags & GST_MEMORY_FLAG_READONLY ? 0x1 : 0);
+ mem->mem.state = (flags & GST_MEMORY_FLAG_READONLY ? GST_LOCK_FLAG_READ : 0);
+ mem->mem.state |= (flags & GST_MEMORY_FLAG_NO_SHARE ? SHARE_ONE : 0);
mem->mem.maxsize = maxsize;
mem->mem.align = align;
mem->mem.offset = offset;
mem->size = size;
}
-static gboolean
-gst_memory_lock (GstMemory * mem, GstMapFlags flags)
+/**
+ * gst_memory_lock:
+ * @mem: a #GstMemory
+ * @flags: #GstLockFlags
+ *
+ * Lock the memory with the specified access mode in @flags.
+ *
+ * Returns: %TRUE if the memory could be locked.
+ */
+gboolean
+gst_memory_lock (GstMemory * mem, GstLockFlags flags)
{
gint access_mode, state, newstate;
- access_mode = flags & 3;
+ access_mode = flags & FLAG_MASK;
do {
state = g_atomic_int_get (&mem->state);
- if (state == 0) {
+ if (flags == GST_LOCK_FLAG_EXCLUSIVE) {
+ /* shared ref */
+ newstate = state + SHARE_ONE;
+ flags &= ~GST_LOCK_FLAG_EXCLUSIVE;
+ }
+
+ /* shared counter > 1 and write access */
+ if (state > SHARE_ONE && flags & GST_LOCK_FLAG_WRITE)
+ goto lock_failed;
+
+ if ((state & LOCK_FLAG_MASK) == 0) {
/* nothing mapped, set access_mode and refcount */
- newstate = 4 | access_mode;
+ newstate = state | LOCK_ONE | access_mode;
} else {
/* access_mode must match */
if ((state & access_mode) != access_mode)
goto lock_failed;
/* increase refcount */
- newstate = state + 4;
+ newstate = state + LOCK_ONE;
}
} while (!g_atomic_int_compare_and_exchange (&mem->state, state, newstate));
}
}
-static void
-gst_memory_unlock (GstMemory * mem)
+/**
+ * gst_memory_unlock:
+ * @mem: a #GstMemory
+ * @flags: #GstLockFlags
+ *
+ * Unlock the memory with the specified access mode in @flags.
+ */
+void
+gst_memory_unlock (GstMemory * mem, GstLockFlags flags)
{
- gint state, newstate;
+ gint access_mode, state, newstate;
+
+ access_mode = flags & 3;
do {
state = g_atomic_int_get (&mem->state);
+ if (flags == GST_LOCK_FLAG_EXCLUSIVE) {
+ /* shared counter */
+ g_return_if_fail (state >= SHARE_ONE);
+ newstate = state - SHARE_ONE;
+ flags &= ~GST_LOCK_FLAG_EXCLUSIVE;
+ }
+
+ g_return_if_fail ((state & access_mode) == access_mode);
/* decrease the refcount */
- newstate = state - 4;
+ newstate = state - LOCK_ONE;
/* last refcount, unset access_mode */
- if (newstate < 4)
- newstate = 0;
+ if ((newstate & LOCK_FLAG_MASK) == access_mode)
+ newstate = state & ~LOCK_FLAG_MASK;
+
} while (!g_atomic_int_compare_and_exchange (&mem->state, state, newstate));
}
-
/**
* gst_memory_make_mapped:
* @mem: (transfer full): a #GstMemory
{
/* something went wrong, restore the orginal state again */
GST_CAT_ERROR (GST_CAT_MEMORY, "mem %p: map failed", mem);
- gst_memory_unlock (mem);
+ gst_memory_unlock (mem, flags);
return FALSE;
}
}
g_return_if_fail (g_atomic_int_get (&mem->state) >= 4);
mem->allocator->info.mem_unmap (mem);
- gst_memory_unlock (mem);
+ gst_memory_unlock (mem, info->flags);
}
/**
gst_mini_object_unref (GST_MINI_OBJECT_CAST (memory));
}
+/* locking */
+/**
+ * GstLockFlags:
+ * @GST_LOCK_FLAG_READ: lock for read access
+ * @GST_LOCK_FLAG_WRITE: lock for write access
+ * @GST_LOCK_FLAG_EXCLUSIVE: lock for exclusive access
+ * @GST_LOCK_FLAG_LAST: first flag that can be used for custom purposes
+ *
+ * Flags used when locking memory
+ */
+typedef enum {
+ GST_LOCK_FLAG_READ = (1 << 0),
+ GST_LOCK_FLAG_WRITE = (1 << 1),
+ GST_LOCK_FLAG_EXCLUSIVE = (1 << 2),
+
+ GST_LOCK_FLAG_LAST = (1 << 4)
+} GstLockFlags;
+
gboolean gst_memory_is_exclusive (GstMemory *mem);
+gboolean gst_memory_lock (GstMemory *mem, GstLockFlags flags);
+void gst_memory_unlock (GstMemory *mem, GstLockFlags flags);
+
/* getting/setting memory properties */
gsize gst_memory_get_sizes (GstMemory *mem, gsize *offset, gsize *maxsize);
void gst_memory_resize (GstMemory *mem, gssize offset, gsize size);