3 * Copyright (C) 2015 Matthew Waters <matthew@centricular.com>
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Library General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Library General Public License for more details.
15 * You should have received a copy of the GNU Library General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
18 * Boston, MA 02110-1301, USA.
27 #include "gstglbuffer.h"
29 #include "gstglcontext.h"
30 #include "gstglfuncs.h"
31 #include "gstglutils.h"
36 * @short_description: memory subclass for GL buffers
37 * @see_also: #GstMemory, #GstAllocator
39 * GstGLBuffer is a #GstMemory subclass providing support for the mapping of
42 * Data is uploaded or downloaded from the GPU as is necessary.
45 /* Implementation notes:
47 * Currently does not take into account GLES2 differences (no mapbuffer)
/* Runtime GL API/version checks used to select code paths. */
50 #define USING_OPENGL(context) (gst_gl_context_check_gl_version (context, GST_GL_API_OPENGL, 1, 0))
51 #define USING_OPENGL3(context) (gst_gl_context_check_gl_version (context, GST_GL_API_OPENGL3, 3, 1))
52 #define USING_GLES(context) (gst_gl_context_check_gl_version (context, GST_GL_API_GLES, 1, 0))
53 #define USING_GLES2(context) (gst_gl_context_check_gl_version (context, GST_GL_API_GLES2, 2, 0))
54 #define USING_GLES3(context) (gst_gl_context_check_gl_version (context, GST_GL_API_GLES2, 3, 0))
/* TRUE when the context's vtable exposes glBufferStorage (GL 4.4 /
 * ARB_buffer_storage); gates the persistent-mapping paths below. */
56 #define HAVE_BUFFER_STORAGE(context) (context->gl_vtable->BufferStorage != NULL)
/* compatibility definitions: fallback values for GL tokens that older GL
 * headers may lack; values follow the official GL registry.
 * NOTE(review): the matching #endif lines are not visible in this excerpt
 * (original line-number gaps) -- confirm against the full file. */
59 #ifndef GL_MAP_READ_BIT
60 #define GL_MAP_READ_BIT 0x0001
62 #ifndef GL_MAP_WRITE_BIT
63 #define GL_MAP_WRITE_BIT 0x0002
65 #ifndef GL_MAP_FLUSH_EXPLICIT_BIT
66 #define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010
68 #ifndef GL_MAP_PERSISTENT_BIT
69 #define GL_MAP_PERSISTENT_BIT 0x0040
71 #ifndef GL_DYNAMIC_STORAGE_BIT
72 #define GL_DYNAMIC_STORAGE_BIT 0x0100
74 #ifndef GL_CLIENT_STORAGE_BIT
75 #define GL_CLIENT_STORAGE_BIT 0x0200
77 #ifndef GL_COPY_READ_BUFFER
78 #define GL_COPY_READ_BUFFER 0x8F36
80 #ifndef GL_COPY_WRITE_BUFFER
81 #define GL_COPY_WRITE_BUFFER 0x8F37
/* File-private debug category for GL buffer logging ("glbuffer"). */
GST_DEBUG_CATEGORY_STATIC (GST_CAT_GL_BUFFER);
/* Fixed typo: was "GST_CAT_DEFUALT", which left GST_CAT_DEFAULT undefined so
 * any category-less GST_DEBUG/GST_LOG in this file would not log to the
 * "glbuffer" category. */
#define GST_CAT_DEFAULT GST_CAT_GL_BUFFER
/* Registers the GstGLBuffer boxed GType for the mini-object. */
GST_DEFINE_MINI_OBJECT_TYPE (GstGLBuffer, gst_gl_buffer);
/* Singleton allocator instance, created once in gst_gl_buffer_init_once()
 * and intentionally never freed. */
static GstAllocator *_gl_buffer_allocator;
/* GstGLBaseMemoryAllocatorCreateFunction: generates the GL buffer object id
 * for @gl_mem, binds it to gl_mem->target and allocates maxsize bytes of
 * storage -- immutable glBufferStorage() when available, otherwise a plain
 * mutable glBufferData().  Must run with @gl_mem's GL context current.
 * NOTE(review): this excerpt has interior gaps (missing return type, the
 * `flags` declaration, closing braces and the return statement); visible
 * lines kept byte-identical. */
92 _gl_buffer_create (GstGLBuffer * gl_mem, GError ** error)
94 const GstGLFuncs *gl = gl_mem->mem.context->gl_vtable;
96 gl->GenBuffers (1, &gl_mem->id);
97 gl->BindBuffer (gl_mem->target, gl_mem->id);
/* immutable-storage path: flags must cover every future MapBufferRange use */
98 if (HAVE_BUFFER_STORAGE (gl_mem->mem.context)) {
101 flags |= GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
102 /* allow access to GPU-side data while there are outstanding mappings */
103 flags |= GL_MAP_PERSISTENT_BIT;
104 /* match the glBufferData() below and make this buffer mutable */
105 flags |= GL_DYNAMIC_STORAGE_BIT;
106 /* hint that the data should be kept CPU-side. Fixes atrocious
107 * performance when e.g. libav decoders are direct-rendering into our
109 /* XXX: make this a fine-grained option. The current assumption here is
110 * that users signal GL_STREAM_DRAW for regularly updating video frames as
111 * is done by gstglmemorypbo */
112 if (gl_mem->usage_hints == GL_STREAM_DRAW) {
113 GST_CAT_DEBUG (GST_CAT_GL_BUFFER, "using client-side storage for "
114 "buffer %p %u", gl_mem, gl_mem->id);
115 flags |= GL_CLIENT_STORAGE_BIT;
118 gl->BufferStorage (gl_mem->target, gl_mem->mem.mem.maxsize, NULL, flags);
/* fallback: mutable storage honouring the caller-provided usage hint */
120 gl->BufferData (gl_mem->target, gl_mem->mem.mem.maxsize, NULL,
121 gl_mem->usage_hints);
/* unbind so the buffer binding does not leak into caller GL state */
123 gl->BindBuffer (gl_mem->target, 0);
/* Initializes a freshly allocated GstGLBuffer: stores the GL binding target
 * and usage hint, then chains to gst_gl_base_memory_init() for the common
 * GstMemory setup.  No GL calls are made here. */
135 _gl_buffer_init (GstGLBuffer * mem, GstAllocator * allocator,
136 GstMemory * parent, GstGLContext * context, guint gl_target, guint gl_usage,
137 const GstAllocationParams * params, gsize size)
139 mem->target = gl_target;
140 mem->usage_hints = gl_usage;
142 gst_gl_base_memory_init ((GstGLBaseMemory *) mem, allocator, parent, context,
143 params, size, NULL, NULL);
145 GST_CAT_DEBUG (GST_CAT_GL_BUFFER, "new GL buffer memory:%p size:%"
146 G_GSIZE_FORMAT, mem, mem->mem.mem.maxsize);
/* Allocates and initializes a new GstGLBuffer struct (zero-filled via
 * g_new0) and forwards all parameters to _gl_buffer_init().
 * NOTE(review): excerpt gaps -- the trailing arguments of the
 * _gl_buffer_init() call and the return statement are not visible here. */
150 _gl_buffer_new (GstAllocator * allocator, GstMemory * parent,
151 GstGLContext * context, guint gl_target, guint gl_usage,
152 const GstAllocationParams * params, gsize size)
154 GstGLBuffer *ret = g_new0 (GstGLBuffer, 1);
155 _gl_buffer_init (ret, allocator, parent, context, gl_target, gl_usage,
/* Translates GstMapFlags (GST_MAP_READ / GST_MAP_WRITE) into the matching
 * GL_MAP_*_BIT flags for glMapBufferRange().
 * NOTE(review): excerpt gaps -- declaration/initialization of `ret` and the
 * return statement are not visible here. */
162 gst_map_flags_to_gl (GstMapFlags flags)
166 if (flags & GST_MAP_READ)
167 ret |= GL_MAP_READ_BIT;
169 if (flags & GST_MAP_WRITE)
170 ret |= GL_MAP_WRITE_BIT;
/* Produces a CPU-readable data pointer for @mem.  With buffer storage
 * available it persistently maps the GL buffer on the first CPU mapping;
 * otherwise it keeps a separate system-memory copy and downloads pending
 * GPU-side data into it (via glMapBufferRange or glGetBufferSubData) when a
 * CPU read is requested.
 * NOTE(review): excerpt gaps -- several closing braces, a `gpointer data`
 * declaration and early-return paths are not visible here. */
176 gst_gl_buffer_cpu_access (GstGLBuffer * mem, GstMapInfo * info, gsize size)
178 const GstGLFuncs *gl = mem->mem.context->gl_vtable;
181 GST_CAT_LOG (GST_CAT_GL_BUFFER, "mapping %p id %d size %" G_GSIZE_FORMAT,
/* persistent-mapping fast path (requires glBufferStorage allocation) */
184 if (HAVE_BUFFER_STORAGE (mem->mem.context)) {
185 GLenum gl_map_flags = gst_map_flags_to_gl (info->flags);
187 gl_map_flags |= GL_MAP_PERSISTENT_BIT;
/* writes are flushed explicitly in _gl_buffer_unmap() */
188 if (info->flags & GST_MAP_WRITE)
189 gl_map_flags |= GL_MAP_FLUSH_EXPLICIT_BIT;
/* only the first CPU mapping actually maps the GL buffer; later mappings
 * reuse mem->mem.data */
191 if (mem->mem.map_count == (mem->mem.gl_map_count + 1)) {
192 gl->BindBuffer (mem->target, mem->id);
193 mem->mem.data = gl->MapBufferRange (mem->target, 0, size, gl_map_flags);
194 gl->BindBuffer (mem->target, 0);
197 return mem->mem.data;
/* non-persistent path: lazily allocate the system-memory shadow copy */
200 if (!gst_gl_base_memory_alloc_data (GST_GL_BASE_MEMORY_CAST (mem)))
205 /* The extra data pointer indirection/memcpy is needed for coherent across
206 * concurrent map()'s in both GL and CPU */
/* download only when GPU data is newer and the caller wants a CPU read */
207 if (GST_MEMORY_FLAG_IS_SET (mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD)
208 && (info->flags & GST_MAP_GL) == 0 && (info->flags & GST_MAP_READ) != 0) {
209 gl->BindBuffer (mem->target, mem->id);
211 if (gl->MapBufferRange) {
212 /* FIXME: optionally remove this with a flag and return the
213 * glMapBufferRange pointer (requires
214 * GL_ARB_buffer_storage/GL4/GL_COHERENT_BIT) */
215 guint gl_map_flags = GL_MAP_READ_BIT;
217 data = gl->MapBufferRange (mem->target, 0, size, gl_map_flags);
220 memcpy (mem->mem.data, data, size);
222 gl->UnmapBuffer (mem->target);
/* fallback when MapBufferRange is unavailable */
224 } else if (gl->GetBufferSubData) {
225 gl->GetBufferSubData (mem->target, 0, size, mem->mem.data);
230 gl->BindBuffer (mem->target, 0);
/* Uploads the CPU-side shadow copy (mem->mem.data) into the GL buffer when
 * there are pending CPU writes.  A no-op for persistent mappings (flush
 * happens at unmap) or when no data pointer exists yet.
 * NOTE(review): excerpt gaps -- the `size` parameter line, early returns,
 * `gpointer data` declaration and several closing braces are not visible. */
237 gst_gl_buffer_upload_cpu_write (GstGLBuffer * mem, GstMapInfo * info,
240 const GstGLFuncs *gl = mem->mem.context->gl_vtable;
244 /* no data pointer has been written */
247 GST_CAT_LOG (GST_CAT_GL_BUFFER, "mapping %p id %d size %" G_GSIZE_FORMAT,
250 if (HAVE_BUFFER_STORAGE (mem->mem.context)) {
251 /* flushing is done on unmap already */
255 /* The extra data pointer indirection/memcpy is needed for coherent across
256 * concurrent map()'s in both GL and CPU */
257 /* FIXME: uploading potentially half-written data for libav pushing READWRITE
/* upload if a previous CPU map wrote data, or GPU copy is stale */
259 if (GST_MEMORY_FLAG_IS_SET (mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD)
260 || (mem->mem.map_flags & GST_MAP_WRITE) != 0) {
261 gl->BindBuffer (mem->target, mem->id);
263 if (gl->MapBufferRange) {
264 /* FIXME: optionally remove this with a flag and return the
265 * glMapBufferRange pointer (requires
266 * GL_ARB_buffer_storage/GL4/GL_COHERENT_BIT) */
267 guint gl_map_flags = GL_MAP_WRITE_BIT;
269 data = gl->MapBufferRange (mem->target, 0, size, gl_map_flags);
272 memcpy (data, mem->mem.data, size);
274 gl->UnmapBuffer (mem->target);
/* fallback when MapBufferRange is unavailable */
275 } else if (gl->BufferSubData) {
276 gl->BufferSubData (mem->target, 0, size, mem->mem.data);
278 gl->BindBuffer (mem->target, 0);
/* GstGLBaseMemoryAllocatorMapFunction: for GL maps (GST_MAP_GL) it ensures
 * pending CPU writes are uploaded before a GL read, then binds the buffer;
 * CPU maps are delegated to gst_gl_buffer_cpu_access().
 * NOTE(review): excerpt gaps -- braces/else paths and the GL-map return
 * value are not visible here. */
283 _gl_buffer_map (GstGLBuffer * mem, GstMapInfo * info, gsize size)
285 const GstGLFuncs *gl = mem->mem.context->gl_vtable;
287 if ((info->flags & GST_MAP_GL) != 0) {
/* GL read: make sure the GPU copy is up to date first */
288 if (info->flags & GST_MAP_READ) {
289 gst_gl_buffer_upload_cpu_write (mem, info, size);
291 gl->BindBuffer (mem->target, mem->id);
294 return gst_gl_buffer_cpu_access (mem, info, size);
/* GstGLBaseMemoryAllocatorUnmapFunction: for persistent CPU mappings,
 * flushes written ranges and unmaps the GL buffer once the last CPU mapping
 * is released; for GL maps it simply unbinds the buffer.
 * NOTE(review): excerpt gaps -- closing braces and any trailing transfer
 * bookkeeping are not visible here. */
301 _gl_buffer_unmap (GstGLBuffer * mem, GstMapInfo * info)
303 const GstGLFuncs *gl = mem->mem.context->gl_vtable;
305 GST_CAT_LOG (GST_CAT_GL_BUFFER, "unmapping %p id %d size %" G_GSIZE_FORMAT,
306 mem, mem->id, info->size);
/* persistent-mapping path: only taken for CPU (non-GL) unmaps */
308 if (HAVE_BUFFER_STORAGE (mem->mem.context) && (info->flags & GST_MAP_GL) == 0) {
309 gl->BindBuffer (mem->target, mem->id);
/* matches GL_MAP_FLUSH_EXPLICIT_BIT set at map time */
311 if (info->flags & GST_MAP_WRITE)
312 gl->FlushMappedBufferRange (mem->target, 0, info->maxsize);
314 if (mem->mem.map_count == (mem->mem.gl_map_count + 1))
315 /* if this is our last cpu-mapping, unmap the GL buffer */
316 gl->UnmapBuffer (mem->target);
318 gl->BindBuffer (mem->target, 0);
320 if ((info->flags & GST_MAP_GL) != 0) {
321 gl->BindBuffer (mem->target, 0);
323 /* XXX: optimistically transfer data */
* gst_gl_buffer_copy_buffer_sub_data:
* @src: the source #GstGLBuffer
* @dest: the destination #GstGLBuffer
* @offset: the offset to copy from @src
* @size: the size to copy from @src
* Copies @src into @dest using glCopyBufferSubData().
* Returns: whether the copy operation succeeded
/* GPU-side copy helper.  Maps both memories with GST_MAP_GL to keep the
 * transfer bookkeeping coherent, then copies via the COPY_READ/COPY_WRITE
 * binding points so no other buffer bindings are disturbed.
 * NOTE(review): excerpt gaps -- return statements, unbind calls and the
 * offset/size arguments of CopyBufferSubData are not visible here. */
340 gst_gl_buffer_copy_buffer_sub_data (GstGLBuffer * src,
341 GstGLBuffer * dest, gssize offset, gssize size)
343 const GstGLFuncs *gl = src->mem.context->gl_vtable;
344 GstMapInfo sinfo, dinfo;
346 if (!gl->CopyBufferSubData)
347 /* This is GL(ES) 3.0+ only */
350 if (!gst_memory_map ((GstMemory *) src, &sinfo, GST_MAP_READ | GST_MAP_GL)) {
351 GST_CAT_WARNING (GST_CAT_GL_BUFFER,
352 "failed to read map source memory %p", src);
356 if (!gst_memory_map ((GstMemory *) dest, &dinfo, GST_MAP_WRITE | GST_MAP_GL)) {
357 GST_CAT_WARNING (GST_CAT_GL_BUFFER,
358 "failed to write map destination memory %p", dest);
/* undo the successful source map before bailing out */
359 gst_memory_unmap ((GstMemory *) src, &sinfo);
363 gl->BindBuffer (GL_COPY_READ_BUFFER, src->id);
364 gl->BindBuffer (GL_COPY_WRITE_BUFFER, dest->id);
365 gl->CopyBufferSubData (GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER,
368 gst_memory_unmap ((GstMemory *) src, &sinfo);
369 gst_memory_unmap ((GstMemory *) dest, &dinfo);
/* GstGLBaseMemoryAllocatorCopyFunction: copies @src into a newly allocated
 * GstGLBuffer with the same target/usage and maxsize.  When a CPU->GL
 * upload is still pending, the copy is done CPU-side so the freshest data
 * wins; otherwise glCopyBufferSubData() is tried first with a memcpy
 * fallback.  On failure, the partially-built @dest is unreffed.
 * NOTE(review): excerpt gaps -- `else` keyword, closing braces and return
 * statements are not visible here; remaining lines kept as-is. */
375 _gl_buffer_copy (GstGLBuffer * src, gssize offset, gssize size)
377 GstAllocator *allocator = src->mem.mem.allocator;
378 GstAllocationParams params = { 0, src->mem.mem.align, 0, 0 };
379 GstGLBuffer *dest = NULL;
/* fixed mis-encoded "¶ms" -> "&params" (mojibake of the HTML entity
 * "&para;" produced by a bad text conversion) */
381 dest = _gl_buffer_new (allocator, NULL, src->mem.context,
382 src->target, src->usage_hints, &params, src->mem.mem.maxsize);
384 /* If not doing a full copy, then copy to sysmem, the 2D representation of
385 * the texture would become wrong */
386 if (GST_MEMORY_FLAG_IS_SET (src, GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD)) {
387 if (!gst_gl_base_memory_memcpy (GST_GL_BASE_MEMORY_CAST (src),
388 GST_GL_BASE_MEMORY_CAST (dest), offset, size)) {
389 GST_CAT_WARNING (GST_CAT_GL_BUFFER, "Could not copy GL Buffer");
390 gst_memory_unref (GST_MEMORY_CAST (dest));
/* GPU-side copy preferred; fall back to a CPU memcpy on failure */
394 if (!gst_gl_buffer_copy_buffer_sub_data (src, dest, offset, size)) {
395 if (!gst_gl_base_memory_memcpy (GST_GL_BASE_MEMORY_CAST (src),
396 GST_GL_BASE_MEMORY_CAST (dest), offset, size)) {
397 GST_CAT_WARNING (GST_CAT_GL_BUFFER, "Could not copy GL Buffer");
398 gst_memory_unref (GST_MEMORY_CAST (dest));
/* GstAllocatorClass.alloc override: direct gst_allocator_alloc() on this
 * allocator is unsupported -- callers must go through
 * gst_gl_base_memory_alloc().  Logs a critical and (per the class contract)
 * returns no memory.
 * NOTE(review): excerpt gaps -- the rest of the message string and the
 * return statement are not visible here. */
408 _gl_buffer_alloc (GstAllocator * allocator, gsize size,
409 GstAllocationParams * params)
411 g_critical ("Need to use gst_gl_base_memory_alloc() to allocate from "
/* GstGLBaseMemoryAllocatorDestroyFunction: deletes the GL buffer object.
 * Runs with the memory's GL context current. */
418 _gl_buffer_destroy (GstGLBuffer * mem)
420 const GstGLFuncs *gl = mem->mem.context->gl_vtable;
422 gl->DeleteBuffers (1, &mem->id);
/* GstGLAllocationParamsCopyFunc implementation: deep-copies the base
 * allocation params, then the buffer-specific target and usage fields. */
426 _gst_gl_buffer_allocation_params_copy_data (GstGLBufferAllocationParams * src,
427 GstGLBufferAllocationParams * dest)
429 memset (dest, 0, sizeof (*dest));
431 gst_gl_allocation_params_copy_data (&src->parent, &dest->parent);
433 dest->gl_target = src->gl_target;
434 dest->gl_usage = src->gl_usage;
/* GstGLAllocationParamsFreeFunc implementation: releases resources held by
 * the base allocation params.
 * NOTE(review): excerpt gaps -- the parameter name line and braces are not
 * visible; "¶ms" below is mojibake of "&params" ("&para;" mis-decode) --
 * fix when restoring the full file. */
438 _gst_gl_buffer_allocation_params_free_data (GstGLBufferAllocationParams *
441 gst_gl_allocation_params_free_data (¶ms->parent);
/* Boxed GType for GstGLBufferAllocationParams; copy/free go through the
 * generic GstGLAllocationParams vfuncs (which dispatch to the
 * _copy_data/_free_data implementations above). */
G_DEFINE_BOXED_TYPE (GstGLBufferAllocationParams,
    gst_gl_buffer_allocation_params,
    (GBoxedCopyFunc) gst_gl_allocation_params_copy,
    (GBoxedFreeFunc) gst_gl_allocation_params_free);
* gst_gl_buffer_allocation_params_new:
* @context: a #GstGLContext
* @alloc_size: the size in bytes to allocate
* @alloc_params: (allow-none): the #GstAllocationParams to use
* @gl_target: the OpenGL target to allocate
* @gl_usage: the OpenGL usage hint to allocate with
* Returns: a new #GstGLBufferAllocationParams for allocating OpenGL buffer
/* Builds allocation params for a GL buffer: initializes the common
 * GstGLAllocationParams base with BUFFER|ALLOC flags and the copy/free
 * hooks above, then records the GL target and usage hint.
 * NOTE(review): excerpt gaps -- the error path after _params_init (likely
 * freeing @params and returning NULL) and the final return are not
 * visible here. */
462 GstGLBufferAllocationParams *
463 gst_gl_buffer_allocation_params_new (GstGLContext * context, gsize alloc_size,
464 const GstAllocationParams * alloc_params, guint gl_target, guint gl_usage)
466 GstGLBufferAllocationParams *params;
468 g_return_val_if_fail (GST_IS_GL_CONTEXT (context), NULL);
469 g_return_val_if_fail (alloc_size > 0, NULL);
471 params = g_new0 (GstGLBufferAllocationParams, 1);
473 if (!gst_gl_allocation_params_init (&params->parent, sizeof (*params),
474 GST_GL_ALLOCATION_PARAMS_ALLOC_FLAG_BUFFER |
475 GST_GL_ALLOCATION_PARAMS_ALLOC_FLAG_ALLOC,
476 (GstGLAllocationParamsCopyFunc)
477 _gst_gl_buffer_allocation_params_copy_data,
478 (GstGLAllocationParamsFreeFunc)
479 _gst_gl_buffer_allocation_params_free_data, context, alloc_size,
480 alloc_params, NULL, 0, NULL, NULL)) {
485 params->gl_target = gl_target;
486 params->gl_usage = gl_usage;
/* GstGLBaseMemoryAllocatorAllocFunction: validates that @params carries the
 * BUFFER and ALLOC flags, then builds the GstGLBuffer via _gl_buffer_new()
 * from the params' context, target, usage and size. */
492 _gl_buffer_alloc_mem (GstGLBufferAllocator * allocator,
493 GstGLBufferAllocationParams * params)
495 guint alloc_flags = params->parent.alloc_flags;
497 g_return_val_if_fail (alloc_flags &
498 GST_GL_ALLOCATION_PARAMS_ALLOC_FLAG_BUFFER, NULL);
499 g_return_val_if_fail (alloc_flags & GST_GL_ALLOCATION_PARAMS_ALLOC_FLAG_ALLOC,
502 return _gl_buffer_new (GST_ALLOCATOR (allocator), NULL,
503 params->parent.context, params->gl_target, params->gl_usage,
504 params->parent.alloc_params, params->parent.alloc_size);
/* GObject type registration: GstGLBufferAllocator derives from the common
 * GL base-memory allocator. */
G_DEFINE_TYPE (GstGLBufferAllocator, gst_gl_buffer_allocator,
    GST_TYPE_GL_BASE_MEMORY_ALLOCATOR);
/* Class init: wires the GL base-memory vfuncs to the _gl_buffer_*
 * implementations above and overrides GstAllocator.alloc to forbid direct
 * allocation.
 * NOTE(review): excerpt gaps -- the `gl_base->destroy =` line head is not
 * visible (only its cast continuation at original line 524). */
511 gst_gl_buffer_allocator_class_init (GstGLBufferAllocatorClass * klass)
513 GstAllocatorClass *allocator_class = (GstAllocatorClass *) klass;
514 GstGLBaseMemoryAllocatorClass *gl_base;
516 gl_base = (GstGLBaseMemoryAllocatorClass *) klass;
518 gl_base->alloc = (GstGLBaseMemoryAllocatorAllocFunction) _gl_buffer_alloc_mem;
519 gl_base->create = (GstGLBaseMemoryAllocatorCreateFunction) _gl_buffer_create;
520 gl_base->map = (GstGLBaseMemoryAllocatorMapFunction) _gl_buffer_map;
521 gl_base->unmap = (GstGLBaseMemoryAllocatorUnmapFunction) _gl_buffer_unmap;
522 gl_base->copy = (GstGLBaseMemoryAllocatorCopyFunction) _gl_buffer_copy;
524 (GstGLBaseMemoryAllocatorDestroyFunction) _gl_buffer_destroy;
526 allocator_class->alloc = _gl_buffer_alloc;
/* Instance init: sets the memory type name and marks the allocator as
 * custom-alloc so generic gst_allocator_alloc() is not used. */
530 gst_gl_buffer_allocator_init (GstGLBufferAllocator * allocator)
532 GstAllocator *alloc = GST_ALLOCATOR_CAST (allocator);
534 alloc->mem_type = GST_GL_BUFFER_ALLOCATOR_NAME;
536 GST_OBJECT_FLAG_SET (allocator, GST_ALLOCATOR_FLAG_CUSTOM_ALLOC);
* gst_gl_buffer_init_once:
* Initializes the GL Buffer allocator. It is safe to call this function
* multiple times. This must be called before any other #GstGLBuffer operation.
/* One-time setup guarded by g_once_init_enter/leave: initializes the base
 * GL memory system, the debug category, then creates, sinks and registers
 * the singleton allocator (extra ref passed to gst_allocator_register). */
548 gst_gl_buffer_init_once (void)
550 static gsize _init = 0;
552 if (g_once_init_enter (&_init)) {
553 gst_gl_base_memory_init_once ();
555 GST_DEBUG_CATEGORY_INIT (GST_CAT_GL_BUFFER, "glbuffer", 0, "OpenGL Buffer");
557 _gl_buffer_allocator =
558 g_object_new (gst_gl_buffer_allocator_get_type (), NULL);
559 gst_object_ref_sink (_gl_buffer_allocator);
561 /* The allocator is never unreffed */
562 GST_OBJECT_FLAG_SET (_gl_buffer_allocator, GST_OBJECT_FLAG_MAY_BE_LEAKED);
564 gst_allocator_register (GST_GL_BUFFER_ALLOCATOR_NAME,
565 gst_object_ref (_gl_buffer_allocator));
566 g_once_init_leave (&_init, 1);
* Returns: whether the memory at @mem is a #GstGLBuffer
/* Type check: TRUE iff @mem is non-NULL and its allocator's GType is (or
 * derives from) GST_TYPE_GL_BUFFER_ALLOCATOR.
 * NOTE(review): excerpt gaps -- the opening of the gtk-doc comment and the
 * function's return type line are not visible here. */
579 gst_is_gl_buffer (GstMemory * mem)
581 return mem != NULL && mem->allocator != NULL &&
582 g_type_is_a (G_OBJECT_TYPE (mem->allocator),
583 GST_TYPE_GL_BUFFER_ALLOCATOR);