#include "state_tracker/st_cb_bufferobjects.h"
+#include "util/u_inlines.h"
/* Debug flags */
/*#define VBO_DEBUG*/
/*#define BOUNDS_CHECK*/
}
}
+/**
+ * Release the pipe_resource backing \p obj.
+ *
+ * If the owning context holds batched private references (see
+ * obj->private_refcount), they are subtracted from the resource's atomic
+ * refcount first so the final unreference is balanced, then the
+ * gl_buffer_object's resource pointer is unreferenced and cleared.
+ * No-op when obj->buffer is already NULL.
+ */
+void
+mesa_buffer_object_release_buffer(struct gl_buffer_object *obj)
+{
+   if (!obj->buffer)
+      return;
+
+   /* Subtract the remaining private references before unreferencing
+    * the buffer. See the header file for explanation.
+    */
+   if (obj->private_refcount) {
+      assert(obj->private_refcount > 0);
+      p_atomic_add(&obj->buffer->reference.count,
+                   -obj->private_refcount);
+      obj->private_refcount = 0;
+   }
+   obj->private_refcount_ctx = NULL;
+
+   pipe_resource_reference(&obj->buffer, NULL);
+}
+
/**
* Delete a buffer object.
*
_mesa_delete_buffer_object(struct gl_context *ctx,
struct gl_buffer_object *bufObj)
{
- (void) ctx;
+ assert(bufObj->RefCount == 0);
+ _mesa_buffer_unmap_all_mappings(ctx, bufObj);
+ mesa_buffer_object_release_buffer(bufObj);
vbo_delete_minmax_cache(bufObj);
align_free(bufObj->Data);
*/
if (shared_binding || ctx != oldObj->Ctx) {
if (p_atomic_dec_zero(&oldObj->RefCount)) {
- st_bufferobj_free(ctx, oldObj);
+ _mesa_delete_buffer_object(ctx, oldObj);
}
} else if (ctx == oldObj->Ctx) {
/* Update the private ref count. */
_mesa_HashUnlockMutex(ctx->Shared->BufferObjects);
}
+/**
+ * Allocate and initialize a new gl_buffer_object with the given name \p id.
+ *
+ * Returns NULL on allocation failure.  The object starts with no backing
+ * pipe_resource; storage is attached later by the caller.
+ */
+struct gl_buffer_object *
+_mesa_internal_buffer_object_alloc(struct gl_context *ctx, GLuint id)
+{
+   struct gl_buffer_object *buf = CALLOC_STRUCT(gl_buffer_object);
+   if (!buf)
+      return NULL;
+
+   _mesa_initialize_buffer_object(ctx, buf, id);
+   return buf;
+}
/**
* Create a buffer object that will be backed by an OpenGL buffer ID
* where the creating context will hold one global buffer reference instead
static struct gl_buffer_object *
new_gl_buffer_object(struct gl_context *ctx, GLuint id)
{
- struct gl_buffer_object *buf = st_bufferobj_alloc(ctx, id);
+ struct gl_buffer_object *buf = _mesa_internal_buffer_object_alloc(ctx, id);
buf->Ctx = ctx;
buf->RefCount++; /* global buffer reference held by the context */
* Internal functions
*/
+/**
+ * Return obj->buffer while taking one reference to it, or NULL if \p obj
+ * or its buffer is NULL.
+ *
+ * Fast path: only the context recorded in obj->private_refcount_ctx may
+ * hand out references by decrementing the non-atomic private_refcount,
+ * which was pre-charged into the resource's atomic refcount in a batch of
+ * 100000000.  This skips one atomic increment per call for that context.
+ * Every other context takes the slow path with a real atomic increment.
+ */
+static inline struct pipe_resource *
+_mesa_get_buffer_object_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
+{
+   if (unlikely(!obj))
+      return NULL;
+
+   struct pipe_resource *buffer = obj->buffer;
+
+   if (unlikely(!buffer))
+      return NULL;
+
+   /* Only one context is using the fast path. All other contexts must use
+    * the slow path.
+    */
+   if (unlikely(obj->private_refcount_ctx != ctx)) {
+      p_atomic_inc(&buffer->reference.count);
+      return buffer;
+   }
+
+   if (unlikely(obj->private_refcount <= 0)) {
+      assert(obj->private_refcount == 0);
+
+      /* This is the number of atomic increments we will skip. */
+      obj->private_refcount = 100000000;
+      p_atomic_add(&buffer->reference.count, obj->private_refcount);
+   }
+
+   /* Return a buffer reference while decrementing the private refcount. */
+   obj->private_refcount--;
+   return buffer;
+}
+
+/* Allocate and initialize a new gl_buffer_object with the given name. */
+struct gl_buffer_object *
+_mesa_internal_buffer_object_alloc(struct gl_context *ctx, GLuint id);
+
+/* Release (unreference) the pipe_resource backing a gl_buffer_object,
+ * first subtracting any batched private references.
+ */
+void
+mesa_buffer_object_release_buffer(struct gl_buffer_object *obj);
/** Is the given buffer object currently mapped by the GL user? */
static inline GLboolean
{
assert(ctx->GLThread.SupportsBufferUploads);
- struct gl_buffer_object *obj = st_bufferobj_alloc(ctx, -1);
+ struct gl_buffer_object *obj =
+ _mesa_internal_buffer_object_alloc(ctx, -1);
if (!obj)
return NULL;
GL_WRITE_ONLY,
GL_CLIENT_STORAGE_BIT | GL_MAP_WRITE_BIT,
obj)) {
- st_bufferobj_free(ctx, obj);
+ _mesa_delete_buffer_object(ctx, obj);
return NULL;
}
MESA_MAP_THREAD_SAFE_BIT,
obj, MAP_GLTHREAD);
if (!*ptr) {
- st_bufferobj_free(ctx, obj);
+ _mesa_delete_buffer_object(ctx, obj);
return NULL;
}
/* Set the vertex buffer. */
if (binding->BufferObj) {
vbuffer[bufidx].buffer.resource =
- st_get_buffer_reference(ctx, binding->BufferObj);
+ _mesa_get_buffer_object_reference(ctx, binding->BufferObj);
vbuffer[bufidx].is_user_buffer = false;
vbuffer[bufidx].buffer_offset = binding->Offset +
attrib->RelativeOffset;
if (binding->BufferObj) {
/* Set the binding */
vbuffer[bufidx].buffer.resource =
- st_get_buffer_reference(ctx, binding->BufferObj);
+ _mesa_get_buffer_object_reference(ctx, binding->BufferObj);
vbuffer[bufidx].is_user_buffer = false;
vbuffer[bufidx].buffer_offset = _mesa_draw_binding_offset(binding);
} else {
#include "util/u_upload_mgr.h"
#include "cso_cache/cso_context.h"
+#include "main/bufferobj.h"
#include "st_debug.h"
#include "st_context.h"
#include "st_atom.h"
binding =
&st->ctx->UniformBufferBindings[prog->sh.UniformBlocks[i]->Binding];
- cb.buffer = st_get_buffer_reference(st->ctx, binding->BufferObject);
+ cb.buffer = _mesa_get_buffer_object_reference(st->ctx, binding->BufferObject);
if (cb.buffer) {
cb.buffer_offset = binding->Offset;
/**
- * There is some duplication between mesa's bufferobjects and our
- * bufmgr buffers. Both have an integer handle and a hashtable to
- * lookup an opaque structure. It would be nice if the handles and
- * internal structure where somehow shared.
- */
-struct gl_buffer_object *
-st_bufferobj_alloc(struct gl_context *ctx, GLuint name)
-{
- struct gl_buffer_object *obj = ST_CALLOC_STRUCT(gl_buffer_object);
-
- if (!obj)
- return NULL;
-
- _mesa_initialize_buffer_object(ctx, obj, name);
-
- return obj;
-}
-
-
-static void
-release_buffer(struct gl_buffer_object *obj)
-{
- if (!obj->buffer)
- return;
-
- /* Subtract the remaining private references before unreferencing
- * the buffer. See the header file for explanation.
- */
- if (obj->private_refcount) {
- assert(obj->private_refcount > 0);
- p_atomic_add(&obj->buffer->reference.count,
- -obj->private_refcount);
- obj->private_refcount = 0;
- }
- obj->private_refcount_ctx = NULL;
-
- pipe_resource_reference(&obj->buffer, NULL);
-}
-
-
-/**
- * Deallocate/free a vertex/pixel buffer object.
- * Called via glDeleteBuffersARB().
- */
-void st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj)
-{
- assert(obj->RefCount == 0);
- _mesa_buffer_unmap_all_mappings(ctx, obj);
- release_buffer(obj);
- _mesa_delete_buffer_object(ctx, obj);
-}
-
-
-
-/**
* Replace data in a subrange of buffer object. If the data range
* specified by size + offset extends beyond the end of the buffer or
* if data is NULL, no copy is performed.
obj->Usage = usage;
obj->StorageFlags = storageFlags;
- release_buffer(obj);
+ mesa_buffer_object_release_buffer(obj);
unsigned bindings = buffer_target_to_bind_flags(target);
st_init_bufferobject_functions(struct pipe_screen *screen,
struct dd_function_table *functions);
-static inline struct pipe_resource *
-st_get_buffer_reference(struct gl_context *ctx, struct gl_buffer_object *obj)
-{
- if (unlikely(!obj))
- return NULL;
-
- struct pipe_resource *buffer = obj->buffer;
-
- if (unlikely(!buffer))
- return NULL;
-
- /* Only one context is using the fast path. All other contexts must use
- * the slow path.
- */
- if (unlikely(obj->private_refcount_ctx != ctx)) {
- p_atomic_inc(&buffer->reference.count);
- return buffer;
- }
-
- if (unlikely(obj->private_refcount <= 0)) {
- assert(obj->private_refcount == 0);
-
- /* This is the number of atomic increments we will skip. */
- obj->private_refcount = 100000000;
- p_atomic_add(&buffer->reference.count, obj->private_refcount);
- }
-
- /* Return a buffer reference while decrementing the private refcount. */
- obj->private_refcount--;
- return buffer;
-}
-
-struct gl_buffer_object *st_bufferobj_alloc(struct gl_context *ctx, GLuint name);
-void st_bufferobj_free(struct gl_context *ctx, struct gl_buffer_object *obj);
void st_bufferobj_subdata(struct gl_context *ctx,
GLintptrARB offset,
GLsizeiptrARB size,
* the threaded batch buffer.
*/
info->index.resource =
- st_get_buffer_reference(ctx, info->index.gl_bo);
+ _mesa_get_buffer_object_reference(ctx, info->index.gl_bo);
info->take_index_buffer_ownership = true;
} else {
info->index.resource = info->index.gl_bo->buffer;
{
struct gl_context *ctx = gl_context_from_vbo_exec(exec);
- exec->vtx.bufferobj = st_bufferobj_alloc(ctx, IMM_BUFFER_NAME);
+ exec->vtx.bufferobj = _mesa_internal_buffer_object_alloc(ctx, IMM_BUFFER_NAME);
exec->vtx.enabled = u_bit_consecutive64(0, VBO_ATTRIB_MAX); /* reset all */
vbo_reset_all_attr(exec);
if (total_bytes_needed > available_bytes) {
if (save->current_bo)
_mesa_reference_buffer_object(ctx, &save->current_bo, NULL);
- save->current_bo = st_bufferobj_alloc(ctx, VBO_BUF_ID + 1);
+ save->current_bo = _mesa_internal_buffer_object_alloc(ctx, VBO_BUF_ID + 1);
bool success = st_bufferobj_data(ctx,
GL_ELEMENT_ARRAY_BUFFER_ARB,
MAX2(total_bytes_needed, VBO_SAVE_BUFFER_SIZE),
node->draw_begins = node->cold->prims[0].begin;
if (!save->current_bo) {
- save->current_bo = st_bufferobj_alloc(ctx, VBO_BUF_ID + 1);
+ save->current_bo = _mesa_internal_buffer_object_alloc(ctx, VBO_BUF_ID + 1);
bool success = st_bufferobj_data(ctx,
GL_ELEMENT_ARRAY_BUFFER_ARB,
VBO_SAVE_BUFFER_SIZE,