#include <assert.h>
#include <errno.h>
#include <xf86drm.h>
+#include <pthread.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "drm.h"
typedef struct _bufmgr_fake {
dri_bufmgr bufmgr;
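+   /* Protects the bufmgr's buffer and block state below; public entry
+    * points take it, and _locked helpers assume it is already held. */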
+ pthread_mutex_t lock;
+
unsigned long low_offset;
unsigned long size;
void *virtual;
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- if (bo_fake->block == NULL || !bo_fake->block->fenced)
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
+ if (bo_fake->block == NULL || !bo_fake->block->fenced) {
+ pthread_mutex_unlock(&bufmgr_fake->lock);
return;
+ }
_fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
}
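/*
 * Note the shape of the locking above: the early return had to grow
 * braces so that every exit path drops bufmgr_fake->lock.  The same
 * take/release bracketing around each public entry point repeats
 * throughout the rest of this patch.
 */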
/* Specifically ignore texture memory sharing.
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
struct block *block, *tmp;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
bufmgr_fake->need_fence = 1;
bufmgr_fake->fail = 0;
assert(_fence_test(bufmgr_fake, block->fence));
set_dirty(block->bo);
}
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
}
static dri_bo *
static void
dri_fake_bo_reference(dri_bo *bo)
{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ pthread_mutex_lock(&bufmgr_fake->lock);
bo_fake->refcount++;
+ pthread_mutex_unlock(&bufmgr_fake->lock);
}
static void
-dri_fake_bo_unreference(dri_bo *bo)
+dri_fake_bo_reference_locked(dri_bo *bo)
+{
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ bo_fake->refcount++;
+}
+
+static void
+dri_fake_bo_unreference_locked(dri_bo *bo)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
int i;
- if (!bo)
- return;
-
if (--bo_fake->refcount == 0) {
assert(bo_fake->map_count == 0);
/* No remaining references, so free it */
free_backing_store(bo);
for (i = 0; i < bo_fake->nr_relocs; i++)
- dri_bo_unreference(bo_fake->relocs[i].target_buf);
+ dri_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
free(bo_fake->relocs);
free(bo);
-
- return;
}
}
+static void
+dri_fake_bo_unreference(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ dri_fake_bo_unreference_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+}
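/*
 * The split above is the core pattern of this patch: the public entry
 * point (dri_fake_bo_unreference) takes the bufmgr lock and defers to a
 * _locked variant, while internal callers that already hold the lock use
 * the _locked variant directly -- pthread mutexes are not recursive by
 * default, so re-entering through the public function would self-deadlock.
 * A minimal standalone sketch of the shape, using a hypothetical
 * refcounted "obj" type rather than intel_bufmgr's types:
 */
#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t *lock;	/* manager-wide lock shared by all objs */
	int refcount;
	struct obj *target;	/* reference held on another obj, or NULL */
};

/* Caller must already hold *o->lock. */
static void
obj_unreference_locked(struct obj *o)
{
	if (--o->refcount == 0) {
		if (o->target != NULL)
			obj_unreference_locked(o->target); /* lock held: no re-lock */
		free(o);
	}
}

/* Public entry point: take the lock, then defer to the _locked variant. */
static void
obj_unreference(struct obj *o)
{
	pthread_mutex_t *lock = o->lock;	/* o may be freed below */

	pthread_mutex_lock(lock);
	obj_unreference_locked(o);
	pthread_mutex_unlock(lock);
}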
+
/**
* Set the buffer as not requiring backing store, and instead get the callback
* invoked whenever it would be set dirty.
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
if (bo_fake->backing_store)
free_backing_store(bo);
*/
if (invalidate_cb != NULL)
invalidate_cb(bo, ptr);
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
}
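/*
 * Usage sketch for the invalidate callback above: a caller that keeps the
 * authoritative copy of the data itself can skip backing store and simply
 * be told when the aperture copy is discarded.  struct my_texture and the
 * setup call below are hypothetical; the callback signature (bo, ptr)
 * matches the invalidate_cb(bo, ptr) invocation above.
 */
struct my_texture {
	dri_bo *bo;
	int needs_upload;
};

static void
invalidate_texture(dri_bo *bo, void *ptr)
{
	/* Contents are gone; re-upload from the driver's copy before use. */
	((struct my_texture *)ptr)->needs_upload = 1;
}

/* At setup time (assuming the tree's fake-bufmgr entry point):
 *	intel_bo_fake_disable_backing_store(tex->bo, invalidate_texture, tex);
 */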
/**
* BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
*/
static int
-dri_fake_bo_map(dri_bo *bo, int write_enable)
+dri_fake_bo_map_locked(dri_bo *bo, int write_enable)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
}
static int
-dri_fake_bo_unmap(dri_bo *bo)
+dri_fake_bo_map(dri_bo *bo, int write_enable)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ ret = dri_fake_bo_map_locked(bo, write_enable);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return ret;
+}
+
+static int
+dri_fake_bo_unmap_locked(dri_bo *bo)
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
return 0;
}
+static int
+dri_fake_bo_unmap(dri_bo *bo)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_fake->lock);
+ ret = dri_fake_bo_unmap_locked(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
+ return ret;
+}
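/*
 * The map/unmap _locked variants exist for the same reason as the
 * reference/unreference ones: relocation processing further down rewrites
 * buffer contents while bufmgr_fake->lock is already held, and calling the
 * public dri_bo_map()/dri_bo_unmap() from there would deadlock on the
 * non-recursive mutex.
 */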
+
static void
dri_fake_kick_all(dri_bufmgr_fake *bufmgr_fake)
{
struct block *block, *tmp;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
bufmgr_fake->performed_rendering = 0;
/* okay for every BO that is on the HW kick it off.
   seriously not afraid of the POLICE right now */
if (!(bo_fake->flags & BM_NO_BACKING_STORE))
bo_fake->dirty = 1;
}
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
}
static int
dri_bufmgr_fake *bufmgr_fake;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- /* XXX: Sanity-check whether we've already validated this one under
- * different flags. See drmAddValidateItem().
- */
bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
{
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
+ pthread_mutex_destroy(&bufmgr_fake->lock);
mmDestroy(bufmgr_fake->heap);
free(bufmgr);
}
dri_bo_fake *target_fake = (dri_bo_fake *)target_buf;
int i;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
assert(reloc_buf);
assert(target_buf);
assert(reloc_fake->nr_relocs <= MAX_RELOCS);
- dri_bo_reference(target_buf);
+ dri_fake_bo_reference_locked(target_buf);
if (!target_fake->is_static)
reloc_fake->child_size += ALIGN(target_buf->size, target_fake->alignment);
}
}
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
return 0;
}
ret = dri_fake_reloc_and_validate_buffer(r->target_buf);
if (ret != 0) {
if (bo->virtual != NULL)
- dri_bo_unmap(bo);
+ dri_fake_bo_unmap_locked(bo);
return ret;
}
}
reloc_data = r->target_buf->offset + r->delta;
if (bo->virtual == NULL)
- dri_bo_map(bo, 1);
+ dri_fake_bo_map_locked(bo, 1);
*(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
}
if (bo->virtual != NULL)
- dri_bo_unmap(bo);
+ dri_fake_bo_unmap_locked(bo);
if (bo_fake->write_domain != 0) {
if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
int ret;
int retry_count = 0;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
bufmgr_fake->performed_rendering = 0;
dri_fake_calculate_domains(bo);
dri_bo_fake_post_submit(bo);
+ pthread_mutex_unlock(&bufmgr_fake->lock);
+
return 0;
}
dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bufmgr;
struct block *block, *tmp;
+ pthread_mutex_lock(&bufmgr_fake->lock);
+
bufmgr_fake->need_fence = 1;
bufmgr_fake->fail = 0;
/* Releases the memory, and memcpys dirty contents out if necessary. */
free_block(bufmgr_fake, block);
}
+
+ pthread_mutex_unlock(&bufmgr_fake->lock);
}
void intel_bufmgr_fake_set_last_dispatch(dri_bufmgr *bufmgr,
volatile unsigned int *last_dispatch)
bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
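+   /*
+    * The mutex is embedded in a heap allocation, so it must be set up
+    * with pthread_mutex_init(); PTHREAD_MUTEX_INITIALIZER is meant for
+    * statically allocated mutexes.  The matching pthread_mutex_destroy()
+    * is in the bufmgr destroy path.
+    */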
+ if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
+ free(bufmgr_fake);
+ return NULL;
+ }
+
/* Initialize allocator */
DRMINITLISTHEAD(&bufmgr_fake->fenced);
DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
#include <string.h>
#include <unistd.h>
#include <assert.h>
+#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
int max_relocs;
+ pthread_mutex_t lock;
+
struct drm_i915_gem_exec_object *exec_objects;
dri_bo **exec_bos;
int exec_size;
dri_bo_gem *next;
};
+static void dri_gem_bo_reference_locked(dri_bo *bo);
+
static int
logbase2(int n)
{
bufmgr_gem->exec_objects[index].alignment = 0;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
- dri_bo_reference(bo);
+ dri_gem_bo_reference_locked(bo);
bufmgr_gem->exec_count++;
}
bo_size = page_size;
}
+ pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a buffer out of the cache if available */
if (bucket != NULL && bucket->num_entries > 0) {
struct drm_i915_gem_busy busy;
bucket->num_entries--;
}
}
+ pthread_mutex_unlock(&bufmgr_gem->lock);
if (!alloc_from_cache) {
struct drm_i915_gem_create create;
static void
dri_gem_bo_reference(dri_bo *bo)
{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ bo_gem->refcount++;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
+static void
+dri_gem_bo_reference_locked(dri_bo *bo)
+{
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
bo_gem->refcount++;
}
static void
-dri_gem_bo_unreference(dri_bo *bo)
+dri_gem_bo_unreference_locked(dri_bo *bo)
{
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
- if (!bo)
- return;
-
if (--bo_gem->refcount == 0) {
struct dri_gem_bo_bucket *bucket;
/* Unreference all the target buffers */
for (i = 0; i < bo_gem->reloc_count; i++)
- dri_bo_unreference(bo_gem->reloc_target_bo[i]);
+ dri_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
free(bo_gem->reloc_target_bo);
free(bo_gem->relocs);
}
} else {
dri_gem_bo_free(bo);
}
-
- return;
}
}
+static void
+dri_gem_bo_unreference(dri_bo *bo)
+{
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ dri_gem_bo_unreference_locked(bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+}
+
static int
dri_gem_bo_map(dri_bo *bo, int write_enable)
{
- dri_bufmgr_gem *bufmgr_gem;
+ dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
- bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
+ pthread_mutex_lock(&bufmgr_gem->lock);
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
bo_gem->swrast = 1;
}
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
return 0;
}
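/*
 * "Recursive mapping" above means nested dri_bo_map() calls on the same
 * buffer are legal: a second map finds the buffer already mapped and
 * reuses bo->virtual instead of mapping again.  Sketch (hedged on the
 * exact bookkeeping in this tree):
 *
 *	dri_bo_map(bo, 1);	// first map: roughly a GEM mmap + set_domain
 *	dri_bo_map(bo, 1);	// nested map: already mapped, cheap
 *	memset(bo->virtual, 0, bo->size);
 *	dri_bo_unmap(bo);
 */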
assert(bo_gem->mapped);
+ pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->swrast) {
sw_finish.handle = bo_gem->gem_handle;
do {
} while (ret == -1 && errno == EINTR);
bo_gem->swrast = 0;
}
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return 0;
}
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
+ pthread_mutex_destroy(&bufmgr_gem->lock);
+
/* Free any cached buffer objects we were going to reuse */
for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL)
intel_setup_reloc_list(bo);
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
- dri_bo_reference(target_bo);
+ dri_gem_bo_reference_locked(target_bo);
bo_gem->reloc_count++;
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
return 0;
}
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
+ pthread_mutex_lock(&bufmgr_gem->lock);
/* Update indices and set up the validate list. */
dri_gem_bo_process_reloc(bo);
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
- dri_bo_unreference(bo);
+ dri_gem_bo_unreference_locked(bo);
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return 0;
}
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
bufmgr_gem->fd = fd;
+ if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+ free(bufmgr_gem);
+ return NULL;
+ }
+
/* Let's go with one relocation per every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
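/*
 * Worked example of that sizing heuristic (numbers assumed, not from this
 * patch): a 16384-byte batch is 16384 / 4 = 4096 dwords, so one relocation
 * per two dwords suggests 2048 entries; backing off a few entries keeps
 * the relocation array just under a power-of-two size so the allocation
 * does not spill into an extra page.
 */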