bof.c \
evergreen_hw_context.c \
radeon_bo.c \
- radeon_bo_pb.c \
radeon_pciid.c \
r600.c \
r600_bo.c \
r600_drm.c \
- r600_hw_context.c
+ r600_hw_context.c \
+ r600_bomgr.c
LIBRARY_INCLUDES = -I$(TOP)/src/gallium/drivers/r600 \
$(shell pkg-config libdrm --cflags-only-I)
#include "pipe/p_compiler.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
-#include <pipebuffer/pb_bufmgr.h>
#include "r600_priv.h"
#define GROUP_FORCE_NEW_BLOCK 0
#include "radeon_drm.h"
#include "pipe/p_compiler.h"
#include "util/u_inlines.h"
-#include <pipebuffer/pb_bufmgr.h>
#include "r600_priv.h"
enum radeon_family r600_get_family(struct radeon *r600)
unsigned size, unsigned alignment,
unsigned binding, unsigned usage)
{
- struct r600_bo *ws_bo = calloc(1, sizeof(struct r600_bo));
- struct pb_desc desc;
- struct pb_manager *man;
+ struct r600_bo *bo;
+ struct radeon_bo *rbo;
- desc.alignment = alignment;
- desc.usage = (PB_USAGE_CPU_READ_WRITE | PB_USAGE_GPU_READ_WRITE);
- ws_bo->size = size;
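+ /* constant/vertex/index buffers go through the cached manager so a
+ * recently freed bo of compatible size/alignment can be recycled */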
+ if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
+ bo = r600_bomgr_bo_create(radeon->bomgr, size, alignment, *radeon->cfence);
+ if (bo) {
+ return bo;
+ }
+ }
- if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
- man = radeon->cman;
- else
- man = radeon->kman;
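+ /* cache miss or non-cacheable binding: allocate a fresh kernel bo */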
+ rbo = radeon_bo(radeon, 0, size, alignment);
+ if (rbo == NULL) {
+ return NULL;
+ }
+
+ bo = calloc(1, sizeof(struct r600_bo));
+ bo->size = size;
+ bo->alignment = alignment;
+ bo->bo = rbo;
+ if (binding & (PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER)) {
+ r600_bomgr_bo_init(radeon->bomgr, bo);
+ }
/* Staging resources participate in transfers and blits only
* and are used for uploads and downloads from regular
* resources. We generate them internally for some transfers.
*/
if (usage == PIPE_USAGE_STAGING)
- ws_bo->domains = RADEON_GEM_DOMAIN_CPU | RADEON_GEM_DOMAIN_GTT;
- else
- ws_bo->domains = (RADEON_GEM_DOMAIN_CPU |
- RADEON_GEM_DOMAIN_GTT |
- RADEON_GEM_DOMAIN_VRAM);
-
-
- ws_bo->pb = man->create_buffer(man, size, &desc);
- if (ws_bo->pb == NULL) {
- free(ws_bo);
- return NULL;
- }
+ bo->domains = RADEON_GEM_DOMAIN_CPU | RADEON_GEM_DOMAIN_GTT;
+ else
+ bo->domains = (RADEON_GEM_DOMAIN_CPU |
+ RADEON_GEM_DOMAIN_GTT |
+ RADEON_GEM_DOMAIN_VRAM);
- pipe_reference_init(&ws_bo->reference, 1);
- return ws_bo;
+ pipe_reference_init(&bo->reference, 1);
+ return bo;
}
struct r600_bo *r600_bo_handle(struct radeon *radeon,
unsigned handle, unsigned *array_mode)
{
- struct r600_bo *ws_bo = calloc(1, sizeof(struct r600_bo));
- struct radeon_bo *bo;
+ struct r600_bo *bo = calloc(1, sizeof(struct r600_bo));
+ struct radeon_bo *rbo;
- ws_bo->pb = radeon_bo_pb_create_buffer_from_handle(radeon->kman, handle);
- if (!ws_bo->pb) {
- free(ws_bo);
+ rbo = bo->bo = radeon_bo(radeon, handle, 0, 0);
+ if (rbo == NULL) {
+ free(bo);
return NULL;
}
- bo = radeon_bo_pb_get_bo(ws_bo->pb);
- ws_bo->size = bo->size;
- ws_bo->domains = (RADEON_GEM_DOMAIN_CPU |
- RADEON_GEM_DOMAIN_GTT |
- RADEON_GEM_DOMAIN_VRAM);
+ bo->size = rbo->size;
+ bo->domains = (RADEON_GEM_DOMAIN_CPU |
+ RADEON_GEM_DOMAIN_GTT |
+ RADEON_GEM_DOMAIN_VRAM);
- pipe_reference_init(&ws_bo->reference, 1);
+ pipe_reference_init(&bo->reference, 1);
- radeon_bo_get_tiling_flags(radeon, bo, &ws_bo->tiling_flags,
- &ws_bo->kernel_pitch);
+ radeon_bo_get_tiling_flags(radeon, rbo, &bo->tiling_flags, &bo->kernel_pitch);
if (array_mode) {
- if (ws_bo->tiling_flags) {
- if (ws_bo->tiling_flags & RADEON_TILING_MICRO)
+ if (bo->tiling_flags) {
+ if (bo->tiling_flags & RADEON_TILING_MICRO)
*array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
- if ((ws_bo->tiling_flags & (RADEON_TILING_MICRO | RADEON_TILING_MACRO)) ==
+ if ((bo->tiling_flags & (RADEON_TILING_MICRO | RADEON_TILING_MACRO)) ==
(RADEON_TILING_MICRO | RADEON_TILING_MACRO))
*array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
} else {
*array_mode = 0;
}
}
- return ws_bo;
+ return bo;
}
void *r600_bo_map(struct radeon *radeon, struct r600_bo *bo, unsigned usage, void *ctx)
{
- return pb_map(bo->pb, usage, ctx);
+ struct pipe_context *pctx = ctx;
+
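+ /* an unsynchronized map bypasses the busy and fence checks below */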
+ if (usage & PB_USAGE_UNSYNCHRONIZED) {
+ if (radeon_bo_map(radeon, bo->bo)) {
+ return NULL;
+ }
+ return bo->bo->data + bo->offset;
+ }
+
+ if (p_atomic_read(&bo->bo->reference.count) > 1) {
+ if (usage & PB_USAGE_DONTBLOCK) {
+ return NULL;
+ }
+ if (ctx) {
+ pctx->flush(pctx, 0, NULL);
+ }
+ }
+
+ if (usage & PB_USAGE_DONTBLOCK) {
+ uint32_t domain;
+
+ if (radeon_bo_busy(radeon, bo->bo, &domain))
+ return NULL;
+ if (radeon_bo_map(radeon, bo->bo)) {
+ return NULL;
+ }
+ goto out;
+ }
+
+ if (radeon_bo_map(radeon, bo->bo)) {
+ return NULL;
+ }
+ if (radeon_bo_wait(radeon, bo->bo)) {
+ radeon_bo_unmap(radeon, bo->bo);
+ return NULL;
+ }
+
+out:
+ return bo->bo->data + bo->offset;
}
void r600_bo_unmap(struct radeon *radeon, struct r600_bo *bo)
{
- pb_unmap(bo->pb);
+ radeon_bo_unmap(radeon, bo->bo);
}
-static void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
+void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo)
{
- if (bo->pb)
- pb_reference(&bo->pb, NULL);
+ if (bo->manager_id) {
+ if (!r600_bomgr_bo_destroy(radeon->bomgr, bo)) {
+ /* destroy is delayed by buffer manager */
+ return;
+ }
+ }
+ radeon_bo_reference(radeon, &bo->bo, NULL);
free(bo);
}
-void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst,
- struct r600_bo *src)
+void r600_bo_reference(struct radeon *radeon, struct r600_bo **dst, struct r600_bo *src)
{
struct r600_bo *old = *dst;
-
+
if (pipe_reference(&(*dst)->reference, &src->reference)) {
r600_bo_destroy(radeon, old);
}
*dst = src;
}
-unsigned r600_bo_get_handle(struct r600_bo *pb_bo)
-{
- struct radeon_bo *bo;
-
- bo = radeon_bo_pb_get_bo(pb_bo->pb);
- if (!bo)
- return 0;
-
- return bo->handle;
-}
-
-unsigned r600_bo_get_size(struct r600_bo *pb_bo)
-{
- struct radeon_bo *bo;
-
- bo = radeon_bo_pb_get_bo(pb_bo->pb);
- if (!bo)
- return 0;
-
- return bo->size;
-}
-
-boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *pb_bo,
+boolean r600_bo_get_winsys_handle(struct radeon *radeon, struct r600_bo *bo,
unsigned stride, struct winsys_handle *whandle)
{
- struct radeon_bo *bo;
-
- bo = radeon_bo_pb_get_bo(pb_bo->pb);
- if (!bo)
- return FALSE;
-
whandle->stride = stride;
switch(whandle->type) {
case DRM_API_HANDLE_TYPE_KMS:
- whandle->handle = r600_bo_get_handle(pb_bo);
+ whandle->handle = r600_bo_get_handle(bo);
break;
case DRM_API_HANDLE_TYPE_SHARED:
- if (radeon_bo_get_name(radeon, bo, &whandle->handle))
+ if (radeon_bo_get_name(radeon, bo->bo, &whandle->handle))
return FALSE;
break;
default:
--- /dev/null
+/*
+ * Copyright 2010 VMware.
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jose Fonseca <jrfonseca-at-vmware-dot-com>
+ * Thomas Hellström <thomas-at-vmware-dot-com>
+ * Jerome Glisse <jglisse@redhat.com>
+ */
+#include <util/u_memory.h>
+#include <util/u_double_list.h>
+#include <util/u_time.h>
+#include <pipebuffer/pb_bufmgr.h>
+#include "r600_priv.h"
+
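+/*
+ * Delayed destruction: r600_bo_destroy() parks a cacheable bo on the
+ * delayed list for mgr->usecs microseconds, so r600_bomgr_bo_create()
+ * can recycle it before falling back to a fresh kernel allocation.
+ */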
+static void r600_bomgr_timeout_flush(struct r600_bomgr *mgr)
+{
+ struct r600_bo *bo, *tmp;
+ int64_t now;
+
+ now = os_time_get();
+ LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &mgr->delayed, list) {
+ if(!os_time_timeout(bo->start, bo->end, now))
+ break;
+
+ mgr->num_delayed--;
+ bo->manager_id = 0;
+ LIST_DEL(&bo->list);
+ r600_bo_destroy(mgr->radeon, bo);
+ }
+}
+
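+/*
+ * A delayed bo is compatible when it is large enough (but below twice
+ * the requested size, to bound waste), sufficiently aligned, and the
+ * GPU has already passed its last fence.
+ */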
+static INLINE int r600_bo_is_compat(struct r600_bomgr *mgr,
+ struct r600_bo *bo,
+ unsigned size,
+ unsigned alignment,
+ unsigned cfence)
+{
+ if(bo->size < size) {
+ return 0;
+ }
+
+ /* be lenient with size */
+ if(bo->size >= 2*size) {
+ return 0;
+ }
+
+ if(!pb_check_alignment(alignment, bo->alignment)) {
+ return 0;
+ }
+
+ if (!fence_is_after(cfence, bo->fence)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+struct r600_bo *r600_bomgr_bo_create(struct r600_bomgr *mgr,
+ unsigned size,
+ unsigned alignment,
+ unsigned cfence)
+{
+ struct r600_bo *bo, *tmp;
+ int64_t now;
+
+ pipe_mutex_lock(mgr->mutex);
+
+ now = os_time_get();
+ LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &mgr->delayed, list) {
+ if(r600_bo_is_compat(mgr, bo, size, alignment, cfence)) {
+ LIST_DEL(&bo->list);
+ --mgr->num_delayed;
+ r600_bomgr_timeout_flush(mgr);
+ pipe_mutex_unlock(mgr->mutex);
+ LIST_INITHEAD(&bo->list);
+ pipe_reference_init(&bo->reference, 1);
+ return bo;
+ }
+
+ if(os_time_timeout(bo->start, bo->end, now)) {
+ mgr->num_delayed--;
+ bo->manager_id = 0;
+ LIST_DEL(&bo->list);
+ r600_bo_destroy(mgr->radeon, bo);
+ }
+ }
+
+ pipe_mutex_unlock(mgr->mutex);
+ return NULL;
+}
+
+void r600_bomgr_bo_init(struct r600_bomgr *mgr, struct r600_bo *bo)
+{
+ LIST_INITHEAD(&bo->list);
+ bo->manager_id = 1;
+}
+
+bool r600_bomgr_bo_destroy(struct r600_bomgr *mgr, struct r600_bo *bo)
+{
+ bo->start = os_time_get();
+ bo->end = bo->start + mgr->usecs;
+ pipe_mutex_lock(mgr->mutex);
+ LIST_ADDTAIL(&bo->list, &mgr->delayed);
+ ++mgr->num_delayed;
+ pipe_mutex_unlock(mgr->mutex);
+ return FALSE;
+}
+
+void r600_bomgr_destroy(struct r600_bomgr *mgr)
+{
+ struct r600_bo *bo, *tmp;
+
+ pipe_mutex_lock(mgr->mutex);
+ LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &mgr->delayed, list) {
+ mgr->num_delayed--;
+ bo->manager_id = 0;
+ LIST_DEL(&bo->list);
+ r600_bo_destroy(mgr->radeon, bo);
+ }
+ pipe_mutex_unlock(mgr->mutex);
+
+ FREE(mgr);
+}
+
+struct r600_bomgr *r600_bomgr_create(struct radeon *radeon, unsigned usecs)
+{
+ struct r600_bomgr *mgr;
+
+ mgr = CALLOC_STRUCT(r600_bomgr);
+ if (mgr == NULL)
+ return NULL;
+
+ mgr->radeon = radeon;
+ mgr->usecs = usecs;
+ LIST_INITHEAD(&mgr->delayed);
+ mgr->num_delayed = 0;
+ pipe_mutex_init(mgr->mutex);
+
+ return mgr;
+}
#include <sys/ioctl.h>
#include "util/u_inlines.h"
#include "util/u_debug.h"
-#include <pipebuffer/pb_bufmgr.h>
#include "r600.h"
#include "r600_priv.h"
#include "r600_drm_public.h"
if (radeon_drm_get_tiling(radeon))
return NULL;
}
- radeon->kman = radeon_bo_pbmgr_create(radeon);
- if (!radeon->kman)
- return NULL;
- radeon->cman = pb_cache_manager_create(radeon->kman, 1000000);
- if (!radeon->cman)
+ radeon->bomgr = r600_bomgr_create(radeon, 1000000);
+ if (radeon->bomgr == NULL) {
return NULL;
+ }
return radeon;
}
return NULL;
}
- if (radeon->cman)
- radeon->cman->destroy(radeon->cman);
-
- if (radeon->kman)
- radeon->kman->destroy(radeon->kman);
+ if (radeon->bomgr)
+ r600_bomgr_destroy(radeon->bomgr);
if (radeon->fd >= 0)
drmClose(radeon->fd);
#include <string.h>
#include <stdlib.h>
#include <assert.h>
+#include <pipe/p_compiler.h>
+#include <util/u_inlines.h>
+#include <util/u_memory.h>
+#include <pipebuffer/pb_bufmgr.h>
#include "xf86drm.h"
-#include "r600.h"
-#include "r600d.h"
#include "radeon_drm.h"
-#include "bof.h"
-#include "pipe/p_compiler.h"
-#include "util/u_inlines.h"
-#include "util/u_memory.h"
-#include <pipebuffer/pb_bufmgr.h>
#include "r600_priv.h"
+#include "bof.h"
+#include "r600d.h"
#define GROUP_FORCE_NEW_BLOCK 0
}
ctx->cfence = r600_bo_map(ctx->radeon, ctx->fence_bo, PB_USAGE_UNSYNCHRONIZED, NULL);
*ctx->cfence = 0;
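+ /* expose the mapped fence location to the winsys so the bo manager
+ * can tell which delayed bos the GPU is already done with */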
+ ctx->radeon->cfence = ctx->cfence;
LIST_INITHEAD(&ctx->fenced_bo);
return 0;
}
ctx->reloc[ctx->creloc].write_domain = rbo->domains & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM);
ctx->reloc[ctx->creloc].flags = 0;
radeon_bo_reference(ctx->radeon, &ctx->bo[ctx->creloc], bo);
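+ /* stamp the bo with the fence of the cs it is being referenced from */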
+ rbo->fence = ctx->fence;
ctx->creloc++;
/* set PKT3 to point to proper reloc */
*pm4 = bo->reloc_id;
/* find relocation */
id = block->pm4_bo_index[id];
r600_bo_reference(ctx->radeon, &block->reloc[id].bo, state->regs[i].bo);
+ state->regs[i].bo->fence = ctx->fence;
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
*/
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[0].bo);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[0].bo);
+ state->regs[0].bo->fence = ctx->fence;
} else {
/* TEXTURE RESOURCE */
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->regs[2].bo);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->regs[3].bo);
+ state->regs[2].bo->fence = ctx->fence;
+ state->regs[3].bo->fence = ctx->fence;
}
if (!(block->status & R600_BLOCK_STATUS_DIRTY)) {
block->status |= R600_BLOCK_STATUS_ENABLED;
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
-#include <pipebuffer/pb_bufmgr.h>
-#include "util/u_double_list.h"
+#include <util/u_double_list.h>
+#include <util/u_inlines.h>
+#include <os/os_thread.h>
#include "r600.h"
+struct r600_bomgr;
+
struct radeon {
int fd;
int refcount;
unsigned device;
unsigned family;
enum chip_class chip_class;
- struct pb_manager *kman; /* kernel bo manager */
- struct pb_manager *cman; /* cached bo manager */
- struct r600_tiling_info tiling_info;
+ struct r600_tiling_info tiling_info;
+ struct r600_bomgr *bomgr;
+ unsigned *cfence; /* last fence value the GPU has written back */
};
-struct radeon *r600_new(int fd, unsigned device);
-void r600_delete(struct radeon *r600);
-
struct r600_reg {
unsigned opcode;
unsigned offset_base;
struct r600_bo {
struct pipe_reference reference;
- struct pb_buffer *pb;
unsigned size;
unsigned tiling_flags;
unsigned kernel_pitch;
unsigned domains;
+ struct radeon_bo *bo;
+ unsigned fence; /* fence of the last cs referencing this bo */
+ /* manager data */
+ struct list_head list;
+ unsigned manager_id; /* nonzero if destruction goes through the bo manager */
+ unsigned alignment;
+ unsigned offset;
+ int64_t start;
+ int64_t end;
+};
+
+struct r600_bomgr {
+ struct radeon *radeon;
+ unsigned usecs; /* how long an unused bo stays on the delayed list */
+ pipe_mutex mutex;
+ struct list_head delayed;
+ unsigned num_delayed;
};
+/*
+ * r600_drm.c
+ */
+struct radeon *r600_new(int fd, unsigned device);
+void r600_delete(struct radeon *r600);
-/* radeon_pciid.c */
+/*
+ * radeon_pciid.c
+ */
unsigned radeon_family_from_device(unsigned device);
-/* radeon_bo.c */
+/*
+ * radeon_bo.c
+ */
struct radeon_bo *radeon_bo(struct radeon *radeon, unsigned handle,
unsigned size, unsigned alignment);
void radeon_bo_reference(struct radeon *radeon, struct radeon_bo **dst,
struct radeon_bo *src);
int radeon_bo_wait(struct radeon *radeon, struct radeon_bo *bo);
int radeon_bo_busy(struct radeon *radeon, struct radeon_bo *bo, uint32_t *domain);
-void radeon_bo_pbmgr_flush_maps(struct pb_manager *_mgr);
int radeon_bo_fencelist(struct radeon *radeon, struct radeon_bo **bolist, uint32_t num_bo);
int radeon_bo_get_tiling_flags(struct radeon *radeon,
struct radeon_bo *bo,
struct radeon_bo *bo,
uint32_t *name);
-/* radeon_bo_pb.c */
-struct radeon_bo *radeon_bo_pb_get_bo(struct pb_buffer *_buf);
-struct pb_manager *radeon_bo_pbmgr_create(struct radeon *radeon);
-struct pb_buffer *radeon_bo_pb_create_buffer_from_handle(struct pb_manager *_mgr,
- uint32_t handle);
-
-/* r600_hw_context.c */
+/*
+ * r600_hw_context.c
+ */
int r600_context_init_fence(struct r600_context *ctx);
void r600_context_bo_reloc(struct r600_context *ctx, u32 *pm4, struct r600_bo *rbo);
void r600_context_bo_flush(struct r600_context *ctx, unsigned flush_flags,
struct r600_bo *r600_context_reg_bo(struct r600_context *ctx, unsigned offset);
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg);
-/* r600_bo.c */
-unsigned r600_bo_get_handle(struct r600_bo *bo);
-unsigned r600_bo_get_size(struct r600_bo *bo);
-static INLINE struct radeon_bo *r600_bo_get_bo(struct r600_bo *bo)
-{
- return radeon_bo_pb_get_bo(bo->pb);
-}
+/*
+ * r600_bo.c
+ */
+void r600_bo_destroy(struct radeon *radeon, struct r600_bo *bo);
+/*
+ * r600_bomgr.c
+ */
+struct r600_bomgr *r600_bomgr_create(struct radeon *radeon, unsigned usecs);
+void r600_bomgr_destroy(struct r600_bomgr *mgr);
+bool r600_bomgr_bo_destroy(struct r600_bomgr *mgr, struct r600_bo *bo);
+void r600_bomgr_bo_init(struct r600_bomgr *mgr, struct r600_bo *bo);
+struct r600_bo *r600_bomgr_bo_create(struct r600_bomgr *mgr,
+ unsigned size,
+ unsigned alignment,
+ unsigned cfence);
+
+/*
+ * helpers
+ */
#define CTX_RANGE_ID(ctx, offset) (((offset) >> (ctx)->hash_shift) & 255)
#define CTX_BLOCK_ID(ctx, offset) ((offset) & ((1 << (ctx)->hash_shift) - 1))
LIST_DELINIT(&block->list);
}
+/*
+ * radeon_bo.c
+ */
static inline int radeon_bo_map(struct radeon *radeon, struct radeon_bo *bo)
{
bo->map_count++;
assert(bo->map_count >= 0);
}
+/*
+ * r600_bo
+ */
+static inline struct radeon_bo *r600_bo_get_bo(struct r600_bo *bo)
+{
+ return bo->bo;
+}
+
+static inline unsigned r600_bo_get_handle(struct r600_bo *bo)
+{
+ return bo->bo->handle;
+}
+
+static inline unsigned r600_bo_get_size(struct r600_bo *bo)
+{
+ return bo->size;
+}
+
+/*
+ * fence
+ */
+static inline bool fence_is_after(unsigned fence, unsigned ofence)
+{
+ /* handle wrap around */
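+ /* e.g. fence wrapped to 0x00000002 while ofence is still 0xfffffff0 */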
+ if (fence < 0x80000000 && ofence > 0x80000000)
+ return TRUE;
+ if (fence > ofence)
+ return TRUE;
+ return FALSE;
+}
+
#endif
+++ /dev/null
-/*
- * Copyright 2010 Dave Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * on the rights to use, copy, modify, merge, publish, distribute, sub
- * license, and/or sell copies of the Software, and to permit persons to whom
- * the Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Dave Airlie
- */
-#include <util/u_inlines.h>
-#include <util/u_memory.h>
-#include <util/u_double_list.h>
-#include <pipebuffer/pb_buffer.h>
-#include <pipebuffer/pb_bufmgr.h>
-#include "r600_priv.h"
-
-struct radeon_bo_pb {
- struct pb_buffer b;
- struct radeon_bo *bo;
-
- struct radeon_bo_pbmgr *mgr;
-};
-
-extern const struct pb_vtbl radeon_bo_pb_vtbl;
-
-static INLINE struct radeon_bo_pb *radeon_bo_pb(struct pb_buffer *buf)
-{
- assert(buf);
- assert(buf->vtbl == &radeon_bo_pb_vtbl);
- return (struct radeon_bo_pb *)buf;
-}
-
-struct radeon_bo_pbmgr {
- struct pb_manager b;
- struct radeon *radeon;
-};
-
-static INLINE struct radeon_bo_pbmgr *radeon_bo_pbmgr(struct pb_manager *mgr)
-{
- assert(mgr);
- return (struct radeon_bo_pbmgr *)mgr;
-}
-
-static void radeon_bo_pb_destroy(struct pb_buffer *_buf)
-{
- struct radeon_bo_pb *buf = radeon_bo_pb(_buf);
-
- /* If this buffer is on the list of buffers to unmap,
- * do the unmapping now.
- */
- radeon_bo_unmap(buf->mgr->radeon, buf->bo);
- radeon_bo_reference(buf->mgr->radeon, &buf->bo, NULL);
- FREE(buf);
-}
-
-static void *
-radeon_bo_pb_map_internal(struct pb_buffer *_buf,
- unsigned flags, void *ctx)
-{
- struct radeon_bo_pb *buf = radeon_bo_pb(_buf);
- struct pipe_context *pctx = ctx;
-
- if (flags & PB_USAGE_UNSYNCHRONIZED) {
- if (radeon_bo_map(buf->mgr->radeon, buf->bo)) {
- return NULL;
- }
- return buf->bo->data;
- }
-
- if (p_atomic_read(&buf->bo->reference.count) > 1) {
- if (flags & PB_USAGE_DONTBLOCK) {
- return NULL;
- }
- if (ctx) {
- pctx->flush(pctx, 0, NULL);
- }
- }
-
- if (flags & PB_USAGE_DONTBLOCK) {
- uint32_t domain;
- if (radeon_bo_busy(buf->mgr->radeon, buf->bo, &domain))
- return NULL;
- if (radeon_bo_map(buf->mgr->radeon, buf->bo)) {
- return NULL;
- }
- goto out;
- }
-
- if (radeon_bo_map(buf->mgr->radeon, buf->bo)) {
- return NULL;
- }
- if (radeon_bo_wait(buf->mgr->radeon, buf->bo)) {
- radeon_bo_unmap(buf->mgr->radeon, buf->bo);
- return NULL;
- }
-out:
- return buf->bo->data;
-}
-
-static void radeon_bo_pb_unmap_internal(struct pb_buffer *_buf)
-{
-}
-
-static void
-radeon_bo_pb_get_base_buffer(struct pb_buffer *buf,
- struct pb_buffer **base_buf,
- unsigned *offset)
-{
- *base_buf = buf;
- *offset = 0;
-}
-
-static enum pipe_error
-radeon_bo_pb_validate(struct pb_buffer *_buf,
- struct pb_validate *vl,
- unsigned flags)
-{
- /* Always pinned */
- return PIPE_OK;
-}
-
-static void
-radeon_bo_pb_fence(struct pb_buffer *buf,
- struct pipe_fence_handle *fence)
-{
-}
-
-const struct pb_vtbl radeon_bo_pb_vtbl = {
- radeon_bo_pb_destroy,
- radeon_bo_pb_map_internal,
- radeon_bo_pb_unmap_internal,
- radeon_bo_pb_validate,
- radeon_bo_pb_fence,
- radeon_bo_pb_get_base_buffer,
-};
-
-struct pb_buffer *
-radeon_bo_pb_create_buffer_from_handle(struct pb_manager *_mgr,
- uint32_t handle)
-{
- struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
- struct radeon *radeon = mgr->radeon;
- struct radeon_bo_pb *bo;
- struct radeon_bo *hw_bo;
-
- hw_bo = radeon_bo(radeon, handle, 0, 0);
- if (hw_bo == NULL)
- return NULL;
-
- bo = CALLOC_STRUCT(radeon_bo_pb);
- if (!bo) {
- radeon_bo_reference(radeon, &hw_bo, NULL);
- return NULL;
- }
-
- pipe_reference_init(&bo->b.base.reference, 1);
- bo->b.base.alignment = 0;
- bo->b.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
- bo->b.base.size = hw_bo->size;
- bo->b.vtbl = &radeon_bo_pb_vtbl;
- bo->mgr = mgr;
-
- bo->bo = hw_bo;
-
- return &bo->b;
-}
-
-static struct pb_buffer *
-radeon_bo_pb_create_buffer(struct pb_manager *_mgr,
- pb_size size,
- const struct pb_desc *desc)
-{
- struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
- struct radeon *radeon = mgr->radeon;
- struct radeon_bo_pb *bo;
-
- bo = CALLOC_STRUCT(radeon_bo_pb);
- if (!bo)
- goto error1;
-
- pipe_reference_init(&bo->b.base.reference, 1);
- bo->b.base.alignment = desc->alignment;
- bo->b.base.usage = desc->usage;
- bo->b.base.size = size;
- bo->b.vtbl = &radeon_bo_pb_vtbl;
- bo->mgr = mgr;
-
- bo->bo = radeon_bo(radeon, 0, size, desc->alignment);
- if (bo->bo == NULL)
- goto error2;
- return &bo->b;
-
-error2:
- FREE(bo);
-error1:
- return NULL;
-}
-
-static void
-radeon_bo_pbmgr_flush(struct pb_manager *mgr)
-{
- /* NOP */
-}
-
-static void
-radeon_bo_pbmgr_destroy(struct pb_manager *_mgr)
-{
- struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
- FREE(mgr);
-}
-
-struct pb_manager *radeon_bo_pbmgr_create(struct radeon *radeon)
-{
- struct radeon_bo_pbmgr *mgr;
-
- mgr = CALLOC_STRUCT(radeon_bo_pbmgr);
- if (!mgr)
- return NULL;
-
- mgr->b.destroy = radeon_bo_pbmgr_destroy;
- mgr->b.create_buffer = radeon_bo_pb_create_buffer;
- mgr->b.flush = radeon_bo_pbmgr_flush;
-
- mgr->radeon = radeon;
- return &mgr->b;
-}
-
-struct radeon_bo *radeon_bo_pb_get_bo(struct pb_buffer *_buf)
-{
- struct radeon_bo_pb *buf;
- if (_buf->vtbl == &radeon_bo_pb_vtbl) {
- buf = radeon_bo_pb(_buf);
- return buf->bo;
- } else {
- struct pb_buffer *base_buf;
- pb_size offset;
- pb_get_base_buffer(_buf, &base_buf, &offset);
- if (base_buf->vtbl == &radeon_bo_pb_vtbl) {
- buf = radeon_bo_pb(base_buf);
- return buf->bo;
- }
- }
- return NULL;
-}