ctx->init_dwords = ctx->pm4_cdwords;
}
-static void INLINE r600_context_update_fenced_list(struct r600_context *ctx)
-{
- for (int i = 0; i < ctx->creloc; i++) {
- if (!LIST_IS_EMPTY(&ctx->bo[i]->fencedlist))
- LIST_DELINIT(&ctx->bo[i]->fencedlist);
- LIST_ADDTAIL(&ctx->bo[i]->fencedlist, &ctx->fenced_bo);
- ctx->bo[i]->fence = ctx->radeon->fence;
- ctx->bo[i]->ctx = ctx;
- }
-}
-
-static void INLINE r600_context_fence_wraparound(struct r600_context *ctx, unsigned fence)
-{
- struct radeon_bo *bo = NULL;
- struct radeon_bo *tmp;
-
- LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
- if (bo->fence <= *ctx->radeon->cfence) {
- LIST_DELINIT(&bo->fencedlist);
- bo->fence = 0;
- } else {
- bo->fence = fence;
- }
- }
-}
-
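/* What the removed helpers did: r600_context_update_fenced_list() tagged every
 * BO referenced by the flushed CS with the current fence value and queued it
 * on ctx->fenced_bo; r600_context_fence_wraparound() rebased those tags when
 * the 32-bit fence counter overflowed back to 1. After this patch there is no
 * userspace fence per BO; idle/busy checks go to the kernel unconditionally
 * (see the radeon_bo_wait/radeon_bo_busy hunks below). */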
static void r600_init_block(struct r600_context *ctx,
struct r600_block *block,
const struct r600_reg *reg, int index, int nreg,
return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}
-static void r600_context_clear_fenced_bo(struct r600_context *ctx)
-{
- struct radeon_bo *bo, *tmp;
-
- LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &ctx->fenced_bo, fencedlist) {
- LIST_DELINIT(&bo->fencedlist);
- bo->fence = 0;
- bo->ctx = NULL;
- }
-}
-
static void r600_free_resource_range(struct r600_context *ctx, struct r600_range *range, int nblocks)
{
struct r600_block *block;
free(ctx->bo);
free(ctx->pm4);
- r600_context_clear_fenced_bo(ctx);
memset(ctx, 0, sizeof(struct r600_context));
}
ctx->reloc[ctx->creloc].write_domain = rbo->domains & (RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM);
ctx->reloc[ctx->creloc].flags = 0;
radeon_bo_reference(ctx->radeon, &ctx->bo[ctx->creloc], bo);
- rbo->fence = ctx->radeon->fence;
ctx->creloc++;
}
/* find relocation */
reloc_id = block->pm4_bo_index[id];
r600_bo_reference(ctx->radeon, &block->reloc[reloc_id].bo, reg->bo);
- reg->bo->fence = ctx->radeon->fence;
/* always force dirty for relocs for now */
dirty |= R600_BLOCK_STATUS_DIRTY;
}
dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
}
}
- if (!dirty) {
- if (is_vertex)
- state->bo[0]->fence = ctx->radeon->fence;
- else {
- state->bo[0]->fence = ctx->radeon->fence;
- state->bo[1]->fence = ctx->radeon->fence;
- }
- } else {
+
+ if (dirty) {
if (is_vertex) {
/* VERTEX RESOURCE: we pretend there are 2 BOs to relocate so
 * that there is a single code path for VERTEX & TEXTURE resources
 */
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->bo[0]);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, NULL);
- state->bo[0]->fence = ctx->radeon->fence;
} else {
/* TEXTURE RESOURCE */
r600_bo_reference(ctx->radeon, &block->reloc[1].bo, state->bo[0]);
r600_bo_reference(ctx->radeon, &block->reloc[2].bo, state->bo[1]);
- state->bo[0]->fence = ctx->radeon->fence;
- state->bo[1]->fence = ctx->radeon->fence;
state->bo[0]->bo->binding |= BO_BOUND_TEXTURE;
}
- }
- if (dirty) {
+
if (is_vertex)
block->status |= R600_BLOCK_STATUS_RESOURCE_VERTEX;
else
struct drm_radeon_cs drmib = {};
struct drm_radeon_cs_chunk chunks[2];
uint64_t chunk_array[2];
- unsigned fence;
int r;
struct r600_block *enable_block = NULL;
/* partial flush is needed to avoid lockups on some chips with user fences */
ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
- /* emit fence */
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
- ctx->pm4[ctx->pm4_cdwords++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
- ctx->pm4[ctx->pm4_cdwords++] = 0;
- ctx->pm4[ctx->pm4_cdwords++] = (1 << 29) | (0 << 24);
- ctx->pm4[ctx->pm4_cdwords++] = ctx->radeon->fence;
- ctx->pm4[ctx->pm4_cdwords++] = 0;
- ctx->pm4[ctx->pm4_cdwords++] = PKT3(PKT3_NOP, 0, 0);
- ctx->pm4[ctx->pm4_cdwords++] = 0;
- r600_context_bo_reloc(ctx, &ctx->pm4[ctx->pm4_cdwords - 1], ctx->radeon->fence_bo);
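/* Flush-time fence emission is gone along with the bookkeeping. For the
 * packets kept above: PKT3() builds a type-3 PM4 header, roughly (a sketch,
 * assuming the usual r600d.h definitions):
 *
 *   #define PKT3(op, count, predicate) \
 *           (PKT_TYPE_S(3) | PKT_COUNT_S(count) | \
 *            PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))
 *
 * where "count" is the number of payload dwords minus one; that is why the
 * removed EVENT_WRITE_EOP packet carried 4 for its five payload dwords, and
 * the PS_PARTIAL_FLUSH above carries 0 for its single EVENT_TYPE dword. */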
#if 1
/* emit cs */
*ctx->radeon->cfence = ctx->radeon->fence;
#endif
- r600_context_update_fenced_list(ctx);
-
- fence = ctx->radeon->fence + 1;
- if (fence < ctx->radeon->fence) {
- /* wrap around */
- fence = 1;
- r600_context_fence_wraparound(ctx, fence);
- }
- ctx->radeon->fence = fence;
-
/* restart */
for (int i = 0; i < ctx->creloc; i++) {
ctx->bo[i]->reloc = NULL;
if (bo == NULL) {
return NULL;
}
- bo->size = size;
- bo->handle = handle;
pipe_reference_init(&bo->reference, 1);
- LIST_INITHEAD(&bo->fencedlist);
if (handle) {
- unsigned size;
bo->buf = radeon->ws->buffer_from_handle(radeon->ws, &whandle, NULL, &size);
- if (!bo->buf) {
- FREE(bo);
- return NULL;
- }
- bo->handle = radeon->ws->trans_get_buffer_handle(bo->buf);
- bo->size = size;
- bo->shared = TRUE;
} else {
bo->buf = radeon->ws->buffer_create(radeon->ws, size, alignment, bind, initial_domain);
- if (!bo->buf) {
- FREE(bo);
- return NULL;
- }
- bo->handle = radeon->ws->trans_get_buffer_handle(bo->buf);
}
+ if (!bo->buf) {
+ FREE(bo);
+ return NULL;
+ }
+ bo->handle = radeon->ws->trans_get_buffer_handle(bo->buf);
+ bo->size = size;
return bo;
}
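/* The added lines fold the two identical error paths into one: the bo->buf
 * NULL check, the trans_get_buffer_handle() lookup and the size assignment
 * now run once after the if/else. Note the shared-handle branch used to
 * declare a local "unsigned size" shadowing the function parameter; with the
 * shadow removed, buffer_from_handle() writes into the parameter itself, so
 * the common "bo->size = size" stores the real size for both paths. The
 * bo->shared flag goes away too, since nothing tests it once the fence fast
 * paths are removed. */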
static void radeon_bo_destroy(struct radeon *radeon, struct radeon_bo *bo)
{
- LIST_DEL(&bo->fencedlist);
radeon_bo_fixed_unmap(radeon, bo);
pb_reference(&bo->buf, NULL);
FREE(bo);
struct drm_radeon_gem_wait_idle args;
int ret;
- if (!bo->shared) {
- if (!bo->fence)
- return 0;
- if (bo->fence <= *radeon->cfence) {
- LIST_DELINIT(&bo->fencedlist);
- bo->fence = 0;
- return 0;
- }
- }
-
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
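/* With the fence fast path gone, radeon_bo_wait() always asks the kernel. The
 * call following this hunk is presumably the usual libdrm pattern (a sketch,
 * not shown in this diff):
 *
 *   do {
 *       ret = drmCommandWriteRead(radeon->fd, DRM_RADEON_GEM_WAIT_IDLE,
 *                                 &args, sizeof(args));
 *   } while (ret == -EBUSY);
 */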
struct drm_radeon_gem_busy args;
int ret;
- if (!bo->shared) {
- if (!bo->fence)
- return 0;
- if (bo->fence <= *radeon->cfence) {
- LIST_DELINIT(&bo->fencedlist);
- bo->fence = 0;
- return 0;
- }
- }
-
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
args.domain = 0;