struct gen_device_info;
typedef struct _drm_bacon_bufmgr drm_bacon_bufmgr;
-typedef struct _drm_bacon_context drm_bacon_context;
typedef struct _drm_bacon_bo drm_bacon_bo;
struct _drm_bacon_bo {
int drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns);
-drm_bacon_context *drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr);
-int drm_bacon_gem_context_get_id(drm_bacon_context *ctx,
- uint32_t *ctx_id);
-void drm_bacon_gem_context_destroy(drm_bacon_context *ctx);
+uint32_t brw_create_hw_context(drm_bacon_bufmgr *bufmgr);
+void brw_destroy_hw_context(drm_bacon_bufmgr *bufmgr, uint32_t ctx_id);
int drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd);
drm_bacon_bo *drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr,
* This is required for transform feedback buffer offsets, query objects,
* and also allows us to reduce how much state we have to emit.
*/
- brw->hw_ctx = drm_bacon_gem_context_create(brw->bufmgr);
+ brw->hw_ctx = brw_create_hw_context(brw->bufmgr);
if (!brw->hw_ctx) {
fprintf(stderr, "Failed to create hardware context.\n");
if (brw->wm.base.scratch_bo)
drm_bacon_bo_unreference(brw->wm.base.scratch_bo);
- drm_bacon_gem_context_destroy(brw->hw_ctx);
+ brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
if (ctx->swrast_context) {
_swsetup_DestroyContext(&brw->ctx);
drm_bacon_bufmgr *bufmgr;
- drm_bacon_context *hw_ctx;
+ uint32_t hw_ctx;
/** BO for post-sync nonzero writes for gen6 workaround. */
drm_bacon_bo *workaround_bo;
/* If the OA counters aren't already on, enable them. */
if (brw->perfquery.oa_stream_fd == -1) {
__DRIscreen *screen = brw->screen->driScrnPriv;
- uint32_t ctx_id;
int period_exponent;
- if (drm_bacon_gem_context_get_id(brw->hw_ctx, &ctx_id) != 0)
- return false;
-
/* The timestamp for HSW+ increments every 80ns
*
* The period_exponent gives a sampling period as follows:
query->oa_format,
period_exponent,
screen->fd, /* drm fd */
- ctx_id))
+ brw->hw_ctx))
return false;
} else {
assert(brw->perfquery.current_oa_metrics_set_id ==
{
struct brw_context *brw = brw_context(ctx);
__DRIscreen *dri_screen = brw->screen->driScrnPriv;
- struct drm_i915_reset_stats stats;
+ struct drm_i915_reset_stats stats = { .ctx_id = brw->hw_ctx };
/* If hardware contexts are not being used (or
* DRM_IOCTL_I915_GET_RESET_STATS is not supported), this function should
* not be accessible.
*/
- assert(brw->hw_ctx != NULL);
-
- memset(&stats, 0, sizeof(stats));
- drm_bacon_gem_context_get_id(brw->hw_ctx, &stats.ctx_id);
+ assert(brw->hw_ctx != 0);
/* A reset status other than NO_ERROR was returned last time. I915 returns
* nonzero active/pending only if reset has been encountered and completed.
brw_check_for_reset(struct brw_context *brw)
{
__DRIscreen *dri_screen = brw->screen->driScrnPriv;
- struct drm_i915_reset_stats stats;
- memset(&stats, 0, sizeof(stats));
- drm_bacon_gem_context_get_id(brw->hw_ctx, &stats.ctx_id);
+ struct drm_i915_reset_stats stats = { .ctx_id = brw->hw_ctx };
if (drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) != 0)
return;
* would otherwise be stored in the context (which for all intents and
* purposes means everything).
*/
- if (brw->hw_ctx == NULL)
+ if (brw->hw_ctx == 0)
brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;
brw->ctx.NewDriverState |= BRW_NEW_BATCH;
static int
execbuffer(int fd,
struct intel_batchbuffer *batch,
- drm_bacon_context *ctx,
+ uint32_t ctx_id,
int used,
int in_fence,
int *out_fence,
int flags)
{
- uint32_t ctx_id = 0;
- drm_bacon_gem_context_get_id(ctx, &ctx_id);
-
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = (uintptr_t) batch->exec_objects,
.buffer_count = batch->exec_count,
flags |= I915_EXEC_GEN7_SOL_RESET;
if (ret == 0) {
- void *hw_ctx = batch->ring != RENDER_RING ? NULL : brw->hw_ctx;
+ uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;
/* Add the batch itself to the end of the validation list */
add_exec_bo(batch, batch->bo);
return c == unless;
}
-struct _drm_bacon_context {
- unsigned int ctx_id;
- struct _drm_bacon_bufmgr *bufmgr;
-};
-
struct bo_cache_bucket {
struct list_head head;
unsigned long size;
bo_purge_vma_cache(bufmgr);
}
+/* Create a new i915 hardware context via DRM_IOCTL_I915_GEM_CONTEXT_CREATE.
+ *
+ * Returns the kernel-assigned context ID, or 0 on failure.  NOTE(review):
+ * using 0 as the error sentinel assumes the kernel never hands out ID 0
+ * for a user-created context (0 names the default context) — confirm
+ * against the i915 uAPI.
+ */
-drm_bacon_context *
-drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr)
+uint32_t
+brw_create_hw_context(drm_bacon_bufmgr *bufmgr)
{
struct drm_i915_gem_context_create create;
-   drm_bacon_context *context = NULL;
int ret;
-   context = calloc(1, sizeof(*context));
-   if (!context)
-      return NULL;
-
memclear(create);
ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
strerror(errno));
-      free(context);
-      return NULL;
+      /* No wrapper object to clean up any more — just report failure. */
+      return 0;
}
-   context->ctx_id = create.ctx_id;
-   context->bufmgr = bufmgr;
-
-   return context;
-}
-
-int
-drm_bacon_gem_context_get_id(drm_bacon_context *ctx, uint32_t *ctx_id)
-{
-   if (ctx == NULL)
-      return -EINVAL;
-
-   *ctx_id = ctx->ctx_id;
-
-   return 0;
+   /* The raw ID is the whole state; the get_id accessor above is now
+    * unnecessary and is deleted along with the heap-allocated wrapper. */
+   return create.ctx_id;
}
+/* Destroy a hardware context previously returned by brw_create_hw_context().
+ *
+ * Safe to call with ctx_id == 0 (the "no context" sentinel): the ioctl is
+ * skipped entirely, mirroring the old NULL-pointer early return.  Failures
+ * are reported to stderr but otherwise ignored, as before.
+ */
void
-drm_bacon_gem_context_destroy(drm_bacon_context *ctx)
+brw_destroy_hw_context(drm_bacon_bufmgr *bufmgr, uint32_t ctx_id)
{
-   struct drm_i915_gem_context_destroy destroy;
-   int ret;
-
-   if (ctx == NULL)
-      return;
-
-   memclear(destroy);
+   /* Designated initializer zero-fills the remaining (pad) fields, so the
+    * explicit memclear() is no longer needed. */
+   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };
-   destroy.ctx_id = ctx->ctx_id;
-   ret = drmIoctl(ctx->bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
-                  &destroy);
-   if (ret != 0)
+   if (ctx_id != 0 &&
+       drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
strerror(errno));
-
-   free(ctx);
+   }
}
int