iris_batch_reset(struct iris_batch *batch)
{
struct iris_screen *screen = batch->screen;
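+ /* Cache the bufmgr; the syncobj helpers below now take it instead of the screen. */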
+ struct iris_bufmgr *bufmgr = screen->bufmgr;
iris_bo_unreference(batch->bo);
batch->primary_batch_size = 0;
create_batch(batch);
assert(batch->bo->index == 0);
- struct iris_syncobj *syncobj = iris_create_syncobj(screen);
+ struct iris_syncobj *syncobj = iris_create_syncobj(bufmgr);
iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
- iris_syncobj_reference(screen, &syncobj, NULL);
+ iris_syncobj_reference(bufmgr, &syncobj, NULL);
assert(!batch->sync_region_depth);
iris_batch_sync_boundary(batch);
pipe_resource_reference(&batch->fine_fences.ref.res, NULL);
util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
- iris_syncobj_reference(screen, s, NULL);
+ iris_syncobj_reference(bufmgr, s, NULL);
ralloc_free(batch->syncobjs.mem_ctx);
iris_fine_fence_reference(batch->screen, &batch->last_fence, NULL);
batch->aperture_space = 0;
util_dynarray_foreach(&batch->syncobjs, struct iris_syncobj *, s)
- iris_syncobj_reference(screen, s, NULL);
+ iris_syncobj_reference(screen->bufmgr, s, NULL);
util_dynarray_clear(&batch->syncobjs);
util_dynarray_clear(&batch->exec_fences);
struct iris_syncobj **out_syncobj)
{
struct iris_syncobj *syncobj = iris_batch_get_signal_syncobj(batch);
- iris_syncobj_reference(batch->screen, out_syncobj, syncobj);
+ iris_syncobj_reference(batch->screen->bufmgr, out_syncobj, syncobj);
}
/**
* Make a new sync-point.
*/
struct iris_syncobj *
-iris_create_syncobj(struct iris_screen *screen)
+iris_create_syncobj(struct iris_bufmgr *bufmgr)
{
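+ /* gem_syncobj_create() operates on the DRM fd, obtained here from the bufmgr. */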
+ int fd = iris_bufmgr_get_fd(bufmgr);
struct iris_syncobj *syncobj = malloc(sizeof(*syncobj));
if (!syncobj)
return NULL;
- syncobj->handle = gem_syncobj_create(screen->fd, 0);
+ syncobj->handle = gem_syncobj_create(fd, 0);
assert(syncobj->handle);
pipe_reference_init(&syncobj->ref, 1);
return syncobj;
}
void
-iris_syncobj_destroy(struct iris_screen *screen, struct iris_syncobj *syncobj)
+iris_syncobj_destroy(struct iris_bufmgr *bufmgr, struct iris_syncobj *syncobj)
{
- gem_syncobj_destroy(screen->fd, syncobj->handle);
+ int fd = iris_bufmgr_get_fd(bufmgr);
+ gem_syncobj_destroy(fd, syncobj->handle);
free(syncobj);
}
util_dynarray_grow(&batch->syncobjs, struct iris_syncobj *, 1);
*store = NULL;
- iris_syncobj_reference(batch->screen, store, syncobj);
+ iris_syncobj_reference(batch->screen->bufmgr, store, syncobj);
}
/**
clear_stale_syncobjs(struct iris_batch *batch)
{
struct iris_screen *screen = batch->screen;
+ struct iris_bufmgr *bufmgr = screen->bufmgr;
int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);
struct drm_i915_gem_exec_fence, i);
assert(fence->flags & I915_EXEC_FENCE_WAIT);
- if (iris_wait_syncobj(&screen->base, *syncobj, 0))
+ if (iris_wait_syncobj(bufmgr, *syncobj, 0))
continue;
/* This sync object has already passed, there's no need to continue
* marking it as a dependency; we can stop holding on to the reference.
*/
- iris_syncobj_reference(screen, syncobj, NULL);
+ iris_syncobj_reference(bufmgr, syncobj, NULL);
/* Remove it from the lists; move the last element here. */
struct iris_syncobj **nth_syncobj =
}
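+/* Wait up to timeout_nsec for the syncobj to signal.  Returns true when the
+ * wait did not succeed (error or timeout); clear_stale_syncobjs relies on
+ * this with a zero timeout to detect fences that are still pending.
+ */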
bool
-iris_wait_syncobj(struct pipe_screen *p_screen,
+iris_wait_syncobj(struct iris_bufmgr *bufmgr,
struct iris_syncobj *syncobj,
int64_t timeout_nsec)
{
if (!syncobj)
return false;
- struct iris_screen *screen = (struct iris_screen *)p_screen;
+ int fd = iris_bufmgr_get_fd(bufmgr);
+
struct drm_syncobj_wait args = {
.handles = (uintptr_t)&syncobj->handle,
.count_handles = 1,
.timeout_nsec = timeout_nsec,
};
- return intel_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
+ return intel_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
#define CSI "\e["
#include "util/u_inlines.h"
struct pipe_screen;
-struct iris_screen;
struct iris_batch;
+struct iris_bufmgr;
/**
* A refcounted DRM Sync Object (drm_syncobj).
uint32_t handle;
};
-struct iris_syncobj *iris_create_syncobj(struct iris_screen *screen);
-void iris_syncobj_destroy(struct iris_screen *, struct iris_syncobj *);
+struct iris_syncobj *iris_create_syncobj(struct iris_bufmgr *bufmgr);
+void iris_syncobj_destroy(struct iris_bufmgr *, struct iris_syncobj *);
void iris_batch_add_syncobj(struct iris_batch *batch,
struct iris_syncobj *syncobj,
unsigned flags);
-bool iris_wait_syncobj(struct pipe_screen *screen,
+bool iris_wait_syncobj(struct iris_bufmgr *bufmgr,
struct iris_syncobj *syncobj,
int64_t timeout_nsec);
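+/* Set *dst to src: takes a reference on src (if non-NULL), drops the old
+ * reference held in *dst, and destroys the old syncobj once its last
+ * reference is gone.
+ */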
static inline void
-iris_syncobj_reference(struct iris_screen *screen,
+iris_syncobj_reference(struct iris_bufmgr *bufmgr,
struct iris_syncobj **dst,
struct iris_syncobj *src)
{
if (pipe_reference(*dst ? &(*dst)->ref : NULL,
src ? &src->ref : NULL))
- iris_syncobj_destroy(screen, *dst);
+ iris_syncobj_destroy(bufmgr, *dst);
*dst = src;
}
iris_fine_fence_destroy(struct iris_screen *screen,
struct iris_fine_fence *fine)
{
- iris_syncobj_reference(screen, &fine->syncobj, NULL);
+ iris_syncobj_reference(screen->bufmgr, &fine->syncobj, NULL);
pipe_resource_reference(&fine->ref.res, NULL);
free(fine);
}
fine->seqno = iris_fine_fence_next(batch);
- iris_syncobj_reference(batch->screen, &fine->syncobj,
+ iris_syncobj_reference(batch->screen->bufmgr, &fine->syncobj,
iris_batch_get_signal_syncobj(batch));
pipe_resource_reference(&fine->ref.res, batch->fine_fences.ref.res);
iris_destroy_monitor_object(ctx, query->monitor);
query->monitor = NULL;
} else {
- iris_syncobj_reference(screen, &query->syncobj, NULL);
+ iris_syncobj_reference(screen->bufmgr, &query->syncobj, NULL);
screen->base.fence_reference(ctx->screen, &query->fence, NULL);
}
pipe_resource_reference(&query->query_state_ref.res, NULL);
while (!READ_ONCE(q->map->snapshots_landed)) {
if (wait)
- iris_wait_syncobj(ctx->screen, q->syncobj, INT64_MAX);
+ iris_wait_syncobj(screen->bufmgr, q->syncobj, INT64_MAX);
else
return false;
}