iris_batch_num_fences(struct iris_batch *batch)
{
return util_dynarray_num_elements(&batch->exec_fences,
- struct drm_i915_gem_exec_fence);
+ struct iris_batch_fence);
}
/**
{
fprintf(stderr, "Fence list (length %u): ", iris_batch_num_fences(batch));
- util_dynarray_foreach(&batch->exec_fences,
- struct drm_i915_gem_exec_fence, f) {
+ util_dynarray_foreach(&batch->exec_fences, struct iris_batch_fence, f) {
fprintf(stderr, "%s%u%s ",
- (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
+ (f->flags & IRIS_BATCH_FENCE_WAIT) ? "..." : "",
f->handle,
- (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
+ (f->flags & IRIS_BATCH_FENCE_SIGNAL) ? "!" : "");
}
fprintf(stderr, "\n");
sizeof(BITSET_WORD) * BITSET_WORDS(batch->exec_array_size));
struct iris_syncobj *syncobj = iris_create_syncobj(bufmgr);
- iris_batch_add_syncobj(batch, syncobj, I915_EXEC_FENCE_SIGNAL);
+ iris_batch_add_syncobj(batch, syncobj, IRIS_BATCH_FENCE_SIGNAL);
iris_syncobj_reference(bufmgr, &syncobj, NULL);
assert(!batch->sync_region_depth);
static void
move_syncobj_to_batch(struct iris_batch *batch,
struct iris_syncobj **p_syncobj,
- unsigned flags)
+ uint32_t flags)
{
struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
/* If the bo is being written to by others, wait for them. */
if (bo_deps->write_syncobjs[i])
move_syncobj_to_batch(batch, &bo_deps->write_syncobjs[i],
- I915_EXEC_FENCE_WAIT);
+ IRIS_BATCH_FENCE_WAIT);
/* If we're writing to the bo, wait on the reads from other batches. */
if (write)
move_syncobj_to_batch(batch, &bo_deps->read_syncobjs[i],
- I915_EXEC_FENCE_WAIT);
+ IRIS_BATCH_FENCE_WAIT);
}
struct iris_syncobj *batch_syncobj =
IRIS_BATCH_BLITTER,
};
+/* An entry in iris_batch::exec_fences.
+ *
+ * Deliberately laid out identically to struct drm_i915_gem_exec_fence, so
+ * drm_i915_gem_execbuffer2 can consume the exec_fences array directly,
+ * without converting it into (and allocating) a separate kernel-format list.
+ */
+struct iris_batch_fence {
+ /* DRM syncobj handle to wait on or signal (taken from iris_syncobj). */
+ uint32_t handle;
+
+#define IRIS_BATCH_FENCE_WAIT (1 << 0)
+#define IRIS_BATCH_FENCE_SIGNAL (1 << 1)
+ /* Bitmask of IRIS_BATCH_FENCE_WAIT / IRIS_BATCH_FENCE_SIGNAL. */
+ uint32_t flags;
+};
+
struct iris_batch {
struct iris_context *ice;
struct iris_screen *screen;
*/
struct util_dynarray syncobjs;
- /** A list of drm_i915_exec_fences to have execbuf signal or wait on */
+ /** A list of iris_batch_fences to have execbuf signal or wait on */
struct util_dynarray exec_fences;
/** The amount of aperture space (in bytes) used by all exec_bos */
/**
 * Add a sync-point to the batch, with the given flags.
 *
- * \p flags One of I915_EXEC_FENCE_WAIT or I915_EXEC_FENCE_SIGNAL.
+ * \p flags One of IRIS_BATCH_FENCE_WAIT or IRIS_BATCH_FENCE_SIGNAL.
 */
void
iris_batch_add_syncobj(struct iris_batch *batch,
struct iris_syncobj *syncobj,
- unsigned flags)
+ uint32_t flags)
{
+ /* Append one kernel-layout fence record (see iris_batch_fence) for this
+ * syncobj; execbuf will wait on or signal it per the flags. */
- struct drm_i915_gem_exec_fence *fence =
- util_dynarray_grow(&batch->exec_fences, struct drm_i915_gem_exec_fence, 1);
+ struct iris_batch_fence *fence =
+ util_dynarray_grow(&batch->exec_fences, struct iris_batch_fence, 1);
- *fence = (struct drm_i915_gem_exec_fence) {
+ *fence = (struct iris_batch_fence) {
.handle = syncobj->handle,
.flags = flags,
};
}
/**
- * Walk through a batch's dependencies (any I915_EXEC_FENCE_WAIT syncobjs)
+ * Walk through a batch's dependencies (any IRIS_BATCH_FENCE_WAIT syncobjs)
* and unreference any which have already passed.
*
* Sometimes the compute batch is seldom used, and accumulates references
int n = util_dynarray_num_elements(&batch->syncobjs, struct iris_syncobj *);
assert(n == util_dynarray_num_elements(&batch->exec_fences,
- struct drm_i915_gem_exec_fence));
+ struct iris_batch_fence));
/* Skip the first syncobj, as it's the signalling one. */
for (int i = n - 1; i > 0; i--) {
struct iris_syncobj **syncobj =
util_dynarray_element(&batch->syncobjs, struct iris_syncobj *, i);
- struct drm_i915_gem_exec_fence *fence =
+ struct iris_batch_fence *fence =
util_dynarray_element(&batch->exec_fences,
- struct drm_i915_gem_exec_fence, i);
- assert(fence->flags & I915_EXEC_FENCE_WAIT);
+ struct iris_batch_fence, i);
+ assert(fence->flags & IRIS_BATCH_FENCE_WAIT);
if (iris_wait_syncobj(bufmgr, *syncobj, 0))
continue;
/* Remove it from the lists; move the last element here. */
struct iris_syncobj **nth_syncobj =
util_dynarray_pop_ptr(&batch->syncobjs, struct iris_syncobj *);
- struct drm_i915_gem_exec_fence *nth_fence =
- util_dynarray_pop_ptr(&batch->exec_fences,
- struct drm_i915_gem_exec_fence);
+ struct iris_batch_fence *nth_fence =
+ util_dynarray_pop_ptr(&batch->exec_fences, struct iris_batch_fence);
if (syncobj != nth_syncobj) {
*syncobj = *nth_syncobj;
/* Before adding a new reference, clean out any stale ones. */
clear_stale_syncobjs(batch);
- iris_batch_add_syncobj(batch, fine->syncobj, I915_EXEC_FENCE_WAIT);
+ iris_batch_add_syncobj(batch, fine->syncobj, IRIS_BATCH_FENCE_WAIT);
}
}
}