fd_fence_set_batch(*fencep, batch);
fd_fence_ref(&batch->fence, *fencep);
- /* We (a) cannot substitute the provided fence with last_fence,
- * and (b) need fd_fence_populate() to be eventually called on
- * the fence that was pre-created in frontend-thread:
+ /* If we have nothing to flush, update the pre-created unflushed
+ * fence with the current state of the last-fence:
*/
- fd_fence_ref(&ctx->last_fence, NULL);
+ if (ctx->last_fence) {
+ fd_fence_repopulate(*fencep, ctx->last_fence);
+ fd_fence_ref(&fence, *fencep);
+ fd_bc_dump(ctx->screen, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
+ goto out;
+ }
/* async flush is not compatible with deferred flush, since
* nothing triggers the batch flush which fence_flush() would
}
}
+/* Update a pre-created (unflushed) fence from an already-populated
+ * last_fence, so deferred flushes can reuse the prior flush's result.
+ * The kernel timestamp is copied directly; the native sync fd is
+ * dup()'d so each fence owns an independent descriptor lifetime.
+ *
+ * NOTE(review): if dup() itself fails it returns -1, which silently
+ * degrades to "no fence fd" here — presumably acceptable as a
+ * best-effort fallback, but worth confirming against fence_flush()
+ * expectations.
+ */
+void
+fd_fence_repopulate(struct pipe_fence_handle *fence, struct pipe_fence_handle *last_fence)
+{
+   int fence_fd = (last_fence->fence_fd == -1) ? -1 : dup(last_fence->fence_fd);
+   fd_fence_populate(fence, last_fence->timestamp, fence_fd);
+}
+
static void
fd_fence_destroy(struct pipe_fence_handle *fence)
{
void fd_fence_populate(struct pipe_fence_handle *fence, uint32_t timestamp,
int fence_fd);
+void fd_fence_repopulate(struct pipe_fence_handle *fence,
+ struct pipe_fence_handle *last_fence);
void fd_fence_ref(struct pipe_fence_handle **ptr,
struct pipe_fence_handle *pfence);
bool fd_fence_finish(struct pipe_screen *pscreen, struct pipe_context *ctx,