if (!ok)
return false;
- ok = v3dv_bo_wait(device, bo, PIPE_TIMEOUT_INFINITE);
+ ok = v3dv_bo_wait(device, bo, OS_TIMEOUT_INFINITE);
if (!ok) {
fprintf(stderr, "memory wait for map failed\n");
return false;
/* Make sure the GPU is not currently accessing the indirect CL for this
* job, since we are about to overwrite some of the uniform data.
*/
- v3dv_bo_wait(job->device, job->indirect.bo, PIPE_TIMEOUT_INFINITE);
+ v3dv_bo_wait(job->device, job->indirect.bo, OS_TIMEOUT_INFINITE);
for (uint32_t i = 0; i < 3; i++) {
if (info->wg_uniform_offsets[i]) {
* we handle those in the CPU.
*/
if (info->pool->query_type == VK_QUERY_TYPE_OCCLUSION)
- v3dv_bo_wait(job->device, info->pool->occlusion.bo, PIPE_TIMEOUT_INFINITE);
+ v3dv_bo_wait(job->device, info->pool->occlusion.bo, OS_TIMEOUT_INFINITE);
if (info->pool->query_type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
struct vk_sync_wait waits[info->count];
/* Make sure the GPU is no longer using the indirect buffer*/
assert(info->buffer && info->buffer->mem && info->buffer->mem->bo);
- v3dv_bo_wait(queue->device, info->buffer->mem->bo, PIPE_TIMEOUT_INFINITE);
+ v3dv_bo_wait(queue->device, info->buffer->mem->bo, OS_TIMEOUT_INFINITE);
/* Map the indirect buffer and read the dispatch parameters */
assert(info->buffer && info->buffer->mem && info->buffer->mem->bo);
struct pipe_fence_handle *new_fence = NULL;
st_context_flush(st, ST_FLUSH_FRONT, &new_fence, NULL, NULL);
if (hgl_surf->throttle_fence) {
- screen->fence_finish(screen, NULL, hgl_surf->throttle_fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, NULL, hgl_surf->throttle_fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &hgl_surf->throttle_fence, NULL);
}
hgl_surf->throttle_fence = new_fence;
.op = op,
};
- get_abs_timeout(&req.timeout, PIPE_TIMEOUT_INFINITE);
+ get_abs_timeout(&req.timeout, OS_TIMEOUT_INFINITE);
return drmCommandWrite(bo->dev->fd, DRM_MSM_GEM_CPU_PREP, &req, sizeof(req));
}
{
struct timespec t;
- if (ns == PIPE_TIMEOUT_INFINITE)
+ if (ns == OS_TIMEOUT_INFINITE)
ns = 3600ULL * NSEC_PER_SEC; /* 1 hour timeout is almost infinite */
clock_gettime(CLOCK_MONOTONIC, &t);
if (ret)
goto out;
- if ((timeout != PIPE_TIMEOUT_INFINITE) &&
+ if ((timeout != OS_TIMEOUT_INFINITE) &&
(os_time_get_nano() >= end_time))
break;
struct pipe_fence_handle *fence = NULL;
ctx->flush(ctx, &fence, 0);
- ctx->screen->fence_finish(ctx->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
+ ctx->screen->fence_finish(ctx->screen, NULL, fence, OS_TIMEOUT_INFINITE);
}
void
/* Wait for the fence to decrease memory usage. */
if (fence) {
- screen->fence_finish(screen, pipe, *fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, pipe, *fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, fence, NULL);
}
t->wait_index = (t->wait_index + 1) % ring_size;
assert(*fence);
- screen->fence_finish(screen, pipe, *fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, pipe, *fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, fence, NULL);
}
void
d3d12_destroy_batch(struct d3d12_context *ctx, struct d3d12_batch *batch)
{
- d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
batch->cmdalloc->Release();
d3d12_descriptor_heap_free(batch->sampler_heap);
d3d12_descriptor_heap_free(batch->view_heap);
ID3D12DescriptorHeap* heaps[2] = { d3d12_descriptor_heap_get(batch->view_heap),
d3d12_descriptor_heap_get(batch->sampler_heap) };
- d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
/* Create or reset global command list */
if (ctx->cmdlist) {
struct d3d12_batch *batch = d3d12_current_batch(ctx);
d3d12_foreach_submitted_batch(ctx, old_batch)
- d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
d3d12_flush_cmdlist(ctx);
- d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
}
static void
inline bool
d3d12_fence_wait_event(HANDLE event, int event_fd, uint64_t timeout_ns)
{
- DWORD timeout_ms = (timeout_ns == PIPE_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? INFINITE : timeout_ns / NsPerMs;
+ DWORD timeout_ms = (timeout_ns == OS_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? INFINITE : timeout_ns / NsPerMs;
return WaitForSingleObject(event, timeout_ms) == WAIT_OBJECT_0;
}
#else
inline bool
d3d12_fence_wait_event(HANDLE event, int event_fd, uint64_t timeout_ns)
{
- int timeout_ms = (timeout_ns == PIPE_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? -1 : timeout_ns / NsPerMs;
+ int timeout_ms = (timeout_ns == OS_TIMEOUT_INFINITE || timeout_ns > MaxTimeoutInNs) ? -1 : timeout_ns / NsPerMs;
return sync_wait(event_fd, timeout_ms) == 0;
}
#endif
query_ensure_ready(d3d12_screen(ctx->base.screen), ctx, q_parent, false);
d3d12_foreach_submitted_batch(ctx, old_batch) {
if (old_batch->fence && old_batch->fence->value <= q_parent->fence_value)
- d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
}
/* Accumulate current results and store in first slot */
query_ensure_ready(d3d12_screen(ctx->base.screen), ctx, q_parent, false);
d3d12_foreach_submitted_batch(ctx, old_batch) {
if (old_batch->fence && old_batch->fence->value <= q_parent->fence_value)
- d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
}
accumulate_subresult(ctx, q_parent, 0, &result, true);
query_ensure_ready(d3d12_screen(ctx->base.screen), ctx, query, false);
d3d12_foreach_submitted_batch(ctx, old_batch) {
if (old_batch->fence && old_batch->fence->value <= query->fence_value)
- d3d12_reset_batch(ctx, old_batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, old_batch, OS_TIMEOUT_INFINITE);
}
union pipe_query_result result;
} else {
d3d12_foreach_submitted_batch(ctx, batch) {
if (d3d12_batch_has_references(batch, res->bo, want_to_write))
- d3d12_reset_batch(ctx, batch, PIPE_TIMEOUT_INFINITE);
+ d3d12_reset_batch(ctx, batch, OS_TIMEOUT_INFINITE);
}
}
}
assert(pUploadGPUCompletionFence);
debug_printf("[d3d12_video_decoder] d3d12_video_decoder_end_frame - Waiting on GPU completion fence for "
"buffer_subdata to upload compressed bitstream.\n");
- pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, pUploadGPUCompletionFence, PIPE_TIMEOUT_INFINITE);
+ pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, pUploadGPUCompletionFence, OS_TIMEOUT_INFINITE);
pD3D12Screen->base.fence_reference(&pD3D12Screen->base, &pUploadGPUCompletionFence, NULL);
pipe_resource_reference(&pPipeCompressedBufferObj, NULL);
assert(completion_fence);
debug_printf("[d3d12_video_decoder] d3d12_video_decoder_end_frame - Waiting on GPU completion fence for "
"resource_copy_region on decoded frame.\n");
- pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, completion_fence, PIPE_TIMEOUT_INFINITE);
+ pD3D12Screen->base.fence_finish(&pD3D12Screen->base, NULL, completion_fence, OS_TIMEOUT_INFINITE);
pD3D12Screen->base.fence_reference(&pD3D12Screen->base, &completion_fence, NULL);
pipe_resource_reference(&pPipeSrc, NULL);
}
if(pD3D12Enc->m_bPendingWorkNotFlushed){
uint64_t curBatchFence = pD3D12Enc->m_fenceValue;
d3d12_video_encoder_flush(codec);
- d3d12_video_encoder_sync_completion(codec, curBatchFence, PIPE_TIMEOUT_INFINITE);
+ d3d12_video_encoder_sync_completion(codec, curBatchFence, OS_TIMEOUT_INFINITE);
}
// Call d3d12_video_encoder dtor to make ComPtr and other member's destructors work
debug_printf("[d3d12_video_encoder] d3d12_video_encoder_begin_frame Waiting for completion of in flight resource sets with previous work with fenceValue: %" PRIu64 "\n",
fenceValueToWaitOn);
- d3d12_video_encoder_ensure_fence_finished(codec, fenceValueToWaitOn, PIPE_TIMEOUT_INFINITE);
+ d3d12_video_encoder_ensure_fence_finished(codec, fenceValueToWaitOn, OS_TIMEOUT_INFINITE);
if (!d3d12_video_encoder_reconfigure_session(pD3D12Enc, target, picture)) {
debug_printf("[d3d12_video_encoder] d3d12_video_encoder_begin_frame - Failure on "
assert(pD3D12Enc);
uint64_t requested_metadata_fence = ((uint64_t) feedback);
- d3d12_video_encoder_sync_completion(codec, requested_metadata_fence, PIPE_TIMEOUT_INFINITE);
+ d3d12_video_encoder_sync_completion(codec, requested_metadata_fence, OS_TIMEOUT_INFINITE);
uint64_t current_metadata_slot = (requested_metadata_fence % D3D12_VIDEO_ENC_METADATA_BUFFERS_COUNT);
if (!timeout)
return false;
- if (timeout == PIPE_TIMEOUT_INFINITE) {
+ if (timeout == OS_TIMEOUT_INFINITE) {
util_queue_fence_wait(&fence->ready);
} else {
int64_t abs_timeout = os_time_get_absolute_timeout(timeout);
* but if TC is not used, this will be null. Which is fine, we won't call
* threaded_context_flush() in that case
*/
- fence_flush(&fence->ctx->tc->base, fence, PIPE_TIMEOUT_INFINITE);
+ fence_flush(&fence->ctx->tc->base, fence, OS_TIMEOUT_INFINITE);
assert(fence->fence);
return os_dupfd_cloexec(fence->fence->fence_fd);
}
struct pipe_screen *screen = ctx->screen;
result->b = screen->fence_finish(screen, ctx, q->fence,
- wait ? PIPE_TIMEOUT_INFINITE : 0);
+ wait ? OS_TIMEOUT_INFINITE : 0);
return result->b;
}
fprintf(stderr, "gp job error\n");
if (job->dump) {
- if (lima_job_wait(job, LIMA_PIPE_GP, PIPE_TIMEOUT_INFINITE)) {
+ if (lima_job_wait(job, LIMA_PIPE_GP, OS_TIMEOUT_INFINITE)) {
if (ctx->gp_output) {
float *pos = lima_bo_map(ctx->gp_output);
lima_dump_command_stream_print(
}
if (job->dump) {
- if (!lima_job_wait(job, LIMA_PIPE_PP, PIPE_TIMEOUT_INFINITE)) {
+ if (!lima_job_wait(job, LIMA_PIPE_PP, OS_TIMEOUT_INFINITE)) {
fprintf(stderr, "pp wait error\n");
exit(1);
}
unsigned op = usage & PIPE_MAP_WRITE ?
LIMA_GEM_WAIT_WRITE : LIMA_GEM_WAIT_READ;
- lima_bo_wait(bo, op, PIPE_TIMEOUT_INFINITE);
+ lima_bo_wait(bo, op, OS_TIMEOUT_INFINITE);
}
if (!lima_bo_map(bo))
};
lima_flush_job_accessing_bo(ctx, res->bo, true);
- lima_bo_wait(res->bo, LIMA_GEM_WAIT_WRITE, PIPE_TIMEOUT_INFINITE);
+ lima_bo_wait(res->bo, LIMA_GEM_WAIT_WRITE, OS_TIMEOUT_INFINITE);
if (!lima_bo_map(res->bo))
return;
struct timespec current;
uint64_t current_ns;
- if (*timeout == PIPE_TIMEOUT_INFINITE)
+ if (*timeout == OS_TIMEOUT_INFINITE)
return true;
if (clock_gettime(CLOCK_MONOTONIC, &current))
llvmpipe_flush(pipe, &fence, reason);
if (fence) {
pipe->screen->fence_finish(pipe->screen, NULL, fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
pipe->screen->fence_reference(pipe->screen, &fence, NULL);
}
}
return lp_fence_signalled(f);
if (!lp_fence_signalled(f)) {
- if (timeout != PIPE_TIMEOUT_INFINITE)
+ if (timeout != OS_TIMEOUT_INFINITE)
return lp_fence_timedwait(f, timeout);
lp_fence_wait(f);
if (q->type == PIPE_QUERY_GPU_FINISHED) {
if (wait) {
- r300->rws->buffer_wait(r300->rws, q->buf, PIPE_TIMEOUT_INFINITE,
+ r300->rws->buffer_wait(r300->rws, q->buf, OS_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
vresult->b = TRUE;
} else {
return false;
/* Recompute the timeout after waiting. */
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ if (timeout && timeout != OS_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
return false;
/* Recompute the timeout after all that. */
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ if (timeout && timeout != OS_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
struct pipe_context *ctx = rquery->b.flushed ? NULL : &rctx->b;
result->b = screen->fence_finish(screen, ctx, query->fence,
- wait ? PIPE_TIMEOUT_INFINITE : 0);
+ wait ? OS_TIMEOUT_INFINITE : 0);
return result->b;
}
if (!timeout)
return false;
- if (timeout == PIPE_TIMEOUT_INFINITE) {
+ if (timeout == OS_TIMEOUT_INFINITE) {
util_queue_fence_wait(&sfence->ready);
} else {
if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
return false;
}
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ if (timeout && timeout != OS_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
return false;
/* Recompute the timeout after all that. */
- if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
+ if (timeout && timeout != OS_TIMEOUT_INFINITE) {
int64_t time = os_time_get_nano();
timeout = abs_timeout > time ? abs_timeout - time : 0;
}
struct pipe_screen *screen = sctx->b.screen;
struct pipe_context *ctx = squery->b.flushed ? NULL : &sctx->b;
- result->b = screen->fence_finish(screen, ctx, query->fence, wait ? PIPE_TIMEOUT_INFINITE : 0);
+ result->b = screen->fence_finish(screen, ctx, query->fence, wait ? OS_TIMEOUT_INFINITE : 0);
return result->b;
}
if (frame_trigger || file_trigger) {
/* Wait for last submission */
sctx->ws->fence_wait(sctx->ws, sctx->last_gfx_fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
/* Start SQTT */
si_begin_sqtt(sctx, rcs);
/* Wait for SQTT to finish and read back the bo */
if (sctx->ws->fence_wait(sctx->ws, sctx->last_sqtt_fence,
- PIPE_TIMEOUT_INFINITE) &&
+ OS_TIMEOUT_INFINITE) &&
si_get_sqtt_trace(sctx, &sqtt_trace)) {
struct ac_spm_trace spm_trace;
* have fences.
*/
pipe->screen->fence_finish(pipe->screen, NULL, fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
pipe->screen->fence_reference(pipe->screen, &fence, NULL);
}
} else {
if (SVGA_DEBUG & DEBUG_SYNC) {
if (fence)
svga->pipe.screen->fence_finish(svga->pipe.screen, NULL, fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
}
if (pfence)
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_CONTEXTFINISH);
svga_context_flush(svga, &fence);
- screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &fence, NULL);
SVGA_STATS_TIME_POP(svga_sws(svga));
if (state == SVGA3D_QUERYSTATE_PENDING) {
if (!wait)
return false;
- sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
+ sws->fence_finish(sws, sq->fence, OS_TIMEOUT_INFINITE,
SVGA_FENCE_FLAG_QUERY);
state = sq->queryResult->state;
}
queryState == SVGA3D_QUERYSTATE_NEW) {
if (!wait)
return false;
- sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
+ sws->fence_finish(sws, sq->fence, OS_TIMEOUT_INFINITE,
SVGA_FENCE_FLAG_QUERY);
sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
}
if ((mode == PIPE_RENDER_COND_WAIT ||
mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
- sws->fence_finish(sws, sq->fence, PIPE_TIMEOUT_INFINITE,
+ sws->fence_finish(sws, sq->fence, OS_TIMEOUT_INFINITE,
SVGA_FENCE_FLAG_QUERY);
}
}
if (transfer == SVGA3D_READ_HOST_VRAM) {
svga_context_flush(svga, &fence);
- sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
+ sws->fence_finish(sws, fence, OS_TIMEOUT_INFINITE, 0);
sws->fence_reference(sws, &fence, NULL);
}
}
if (transfer == SVGA3D_READ_HOST_VRAM) {
svga_context_flush(svga, &fence);
- sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
+ sws->fence_finish(sws, fence, OS_TIMEOUT_INFINITE, 0);
hw = sws->buffer_map(sws, st->hwbuf, PIPE_MAP_READ);
assert(hw);
/**
* Wait for the fence to finish.
- * \param timeout in nanoseconds (may be PIPE_TIMEOUT_INFINITE).
+ * \param timeout in nanoseconds (may be OS_TIMEOUT_INFINITE).
* 0 to return immediately, if the API suports it.
* \param flags driver-specific meaning
* \return zero on success.
{
void *map = v3d_bo_map_unsynchronized(bo);
- bool ok = v3d_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
+ bool ok = v3d_bo_wait(bo, OS_TIMEOUT_INFINITE, "bo map");
if (!ok) {
fprintf(stderr, "BO wait for map failed\n");
abort();
perf_debug("stalling on TF counts readback\n");
struct v3d_resource *rsc = v3d_resource(v3d->prim_counts);
- if (v3d_bo_wait(rsc->bo, PIPE_TIMEOUT_INFINITE, "prim-counts")) {
+ if (v3d_bo_wait(rsc->bo, OS_TIMEOUT_INFINITE, "prim-counts")) {
uint32_t *map = v3d_bo_map(rsc->bo) + v3d->prim_counts_offset;
v3d->tf_prims_generated += map[V3D_PRIM_COUNTS_TF_WRITTEN];
/* When we only have a vertex shader with no primitive
if (pquery->perfmon->job_submitted) {
if (!v3d_fence_wait(v3d->screen,
pquery->perfmon->last_job_fence,
- wait ? PIPE_TIMEOUT_INFINITE : 0))
+ wait ? OS_TIMEOUT_INFINITE : 0))
return false;
req.id = pquery->perfmon->kperfmon_id;
{
void *map = vc4_bo_map_unsynchronized(bo);
- bool ok = vc4_bo_wait(bo, PIPE_TIMEOUT_INFINITE, "bo map");
+ bool ok = vc4_bo_wait(bo, OS_TIMEOUT_INFINITE, "bo map");
if (!ok) {
fprintf(stderr, "BO wait for map failed\n");
abort();
if (vc4->last_emit_seqno - vc4->screen->finished_seqno > 5) {
if (!vc4_wait_seqno(vc4->screen,
vc4->last_emit_seqno - 5,
- PIPE_TIMEOUT_INFINITE,
+ OS_TIMEOUT_INFINITE,
"job throttling")) {
fprintf(stderr, "Job throttling failed\n");
}
if (VC4_DBG(ALWAYS_SYNC)) {
if (!vc4_wait_seqno(vc4->screen, vc4->last_emit_seqno,
- PIPE_TIMEOUT_INFINITE, "sync")) {
+ OS_TIMEOUT_INFINITE, "sync")) {
fprintf(stderr, "Wait failed.\n");
abort();
}
}
if (!vc4_wait_seqno(ctx->screen, query->hwperfmon->last_seqno,
- wait ? PIPE_TIMEOUT_INFINITE : 0, "perfmon"))
+ wait ? OS_TIMEOUT_INFINITE : 0, "perfmon"))
return false;
req.id = query->hwperfmon->id;
vws->submit_cmd(vws, cbuf, &sync_fence);
- vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
+ vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
vws->fence_reference(vws, &sync_fence, NULL);
} else {
vws->submit_cmd(vws, cbuf, fence);
struct virgl_winsys *vws = rs->vws;
struct pipe_fence_handle *sync_fence;
virgl_flush_eq(vctx, vctx, &sync_fence);
- vws->fence_wait(vws, sync_fence, PIPE_TIMEOUT_INFINITE);
+ vws->fence_wait(vws, sync_fence, OS_TIMEOUT_INFINITE);
vws->fence_reference(vws, &sync_fence, NULL);
}
}
ctx->flush(ctx, &fence, 0);
if (fence) {
- ctx->screen->fence_finish(ctx->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
+ ctx->screen->fence_finish(ctx->screen, NULL, fence, OS_TIMEOUT_INFINITE);
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
}
}
screen->device_lost = true;
} else if (bs->ctx->batch_states_count > 5000) {
/* throttle in case something crazy is happening */
- zink_screen_timeline_wait(screen, bs->fence.batch_id - 2500, PIPE_TIMEOUT_INFINITE);
+ zink_screen_timeline_wait(screen, bs->fence.batch_id - 2500, OS_TIMEOUT_INFINITE);
}
/* this resets the buffer hashlist for the state's next use */
memset(&bs->buffer_indices_hashlist, -1, sizeof(bs->buffer_indices_hashlist));
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
sync_flush(ctx, zink_batch_state(ctx->last_fence));
- zink_screen_timeline_wait(screen, ctx->last_fence->batch_id, PIPE_TIMEOUT_INFINITE);
+ zink_screen_timeline_wait(screen, ctx->last_fence->batch_id, OS_TIMEOUT_INFINITE);
zink_batch_reset_all(ctx);
}
/* this is a tc mfence, so we're just waiting on the queue mfence to complete
* after being signaled by the real mfence
*/
- if (*timeout_ns == PIPE_TIMEOUT_INFINITE) {
+ if (*timeout_ns == OS_TIMEOUT_INFINITE) {
util_queue_fence_wait(&mfence->ready);
} else {
if (!util_queue_fence_wait_timeout(&mfence->ready, abs_timeout))
return false;
}
- if (*timeout_ns && *timeout_ns != PIPE_TIMEOUT_INFINITE) {
+ if (*timeout_ns && *timeout_ns != OS_TIMEOUT_INFINITE) {
int64_t time_ns = os_time_get_nano();
*timeout_ns = abs_timeout > time_ns ? abs_timeout - time_ns : 0;
}
struct pipe_screen *screen = pctx->screen;
result->b = screen->fence_finish(screen, query->base.flushed ? NULL : pctx,
- query->fence, wait ? PIPE_TIMEOUT_INFINITE : 0);
+ query->fence, wait ? OS_TIMEOUT_INFINITE : 0);
return result->b;
}
queue()->flush();
if (!_fence ||
- !screen->fence_finish(screen, NULL, _fence, PIPE_TIMEOUT_INFINITE))
+ !screen->fence_finish(screen, NULL, _fence, OS_TIMEOUT_INFINITE))
throw error(CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST);
}
screen = ctx->screen->base.screen;
pipe->flush_resource(pipe, dst->texture);
st_context_flush(ctx->st, 0, &fence, NULL, NULL);
- (void) screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
+ (void) screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &fence, NULL);
}
}
/* throttle on the previous fence */
if (drawable->throttle_fence) {
- screen->fence_finish(screen, NULL, drawable->throttle_fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, NULL, drawable->throttle_fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &drawable->throttle_fence, NULL);
}
drawable->throttle_fence = new_fence;
}
screen->base.screen->fence_finish(screen->base.screen, ctx->st->pipe,
- fence, PIPE_TIMEOUT_INFINITE);
+ fence, OS_TIMEOUT_INFINITE);
screen->base.screen->fence_reference(screen->base.screen, &fence, NULL);
drisw_copy_to_front(ctx->st->pipe, drawable, ptex);
st_context_flush(ctx->st, ST_FLUSH_FRONT, &fence, NULL, NULL);
screen->base.screen->fence_finish(screen->base.screen, ctx->st->pipe,
- fence, PIPE_TIMEOUT_INFINITE);
+ fence, OS_TIMEOUT_INFINITE);
screen->base.screen->fence_reference(screen->base.screen, &fence, NULL);
if (drawable->stvis.samples > 1) {
}
/* throttle on the previous fence */
if (drawable->throttle_fence) {
- screen->fence_finish(screen, NULL, drawable->throttle_fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, NULL, drawable->throttle_fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &drawable->throttle_fence, NULL);
}
drawable->throttle_fence = new_fence;
XMesaDisplay xmdpy = xmesa_init_display(b->xm_visual->display);
struct pipe_screen *screen = xmdpy->screen;
xmdpy->screen->fence_finish(screen, NULL, fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
xmdpy->screen->fence_reference(screen, &fence, NULL);
}
}
st_context_flush(c->st, ST_FLUSH_FRONT, &fence, NULL, NULL);
if (fence) {
xmdpy->screen->fence_finish(xmdpy->screen, NULL, fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
xmdpy->screen->fence_reference(xmdpy->screen, &fence, NULL);
}
XFlush( c->xm_visual->display );
state->pctx->screen->fence_finish(state->pctx->screen,
NULL,
- handle, PIPE_TIMEOUT_INFINITE);
+ handle, OS_TIMEOUT_INFINITE);
state->pctx->screen->fence_reference(state->pctx->screen,
&handle, NULL);
}
pipe = NineDevice9_GetPipe(device);
pipe->flush(pipe, &fence, 0);
- (void) screen->fence_finish(screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
+ (void) screen->fence_finish(screen, NULL, fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &fence, NULL);
}
This->need_sync_if_nooverwrite = !(Flags & (D3DLOCK_DISCARD | D3DLOCK_NOOVERWRITE));
{
struct end_present_struct *work = data;
if (work->fence_to_wait) {
- (void) work->screen->fence_finish(work->screen, NULL, work->fence_to_wait, PIPE_TIMEOUT_INFINITE);
+ (void) work->screen->fence_finish(work->screen, NULL, work->fence_to_wait, OS_TIMEOUT_INFINITE);
work->screen->fence_reference(work->screen, &(work->fence_to_wait), NULL);
}
ID3DPresent_PresentBuffer(work->present, work->present_handle, work->hDestWindowOverride, NULL, NULL, NULL, 0);
/* Throttle rendering if needed */
fence = swap_fences_pop_front(This);
if (fence) {
- (void) This->screen->fence_finish(This->screen, NULL, fence, PIPE_TIMEOUT_INFINITE);
+ (void) This->screen->fence_finish(This->screen, NULL, fence, OS_TIMEOUT_INFINITE);
This->screen->fence_reference(This->screen, &fence, NULL);
}
pub(super) fn fence_finish(&self, fence: *mut pipe_fence_handle) {
unsafe {
let s = &mut *self.screen;
- s.fence_finish.unwrap()(s, ptr::null_mut(), fence, PIPE_TIMEOUT_INFINITE as u64);
+ s.fence_finish.unwrap()(s, ptr::null_mut(), fence, OS_TIMEOUT_INFINITE as u64);
}
}
}
'--bitfield-enum', 'nir_opt_if_options',
'--bitfield-enum', 'nir_variable_mode',
'--allowlist-type', 'float_controls',
+ '--allowlist-var', 'OS_.*',
'--allowlist-var', 'PIPE_.*',
'--bitfield-enum', 'pipe_map_flags',
'--allowlist-function', 'std(err|out)_ptr',
#include "util/blob.h"
#include "util/disk_cache.h"
+#include "util/os_time.h"
#include "util/u_printf.h"
#include "util/u_sampler.h"
mtx_lock(&pq->device->mutex);
if (surf->fence) {
screen = pq->device->vscreen->pscreen;
- screen->fence_finish(screen, NULL, surf->fence, PIPE_TIMEOUT_INFINITE);
+ screen->fence_finish(screen, NULL, surf->fence, OS_TIMEOUT_INFINITE);
screen->fence_reference(screen, &surf->fence, NULL);
}
mtx_unlock(&pq->device->mutex);
#include "p_compiler.h"
#include "compiler/shader_enums.h"
+#include "util/os_time.h"
#ifdef __cplusplus
extern "C" {
PIPE_VIEWPORT_SWIZZLE_NEGATIVE_W,
};
-#define PIPE_TIMEOUT_INFINITE 0xffffffffffffffffull
-
-
/**
* Device reset status.
*/
*
* In all other cases, the ctx parameter has no effect.
*
- * \param timeout in nanoseconds (may be PIPE_TIMEOUT_INFINITE).
+ * \param timeout in nanoseconds (may be OS_TIMEOUT_INFINITE).
*/
bool (*fence_finish)(struct pipe_screen *screen,
struct pipe_context *ctx,
* by the device.
*
* The timeout of 0 will only return the status.
- * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
+ * The timeout of OS_TIMEOUT_INFINITE will always wait until the buffer
* is idle.
*/
bool (*buffer_wait)(struct radeon_winsys *ws, struct pb_buffer *buf,
/**
* Wait for the fence and return true if the fence has been signalled.
* The timeout of 0 will only return the status.
- * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
+ * The timeout of OS_TIMEOUT_INFINITE will always wait until the fence
* is signalled.
*/
bool (*fence_wait)(struct radeon_winsys *ws, struct pipe_fence_handle *fence, uint64_t timeout);
}
}
- amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+ amdgpu_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
}
}
- amdgpu_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+ amdgpu_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
/* Ensure all resources are flushed */
ctx->flush(ctx, &fence, PIPE_FLUSH_HINT_FINISH);
if (fence) {
- ctx->screen->fence_finish(ctx->screen, ctx, fence, PIPE_TIMEOUT_INFINITE);
+ ctx->screen->fence_finish(ctx->screen, ctx, fence, OS_TIMEOUT_INFINITE);
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
}
}
/* Ensure all resources are flushed */
ctx->flush(ctx, &fence, PIPE_FLUSH_HINT_FINISH);
if (fence) {
- ctx->screen->fence_finish(ctx->screen, ctx, fence, PIPE_TIMEOUT_INFINITE);
+ ctx->screen->fence_finish(ctx->screen, ctx, fence, OS_TIMEOUT_INFINITE);
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
}
/* Ensure all resources are flushed */
ctx->flush(ctx, &fence, PIPE_FLUSH_HINT_FINISH);
if (fence) {
- ctx->screen->fence_finish(ctx->screen, ctx, fence, PIPE_TIMEOUT_INFINITE);
+ ctx->screen->fence_finish(ctx->screen, ctx, fence, OS_TIMEOUT_INFINITE);
ctx->screen->fence_reference(ctx->screen, &fence, NULL);
}
}
return false;
/* Infinite timeout. */
- if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
+ if (abs_timeout == OS_TIMEOUT_INFINITE) {
radeon_bo_wait_idle(bo);
return true;
}
cs->flush_cs(cs->flush_data,
RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
}
- radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+ radeon_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
}
}
- radeon_bo_wait(rws, (struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+ radeon_bo_wait(rws, (struct pb_buffer*)bo, OS_TIMEOUT_INFINITE,
RADEON_USAGE_READWRITE);
}
memset(&args, 0, sizeof(args));
- os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);
+ os_wait_until_zero(&bo->num_active_ioctls, OS_TIMEOUT_INFINITE);
if (surf) {
if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
{
struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;
- return vmw_fence_finish(vws, fence, PIPE_TIMEOUT_INFINITE, flag);
+ return vmw_fence_finish(vws, fence, OS_TIMEOUT_INFINITE, flag);
}
if (timeout == 0)
return !virgl_drm_resource_is_busy(vws, fence->hw_res);
- if (timeout != PIPE_TIMEOUT_INFINITE) {
+ if (timeout != OS_TIMEOUT_INFINITE) {
int64_t start_time = os_time_get();
timeout /= 1000;
while (virgl_drm_resource_is_busy(vws, fence->hw_res)) {
if (timeout == 0)
return !virgl_vtest_resource_is_busy(vws, res);
- if (timeout != PIPE_TIMEOUT_INFINITE) {
+ if (timeout != OS_TIMEOUT_INFINITE) {
int64_t start_time = os_time_get();
timeout /= 1000;
while (virgl_vtest_resource_is_busy(vws, res)) {
if (fence) {
st->screen->fence_finish(st->screen, NULL, fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
st->screen->fence_reference(st->screen, &fence, NULL);
}
if ((flags & ST_FLUSH_WAIT) && fence && *fence) {
st->screen->fence_finish(st->screen, NULL, *fence,
- PIPE_TIMEOUT_INFINITE);
+ OS_TIMEOUT_INFINITE);
st->screen->fence_reference(st->screen, fence, NULL);
}
#define ONE_SECOND_IN_NS INT64_C(1000000000)
-/* must be equal to PIPE_TIMEOUT_INFINITE */
#define OS_TIMEOUT_INFINITE 0xffffffffffffffffull
/*