}
}
-/* Must be called before TC binds, maps, invalidates, or adds a buffer to a buffer list. */
-static void tc_touch_buffer(struct threaded_context *tc, struct threaded_resource *buf)
-{
-   const struct threaded_context *first_user = buf->first_user;
-
-   /* Fast path exit to avoid additional branches */
-   if (likely(first_user == tc))
-      return;
-
-   if (!first_user)
-      first_user = p_atomic_cmpxchg_ptr(&buf->first_user, NULL, tc);
-
-   /* The NULL check might seem unnecessary here but it's actually critical:
-    * p_atomic_cmpxchg will return NULL if it succeeds, meaning that NULL is
-    * equivalent to "we're the first user" here. (It's equally important not
-    * to ignore the result of the cmpxchg above, since it might fail.)
-    * Without the NULL check, we'd set the flag unconditionally, which is bad.
-    */
-   if (first_user && first_user != tc && !buf->used_by_multiple_contexts)
-      buf->used_by_multiple_contexts = true;
-}
-
-static bool tc_is_buffer_shared(struct threaded_resource *buf)
-{
-   return buf->is_shared || buf->used_by_multiple_contexts;
-}
-
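For readers skimming the revert, the pattern being dropped above boils down to a one-shot compare-and-swap that tags the first context to touch a buffer and raises a flag once any other context shows up. Below is a minimal standalone sketch of that idea, written against C11 atomics rather than Mesa's p_atomic helpers; the type and function names are illustrative only, not from the tree.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct ctx;                                /* stand-in for threaded_context */

struct buf {
   _Atomic(struct ctx *) first_user;       /* NULL until some context touches the buffer */
   bool used_by_multiple_contexts;
};

/* Same shape as the removed tc_touch_buffer(): remember the first context
 * that used the buffer and raise a flag once a different one shows up.
 * The pointer is only ever compared, never dereferenced, so it is harmless
 * if it dangles after that context is destroyed. */
static void touch_buffer(struct ctx *tc, struct buf *b)
{
   struct ctx *first = atomic_load_explicit(&b->first_user, memory_order_relaxed);

   if (first == tc)
      return;                              /* fast path: this context is already the first user */

   if (!first) {
      struct ctx *expected = NULL;
      if (atomic_compare_exchange_strong(&b->first_user, &expected, tc))
         return;                           /* we won the race; tc is the first user */
      first = expected;                    /* lost the race; 'expected' now holds the winner */
   }

   if (first != tc)
      b->used_by_multiple_contexts = true; /* plain store, mirroring the original */
}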
static void
tc_add_to_buffer_list(struct threaded_context *tc, struct tc_buffer_list *next, struct pipe_resource *buf)
{
-   struct threaded_resource *tbuf = threaded_resource(buf);
-   tc_touch_buffer(tc, tbuf);
-
-   uint32_t id = tbuf->buffer_id_unique;
+   uint32_t id = threaded_resource(buf)->buffer_id_unique;
   BITSET_SET(next->buffer_list, id & TC_BUFFER_ID_MASK);
}
static void
tc_bind_buffer(struct threaded_context *tc, uint32_t *binding, struct tc_buffer_list *next, struct pipe_resource *buf)
{
-   struct threaded_resource *tbuf = threaded_resource(buf);
-   tc_touch_buffer(tc, tbuf);
-
-   uint32_t id = tbuf->buffer_id_unique;
+   uint32_t id = threaded_resource(buf)->buffer_id_unique;
   *binding = id;
   BITSET_SET(next->buffer_list, id & TC_BUFFER_ID_MASK);
}
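Both helpers above collapse to the same bookkeeping: take the buffer's unique id, mask it, and set one bit in the per-batch list so a later busyness check is a single bit test. A rough standalone sketch of that bitset scheme follows; the sizes and names are made up for illustration, while the real constants and macros (TC_BUFFER_ID_MASK, BITSET_SET, tc_buffer_list) live in the tree.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sizes only. */
#define ID_BITS    14
#define ID_MASK    ((1u << ID_BITS) - 1)
#define LIST_WORDS ((1u << ID_BITS) / 32)

struct buffer_list {
   uint32_t bits[LIST_WORDS];             /* one bit per masked buffer id */
};

/* Same shape as tc_add_to_buffer_list(): record that a batch may touch
 * the buffer identified by 'id'. */
static void add_to_buffer_list(struct buffer_list *list, uint32_t id)
{
   uint32_t slot = id & ID_MASK;
   list->bits[slot / 32] |= 1u << (slot % 32);
}

/* A later busyness query is a single bit test. In this sketch two buffers
 * may alias the same bit, which can only turn "idle" into "busy", never
 * the reverse. */
static bool buffer_list_contains(const struct buffer_list *list, uint32_t id)
{
   uint32_t slot = id & ID_MASK;
   return (list->bits[slot / 32] >> (slot % 32)) & 1u;
}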
{
   struct threaded_resource *tres = threaded_resource(res);
-   tres->first_user = NULL;
-   tres->used_by_multiple_contexts = false;
   tres->latest = &tres->b;
   tres->cpu_storage = NULL;
   util_range_init(&tres->valid_buffer_range);
   return call_size(tc_replace_buffer_storage);
}
-/* Return true if the buffer has been invalidated or is idle.
- * Note that callers must've called tc_touch_buffer before calling
- * this function. */
+/* Return true if the buffer has been invalidated or is idle. */
static bool
tc_invalidate_buffer(struct threaded_context *tc,
                     struct threaded_resource *tbuf)
   struct pipe_resource *new_buf;
   /* Shared, pinned, and sparse buffers can't be reallocated. */
-   if (tc_is_buffer_shared(tbuf) ||
+   if (tbuf->is_shared ||
       tbuf->is_user_ptr ||
       tbuf->b.flags & (PIPE_RESOURCE_FLAG_SPARSE | PIPE_RESOURCE_FLAG_UNMAPPABLE))
      return false;
   return true;
}
-/* Note that callers must've called tc_touch_buffer first before
- * calling tc_improve_map_buffer_flags. */
static unsigned
tc_improve_map_buffer_flags(struct threaded_context *tc,
                            struct threaded_resource *tres, unsigned usage,
   if (usage & PIPE_MAP_THREAD_SAFE)
      tc_buffer_disable_cpu_storage(resource);
-   tc_touch_buffer(tc, tres);
-
-   /* CPU storage relies on buffer invalidation never failing. With shared buffers,
-    * invalidation might not always be possible, so CPU storage can't be used.
-    */
-   if (tc_is_buffer_shared(tres))
-      tc_buffer_disable_cpu_storage(resource);
-
   usage = tc_improve_map_buffer_flags(tc, tres, usage, box->x, box->width);
   /* If the CPU storage is enabled, return it directly. */
   assert(tres->cpu_storage);
   if (tres->cpu_storage) {
-      /* Invalidations shouldn't fail as long as CPU storage is allowed. */
-      ASSERTED bool invalidated = tc_invalidate_buffer(tc, tres);
-      assert(invalidated);
-
+      tc_invalidate_buffer(tc, tres);
      tc_buffer_subdata(&tc->base, &tres->b,
                        PIPE_MAP_UNSYNCHRONIZED |
                        TC_TRANSFER_MAP_UPLOAD_CPU_STORAGE,
   if (!size)
      return;
-   tc_touch_buffer(tc, tres);
-
   usage |= PIPE_MAP_WRITE;
   /* PIPE_MAP_DIRECTLY suppresses implicit DISCARD_RANGE. */
   struct threaded_context *tc = threaded_context(_pipe);
   if (resource->target == PIPE_BUFFER) {
-      /* This can fail, in which case we simply ignore the invalidation request. */
-      struct threaded_resource *tbuf = threaded_resource(resource);
-      tc_touch_buffer(tc, tbuf);
-      tc_invalidate_buffer(tc, tbuf);
+      tc_invalidate_buffer(tc, threaded_resource(resource));
      return;
   }
struct threaded_resource {
   struct pipe_resource b;
-   /* Pointer to the TC that first used this threaded_resource (buffer). This is used to
-    * allow TCs to determine whether they have been given a buffer that was created by a
-    * different TC, in which case all TCs have to disable busyness tracking and buffer
-    * replacement for that particular buffer.
-    * DO NOT DEREFERENCE. The only operation allowed on this pointer is equality-checking
-    * since it might be dangling if a buffer has been shared and its first_user has
-    * already been destroyed. The pointer is const void to discourage such disallowed usage.
-    * This is NULL if no TC has used this buffer yet.
-    */
-   const void *first_user;
-
   /* Since buffer invalidations are queued, we can't use the base resource
    * for unsynchronized mappings. This points to the latest version of
    * the buffer after the latest invalidation. It's only used for unsynchro-
    */
   struct util_range valid_buffer_range;
-   /* True if multiple threaded contexts have accessed this buffer.
-    * Disables non-multicontext-safe optimizations in TC.
-    * We can't just re-use is_shared for that purpose as that would confuse drivers.
-    */
-   bool used_by_multiple_contexts;
-
   /* Drivers are required to update this for shared resources and user
    * pointers. */
   bool is_shared;