0, &box);
}
-bool
-zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage)
-{
- uint32_t batch_uses = get_resource_usage(res);
- return batch_uses & usage;
-}
-
ALWAYS_INLINE static void
align_offset_size(const VkDeviceSize alignment, VkDeviceSize *offset, VkDeviceSize *size, VkDeviceSize obj_size)
{
}
if ((usage & PIPE_MAP_WRITE) &&
- (usage & PIPE_MAP_DISCARD_RANGE || (!(usage & PIPE_MAP_READ) && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW))) &&
+ (usage & PIPE_MAP_DISCARD_RANGE || (!(usage & PIPE_MAP_READ) && zink_resource_has_usage(res))) &&
((!res->obj->host_visible) || !(usage & (PIPE_MAP_UNSYNCHRONIZED | PIPE_MAP_PERSISTENT)))) {
/* Check if mapping this buffer would cause waiting for the GPU.
base = map_resource(screen, res);
if (!base)
return NULL;
- if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW)) {
+ if (zink_resource_has_usage(res)) {
if (usage & PIPE_MAP_WRITE)
zink_fence_wait(pctx);
else
void
zink_resource_setup_transfer_layouts(struct zink_context *ctx, struct zink_resource *src, struct zink_resource *dst);
-bool
-zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage);
-
void
zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *resource_object);
bool
zink_resource_object_init_storage(struct zink_context *ctx, struct zink_resource *res);
+
+#ifndef __cplusplus
+
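+/* true if the resource's read or write usage sits in a batch that has been recorded but not yet submitted */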
+static inline bool
+zink_resource_usage_is_unflushed(const struct zink_resource *res)
+{
+ return zink_batch_usage_is_unflushed(res->obj->reads) ||
+ zink_batch_usage_is_unflushed(res->obj->writes);
+}
+
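+/* true if the resource has a write pending in a batch that has not yet been submitted */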
+static inline bool
+zink_resource_usage_is_unflushed_write(const struct zink_resource *res)
+{
+ return zink_batch_usage_is_unflushed(res->obj->writes);
+}
+
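+/* true if the resource's read or write usage belongs to the given batch state */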
+static inline bool
+zink_resource_usage_matches(const struct zink_resource *res, const struct zink_batch_state *bs)
+{
+ return zink_batch_usage_matches(res->obj->reads, bs) ||
+ zink_batch_usage_matches(res->obj->writes, bs);
+}
+
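+/* true if any batch currently has read or write usage of this resource */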
+static inline bool
+zink_resource_has_usage(const struct zink_resource *res)
+{
+ return zink_batch_usage_exists(res->obj->reads) ||
+ zink_batch_usage_exists(res->obj->writes);
+}
+
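+/* true if any read or write usage of this resource is in a not-yet-submitted batch */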
+static inline bool
+zink_resource_has_unflushed_usage(const struct zink_resource *res)
+{
+ return zink_batch_usage_is_unflushed(res->obj->reads) ||
+ zink_batch_usage_is_unflushed(res->obj->writes);
+}
+
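+/* check whether the requested read/write access has completed; returns false if the resource is still in use */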
+static inline bool
+zink_resource_usage_check_completion(struct zink_screen *screen, struct zink_resource *res, enum zink_resource_access access)
+{
+ if (access & ZINK_RESOURCE_ACCESS_READ && !zink_screen_usage_check_completion(screen, res->obj->reads))
+ return false;
+ if (access & ZINK_RESOURCE_ACCESS_WRITE && !zink_screen_usage_check_completion(screen, res->obj->writes))
+ return false;
+ return true;
+}
+
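+/* wait until the batches holding the requested read/write access have finished */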
+static inline void
+zink_resource_usage_wait(struct zink_context *ctx, struct zink_resource *res, enum zink_resource_access access)
+{
+ if (access & ZINK_RESOURCE_ACCESS_READ)
+ zink_batch_usage_wait(ctx, res->obj->reads);
+ if (access & ZINK_RESOURCE_ACCESS_WRITE)
+ zink_batch_usage_wait(ctx, res->obj->writes);
+}
+
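+/* record read or write usage of this resource for the given batch state */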
+static inline void
+zink_resource_usage_set(struct zink_resource *res, struct zink_batch_state *bs, bool write)
+{
+ if (write)
+ zink_batch_usage_set(&res->obj->writes, bs);
+ else
+ zink_batch_usage_set(&res->obj->reads, bs);
+}
+
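+/* drop the given batch state's read/write usage tracking from the resource object */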
+static inline void
+zink_resource_object_usage_unset(struct zink_resource_object *obj, struct zink_batch_state *bs)
+{
+ zink_batch_usage_unset(&obj->reads, bs);
+ zink_batch_usage_unset(&obj->writes, bs);
+}
+
+#endif
#endif