Flags pending validation were stored in a misleadingly named field, 'mask'.
As 'mask' is already used to indicate which bits of a flags field are
changing, it seems better to use a name that reflects the actual purpose of
this field. I chose 'proposed_flags', as these bits may not actually end up
in 'flags', and will in any case be modified when they are moved over.
This affects the API, but not the ABI, of the user-mode interface.
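
For user-mode callers only the parameter name changes. As a rough sketch
(the headers, buffer size, and flag choice below are illustrative only and
not part of this change), an updated drmBOCreate call could look like:

    #include <xf86drm.h>
    #include <xf86mm.h>

    static int create_scratch_bo(int fd, drmBO *buf)
    {
            /* Ask for a page-aligned 64 KiB buffer in TT memory with
             * read/write access; these bits become the buffer's
             * proposed_flags and are moved into flags once validation
             * succeeds. */
            uint64_t flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE;

            return drmBOCreate(fd, 64 * 1024, 0, NULL, flags, 0, buf);
    }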
buf->size = rep->size;
buf->offset = rep->offset;
buf->mapHandle = rep->arg_handle;
- buf->mask = rep->mask;
+ buf->proposedFlags = rep->proposed_flags;
buf->start = rep->buffer_start;
buf->fenceFlags = rep->fence_flags;
buf->replyFlags = rep->rep_flags;
int drmBOCreate(int fd, unsigned long size,
unsigned pageAlignment, void *user_buffer,
- uint64_t mask,
+ uint64_t flags,
unsigned hint, drmBO *buf)
{
struct drm_bo_create_arg arg;
memset(buf, 0, sizeof(*buf));
memset(&arg, 0, sizeof(arg));
- req->mask = mask;
+ req->flags = flags;
req->hint = hint;
req->size = size;
req->page_alignment = pageAlignment;
unsigned handle;
uint64_t mapHandle;
uint64_t flags;
- uint64_t mask;
+ uint64_t proposedFlags;
unsigned mapFlags;
unsigned long size;
unsigned long offset;
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
- if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+ if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|| bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
DRM_ASSERT_LOCKED(&bo->mutex);
bo->ttm = NULL;
- if (bo->mem.mask & DRM_BO_FLAG_WRITE)
+ if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
page_flags |= DRM_TTM_PAGE_WRITE;
switch (bo->type) {
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
*old_mem = *mem;
mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
evict_mem.mm_node = NULL;
evict_mem = bo->mem;
- evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+ evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
if (ret) {
type_ok = drm_bo_mt_compatible(man,
bo->type == drm_bo_type_user,
- mem_type, mem->mask,
+ mem_type, mem->proposed_flags,
&cur_flags);
if (!type_ok)
if (!drm_bo_mt_compatible(man,
bo->type == drm_bo_type_user,
mem_type,
- mem->mask,
+ mem->proposed_flags,
&cur_flags))
continue;
}
EXPORT_SYMBOL(drm_bo_mem_space);
-static int drm_bo_new_mask(struct drm_buffer_object *bo,
- uint64_t new_flags, uint64_t used_mask)
+/*
+ * drm_bo_modify_proposed_flags:
+ *
+ * @bo: the buffer object getting new flags
+ *
+ * @new_flags: the new set of proposed flag bits
+ *
+ * @new_mask: the mask of bits changed in new_flags
+ *
+ * Modify the proposed_flags bits in @bo
+ */
+static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
+ uint64_t new_flags, uint64_t new_mask)
{
- uint32_t new_props;
+ uint32_t new_access;
+ /* Copy unchanging bits from existing proposed_flags */
+ DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
+
if (bo->type == drm_bo_type_user &&
((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
(DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
return -EINVAL;
}
- if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
return -EPERM;
}
return -EPERM;
}
- new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_READ);
+ new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_READ);
- if (!new_props) {
+ if (new_access == 0) {
DRM_ERROR("Invalid buffer object rwx properties\n");
return -EINVAL;
}
- bo->mem.mask = new_flags;
+ bo->mem.proposed_flags = new_flags;
return 0;
}
ret = 0;
mutex_unlock(&bo->mutex);
- DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
- !drm_bo_check_unfenced(bo));
+ DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
+ !drm_bo_check_unfenced(bo));
mutex_lock(&bo->mutex);
if (ret == -EINTR)
return -EAGAIN;
else
rep->arg_handle = 0;
- rep->mask = bo->mem.mask;
+ rep->proposed_flags = bo->mem.proposed_flags;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
/*
* bo->mutex locked.
- * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
*/
int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
- mem.mask = new_mem_flags;
+ mem.proposed_flags = new_mem_flags;
mem.page_alignment = bo->mem.page_alignment;
mutex_lock(&bm->evict_mutex);
static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
{
- uint32_t flag_diff = (mem->mask ^ mem->flags);
+ uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
- if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+ if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
return 0;
if ((flag_diff & DRM_BO_FLAG_CACHED) &&
- (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
- (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
+ (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
+ (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
return 0;
if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
- ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
- (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+ ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
+ (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
return 0;
return 1;
}
uint32_t ftype;
int ret;
- DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
- (unsigned long long) bo->mem.mask,
+ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+ (unsigned long long) bo->mem.proposed_flags,
(unsigned long long) bo->mem.flags);
ret = driver->fence_type(bo, &fence_class, &ftype);
*/
if (!drm_bo_mem_compat(&bo->mem)) {
- ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+ ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
move_unfenced);
if (ret) {
if (ret != -EAGAIN)
* Pinned buffers.
*/
- if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
bo->pinned_mem_type = bo->mem.mem_type;
mutex_lock(&dev->struct_mutex);
list_del_init(&bo->pinned_lru);
if (ret)
return ret;
}
- DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+ /*
+ * Validation has succeeded, move the access and other
+ * non-mapping-related flag bits from the proposed flags to
+ * the active flags
+ */
+
+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
/*
* Finally, adjust lru to be sure.
if (ret)
goto out;
-
- DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
- ret = drm_bo_new_mask(bo, flags, mask);
+ ret = drm_bo_modify_proposed_flags (bo, flags, mask);
if (ret)
goto out;
int drm_buffer_object_create(struct drm_device *dev,
unsigned long size,
enum drm_bo_type type,
- uint64_t mask,
+ uint64_t flags,
uint32_t hint,
uint32_t page_alignment,
unsigned long buffer_start,
bo->mem.page_alignment = page_alignment;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
- bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_MAPPABLE;
- bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_MAPPABLE;
+ bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_MAPPABLE);
+ bo->mem.proposed_flags = 0;
atomic_inc(&bm->count);
- ret = drm_bo_new_mask(bo, mask, mask);
+ /*
+ * Use drm_bo_modify_proposed_flags to error-check the proposed flags
+ */
+ ret = drm_bo_modify_proposed_flags (bo, flags, flags);
if (ret)
goto out_err;
bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+ /*
+ * User buffers cannot be shared
+ */
if (bo_type == drm_bo_type_user)
- req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+ req->flags &= ~DRM_BO_FLAG_SHAREABLE;
ret = drm_buffer_object_create(file_priv->head->dev,
- req->size, bo_type, req->mask,
+ req->size, bo_type, req->flags,
req->hint, req->page_alignment,
req->buffer_start, &entry);
if (ret)
goto out;
ret = drm_bo_add_user_object(file_priv, entry,
- req->mask & DRM_BO_FLAG_SHAREABLE);
+ req->flags & DRM_BO_FLAG_SHAREABLE);
if (ret) {
drm_bo_usage_deref_unlocked(&entry);
goto out;
DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
"cleanup. Removing flag and evicting.\n");
bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
- bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+ bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
}
if (bo->mem.mem_type == mem_type)
struct drm_ttm *ttm = bo->ttm;
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
int ret;
if (old_mem->mem_type == DRM_BO_MEM_TT) {
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
void *new_iomap;
int ret;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
unsigned long i;
unsigned long page;
unsigned long add = 0;
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
struct drm_bo_mem_reg *old_mem = &bo->mem;
int ret;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
struct drm_buffer_object *old_obj;
if (bo->fence)
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
+ /*
+ * Current buffer status flags, indicating
+ * where the buffer is located and which
+ * access modes are in effect
+ */
uint64_t flags;
- uint64_t mask;
+ /**
+ * These are the flags proposed for
+ * a validate operation. If the
+ * validate succeeds, they'll get moved
+ * into the flags field
+ */
+ uint64_t proposed_flags;
+
uint32_t desired_tile_stride;
uint32_t hw_tile_stride;
};
int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
int (*init_mem_type) (struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
- uint32_t(*evict_mask) (struct drm_buffer_object *bo);
+ /*
+ * evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+ uint64_t(*evict_flags) (struct drm_buffer_object *bo);
+ /*
+ * move:
+ *
+ * @bo: the buffer to move
+ *
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ *
+ * @no_wait: whether this should give up and return -EBUSY
+ * if this move would require sleeping
+ *
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
int (*move) (struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+ /*
+ * ttm_cache_flush
+ */
void (*ttm_cache_flush)(struct drm_ttm *ttm);
};
struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
- enum drm_bo_type type, uint64_t mask,
+ enum drm_bo_type type, uint64_t flags,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
- uint32_t new_mask = bo->mem.mask |
+ uint64_t new_flags = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
- err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+ err = drm_bo_move_buffer(bo, new_flags, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
return drm_agp_init_ttm(dev);
}
-int i915_fence_types(struct drm_buffer_object *bo,
+int i915_fence_type(struct drm_buffer_object *bo,
uint32_t *fclass,
uint32_t *type)
{
- if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
-uint32_t i915_evict_mask(struct drm_buffer_object *bo)
+/*
+ * i915_evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+uint64_t i915_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
- .fence_type = i915_fence_types,
+ .fence_type = i915_fence_type,
.invalidate_caches = i915_invalidate_caches,
.init_mem_type = i915_init_mem_type,
- .evict_mask = i915_evict_mask,
+ .evict_flags = i915_evict_flags,
.move = i915_move,
.ttm_cache_flush = i915_flush_ttm,
};
{
/* When we get called, *fclass is set to the requested fence class */
- if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
-static uint32_t
-nouveau_bo_evict_mask(struct drm_buffer_object *bo)
+static uint64_t
+nouveau_bo_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_FORCE_CACHING);
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
.fence_type = nouveau_bo_fence_type,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
- .evict_mask = nouveau_bo_evict_mask,
+ .evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
.ttm_cache_flush= nouveau_bo_flush_ttm
};
return 0;
}
-uint32_t via_evict_mask(struct drm_buffer_object *bo)
+uint64_t via_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
+ * All of the bits related to access mode
+ */
+#define DRM_BO_MASK_ACCESS (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
+/*
* Status flags. Can be read to determine the actual state of a buffer.
* Can also be set in the buffer mask before validation.
*/
#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */
-/* Memory flag mask */
+/*
+ * This is a mask covering all of the memory type flags; easier to just
+ * use a single constant than a bunch of | values. It covers
+ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
+ */
#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
-#define DRM_BO_MASK_MEMTYPE 0x00000000FF0800A0ULL
-
+/*
+ * This adds all of the CPU-mapping options in with the memory
+ * type to label all bits which change how the page gets mapped
+ */
+#define DRM_BO_MASK_MEMTYPE (DRM_BO_MASK_MEM | \
+ DRM_BO_FLAG_CACHED_MAPPED | \
+ DRM_BO_FLAG_CACHED | \
+ DRM_BO_FLAG_MAPPABLE)
+
/* Driver-private flags */
#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
};
struct drm_bo_create_req {
- uint64_t mask;
+ uint64_t flags;
uint64_t size;
uint64_t buffer_start;
unsigned int hint;
struct drm_bo_info_rep {
uint64_t flags;
- uint64_t mask;
+ uint64_t proposed_flags;
uint64_t size;
uint64_t offset;
uint64_t arg_handle;
#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
-extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
- uint32_t *type);
+extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass,
+ uint32_t *type);
extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
-extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
+extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);
extern int i915_move(struct drm_buffer_object *bo, int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
void i915_flush_ttm(struct drm_ttm *ttm);
.fence_type = via_fence_types,
.invalidate_caches = via_invalidate_caches,
.init_mem_type = via_init_mem_type,
- .evict_mask = via_evict_mask,
+ .evict_flags = via_evict_flags,
.move = NULL,
};
#endif
extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
-extern uint32_t via_evict_mask(struct drm_buffer_object *bo);
+extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
extern int via_move(struct drm_buffer_object *bo, int evict,
int no_wait, struct drm_bo_mem_reg *new_mem);
#endif