 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates, true);
+				     false, &ctx->duplicates);
 	if (!ret)
 		ctx->reserved = true;
 	else {
 	}
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates, true);
+				     false, &ctx->duplicates);
 	if (!ret)
 		ctx->reserved = true;
 	else
 	}
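
Note: every caller hunk in this patch makes the same mechanical change: the trailing del_lru argument is dropped, because ttm_eu_reserve_buffers() no longer takes BOs off the LRU while they are reserved. For reference, the resulting prototype (it appears verbatim in the ttm_execbuf_util.h hunk at the end of the patch) is:

	int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
				   struct list_head *list, bool intr,
				   struct list_head *dups);

intr selects interruptible waits on contended reservations; dups may be NULL when the caller guarantees the list holds no duplicate entries, in which case a duplicate is treated as an error.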
 	/* Reserve all BOs and page tables for validation */
-	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
-				     true);
+	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
 	if (ret)
 		goto out_free;
 	}
 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-				     false, &duplicate_save, true);
+				     false, &duplicate_save);
 	if (ret) {
 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
 		goto ttm_reserve_fail;
 	}
 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-				   &duplicates, false);
+				   &duplicates);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 	list_add(&csa_tv.head, &list);
 	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
 		return r;
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_unref;
 	tv.num_shared = 1;
 	list_add(&tv.head, &list);
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
 	if (r) {
 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
 		return r;
 		return 0;
 	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-				     !no_intr, NULL, true);
+				     !no_intr, NULL);
 	if (ret)
 		return ret;
 		bo = entry->bo;
 		dma_resv_add_shared_fence(bo->base.resv, &release->base);
-		ttm_bo_add_to_lru(bo);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
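
The qxl hunk above shows the replacement pattern used throughout the patch: instead of re-adding a previously delisted BO with ttm_bo_add_to_lru(), callers bump the still-listed BO with ttm_bo_move_to_lru_tail(). A minimal sketch of the calling convention, with both required locks:

	/* bo->base.resv must already be locked (the callee asserts this) */
	spin_lock(&glob->lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);	/* NULL: no bulk-move section */
	spin_unlock(&glob->lru_lock);

The second parameter is an optional struct ttm_lru_bulk_move used to batch LRU moves (amdgpu uses it to keep per-VM BOs grouped); every call site touched here passes NULL.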
 	if (!vm_bos)
 		return;
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r)
 		goto error_free;
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
 	if (unlikely(r != 0)) {
 		return r;
 	}
 	}
 }
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
-	ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
 static void ttm_bo_ref_bug(struct kref *list_kref)
 {
 	BUG();
 }
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool notify = false;
 		bdev->driver->del_from_lru_notify(bo);
 }
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
-	ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 				     struct ttm_buffer_object *bo)
 {
 	dma_resv_assert_held(bo->base.resv);
 	ttm_bo_del_from_lru(bo);
-	ttm_bo_add_to_lru(bo);
+	ttm_bo_add_mem_to_lru(bo, &bo->mem);
 	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 		switch (bo->mem.mem_type) {
 		 */
 		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_move_to_lru_tail(bo, NULL);
 		}
 		dma_resv_unlock(bo->base.resv);
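
The NO_EVICT flag is cleared before the move because the LRU insert helpers leave NO_EVICT buffers off the lists; dropping the flag here is what makes a pinned BO reclaimable once it has been queued for destruction.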
return ret;
}
- ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
ret = ttm_bo_evict(bo, ctx);
- if (locked) {
+ if (locked)
ttm_bo_unreserve(bo);
- } else {
- spin_lock(&glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&glob->lru_lock);
- }
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
- spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_del_from_lru(bo);
- ttm_bo_add_mem_to_lru(bo, mem);
- spin_unlock(&bo->bdev->glob->lru_lock);
- }
+ spin_lock(&bo->bdev->glob->lru_lock);
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, mem);
+ spin_unlock(&bo->bdev->glob->lru_lock);
return 0;
}
return ret;
}
- if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bdev->glob->lru_lock);
- }
+ spin_lock(&bdev->glob->lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&bdev->glob->lru_lock);
return ret;
}
}
}
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-		ttm_bo_del_from_lru(bo);
-	}
-}
-
 void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 				struct list_head *list)
 {
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
-		if (list_empty(&bo->lru))
-			ttm_bo_add_to_lru(bo);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			   struct list_head *list, bool intr,
-			   struct list_head *dups, bool del_lru)
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 		list_add(&entry->head, list);
 	}
-	if (del_lru) {
-		spin_lock(&glob->lru_lock);
-		ttm_eu_del_from_lru_locked(list);
-		spin_unlock(&glob->lru_lock);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
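
With del_lru gone, LRU maintenance at the execbuf level happens only when the reservation is released again. A condensed caller sketch against the new interface (hypothetical names, error handling trimmed):

	struct ttm_validate_buffer tv = { .bo = bo, .num_shared = 1 };
	struct ww_acquire_ctx ticket;
	LIST_HEAD(list);
	LIST_HEAD(duplicates);
	int r;

	list_add(&tv.head, &list);
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;	/* nothing to undo: the BOs never left the LRU */

	/* ... validate buffers and queue work, producing a dma_fence ... */

	ttm_eu_fence_buffer_objects(&ticket, &list, fence);

On failure after a successful reserve, ttm_eu_backoff_reservation(&ticket, &list) performs the equivalent LRU bump and unlock without attaching a fence.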
 			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
 			dma_resv_add_excl_fence(bo->base.resv, fence);
-		if (list_empty(&bo->lru))
-			ttm_bo_add_to_lru(bo);
-		else
-			ttm_bo_move_to_lru_tail(bo, NULL);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
 	val_buf->bo = &res->backup->base;
 	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
-				     true);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 			  bool intr)
 {
 	return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-				      NULL, true);
+				      NULL);
 }
 /**
  */
 void ttm_bo_put(struct ttm_buffer_object *bo);
-/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_move_to_lru_tail
  *
 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
 /**
  * __ttm_bo_reserve:
  *
 				 bool interruptible, bool no_wait,
 				 struct ww_acquire_ctx *ticket)
 {
-	int ret;
-
 	WARN_ON(!kref_read(&bo->kref));
-	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-
-	return ret;
+	return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
 }
 /**
 	else
 		dma_resv_lock_slow(bo->base.resv, ticket);
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-	else if (ret == -EINTR)
+	if (ret == -EINTR)
 		ret = -ERESTARTSYS;
 	return ret;
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	spin_lock(&bo->bdev->glob->lru_lock);
-	if (list_empty(&bo->lru))
-		ttm_bo_add_to_lru(bo);
-	else
-		ttm_bo_move_to_lru_tail(bo, NULL);
+	ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->glob->lru_lock);
 	dma_resv_unlock(bo->base.resv);
 }
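
Net effect at the reservation layer: ttm_bo_reserve() is now a plain ww_mutex lock and the single remaining LRU bump lives in ttm_bo_unreserve(). A minimal usage sketch:

	int r;

	r = ttm_bo_reserve(bo, true, false, NULL);	/* interruptible, blocking */
	if (r)
		return r;

	/* the BO stays visible on its LRU list while reserved; concurrent
	 * eviction is fended off by the held reservation, not by delisting */

	ttm_bo_unreserve(bo);	/* moves the BO to the LRU tail, then unlocks */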
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 				  struct list_head *list, bool intr,
-				  struct list_head *dups, bool del_lru);
+				  struct list_head *dups);
 /**
  * function ttm_eu_fence_buffer_objects.