* is currently evicted. Add the BO to the evicted list to make sure it
* is validated on next VM use to avoid faults.
*/
- spin_lock(&vm->status_lock);
list_move_tail(&base->vm_status, &vm->evicted);
- spin_unlock(&vm->status_lock);
}
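
The hunks in this patch all drive the same state machine: a per-VM BO sits on exactly one of vm->evicted, vm->relocated or vm->moved, depending on what work it still needs. A minimal, self-contained sketch of that flow (illustrative names only, nothing taken from the driver):

#include <stdio.h>

/* Names here are hypothetical stand-ins for the driver's lists. */
enum vmbo_state { VMBO_IDLE, VMBO_EVICTED, VMBO_RELOCATED, VMBO_MOVED };

/* A per-VM BO found evicted lands in EVICTED; validation then routes
 * kernel BOs (page tables) to RELOCATED and user BOs to MOVED, mirroring
 * the ttm_bo_type_kernel check in the validation loop below. */
static enum vmbo_state after_validate(enum vmbo_state s, int is_kernel_bo)
{
	if (s != VMBO_EVICTED)
		return s;
	return is_kernel_bo ? VMBO_RELOCATED : VMBO_MOVED;
}

int main(void)
{
	printf("kernel BO -> %d, user BO -> %d\n",
	       after_validate(VMBO_EVICTED, 1),
	       after_validate(VMBO_EVICTED, 0));
	return 0;
}
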
/**
struct ttm_bo_global *glob = adev->mman.bdev.glob;
int r;
- spin_lock(&vm->status_lock);
while (!list_empty(&vm->evicted)) {
struct amdgpu_vm_bo_base *bo_base;
struct amdgpu_bo *bo;
bo_base = list_first_entry(&vm->evicted,
struct amdgpu_vm_bo_base,
vm_status);
- spin_unlock(&vm->status_lock);
bo = bo_base->bo;
- BUG_ON(!bo);
if (bo->parent) {
r = validate(param, bo);
if (r)
return r;
}
- spin_lock(&vm->status_lock);
- if (bo->tbo.type != ttm_bo_type_kernel)
+ if (bo->tbo.type != ttm_bo_type_kernel) {
+ spin_lock(&vm->moved_lock);
list_move(&bo_base->vm_status, &vm->moved);
- else
+ spin_unlock(&vm->moved_lock);
+ } else {
list_move(&bo_base->vm_status, &vm->relocated);
+ }
}
- spin_unlock(&vm->status_lock);
return 0;
}
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool ready;
-
- spin_lock(&vm->status_lock);
- ready = list_empty(&vm->evicted);
- spin_unlock(&vm->status_lock);
-
- return ready;
+ return list_empty(&vm->evicted);
}
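
With status_lock gone, amdgpu_vm_ready() now reads list_empty() with no serialization at all. That is tolerable because the test is a single aligned pointer comparison against the list head itself, so an unlocked reader gets a momentary yes/no snapshot rather than a torn value. A sketch, assuming the kernel's circular list layout:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct list_head; illustration only. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* An empty circular list points back at itself, so the emptiness test is
 * one pointer load and compare; no lock is needed for a mere hint. */
static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head evicted;

	INIT_LIST_HEAD(&evicted);
	printf("vm ready: %s\n", list_empty(&evicted) ? "yes" : "no");
	return 0;
}
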
/**
pt->parent = amdgpu_bo_ref(parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- spin_lock(&vm->status_lock);
list_move(&entry->base.vm_status, &vm->relocated);
- spin_unlock(&vm->status_lock);
}
if (level < AMDGPU_VM_PTB) {
if (!entry->base.bo)
continue;
- spin_lock(&vm->status_lock);
if (list_empty(&entry->base.vm_status))
list_add(&entry->base.vm_status, &vm->relocated);
- spin_unlock(&vm->status_lock);
amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
}
}
params.func = amdgpu_vm_do_set_ptes;
}
- spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
struct amdgpu_vm_bo_base *bo_base, *parent;
struct amdgpu_vm_pt *pt, *entry;
struct amdgpu_bo *bo;
bo_base = list_first_entry(&vm->relocated,
struct amdgpu_vm_bo_base,
vm_status);
list_del_init(&bo_base->vm_status);
- spin_unlock(&vm->status_lock);
bo = bo_base->bo->parent;
- if (!bo) {
- spin_lock(&vm->status_lock);
+ if (!bo)
continue;
- }
parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
bo_list);
amdgpu_vm_update_pde(&params, vm, pt, entry);
- spin_lock(&vm->status_lock);
if (!vm->use_cpu_for_update &&
(ndw - params.ib->length_dw) < 32)
break;
}
- spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
if (entry->huge) {
/* Add the entry to the relocated list to update it. */
entry->huge = false;
- spin_lock(&p->vm->status_lock);
list_move(&entry->base.vm_status, &p->vm->relocated);
- spin_unlock(&p->vm->status_lock);
}
return;
}
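
The huge-page hunk above relies on list membership acting as the "needs update" flag: list_move() unlinks the entry from whatever list it is on and requeues it, so repeated invalidations leave it queued exactly once, which is why no further bookkeeping is needed once the lock is dropped. A self-contained sketch of that property (simplified list, illustrative names; this variant queues at the tail, which does not affect the property shown):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del_entry(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_entry(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* list_move(): unlink from the current list, then requeue; doing it twice
 * still leaves the entry on the target list exactly once. */
static void list_move_entry(struct list_head *e, struct list_head *head)
{
	list_del_entry(e);
	list_add_tail_entry(e, head);
}

int main(void)
{
	struct list_head relocated, entry;

	INIT_LIST_HEAD(&relocated);
	INIT_LIST_HEAD(&entry);
	list_move_entry(&entry, &relocated); /* huge PDE split: mark dirty */
	list_move_entry(&entry, &relocated); /* invalidated again: no dup  */
	printf("queued exactly once: %d\n",
	       relocated.next == &entry && entry.next == &relocated);
	return 0;
}
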
amdgpu_asic_flush_hdp(adev, NULL);
}
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->moved_lock);
list_del_init(&bo_va->base.vm_status);
+ spin_unlock(&vm->moved_lock);
/* If the BO is not in its preferred location add it back to
* the evicted list so that it gets validated again on the
* next command submission.
*/
- spin_lock(&vm->status_lock);
if (!(bo->preferred_domains &
amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
list_add_tail(&bo_va->base.vm_status, &vm->evicted);
- spin_unlock(&vm->status_lock);
list_splice_init(&bo_va->invalids, &bo_va->valids);
bo_va->cleared = clear;
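
The check above compares the BO's current placement against its preferred domain mask. A hedged sketch of that mask logic, with made-up domain bits standing in for the AMDGPU_GEM_DOMAIN_* flags:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical domain bits; the real flag values live in the driver. */
#define DEMO_DOMAIN_CPU  0x1u
#define DEMO_DOMAIN_GTT  0x2u
#define DEMO_DOMAIN_VRAM 0x4u

/* A BO placed outside its preferred mask must be validated again, i.e.
 * re-queued on the evicted list, as the hunk above does. */
static bool needs_revalidation(unsigned int preferred, unsigned int current)
{
	return !(preferred & current);
}

int main(void)
{
	/* VRAM-preferred BO sitting in GTT: goes back to evicted. */
	printf("%d\n", needs_revalidation(DEMO_DOMAIN_VRAM, DEMO_DOMAIN_GTT));
	/* BO that also tolerates GTT: stays off the list. */
	printf("%d\n", needs_revalidation(DEMO_DOMAIN_VRAM | DEMO_DOMAIN_GTT,
					  DEMO_DOMAIN_GTT));
	return 0;
}
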
bool clear;
int r = 0;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->moved_lock);
while (!list_empty(&vm->moved)) {
struct amdgpu_bo_va *bo_va;
struct reservation_object *resv;
bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->moved_lock);
resv = bo_va->base.bo->tbo.resv;
if (!clear && resv != vm->root.base.bo->tbo.resv)
reservation_object_unlock(resv);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->moved_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->moved_lock);
return r;
}
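
The loop above keeps the pattern that motivates the new, narrower moved_lock: hold the spinlock only while peeking at the list, drop it across the potentially sleeping update, then retake it for the next iteration. A runnable user-space approximation (simplified to a singly linked list that pops before unlocking; the driver instead lets the update path delist the entry):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int payload; };

static pthread_spinlock_t moved_lock;
static struct node *moved;

static void process(struct node *n)
{
	/* Stand-in for amdgpu_vm_bo_update(): may sleep, so it must run
	 * without the spinlock held. */
	printf("updating BO %d\n", n->payload);
}

static void handle_moved(void)
{
	pthread_spin_lock(&moved_lock);
	while (moved) {
		struct node *n = moved;

		moved = n->next;
		pthread_spin_unlock(&moved_lock); /* drop across the work */

		process(n);

		pthread_spin_lock(&moved_lock);   /* retake for the next */
	}
	pthread_spin_unlock(&moved_lock);
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	pthread_spin_init(&moved_lock, PTHREAD_PROCESS_PRIVATE);
	moved = &a;
	handle_moved();
	pthread_spin_destroy(&moved_lock);
	return 0;
}
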
amdgpu_vm_prt_get(adev);
if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->moved_lock);
if (list_empty(&bo_va->base.vm_status))
list_add(&bo_va->base.vm_status, &vm->moved);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->moved_lock);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
list_del(&bo_va->base.bo_list);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->moved_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->moved_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
bo_base->moved = true;
if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
- spin_lock(&bo_base->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&bo_base->vm_status, &vm->evicted);
else
list_move_tail(&bo_base->vm_status,
&vm->evicted);
- spin_unlock(&bo_base->vm->status_lock);
continue;
}
if (bo->tbo.type == ttm_bo_type_kernel) {
- spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
list_add(&bo_base->vm_status, &vm->relocated);
- spin_unlock(&bo_base->vm->status_lock);
continue;
}
- spin_lock(&bo_base->vm->status_lock);
+ spin_lock(&bo_base->vm->moved_lock);
if (list_empty(&bo_base->vm_status))
list_add(&bo_base->vm_status, &vm->moved);
- spin_unlock(&bo_base->vm->status_lock);
+ spin_unlock(&bo_base->vm->moved_lock);
}
}
vm->va = RB_ROOT_CACHED;
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
- spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->evicted);
INIT_LIST_HEAD(&vm->relocated);
+ spin_lock_init(&vm->moved_lock);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->freed);
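
For orientation, a minimal sketch of the per-VM state after this patch (hypothetical demo_vm type, pthread spinlock standing in for spinlock_t): evicted and relocated are serialized by the root BO's reservation object, so moved is the only list that still needs a dedicated lock.

#include <pthread.h>

struct demo_list { struct demo_list *next, *prev; };

/* Hypothetical, simplified mirror of the amdgpu_vm list setup above. */
struct demo_vm {
	struct demo_list evicted;      /* protected by the root reservation */
	struct demo_list relocated;    /* likewise reservation-protected    */
	pthread_spinlock_t moved_lock; /* the one remaining per-VM lock     */
	struct demo_list moved;        /* protected by moved_lock           */
	struct demo_list freed;
};

static void demo_list_init(struct demo_list *h) { h->next = h->prev = h; }

static void demo_vm_init(struct demo_vm *vm)
{
	demo_list_init(&vm->evicted);
	demo_list_init(&vm->relocated);
	pthread_spin_init(&vm->moved_lock, PTHREAD_PROCESS_PRIVATE);
	demo_list_init(&vm->moved);
	demo_list_init(&vm->freed);
}

int main(void)
{
	struct demo_vm vm;

	demo_vm_init(&vm);
	pthread_spin_destroy(&vm.moved_lock);
	return 0;
}
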