/*
 * NOTE(review): this chunk is corrupted patch/merge residue, not compilable
 * C.  Lines beginning with "- " are stray unified-diff removal markers, and
 * the code below mixes hunks from at least three different functions (it
 * resembles the amdgpu VM code: a BO-base invalidate/init tail, an
 * evicted-list walk, and a vm_bo list unlink).  No enclosing function
 * definition is visible here, so no code change is proposed; the file
 * should be restored from its original source before further editing.
 */
/* Fragment 1: tail of a BO-base init/invalidate path.  Presumably: only
 * BOs sharing the VM root's reservation object are handled here, and
 * kernel-type BOs are moved to the "relocated" state -- TODO confirm
 * against the original function. */
if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return;
/* Stray diff removal line (leading "-"): */
- vm->bulk_moveable = false;
if (bo->tbo.type == ttm_bo_type_kernel)
amdgpu_vm_bo_relocated(base);
else
/* Corruption point: `else` is followed by declarations, not a statement.
 * Fragment 2 begins here: locals for a walk over vm->evicted. */
struct amdgpu_vm_bo_base *bo_base, *tmp;
int r = 0;
/* Stray diff removal lines (leading "-"): */
- vm->bulk_moveable &= list_empty(&vm->evicted);
-
/* Safe iteration over the evicted list (entries may be removed while
 * walking -- hence the _safe variant with `tmp`). */
list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
struct amdgpu_vm_bo_base **base;
if (bo) {
/* Stray diff removal lines (leading "-"): */
- if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
- vm->bulk_moveable = false;
-
/* Fragment 3: pointer-to-pointer walk of a singly linked vm_bo chain,
 * apparently searching for &bo_va->base to unlink it.  `bo_va` is not
 * declared anywhere in this view -- it belongs to an unseen enclosing
 * function. */
for (base = &bo_va->base.bo->vm_bo; *base;
base = &(*base)->next) {
if (*base != &bo_va->base)