	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
-	int r;
+	int r, i;

	vm->va = RB_ROOT;
	vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
+		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);

{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	int i;

	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
	amdgpu_vm_free_levels(&vm->root);
	dma_fence_put(vm->last_dir_update);
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		mutex_lock(&id_mgr->lock);
+		if (vm->reserved_vmid[i]) {
+			list_add(&vm->reserved_vmid[i]->list,
+				 &id_mgr->ids_lru);
+			vm->reserved_vmid[i] = NULL;
+		}
+		mutex_unlock(&id_mgr->lock);
+	}
}
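
The fini loop above is the release half of the reservation scheme: any VMID a VM still holds is handed back to the per-hub ids_lru under id_mgr->lock before the VM is torn down, so a destroyed VM can never leak a hardware VMID. For context, a minimal sketch of the matching reserve side might look like the following. The helper name example_vm_reserve_vmid, the -ENOSPC error path, and the omission of any reservation accounting are assumptions for illustration only, not part of this patch; it uses just the fields already visible in the hunks (id_mgr->lock, id_mgr->ids_lru, vm->reserved_vmid[]).

/*
 * Hypothetical sketch: take an idle VMID off the per-hub LRU and park
 * it in vm->reserved_vmid[vmhub], the state that amdgpu_vm_fini()
 * above undoes.
 */
static int example_vm_reserve_vmid(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   unsigned vmhub)
{
	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vm_id *idle;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	if (vm->reserved_vmid[vmhub])		/* already reserved */
		goto unlock;
	if (list_empty(&id_mgr->ids_lru)) {	/* no idle VMID available */
		r = -ENOSPC;
		goto unlock;
	}
	/* Take the least recently used idle VMID out of circulation. */
	idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
	list_del_init(&idle->list);
	vm->reserved_vmid[vmhub] = idle;
unlock:
	mutex_unlock(&id_mgr->lock);
	return r;
}

Because a reserved VMID is simply removed from ids_lru rather than tracked in a separate structure, returning it in fini is a plain list_add() back onto the LRU, which is exactly what the new loop does.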
/**