spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
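+ /* PT BOs queued up here are freed later by pt_free_work */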
+ INIT_LIST_HEAD(&vm->pt_freed);
+ INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
/* create scheduler entities for page table updates */
r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
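+ /* finish any deferred PT BO frees before the VM is torn down */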
+ flush_work(&vm->pt_free_work);
+
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_set_pasid(adev, vm, 0);
/* BOs which are invalidated and have been updated in the PTs */
struct list_head done;
+ /* PT BOs scheduled to be freed and filled with zeros if vm_resv is not held */
+ struct list_head pt_freed;
+ struct work_struct pt_free_work;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags);
+void amdgpu_vm_pt_free_work(struct work_struct *work);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
amdgpu_bo_unref(&entry->bo);
}
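+/**
+ * amdgpu_vm_pt_free_work - free the PT BOs queued on vm->pt_freed
+ *
+ * @work: the pt_free_work embedded in struct amdgpu_vm
+ */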
+void amdgpu_vm_pt_free_work(struct work_struct *work)
+{
+ struct amdgpu_vm_bo_base *entry, *next;
+ struct amdgpu_vm *vm;
+ LIST_HEAD(pt_freed);
+
+ vm = container_of(work, struct amdgpu_vm, pt_free_work);
+
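+ /* splice the queued entries onto a local list under the status lock */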
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->pt_freed, &pt_freed);
+ spin_unlock(&vm->status_lock);
+
+ /* flush_work in amdgpu_vm_fini ensures vm->root.bo is valid. */
+ amdgpu_bo_reserve(vm->root.bo, true);
+
+ list_for_each_entry_safe(entry, next, &pt_freed, vm_status)
+ amdgpu_vm_pt_free(entry);
+
+ amdgpu_bo_unreserve(vm->root.bo);
+}
+
/**
 * amdgpu_vm_pt_free_dfs - free PD/PT levels
 *
+ * @unlocked: vm resv unlock status
 */
static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *start)
+ struct amdgpu_vm_pt_cursor *start,
+ bool unlocked)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_bo_base *entry;
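+ /*
+ * Without the VM resv held the PT BOs can't be freed here; queue
+ * them on pt_freed and let pt_free_work reserve the root BO and
+ * free them.
+ */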
+ if (unlocked) {
+ spin_lock(&vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ list_move(&entry->vm_status, &vm->pt_freed);
+
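+ /* the DFS walk above doesn't visit the start entry itself */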
+ if (start)
+ list_move(&start->entry->vm_status, &vm->pt_freed);
+ spin_unlock(&vm->status_lock);
+ schedule_work(&vm->pt_free_work);
+ return;
+ }
+
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
amdgpu_vm_pt_free(entry);
*/
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- amdgpu_vm_pt_free_dfs(adev, vm, NULL);
+ amdgpu_vm_pt_free_dfs(adev, vm, NULL, false);
}
/**
if (cursor.entry->bo) {
params->table_freed = true;
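+ /* with params->unlocked set the free is deferred to pt_free_work */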
amdgpu_vm_pt_free_dfs(adev, params->vm,
- &cursor);
+ &cursor,
+ params->unlocked);
}
amdgpu_vm_pt_next(adev, &cursor);
}