 	/* contains the page directory */
 	struct amdgpu_bo *page_directory;
 	unsigned max_pde_used;
+	struct fence *page_directory_fence;
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt *page_tables;
@@ ... @@
 					  &fence);
 		if (r)
 			goto error_free;
+
 		amdgpu_bo_fence(pd, fence, true);
+		fence_put(vm->page_directory_fence);
+		vm->page_directory_fence = fence_get(fence);
 		fence_put(fence);
 	}
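
The reference counting in this hunk, together with the initialisation and teardown hunks below, gives vm->page_directory_fence a simple lifecycle: start out NULL, drop the previous update's fence and take a reference on the new one after each page-directory update, and put the last reference when the VM is destroyed. As a rough illustration only, here is a small userspace sketch of that pattern; toy_fence and its helpers are hypothetical stand-ins for the kernel's struct fence, fence_get() and fence_put(), not the real API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's refcounted struct fence. */
struct toy_fence {
	int refcount;
	int seqno;
};

static struct toy_fence *toy_fence_create(int seqno)
{
	struct toy_fence *f = malloc(sizeof(*f));

	f->refcount = 1;		/* the creator holds the first reference */
	f->seqno = seqno;
	return f;
}

static struct toy_fence *toy_fence_get(struct toy_fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

/* Like the kernel's fence_put(), putting NULL is a harmless no-op. */
static void toy_fence_put(struct toy_fence *f)
{
	if (f && --f->refcount == 0) {
		printf("fence %d freed\n", f->seqno);
		free(f);
	}
}

/*
 * Mirrors the update hunk above: release the fence of the previous
 * page-directory update (NULL on the very first update), then take the
 * VM's own reference on the new fence before the caller drops its local one.
 */
static void remember_update_fence(struct toy_fence **slot, struct toy_fence *fence)
{
	toy_fence_put(*slot);
	*slot = toy_fence_get(fence);
}

int main(void)
{
	struct toy_fence *pd_fence = NULL;	/* vm->page_directory_fence = NULL */
	struct toy_fence *fence;

	fence = toy_fence_create(1);		/* first page-directory update */
	remember_update_fence(&pd_fence, fence);
	toy_fence_put(fence);			/* caller's fence_put(fence) */

	fence = toy_fence_create(2);		/* a later update */
	remember_update_fence(&pd_fence, fence);	/* fence 1 is freed here */
	toy_fence_put(fence);

	toy_fence_put(pd_fence);		/* teardown, as in the fini hunk */
	return 0;
}

The back-to-back fence_get()/fence_put() in the patch is not redundant: the local fence reference belongs to the submission path and is dropped there, while the reference stored in vm->page_directory_fence has to outlive it.
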
@@ ... @@
 		return -ENOMEM;
 	}
+	vm->page_directory_fence = NULL;
+
 	r = amdgpu_bo_create(adev, pd_size, align, true,
 			     AMDGPU_GEM_DOMAIN_VRAM, 0,
 			     NULL, &vm->page_directory);
@@ ... @@
 	kfree(vm->page_tables);
 	amdgpu_bo_unref(&vm->page_directory);
+	fence_put(vm->page_directory_fence);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		amdgpu_fence_unref(&vm->ids[i].flushed_updates);