static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- if (level != adev->vm_manager.num_level)
- return 9 * (adev->vm_manager.num_level - level - 1) +
- adev->vm_manager.block_size;
+ unsigned shift = 0xff;
+
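+ /*
+ * Each page directory level resolves 9 bits of the address, while
+ * the leaf page table covers block_size bits, hence the shift of a
+ * directory level is 9 * (AMDGPU_VM_PDB0 - level) + block_size.
+ */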
+ switch (level) {
+ case AMDGPU_VM_PDB2:
+ case AMDGPU_VM_PDB1:
+ case AMDGPU_VM_PDB0:
+ shift = 9 * (AMDGPU_VM_PDB0 - level) +
+ adev->vm_manager.block_size;
- else
- /* For the page tables on the leaves */
- return 0;
+ break;
+ case AMDGPU_VM_PTB:
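+ /* For the page tables on the leaves */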
+ shift = 0;
+ break;
+ default:
+ dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ }
+
+ return shift;
}
/**
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = amdgpu_vm_level_shift(adev, 0);
+ unsigned shift = amdgpu_vm_level_shift(adev,
+ adev->vm_manager.root_level);
- if (level == 0)
+ if (level == adev->vm_manager.root_level)
/* For the root directory */
return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
- else if (level != adev->vm_manager.num_level)
+ else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
else
if (vm->pte_support_ats) {
init_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != adev->vm_manager.num_level)
+ if (level != AMDGPU_VM_PTB)
init_value |= AMDGPU_PDE_PTE;
}
spin_unlock(&vm->status_lock);
}
- if (level < adev->vm_manager.num_level) {
+ if (level < AMDGPU_VM_PTB) {
uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
((1 << shift) - 1);
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
+ return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+ adev->vm_manager.root_level);
}
/**
for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
pbo = pbo->parent;
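+ /*
+ * The loop above counts how deep the BO hangs below the root, so
+ * offset by the root level to get the absolute enum value.
+ */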
+ level += params->adev->vm_manager.root_level;
pt = amdgpu_bo_gpu_offset(bo);
flags = AMDGPU_PTE_VALID;
amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
return 0;
error:
- amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
amdgpu_job_free(job);
return r;
}
struct amdgpu_vm_pt **entry,
struct amdgpu_vm_pt **parent)
{
- unsigned level = 0;
+ unsigned level = p->adev->vm_manager.root_level;
*parent = NULL;
*entry = &p->vm->root;
addr &= (1ULL << shift) - 1;
}
- if (level != p->adev->vm_manager.num_level)
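+ /* Only a walk that reaches the page table block yields a valid entry. */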
+ if (level != AMDGPU_VM_PTB)
*entry = NULL;
}
return;
entry->huge = !!(flags & AMDGPU_PDE_PTE);
- amdgpu_gart_get_vm_pde(p->adev, p->adev->vm_manager.num_level - 1,
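+ /* A PDE pointing at a huge page sits in the lowest directory level. */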
+ amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
&dst, &flags);
if (use_cpu_update) {
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(adev, vm, &vm->root, 0);
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
return r;
}
tmp >>= amdgpu_vm_block_size - 9;
tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
-
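+ /*
+ * num_level counts the directory levels above the PTB, so a depth
+ * of N directories puts the root at PDB(N - 1).
+ */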
+ switch (adev->vm_manager.num_level) {
+ case 3:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+ break;
+ case 2:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+ break;
+ case 1:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+ break;
+ default:
+ dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+ }
/* block size depends on vm size and hw setup */
if (amdgpu_vm_block_size != -1)
adev->vm_manager.block_size =
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
- r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
+ r = amdgpu_bo_create(adev,
+ amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
+ align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
NULL, NULL, init_pde_value, &vm->root.base.bo);
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_levels(adev, &vm->root, 0);
+ amdgpu_vm_free_levels(adev, &vm->root,
+ adev->vm_manager.root_level);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
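
As a sanity check of the shift math, assuming the usual 9-bit block size
and a three-level configuration (root_level == AMDGPU_VM_PDB2, with the
enum ordered PDB2 < PDB1 < PDB0 < PTB), amdgpu_vm_level_shift() works
out to:

	amdgpu_vm_level_shift(adev, AMDGPU_VM_PDB2);	/* 9 * 2 + 9 = 27 */
	amdgpu_vm_level_shift(adev, AMDGPU_VM_PDB1);	/* 9 * 1 + 9 = 18 */
	amdgpu_vm_level_shift(adev, AMDGPU_VM_PDB0);	/* 9 * 0 + 9 =  9 */
	amdgpu_vm_level_shift(adev, AMDGPU_VM_PTB);	/*           =  0 */

With those shifts amdgpu_vm_num_entries() yields max_pfn rounded up to
a multiple of 1 << 27 for the root directory and 512 entries for every
level in between.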