+/**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 uint64_t start, uint64_t end)
+{
+	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0;
+	uint64_t pt_idx;
+	int r;
+
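+	/* convert the byte range into page directory indices; each
+	 * PDE covers RADEON_VM_PTE_COUNT GPU pages */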
+	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+		uint64_t pde, pt;
+
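+		/* page table already allocated, nothing to do */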
+		if (vm->page_tables[pt_idx])
+			continue;
+
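+		/* allocate the page table through the SA manager; on
+		 * -ENOMEM, try to make room by evicting another VM's
+		 * page tables and retry the allocation */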
+retry:
+		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+				     &vm->page_tables[pt_idx],
+				     RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, false);
+
+		if (r == -ENOMEM) {
+			r = radeon_vm_evict(rdev, vm);
+			if (r)
+				return r;
+			goto retry;
+		} else if (r) {
+			return r;
+		}
+
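+		/* each page directory entry is 8 bytes wide */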
+		pde = vm->pd_gpu_addr + pt_idx * 8;
+
+		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
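+		/* coalesce runs of contiguous PDEs pointing to
+		 * contiguous page tables into one set_page call */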
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pde,
+							last_pt, count, incr,
+							RADEON_VM_PAGE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
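+	/* write out any remaining run of directory updates */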
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+					incr, RADEON_VM_PAGE_VALID);
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
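+	/* convert the byte range into GPU page numbers */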
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		unsigned nptes;
+		uint64_t pte;
+
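+		/* number of PTEs to write this round, clamped to the
+		 * end of the current page table */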
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+		pte += (addr & mask) * 8;
+
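+		/* coalesce contiguous PTE runs, but cap any single
+		 * set_page update at 1 << 11 entries */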
+		if (((last_pte + 8 * count) != pte) ||
+		    ((count + nptes) > 1 << 11)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pte,
+							last_dst, count,
+							RADEON_GPU_PAGE_SIZE,
+							flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
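+	/* write out the final run of table updates */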
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**