#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
-#include "amdgpu_res_cursor.h"
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
* @last: last mapped entry
* @flags: flags for the entries
* @offset: offset into nodes and pages_addr
- * @res: ttm_resource to map
+ * @nodes: array of drm_mm_nodes with the MC addresses
* @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t offset,
- struct ttm_resource *res,
+ struct drm_mm_node *nodes,
dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_res_cursor cursor;
enum amdgpu_sync_mode sync_mode;
+ uint64_t pfn;
int r;
memset(&params, 0, sizeof(params));
else
sync_mode = AMDGPU_SYNC_EXPLICIT;
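+ /*
+  * Convert the byte offset into a CPU page frame number and skip
+  * over the drm_mm_nodes that lie entirely before the mapping.
+  */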
+ pfn = offset >> PAGE_SHIFT;
+ if (nodes) {
+ while (pfn >= nodes->size) {
+ pfn -= nodes->size;
+ ++nodes;
+ }
+ }
+
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
r = -EBUSY;
if (r)
goto error_unlock;
- amdgpu_res_first(res, offset, (last - start + 1) * AMDGPU_GPU_PAGE_SIZE,
- &cursor);
- while (cursor.remaining) {
+ do {
uint64_t tmp, num_entries, addr;
- num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
+
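+ /*
+  * num_entries is counted in GPU pages; clamp it to what is left
+  * of the current node, if any (nodes->size is in CPU pages).
+  */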
+ num_entries = last - start + 1;
+ if (nodes) {
+ addr = nodes->start << PAGE_SHIFT;
+ num_entries = min((nodes->size - pfn) *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries);
+ } else {
+ addr = 0;
+ }
+
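+ /*
+  * For system memory, check how many pages are physically
+  * contiguous so the update can bypass the pages_addr
+  * indirection where possible.
+  */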
if (pages_addr) {
bool contiguous = true;
if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
- uint64_t pfn = cursor.start >> PAGE_SHIFT;
uint64_t count;
contiguous = pages_addr[pfn + 1] ==
	pages_addr[pfn] + PAGE_SIZE;
}
if (!contiguous) {
- addr = cursor.start;
+ addr = pfn << PAGE_SHIFT;
params.pages_addr = pages_addr;
} else {
- addr = pages_addr[cursor.start >> PAGE_SHIFT];
+ addr = pages_addr[pfn];
params.pages_addr = NULL;
}
} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
- addr = bo_adev->vm_manager.vram_base_offset +
- cursor.start;
- } else {
- addr = 0;
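+ /*
+  * VRAM: addr already holds the node start, add the base
+  * offset of the owning device and the offset into the node.
+  */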
+ addr += bo_adev->vm_manager.vram_base_offset;
+ addr += pfn << PAGE_SHIFT;
}
tmp = start + num_entries;
if (r)
goto error_unlock;
- amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
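+ /*
+  * Advance by the CPU pages just handled and step to the
+  * next node once the current one is fully consumed.
+  */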
+ pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ if (nodes && nodes->size == pfn) {
+ pfn = 0;
+ ++nodes;
+ }
start = tmp;
- };
+
+ } while (unlikely(start != last + 1));
r = vm->update_funcs->commit(&params, fence);
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
struct ttm_resource *mem;
+ struct drm_mm_node *nodes;
struct dma_fence **last_update;
struct dma_resv *resv;
uint64_t flags;
if (clear || !bo) {
mem = NULL;
+ nodes = NULL;
resv = vm->root.base.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
bo = gem_to_amdgpu_bo(gobj);
}
mem = &bo->tbo.mem;
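+ /* The drm_mm_nodes backing the resource hold the MC addresses. */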
+ nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT)
pages_addr = bo->tbo.ttm->dma_address;
}
r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,
resv, mapping->start,
mapping->last, update_flags,
- mapping->offset, mem,
+ mapping->offset, nodes,
pages_addr, last_update);
if (r)
return r;