drm/amdkfd: Fix partial migration bugs
Author:     Philip Yang <Philip.Yang@amd.com>
AuthorDate: Fri, 3 Jun 2022 13:19:34 +0000 (09:19 -0400)
Commit:     Alex Deucher <alexander.deucher@amd.com>
CommitDate: Fri, 3 Jun 2022 20:43:22 +0000 (16:43 -0400)
When migrating a range from system memory to VRAM, if a system page
cannot be locked or unmapped, we do a partial migration and leave those
pages in system memory. Several bugs were found in the page copy and
GPU mapping update for this situation:

1. The copy to VRAM should use migrate->npages, the total number of pages
of the range, as npages, not migrate->cpages, the number of pages that can
be migrated (see the first sketch after this list).

2. After a partial copy, advance the VRAM res cursor by j + 1 pages: the
j system pages just copied, plus 1 page whose copy is skipped (also in
the first sketch).

3. The copy to RAM should collect all contiguous VRAM pages and copy
them together (see the second sketch).

4. Calls to amdgpu_vm_update_range should pass the offset in bytes, not
as a number of pages (see the third sketch).
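
For reference, a condensed sketch of the fixed copy-to-vram loop. This is
an illustration, not the exact code: DMA mapping, pfn bookkeeping, the
batch-size flush and the trailing flush are trimmed, and adev, src, dst,
cursor, r and mfence are assumed set up by the surrounding
svm_migrate_copy_to_vram() as in the hunks below.

  uint64_t npages = migrate->npages;	/* fix 1: all pages of the range */
  uint64_t i, j;

  for (i = j = 0; i < npages; i++) {
  	struct page *spage = migrate_pfn_to_page(migrate->src[i]);

  	if (spage && !is_zone_device_page(spage)) {
  		/* migratable system page, batch it for one GART copy */
  		j++;
  		continue;
  	}
  	/* page i cannot be migrated and stays in system memory */
  	if (j) {
  		r = svm_migrate_copy_memory_gart(adev, src + i - j,
  						 dst + i - j, j,
  						 FROM_RAM_TO_VRAM, mfence);
  		if (r)
  			goto out_free_vram_pages;
  		/*
  		 * fix 2: move past the j pages just copied plus the
  		 * one skipped page, hence j + 1
  		 */
  		amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
  		j = 0;
  	} else {
  		amdgpu_res_next(&cursor, PAGE_SIZE);
  	}
  }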
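
Similarly, a condensed sketch of the copy-to-ram gathering loop from
svm_migrate_copy_to_ram(), with system-page allocation and mapping
trimmed. j counts the VRAM pages batched since the last copy, so the
contiguity test must use j > 0, not i > 0; with i > 0 the check could
compare against a src[i - 1] belonging to an earlier batch or to a page
that was never in VRAM.

  for (i = j = 0; i < npages; i++) {
  	struct page *spage = migrate_pfn_to_page(migrate->src[i]);

  	if (!spage || !is_zone_device_page(spage)) {
  		/* not a VRAM page, flush whatever is batched so far */
  		if (j) {
  			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
  							 src + i - j, j,
  							 FROM_VRAM_TO_RAM,
  							 mfence);
  			if (r)
  				goto out_oom;
  			j = 0;
  		}
  		continue;
  	}
  	src[i] = svm_migrate_addr(adev, spage);
  	/* fix 3: only break the batch on a real VRAM discontinuity */
  	if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
  		r = svm_migrate_copy_memory_gart(adev, dst + i - j,
  						 src + i - j, j,
  						 FROM_VRAM_TO_RAM, mfence);
  		if (r)
  			goto out_oom;
  		j = 0;
  	}
  	j++;
  }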
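
Finally, the unit fix: the offset argument of amdgpu_vm_update_range()
is in bytes, so the page offset of the mapped run within the range has
to be shifted by PAGE_SHIFT, as in the kfd_svm.c hunk below.

  r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
  			     last_start, prange->start + i, pte_flags,
  			     /* fix 4: offset in bytes, not pages */
  			     (last_start - prange->start) << PAGE_SHIFT,
  			     bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
  			     NULL, dma_addr, &vm->last_update);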

Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 997650d..e44376c 100644
@@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                         struct migrate_vma *migrate, struct dma_fence **mfence,
                         dma_addr_t *scratch)
 {
-       uint64_t npages = migrate->cpages;
+       uint64_t npages = migrate->npages;
        struct device *dev = adev->dev;
        struct amdgpu_res_cursor cursor;
        dma_addr_t *src;
@@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                                                mfence);
                                if (r)
                                        goto out_free_vram_pages;
-                               amdgpu_res_next(&cursor, j << PAGE_SHIFT);
+                               amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
                                j = 0;
                        } else {
                                amdgpu_res_next(&cursor, PAGE_SIZE);
@@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
                        continue;
                }
                src[i] = svm_migrate_addr(adev, spage);
-               if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
+               if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
                        r = svm_migrate_copy_memory_gart(adev, dst + i - j,
                                                         src + i - j, j,
                                                         FROM_VRAM_TO_RAM,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 3bd0f1a..7b33224 100644
@@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
                r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
                                           last_start, prange->start + i,
                                           pte_flags,
-                                          last_start - prange->start,
+                                          (last_start - prange->start) << PAGE_SHIFT,
                                           bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
                                           NULL, dma_addr, &vm->last_update);