drm/amdkfd: avoid recursive lock in migrations back to RAM
author Alex Sierra <alex.sierra@amd.com>
Fri, 29 Oct 2021 18:30:40 +0000 (13:30 -0500)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 16 Nov 2022 08:58:13 +0000 (09:58 +0100)
[ Upstream commit a6283010e2907a5576f96b839e1a1c82659f137c ]

[Why]:
When we call hmm_range_fault to map memory after a migration, we don't
expect memory to be migrated again as a result of hmm_range_fault. The
driver ensures that all memory is in GPU-accessible locations so that
no migration should be needed. However, there is one corner case where
hmm_range_fault can unexpectedly cause a migration from DEVICE_PRIVATE
back to system memory due to a write-fault when a system memory page in
the same range was mapped read-only (e.g. COW). Ranges with individual
pages in different locations are usually the result of failed page
migrations (e.g. page lock contention). The unexpected migration back
to system memory causes a deadlock from recursive locking in our
driver.
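
For illustration, the recursive path looks roughly like this (a
simplified trace based on the description above; the intermediate
core-mm frames are abbreviated and are not introduced by this patch):

    svm_range_validate_and_map()          /* holds the driver's svms locks */
      amdgpu_hmm_range_get_pages()
        hmm_range_fault()                 /* resolves the COW write fault  */
          handle_mm_fault()               /* DEVICE_PRIVATE swap entry     */
            pgmap->ops->migrate_to_ram()
              svm_migrate_to_ram()        /* takes the same locks again    */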

[How]:
Add a new faulting_task member to struct svm_range_list that holds a
task reference. Set it to "current" right before hmm_range_fault is
called, and clear it again afterwards. The svm_migrate_to_ram callback
compares this member against "current"; if they are equal, the fault
was triggered by our own hmm_range_fault call, so the migration is
ignored.
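
Below is a condensed, compilable userspace sketch of the guard pattern
(the *_example names, the current_task stand-in for the kernel's
"current", and the direct callback invocation are illustrative
assumptions, not the driver's actual API):

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-ins for kernel types; illustrative only. */
    struct task_struct { int pid; };
    static struct task_struct *current_task;   /* plays the role of "current" */

    struct svm_range_list_example {
        struct task_struct *faulting_task;     /* the member this patch adds  */
    };

    /* Fault callback: skip the migration if the fault was triggered by
     * our own hmm_range_fault call on this very task. */
    static int migrate_to_ram_example(struct svm_range_list_example *svms)
    {
        if (svms->faulting_task == current_task) {
            printf("skipping ram migration\n");
            return 0;      /* migrating now would recurse into our locks */
        }
        /* ... normal migration back to system memory ... */
        return 1;
    }

    static void validate_and_map_example(struct svm_range_list_example *svms)
    {
        svms->faulting_task = current_task;    /* WRITE_ONCE() in the driver */
        /* Simulate hmm_range_fault taking a fault and invoking the callback: */
        migrate_to_ram_example(svms);
        svms->faulting_task = NULL;            /* WRITE_ONCE() in the driver */
    }

    int main(void)
    {
        struct task_struct task = { .pid = 42 };
        struct svm_range_list_example svms = { .faulting_task = NULL };

        current_task = &task;
        validate_and_map_example(&svms);       /* prints the skip message */
        return 0;
    }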

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Stable-dep-of: 5b994354af3c ("drm/amdkfd: Fix NULL pointer dereference in svm_migrate_to_ram()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4a16e3c..a458c19 100644
@@ -796,6 +796,11 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
                pr_debug("failed find process at fault address 0x%lx\n", addr);
                return VM_FAULT_SIGBUS;
        }
+       if (READ_ONCE(p->svms.faulting_task) == current) {
+               pr_debug("skipping ram migration\n");
+               kfd_unref_process(p);
+               return 0;
+       }
        addr >>= PAGE_SHIFT;
        pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
 
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6d8f9bb..47ec820 100644
@@ -755,6 +755,7 @@ struct svm_range_list {
        atomic_t                        evicted_ranges;
        struct delayed_work             restore_work;
        DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
+       struct task_struct              *faulting_task;
 };
 
 /* Process data */
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 74e6f61..22a70aa 100644
@@ -1489,9 +1489,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
 
                next = min(vma->vm_end, end);
                npages = (next - addr) >> PAGE_SHIFT;
+               WRITE_ONCE(p->svms.faulting_task, current);
                r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
                                               addr, npages, &hmm_range,
                                               readonly, true, owner);
+               WRITE_ONCE(p->svms.faulting_task, NULL);
                if (r) {
                        pr_debug("failed %d to get svm range pages\n", r);
                        goto unreserve_out;
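
A note on the READ_ONCE()/WRITE_ONCE() pairing above: the guard only
has to be exact when reader and writer are the same task, because the
fault taken inside hmm_range_fault() is handled synchronously on the
thread that set faulting_task. The accessors keep the compiler from
tearing or caching the pointer; no stronger cross-CPU ordering is
needed, since another task that races and reads NULL (or a different
task's pointer) simply falls through to the normal migration path, as
before this patch.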