drm/amdgpu: cleanup amdgpu_hmm_range_get_pages
Author: Christian König <christian.koenig@amd.com>
Wed, 9 Nov 2022 11:41:08 +0000 (12:41 +0100)
Committer: Alex Deucher <alexander.deucher@amd.com>
Thu, 17 Nov 2022 05:23:43 +0000 (00:23 -0500)
Remove unused parameters and cleanup dead code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

index a68072f..a48ea62 100644 (file)
@@ -158,10 +158,9 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
 }
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
-                              struct mm_struct *mm, struct page **pages,
-                              uint64_t start, uint64_t npages,
-                              struct hmm_range **phmm_range, bool readonly,
-                              bool mmap_locked, void *owner)
+                              uint64_t start, uint64_t npages, bool readonly,
+                              void *owner, struct page **pages,
+                              struct hmm_range **phmm_range)
 {
        struct hmm_range *hmm_range;
        unsigned long timeout;
@@ -194,14 +193,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
 
 retry:
        hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
-
-       if (likely(!mmap_locked))
-               mmap_read_lock(mm);
-
        r = hmm_range_fault(hmm_range);
-
-       if (likely(!mmap_locked))
-               mmap_read_unlock(mm);
        if (unlikely(r)) {
                /*
                 * FIXME: This timeout should encompass the retry from
index 4e596a1..13ed94d 100644 (file)
 #include <linux/interval_tree.h>
 
 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
-                              struct mm_struct *mm, struct page **pages,
-                              uint64_t start, uint64_t npages,
-                              struct hmm_range **phmm_range, bool readonly,
-                              bool mmap_locked, void *owner);
+                              uint64_t start, uint64_t npages, bool readonly,
+                              void *owner, struct page **pages,
+                              struct hmm_range **phmm_range);
 int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
index ddb13b1..c8169ce 100644 (file)
@@ -692,9 +692,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
        }
 
        readonly = amdgpu_ttm_tt_is_readonly(ttm);
-       r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
-                                      ttm->num_pages, range, readonly,
-                                      true, NULL);
+       r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
+                                      readonly, NULL, pages, range);
 out_unlock:
        mmap_read_unlock(mm);
        if (r)
index 1cf7dcb..814f998 100644 (file)
@@ -1596,9 +1596,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                next = min(vma->vm_end, end);
                npages = (next - addr) >> PAGE_SHIFT;
                WRITE_ONCE(p->svms.faulting_task, current);
-               r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
-                                              addr, npages, &hmm_range,
-                                              readonly, true, owner);
+               r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+                                              readonly, owner, NULL,
+                                              &hmm_range);
                WRITE_ONCE(p->svms.faulting_task, NULL);
                if (r) {
                        pr_debug("failed %d to get svm range pages\n", r);