mmap locking API: fix missed mmap_sem references in comments
author Florian Rommel <mail@florommel.de>
Tue, 10 May 2022 01:20:53 +0000 (18:20 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 13 May 2022 14:20:07 +0000 (07:20 -0700)
Commit c1e8d7c6a7a6 ("mmap locking API: convert mmap_sem comments") missed
replacing some references to mmap_sem with mmap_lock because they were
misspelled (mm_sem instead of mmap_sem).

Link: https://lkml.kernel.org/r/20220503113333.214124-1-mail@florommel.de
Signed-off-by: Florian Rommel <mail@florommel.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
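
For context, c1e8d7c6a7a6 belongs to the series that hid the old mm->mmap_sem
rwsem behind the mmap locking API, which is why comments are expected to say
"mmap_lock" rather than "mmap_sem". Below is a minimal sketch of what the
renamed API looks like to a caller, assuming the upstream
include/linux/mmap_lock.h wrappers; the helper function itself is illustrative
only:

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/*
 * Pre-conversion code took the semaphore directly:
 *
 *	down_read(&mm->mmap_sem);
 *	...
 *	up_read(&mm->mmap_sem);
 *
 * Post-conversion code goes through wrappers, so the thing being held
 * is naturally referred to as "mmap_lock" rather than "mmap_sem".
 */
static void walk_mm_read_locked(struct mm_struct *mm)
{
	mmap_read_lock(mm);	/* shared (read) lock on mm->mmap_lock */
	/* ... read-only VMA lookups, e.g. find_vma(mm, addr) ... */
	mmap_read_unlock(mm);
}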
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
mm/mmap.c

index 65552bb..e83cb1c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -184,7 +184,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
 
        /* read_user_ptr may take the mm->mmap_lock.
         * release srbm_mutex to avoid circular dependency between
-        * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+        * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(adev);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
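
The comment fixed above documents a classic circular-wait concern: another
path ends up wanting srbm_mutex while (transitively) holding mmap_lock, so
this path must not touch user memory, which may take mmap_lock, while
srbm_mutex is held. A driver-agnostic sketch of that pattern follows;
hw_mutex and read_user_value() are made-up names for illustration, and only
mutex_lock()/mutex_unlock() and get_user() are real kernel APIs:

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(hw_mutex);	/* stand-in for srbm_mutex */

/*
 * Caller holds hw_mutex.  Reading *uptr can fault and take the current
 * task's mmap_lock, so drop hw_mutex around the user access to avoid
 * completing the cycle hw_mutex -> mmap_lock -> ... -> hw_mutex.
 */
static int read_user_value(u32 __user *uptr, u32 *val)
{
	int ret;

	mutex_unlock(&hw_mutex);	/* never hold it across a possible fault */
	ret = get_user(*val, uptr);	/* may take mmap_lock */
	mutex_lock(&hw_mutex);		/* reacquire before touching the HW again */

	return ret;
}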
index 9dc5f2a..870f352 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -208,7 +208,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
 
        /* read_user_ptr may take the mm->mmap_lock.
         * release srbm_mutex to avoid circular dependency between
-        * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
+        * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(adev);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
index 08ba083..8392056 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -110,7 +110,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
  * @bo: The buffer object
  * @vmf: The fault structure handed to the callback
  *
- * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
+ * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
  * during long waits, and after the wait the callback will be restarted. This
  * is to allow other threads using the same virtual memory space concurrent
  * access to map(), unmap() completely unrelated buffer objects. TTM buffer
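
The "mmap_lock can be dropped during long waits" behaviour this comment refers
to is the core mm's fault-retry protocol. Below is a simplified sketch of a
fault callback using it, assuming the stock helpers
fault_flag_allow_retry_first(), mmap_read_unlock() and VM_FAULT_RETRY; the
busy check is a stub, and TTM's real ttm_bo_vm_reserve() does considerably
more:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Stub for "the object is reserved by someone else and we would have
 * to wait a long time"; a real driver would try-lock its reservation.
 */
static bool example_resource_busy(struct vm_fault *vmf)
{
	return false;
}

static vm_fault_t example_fault(struct vm_fault *vmf)
{
	if (example_resource_busy(vmf) &&
	    fault_flag_allow_retry_first(vmf->flags) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		/*
		 * Drop the mmap_lock instead of sleeping with it held;
		 * the core mm will restart the fault afterwards.
		 */
		mmap_read_unlock(vmf->vma->vm_mm);
		return VM_FAULT_RETRY;
	}

	/*
	 * Fast path (or retry not allowed): keep the mmap_lock, do any
	 * wait here, then install the mapping, e.g. via vmf_insert_pfn(),
	 * and return its result.
	 */
	return VM_FAULT_NOPAGE;
}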
index d7e120a..7f7d982 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1285,7 +1285,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  * the same as 'old', the other will be the new one that is trying
  * to share the anon_vma.
  *
- * NOTE! This runs with mm_sem held for reading, so it is possible that
+ * NOTE! This runs with mmap_lock held for reading, so it is possible that
  * the anon_vma of 'old' is concurrently in the process of being set up
  * by another page fault trying to merge _that_. But that's ok: if it
  * is being set up, that automatically means that it will be a singleton
@@ -1299,7 +1299,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  *
  * We also make sure that the two vma's are compatible (adjacent,
  * and with the same memory policies). That's all stable, even with just
- * a read lock on the mm_sem.
+ * a read lock on the mmap_lock.
  */
 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
 {