mm: fix clear_refs_write locking
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 8 Jan 2021 21:13:41 +0000 (13:13 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 16 Jan 2021 18:46:39 +0000 (10:46 -0800)
Turning page table entries read-only requires the mmap_sem held for
writing.

So stop doing the odd games with turning things from read locks to write
locks and back.  Just get the write lock.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
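
[Editor's note: for context, the locking shape of clear_refs_write() after this patch looks roughly like the sketch below. It is condensed from the diff that follows; the soft-dirty loop and page walk are elided, so this is only an illustration of the single-write-lock structure, not the full function.]

		/* One write lock now covers every clear_refs variant. */
		if (mmap_write_lock_killable(mm)) {
			count = -EINTR;
			goto out_mm;
		}
		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			reset_mm_hiwater_rss(mm);
			goto out_unlock;
		}
		/*
		 * Soft-dirty clearing and the page walk run under the same
		 * write lock, so VM_SOFTDIRTY can be dropped and the page
		 * table entries write-protected directly, with no
		 * read-to-write lock juggling.
		 */
		...
out_unlock:
		mmap_write_unlock(mm);
out_mm:
		mmput(mm);
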
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ee5a235..ab7d700 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1215,41 +1215,26 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        .type = type,
                };
 
+               if (mmap_write_lock_killable(mm)) {
+                       count = -EINTR;
+                       goto out_mm;
+               }
                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
-                       if (mmap_write_lock_killable(mm)) {
-                               count = -EINTR;
-                               goto out_mm;
-                       }
-
                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
-                       mmap_write_unlock(mm);
-                       goto out_mm;
+                       goto out_unlock;
                }
 
-               if (mmap_read_lock_killable(mm)) {
-                       count = -EINTR;
-                       goto out_mm;
-               }
                tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
-                               mmap_read_unlock(mm);
-                               if (mmap_write_lock_killable(mm)) {
-                                       count = -EINTR;
-                                       goto out_mm;
-                               }
-                               for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                                       vma->vm_flags &= ~VM_SOFTDIRTY;
-                                       vma_set_page_prot(vma);
-                               }
-                               mmap_write_downgrade(mm);
-                               break;
+                               vma->vm_flags &= ~VM_SOFTDIRTY;
+                               vma_set_page_prot(vma);
                        }
 
                        mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
@@ -1261,7 +1246,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(&range);
                tlb_finish_mmu(&tlb, 0, -1);
-               mmap_read_unlock(mm);
+out_unlock:
+               mmap_write_unlock(mm);
 out_mm:
                mmput(mm);
        }