uprobes: use folios more widely in __replace_page()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Fri, 2 Sep 2022 19:46:40 +0000 (20:46 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Mon, 3 Oct 2022 21:02:52 +0000 (14:02 -0700)
Remove a few hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20220902194653.1739778-45-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
kernel/events/uprobes.c

index 401bc2d..70375c7 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/export.h>
 #include <linux/rmap.h>                /* anon_vma_prepare */
 #include <linux/mmu_notifier.h>        /* set_pte_at_notify */
-#include <linux/swap.h>                /* try_to_free_swap */
+#include <linux/swap.h>                /* folio_free_swap */
 #include <linux/ptrace.h>      /* user_enable_single_step */
 #include <linux/kdebug.h>      /* notifier mechanism */
 #include "../../mm/internal.h" /* munlock_vma_page */
@@ -154,8 +154,9 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                struct page *old_page, struct page *new_page)
 {
+       struct folio *old_folio = page_folio(old_page);
        struct mm_struct *mm = vma->vm_mm;
-       DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0);
+       DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
        int err;
        struct mmu_notifier_range range;
 
@@ -169,8 +170,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                        return err;
        }
 
-       /* For try_to_free_swap() below */
-       lock_page(old_page);
+       /* For folio_free_swap() below */
+       folio_lock(old_folio);
 
        mmu_notifier_invalidate_range_start(&range);
        err = -EAGAIN;
@@ -186,7 +187,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                /* no new page, just dec_mm_counter for old_page */
                dec_mm_counter(mm, MM_ANONPAGES);
 
-       if (!PageAnon(old_page)) {
+       if (!folio_test_anon(old_folio)) {
                dec_mm_counter(mm, mm_counter_file(old_page));
                inc_mm_counter(mm, MM_ANONPAGES);
        }
@@ -198,15 +199,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
                                  mk_pte(new_page, vma->vm_page_prot));
 
        page_remove_rmap(old_page, vma, false);
-       if (!page_mapped(old_page))
-               try_to_free_swap(old_page);
+       if (!folio_mapped(old_folio))
+               folio_free_swap(old_folio);
        page_vma_mapped_walk_done(&pvmw);
-       put_page(old_page);
+       folio_put(old_folio);
 
        err = 0;
  unlock:
        mmu_notifier_invalidate_range_end(&range);
-       unlock_page(old_page);
+       folio_unlock(old_folio);
        return err;
 }