swap: fold swap_ra_clamp_pfn into swap_ra_info
author     Kairui Song <kasong@tencent.com>
           Mon, 19 Dec 2022 18:58:39 +0000 (02:58 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>
           Thu, 19 Jan 2023 01:12:45 +0000 (17:12 -0800)
This makes the code cleaner.  The helper consists of only two lines of
self-explanatory code and is not reused anywhere else.

And this actually makes the compiled object slightly smaller.

bloat-o-meter results for mm/swap_state.o on x86_64:

add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-35 (-35)
Function                                     old     new   delta
swap_ra_info.constprop                       512     477     -35
Total: Before=8388, After=8353, chg -0.42%
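
For reference, numbers like the above can be regenerated with the in-tree
bloat-o-meter script, assuming a copy of the pre-change object file is kept
aside first (the .old name here is just an example):

    ./scripts/bloat-o-meter mm/swap_state.o.old mm/swap_state.o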

Link: https://lkml.kernel.org/r/20221219185840.25441-4-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swap_state.c

index af8bc12..d8d1711 100644
@@ -693,28 +693,15 @@ void exit_swap_address_space(unsigned int type)
        swapper_spaces[type] = NULL;
 }
 
-static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
-                                    unsigned long faddr,
-                                    unsigned long lpfn,
-                                    unsigned long rpfn,
-                                    unsigned long *start,
-                                    unsigned long *end)
-{
-       *start = max3(lpfn, PFN_DOWN(vma->vm_start),
-                     PFN_DOWN(faddr & PMD_MASK));
-       *end = min3(rpfn, PFN_DOWN(vma->vm_end),
-                   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
-}
-
 static void swap_ra_info(struct vm_fault *vmf,
-                       struct vma_swap_readahead *ra_info)
+                        struct vma_swap_readahead *ra_info)
 {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
-       unsigned long faddr, pfn, fpfn;
+       unsigned long faddr, pfn, fpfn, lpfn, rpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
-       unsigned int max_win, hits, prev_win, win, left;
+       unsigned int max_win, hits, prev_win, win;
 #ifndef CONFIG_64BIT
        pte_t *tpte;
 #endif
@@ -742,16 +729,23 @@ static void swap_ra_info(struct vm_fault *vmf,
 
        /* Copy the PTEs because the page table may be unmapped */
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
-       if (fpfn == pfn + 1)
-               swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
-       else if (pfn == fpfn + 1)
-               swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
-                                 &start, &end);
-       else {
-               left = (win - 1) / 2;
-               swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
-                                 &start, &end);
+       if (fpfn == pfn + 1) {
+               lpfn = fpfn;
+               rpfn = fpfn + win;
+       } else if (pfn == fpfn + 1) {
+               lpfn = fpfn - win + 1;
+               rpfn = fpfn + 1;
+       } else {
+               unsigned int left = (win - 1) / 2;
+
+               lpfn = fpfn - left;
+               rpfn = fpfn + win - left;
        }
+       start = max3(lpfn, PFN_DOWN(vma->vm_start),
+                    PFN_DOWN(faddr & PMD_MASK));
+       end = min3(rpfn, PFN_DOWN(vma->vm_end),
+                  PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
+
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;