pagewalk: only split huge pages when necessary
author	Dave Hansen <dave@linux.vnet.ibm.com>
Tue, 22 Mar 2011 23:32:56 +0000 (16:32 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Wed, 23 Mar 2011 00:44:04 +0000 (17:44 -0700)
Right now, if a mm_walk has either ->pte_entry or ->pmd_entry set, it will
unconditionally split any transparent huge pages it runs into.  In
practice, that means that anyone doing a

	cat /proc/$pid/smaps

will unconditionally break down every huge page in the process and depend
on khugepaged to re-collapse it later.  This is fairly suboptimal.

This patch changes that behavior.  It teaches each ->pmd_entry handler
(there are five) that it must break down THPs itself.  Also, the
_generic_ code will never break down a THP unless a ->pte_entry handler is
actually set.

This means that the ->pmd_entry handlers can now choose to deal with THPs
without breaking them down.
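
For illustration, here is a minimal sketch of a THP-aware ->pmd_entry
handler in the style this patch enables.  The handler name, the counting
logic, and the use of ->private are hypothetical; pmd_trans_huge() and
split_huge_page_pmd() are the interfaces the patch itself uses, and a
real handler would also need the locking the in-tree handlers take
(omitted here for brevity):

	static int count_pages_pmd_entry(pmd_t *pmd, unsigned long addr,
					 unsigned long end, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pmd_trans_huge(*pmd)) {
			/* account the whole huge page in one step,
			 * without splitting it */
			*count += (end - addr) >> PAGE_SHIFT;
			return 0;
		}
		/* not huge: walk the ptes by hand here (setting
		 * ->pte_entry instead would make the core split
		 * THPs for us) */
		return 0;
	}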

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: David Rientjes <rientjes@google.com>
Reviewed-by: Eric B Munson <emunson@mgebm.net>
Tested-by: Eric B Munson <emunson@mgebm.net>
Cc: Michael J Wolf <mjwolf@us.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/proc/task_mmu.c
include/linux/mm.h
mm/memcontrol.c
mm/pagewalk.c

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 60b9148..78fd362 100644
@@ -343,6 +343,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        struct page *page;
        int mapcount;
 
+       split_huge_page_pmd(walk->mm, pmd);
+
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
@@ -467,6 +469,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        spinlock_t *ptl;
        struct page *page;
 
+       split_huge_page_pmd(walk->mm, pmd);
+
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
@@ -623,6 +627,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        pte_t *pte;
        int err = 0;
 
+       split_huge_page_pmd(walk->mm, pmd);
+
        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
        for (; addr != end; addr += PAGE_SIZE) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 901435e..294104e 100644
@@ -914,6 +914,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ *            this handler is required to be able to handle
+ *            pmd_trans_huge() pmds.  They may simply choose to
+ *            split_huge_page() instead of handling it explicitly.
  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
  * @pte_hole: if set, called for each hole at all levels
  * @hugetlb_entry: if set, called for each hugetlb entry
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9e0f05e..e1ee6ad 100644
@@ -4763,7 +4763,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
        pte_t *pte;
        spinlock_t *ptl;
 
-       VM_BUG_ON(pmd_trans_huge(*pmd));
+       split_huge_page_pmd(walk->mm, pmd);
+
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                if (is_target_pte_for_mc(vma, addr, *pte, NULL))
@@ -4925,8 +4926,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
        pte_t *pte;
        spinlock_t *ptl;
 
+       split_huge_page_pmd(walk->mm, pmd);
 retry:
-       VM_BUG_ON(pmd_trans_huge(*pmd));
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; addr += PAGE_SIZE) {
                pte_t ptent = *(pte++);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 7cfa6ae..c3450d5 100644
@@ -33,19 +33,35 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 
        pmd = pmd_offset(pud, addr);
        do {
+again:
                next = pmd_addr_end(addr, end);
-               split_huge_page_pmd(walk->mm, pmd);
-               if (pmd_none_or_clear_bad(pmd)) {
+               if (pmd_none(*pmd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
+               /*
+                * This implies that each ->pmd_entry() handler
+                * needs to know about pmd_trans_huge() pmds
+                */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
-               if (!err && walk->pte_entry)
-                       err = walk_pte_range(pmd, addr, next, walk);
+               if (err)
+                       break;
+
+               /*
+                * Check this here so we only break down trans_huge
+                * pages when we _need_ to
+                */
+               if (!walk->pte_entry)
+                       continue;
+
+               split_huge_page_pmd(walk->mm, pmd);
+               if (pmd_none_or_clear_bad(pmd))
+                       goto again;
+               err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);
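
After this change, a walk that sets only ->pmd_entry never has its THPs
split behind its back.  A hedged usage sketch under the same assumptions
as the handler above (walk_page_range() took (start, end, walk) at the
time of this commit, and the caller must hold mmap_sem):

	static unsigned long count_pages(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{
		unsigned long count = 0;
		struct mm_walk walk = {
			.pmd_entry	= count_pages_pmd_entry,
			.mm		= mm,
			.private	= &count,
		};

		/* no ->pte_entry, so walk_pmd_range() will not call
		 * split_huge_page_pmd() on our behalf */
		walk_page_range(start, end, &walk);
		return count;
	}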