mm/hugetlb: introduce minimum hugepage order
author Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Wed, 24 Jun 2015 23:56:59 +0000 (16:56 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 25 Jun 2015 00:49:42 +0000 (17:49 -0700)
Currently the initial value of order in dissolve_free_huge_pages() is 64 or
32 (i.e. 8 * sizeof(void *)), which leads to the following warning from a
static checker:

  mm/hugetlb.c:1203 dissolve_free_huge_pages()
  warn: potential right shift more than type allows '9,18,64'

This is a potential infinite loop: 1 << order shifts by at least the width
of int, which is undefined behaviour and can evaluate to 0, and that value
is used as the step in a for-loop like this:

  for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
      ...
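
As a hedged, standalone illustration (a userspace sketch, not kernel code;
only the loop shape above is taken from the tree), the failure mode looks
like this:

  /* Hypothetical userspace sketch of the hazard; not part of this patch. */
  #include <stdio.h>

  int main(void)
  {
          unsigned int order = 8 * sizeof(void *);   /* 64 on a 64-bit build */
          unsigned long start_pfn = 0, end_pfn = 1UL << 18;
          unsigned long pfn;

          /*
           * Shifting 1 (an int) by 64 is undefined behaviour; depending on
           * how the compiler lowers it, the step can collapse to 0, in which
           * case pfn never advances and this loop never terminates.
           */
          for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
                  printf("pfn = %lu\n", pfn);

          return 0;
  }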

So this patch fixes it by using a global minimum_order, calculated once at
boot time, as the scan step.

    text    data     bss     dec     hex filename
   28313     469   84236  113018   1b97a mm/hugetlb.o
   28256     473   84236  112965   1b945 mm/hugetlb.o (patched)

Fixes: c8721bbbdd36 ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 716465a..10de25c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
 int hugetlb_max_hstate __read_mostly;
 unsigned int default_hstate_idx;
 struct hstate hstates[HUGE_MAX_HSTATE];
+/*
+ * Minimum page order among possible hugepage sizes, set to a proper value
+ * at boot time.
+ */
+static unsigned int minimum_order __read_mostly = UINT_MAX;
 
 __initdata LIST_HEAD(huge_boot_pages);
 
@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned int order = 8 * sizeof(void *);
        unsigned long pfn;
-       struct hstate *h;
 
        if (!hugepages_supported())
                return;
 
-       /* Set scan step to minimum hugepage size */
-       for_each_hstate(h)
-               if (order > huge_page_order(h))
-                       order = huge_page_order(h);
-       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
-       for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
+       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
+       for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
                dissolve_free_huge_page(pfn_to_page(pfn));
 }
 
@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
        struct hstate *h;
 
        for_each_hstate(h) {
+               if (minimum_order > huge_page_order(h))
+                       minimum_order = huge_page_order(h);
+
                /* oversize hugepages were init'ed in early boot */
                if (!hstate_is_gigantic(h))
                        hugetlb_hstate_alloc_pages(h);
        }
+       VM_BUG_ON(minimum_order == UINT_MAX);
 }
 
 static char * __init memfmt(char *buf, unsigned long n)
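
For context, a hedged caller-side sketch (the real call site lives in
mm/memory_hotplug.c and is not part of this diff; the function name and
range handling below are illustrative only):

  /* Hypothetical caller; mirrors how memory offlining drives the interface. */
  static void offline_range_sketch(unsigned long start_pfn,
                                   unsigned long nr_pages)
  {
          /*
           * The range is expected to start on a boundary of the smallest
           * hugepage size, matching the VM_BUG_ON() alignment check above;
           * dissolve_free_huge_pages() then walks it in steps of
           * 1 << minimum_order and dissolves any free hugepages it finds.
           */
          dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
  }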