Merge branch 'v3.8-samsung-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 434be4a..1324cd7 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -147,21 +147,21 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * This is only useful when the bootmem allocator has already been torn
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part(__va(physaddr), size);
 
-       cursor = PFN_UP(addr);
-       end = PFN_DOWN(addr + size);
+       cursor = PFN_UP(physaddr);
+       end = PFN_DOWN(physaddr + size);
 
        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
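
The PFN_UP()/PFN_DOWN() pair trims the range to whole pages: only page
frames fully contained in [physaddr, physaddr + size) reach the page
allocator, and partial pages at either end stay reserved. A standalone
sketch of that rounding, assuming 4 KiB pages (PAGE_SHIFT of 12); the
demo values are made up:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
	#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

	int main(void)
	{
		unsigned long physaddr = 0x1800;	/* starts mid-page */
		unsigned long size = 0x3000;		/* 12 KiB */

		/* Prints "free PFNs [2, 4)": PFNs 2 and 3 are released,
		 * the partial pages at both edges are left alone. */
		printf("free PFNs [%lu, %lu)\n",
		       PFN_UP(physaddr), PFN_DOWN(physaddr + size));
		return 0;
	}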
@@ -198,8 +198,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                        int order = ilog2(BITS_PER_LONG);
 
                        __free_pages_bootmem(pfn_to_page(start), order);
-                       fixup_zone_present_pages(page_to_nid(pfn_to_page(start)),
-                                       start, start + BITS_PER_LONG);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
@@ -210,9 +208,6 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                                if (vec & 1) {
                                        page = pfn_to_page(start + off);
                                        __free_pages_bootmem(page, 0);
-                                       fixup_zone_present_pages(
-                                               page_to_nid(page),
-                                               start + off, start + off + 1);
                                        count++;
                                }
                                vec >>= 1;
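
When a whole aligned word of the bootmem bitmap is free, the fast path
releases it as a single order-ilog2(BITS_PER_LONG) block; the
partial-word path above shifts through the word and frees one order-0
page per set bit. A minimal userspace model of that bit walk, where
free_one_pfn() is a made-up stand-in for __free_pages_bootmem(page, 0):

	#include <stdio.h>

	/* Invented helper; prints instead of touching a real buddy list. */
	static void free_one_pfn(unsigned long pfn)
	{
		printf("freeing pfn %lu\n", pfn);
	}

	int main(void)
	{
		unsigned long vec = 0x2d;	/* 101101b: set bits = free pages */
		unsigned long start = 1024, off;

		/* Shift through the word, one order-0 page per set bit,
		 * mirroring the loop shape in the hunk above. */
		for (off = 0; vec; off++, vec >>= 1)
			if (vec & 1)
				free_one_pfn(start + off);
		return 0;
	}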
@@ -226,17 +221,30 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
-       while (pages--) {
-               fixup_zone_present_pages(page_to_nid(page),
-                               page_to_pfn(page), page_to_pfn(page) + 1);
+       while (pages--)
                __free_pages_bootmem(page++, 0);
-       }
 
        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
 
        return count;
 }
 
+static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+{
+       struct zone *z;
+
+       /*
+        * In free_area_init_core(), highmem zone's managed_pages is set to
+        * present_pages, and bootmem allocator doesn't allocate from highmem
+        * zones. So there's no need to recalculate managed_pages because all
+        * highmem pages will be managed by the buddy system. Here highmem
+        * zone also includes highmem movable zone.
+        */
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+               if (!is_highmem(z))
+                       z->managed_pages = 0;
+}
+
 /**
  * free_all_bootmem_node - release a node's free pages to the buddy allocator
  * @pgdat: node to be released
@@ -246,6 +254,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
+       reset_node_lowmem_managed_pages(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
 }
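
reset_node_lowmem_managed_pages() zeroes the lowmem counters up front;
elsewhere in this series __free_pages_bootmem() is expected to bump
zone->managed_pages for each page it releases, so once the bootmem pass
finishes, managed_pages counts exactly the pages the buddy allocator
owns, while highmem zones keep their present_pages value. A toy model
of that recount, with struct toy_zone and both helpers invented for the
sketch:

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_zone {
		const char *name;
		bool highmem;
		unsigned long present_pages;
		unsigned long managed_pages;
	};

	/* Mirrors the reset: clear lowmem counters, leave highmem alone. */
	static void toy_reset_lowmem(struct toy_zone *z, int nr)
	{
		for (int i = 0; i < nr; i++)
			if (!z[i].highmem)
				z[i].managed_pages = 0;
	}

	int main(void)
	{
		struct toy_zone zones[] = {
			{ "DMA",     false, 4096,  4096 },
			{ "Normal",  false, 65536, 65536 },
			{ "HighMem", true,  32768, 32768 },
		};

		toy_reset_lowmem(zones, 3);
		/* Pretend the bootmem release hands 60000 Normal pages back. */
		zones[1].managed_pages += 60000;

		for (int i = 0; i < 3; i++)
			printf("%-8s present=%lu managed=%lu\n", zones[i].name,
			       zones[i].present_pages, zones[i].managed_pages);
		return 0;
	}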
 
@@ -258,6 +267,10 @@ unsigned long __init free_all_bootmem(void)
 {
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_lowmem_managed_pages(pgdat);
 
        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);
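
Note the ordering: every online node has its lowmem counters reset
before any node's bootmem is released, so no later reset can clobber a
recount already in progress. A small sketch of that two-phase pattern
(struct toy_node and the page counts are invented):

	#include <stdio.h>

	struct toy_node { unsigned long managed, bootmem_free; };

	int main(void)
	{
		struct toy_node nodes[] = {
			{ .managed = 1000, .bootmem_free = 900 },
			{ .managed = 2000, .bootmem_free = 1900 },
		};
		unsigned long total = 0;

		for (int i = 0; i < 2; i++)	/* phase 1: reset all nodes */
			nodes[i].managed = 0;

		for (int i = 0; i < 2; i++) {	/* phase 2: release + recount */
			nodes[i].managed += nodes[i].bootmem_free;
			total += nodes[i].bootmem_free;
		}
		printf("released %lu pages\n", total);
		return 0;
	}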
@@ -385,21 +398,21 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 
 /**
  * free_bootmem - mark a page range as usable
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * Partial pages will be considered reserved and left as they are.
  *
  * The range must be contiguous but may span node boundaries.
  */
-void __init free_bootmem(unsigned long addr, unsigned long size)
+void __init free_bootmem(unsigned long physaddr, unsigned long size)
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part(__va(physaddr), size);
 
-       start = PFN_UP(addr);
-       end = PFN_DOWN(addr + size);
+       start = PFN_UP(physaddr);
+       end = PFN_DOWN(physaddr + size);
 
        mark_bootmem(start, end, 0, 0);
 }
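
The rename makes the contract explicit: free_bootmem() takes a physical
address, so a caller holding a kernel virtual address must convert it
with __pa() first; passing the virtual address straight through is
exactly the class of bug the new name guards against. A hedged
illustration with a toy linear mapping (PAGE_OFFSET, the stub, and all
values here are invented):

	#include <stdio.h>

	/* Toy identity-plus-offset mapping, as on many 32-bit arches. */
	#define PAGE_OFFSET 0xc0000000UL
	#define __pa(v) ((unsigned long)(v) - PAGE_OFFSET)

	/* Stand-in that just reports the range it was asked to free. */
	static void free_bootmem_stub(unsigned long physaddr, unsigned long size)
	{
		printf("free phys [%#lx, %#lx)\n", physaddr, physaddr + size);
	}

	int main(void)
	{
		unsigned long vaddr = 0xc1000000UL;	/* kernel virtual */

		/* Convert before calling; prints "free phys [0x1000000, 0x1004000)". */
		free_bootmem_stub(__pa(vaddr), 0x4000);
		return 0;
	}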
@@ -447,12 +460,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
        return mark_bootmem(start, end, 1, flags);
 }
 
-int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
-                                  int flags)
-{
-       return reserve_bootmem(phys, len, flags);
-}
-
 static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
 {
@@ -583,27 +590,6 @@ find_block:
        return NULL;
 }
 
-static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
-                                       unsigned long size, unsigned long align,
-                                       unsigned long goal, unsigned long limit)
-{
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc(size, GFP_NOWAIT);
-
-#ifdef CONFIG_HAVE_ARCH_BOOTMEM
-       {
-               bootmem_data_t *p_bdata;
-
-               p_bdata = bootmem_arch_preferred_node(bdata, size, align,
-                                                       goal, limit);
-               if (p_bdata)
-                       return alloc_bootmem_bdata(p_bdata, size, align,
-                                                       goal, limit);
-       }
-#endif
-       return NULL;
-}
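
With the arch hook gone, the only behavior worth keeping was the slab
fallback, which the patch inlines at each call site: once
slab_is_available(), a bootmem request is a bug, but the WARN path
still satisfies it via kzalloc(size, GFP_NOWAIT). A toy model of that
guard (boot_alloc() and the slab_up flag are invented):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	static bool slab_up;	/* stands in for slab_is_available() */

	static void *boot_alloc(size_t size)
	{
		if (slab_up) {
			/* Late caller: warn, then fall back to the
			 * general-purpose allocator, zeroed like kzalloc(). */
			fprintf(stderr, "WARN: bootmem alloc after slab init\n");
			return calloc(1, size);
		}
		printf("bootmem path: %zu bytes\n", size);
		return calloc(1, size);	/* pretend this is the bitmap allocator */
	}

	int main(void)
	{
		void *early = boot_alloc(64);

		slab_up = true;
		void *late = boot_alloc(64);	/* warns but still succeeds */

		free(early);
		free(late);
		return 0;
	}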
-
 static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
@@ -612,9 +598,8 @@ static void * __init alloc_bootmem_core(unsigned long size,
        bootmem_data_t *bdata;
        void *region;
 
-       region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
-       if (region)
-               return region;
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
 
        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
@@ -712,11 +697,9 @@ void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 {
        void *ptr;
 
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
 again:
-       ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
-                                          align, goal, limit);
-       if (ptr)
-               return ptr;
 
        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)