memblock: replace alloc_bootmem_pages with memblock_alloc
Author: Mike Rapoport <rppt@linux.vnet.ibm.com>
Tue, 30 Oct 2018 22:08:58 +0000 (15:08 -0700)
Committer: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 31 Oct 2018 15:54:15 +0000 (08:54 -0700)
The alloc_bootmem_pages() function allocates PAGE_SIZE aligned memory.
memblock_alloc() with alignment set to PAGE_SIZE does exactly the same
thing.

The conversion is done using the following semantic patch:

@@
expression e;
@@
- alloc_bootmem_pages(e)
+ memblock_alloc(e, PAGE_SIZE)

Link: http://lkml.kernel.org/r/1536927045-23536-20-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/c6x/mm/init.c
arch/h8300/mm/init.c
arch/m68k/mm/init.c
arch/m68k/mm/mcfmmu.c
arch/m68k/mm/motorola.c
arch/m68k/mm/sun3mmu.c
arch/sh/mm/init.c
arch/x86/kernel/apic/io_apic.c
arch/x86/mm/init_64.c
drivers/xen/swiotlb-xen.c

index 4cc72b0..dc369ad 100644 (file)
@@ -38,7 +38,8 @@ void __init paging_init(void)
        struct pglist_data *pgdat = NODE_DATA(0);
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
-       empty_zero_page      = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+       empty_zero_page      = (unsigned long) memblock_alloc(PAGE_SIZE,
+                                                             PAGE_SIZE);
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
        /*
index 015287a..5d31ac9 100644 (file)
@@ -67,7 +67,7 @@ void __init paging_init(void)
         * Initialize the bad page table and bad page to point
         * to a couple of allocated pages.
         */
-       empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+       empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
        /*
index 38e2b27..977363e 100644 (file)
@@ -93,7 +93,7 @@ void __init paging_init(void)
 
        high_memory = (void *) end_mem;
 
-       empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+       empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
        /*
         * Set up SFC/DFC registers (user data space).
index f5453d9..38a1d92 100644 (file)
@@ -44,7 +44,7 @@ void __init paging_init(void)
        enum zone_type zone;
        int i;
 
-       empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+       empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        memset((void *) empty_zero_page, 0, PAGE_SIZE);
 
        pg_dir = swapper_pg_dir;
@@ -52,7 +52,7 @@ void __init paging_init(void)
 
        size = num_pages * sizeof(pte_t);
        size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
-       next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+       next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
 
        bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
        pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
index 8bcf57e..2113eec 100644 (file)
@@ -276,7 +276,7 @@ void __init paging_init(void)
         * initialize the bad page table and bad page to point
         * to a couple of allocated pages
         */
-       empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+       empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
        /*
         * Set up SFC/DFC registers
index 4a99799..19c05ab 100644 (file)
@@ -45,7 +45,7 @@ void __init paging_init(void)
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
        unsigned long size;
 
-       empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+       empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
        address = PAGE_OFFSET;
        pg_dir = swapper_pg_dir;
@@ -55,7 +55,7 @@ void __init paging_init(void)
        size = num_pages * sizeof(pte_t);
        size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 
-       next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+       next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
        bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 
        /* Map whole memory from PAGE_OFFSET (0x0E000000) */
index 7713c08..c884b76 100644 (file)
@@ -128,7 +128,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
        if (pud_none(*pud)) {
                pmd_t *pmd;
 
-               pmd = alloc_bootmem_pages(PAGE_SIZE);
+               pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                pud_populate(&init_mm, pud, pmd);
                BUG_ON(pmd != pmd_offset(pud, 0));
        }
@@ -141,7 +141,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
        if (pmd_none(*pmd)) {
                pte_t *pte;
 
-               pte = alloc_bootmem_pages(PAGE_SIZE);
+               pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                pmd_populate_kernel(&init_mm, pmd, pte);
                BUG_ON(pte != pte_offset_kernel(pmd, 0));
        }
index ff0d14c..e25118f 100644 (file)
@@ -2621,7 +2621,8 @@ void __init io_apic_init_mappings(void)
 #ifdef CONFIG_X86_32
 fake_ioapic_page:
 #endif
-                       ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+                       ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
+                                                                   PAGE_SIZE);
                        ioapic_phys = __pa(ioapic_phys);
                }
                set_fixmap_nocache(idx, ioapic_phys);
index dd519f3..f39b512 100644 (file)
@@ -197,7 +197,7 @@ static __ref void *spp_getpage(void)
        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
-               ptr = alloc_bootmem_pages(PAGE_SIZE);
+               ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
index f5c1af4..91a6208 100644 (file)
@@ -217,7 +217,8 @@ retry:
         * Get IO TLB memory from any location.
         */
        if (early)
-               xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+               xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
+                                                 PAGE_SIZE);
        else {
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)