Merge branch 'x86/urgent' into x86/mm, to pick up dependent fix
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 8f18fec..9686535 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/memory.h>
 #include <linux/memory_hotplug.h>
+#include <linux/memremap.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
 #include <linux/kcore.h>
@@ -715,6 +716,12 @@ static void __meminit free_pagetable(struct page *page, int order)
 {
        unsigned long magic;
        unsigned int nr_pages = 1 << order;
+       struct vmem_altmap *altmap = to_vmem_altmap((unsigned long) page);
+
+       if (altmap) {
+               vmem_altmap_free(altmap, nr_pages);
+               return;
+       }
 
        /* bootmem page has reserved flag */
        if (PageReserved(page)) {
@@ -1018,13 +1025,19 @@ int __ref arch_remove_memory(u64 start, u64 size)
 {
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
+       struct page *page = pfn_to_page(start_pfn);
+       struct vmem_altmap *altmap;
        struct zone *zone;
        int ret;
 
-       zone = page_zone(pfn_to_page(start_pfn));
-       kernel_physical_mapping_remove(start, start + size);
+       /* With altmap the first mapped page is offset from @start */
+       altmap = to_vmem_altmap((unsigned long) page);
+       if (altmap)
+               page += vmem_altmap_offset(altmap);
+       zone = page_zone(page);
        ret = __remove_pages(zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);
+       kernel_physical_mapping_remove(start, start + size);
 
        return ret;
 }
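
[Context, not part of the patch] With an altmap, the head of the hot-plugged range holds the driver reservation plus the pfns backing the struct page array itself, so pfn_to_page(start_pfn) does not belong to the zone being removed. The offset applied in the hunk above is, in sketch form (the body mirrors what kernel/memremap.c computes; the sketch_ prefix marks it as illustrative):

/* First pfn past the reserved, self-hosting head of an altmap range. */
static unsigned long sketch_vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return altmap->reserve + altmap->free;
}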
@@ -1223,7 +1236,7 @@ static void __meminitdata *p_start, *p_end;
 static int __meminitdata node_start;
 
 static int __meminit vmemmap_populate_hugepages(unsigned long start,
-                                               unsigned long end, int node)
+               unsigned long end, int node, struct vmem_altmap *altmap)
 {
        unsigned long addr;
        unsigned long next;
@@ -1246,7 +1259,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
                if (pmd_none(*pmd)) {
                        void *p;
 
-                       p = vmemmap_alloc_block_buf(PMD_SIZE, node);
+                       p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
                        if (p) {
                                pte_t entry;
 
@@ -1267,7 +1280,8 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                                continue;
-                       }
+                       } else if (altmap)
+                               return -ENOMEM; /* no fallback */
                } else if (pmd_large(*pmd)) {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                        continue;
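
[Context, not part of the patch] When an altmap is supplied, the PMD-sized vmemmap block is carved out of the device's own pfn range by counter arithmetic rather than taken from the page allocator, which is also why there is deliberately no fallback: a section mixing altmap-backed and page-allocator-backed vmemmap pages could not be freed correctly by free_pagetable() above. A simplified sketch of that allocation (alignment padding omitted, sketch_ name made up; the real path is __vmemmap_alloc_block_buf() in mm/sparse-vmemmap.c):

static void *sketch_altmap_alloc_block(struct vmem_altmap *altmap,
				       unsigned long nr_pfns)
{
	unsigned long next = altmap->base_pfn + altmap->reserve + altmap->alloc;

	if (altmap->alloc + nr_pfns > altmap->free)
		return NULL;		/* caller turns this into -ENOMEM */
	altmap->alloc += nr_pfns;
	return __va(PFN_PHYS(next));	/* direct-map address of the block */
}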
@@ -1281,11 +1295,16 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
+       struct vmem_altmap *altmap = to_vmem_altmap(start);
        int err;
 
        if (cpu_has_pse)
-               err = vmemmap_populate_hugepages(start, end, node);
-       else
+               err = vmemmap_populate_hugepages(start, end, node, altmap);
+       else if (altmap) {
+               pr_err_once("%s: no cpu support for altmap allocations\n",
+                               __func__);
+               err = -ENOMEM;
+       } else
                err = vmemmap_populate_basepages(start, end, node);
        if (!err)
                sync_global_pgds(start, end - 1, 0);
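
[Context, not part of the patch] The altmap reaches this path from the device-memory hotplug caller: the driver describes how much of its range should back the struct page array, and to_vmem_altmap() later resolves that description from the vmemmap address being populated. A hypothetical set of values, only to make the sizing concrete (the numbers and the example_ name are invented; 64 bytes per struct page assumed):

/* A 16 GiB device range whose vmemmap is carved from its own head. */
static struct vmem_altmap example_altmap = {
	.base_pfn = 0x100000,		/* range starts at 4 GiB */
	.reserve  = 0,			/* no extra driver reservation */
	/* one struct page per data page, expressed in pfns */
	.free     = (16UL << 30) / PAGE_SIZE * sizeof(struct page) / PAGE_SIZE,
};

The base-page fallback is rejected above because vmemmap_populate_basepages() allocates with no altmap awareness, so only the PMD (PSE) path can honour such a reservation.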