mm/sparse-vmemmap: Generalise vmemmap_populate_hugepages()
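
Generalise vmemmap_populate_hugepages() into common code. The PMD-granular
population loop itself is architecture-neutral; the parts that differ per
architecture are split out into two new __weak hooks: vmemmap_set_pmd(),
which installs a huge-page mapping for a freshly allocated PMD_SIZE block,
and vmemmap_check_pmd(), which reports whether an already-populated PMD maps
a huge page that can simply be reused. Weak pmd_init()/pud_init() stubs are
added as well, called right after vmemmap_alloc_block_zero(), so that
architectures whose empty page tables are not all-zeroes can initialise a
newly allocated table before it is linked in. A sketch of how an
architecture might override these hooks follows the diff.
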
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 46ae542..c5398a5 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -196,6 +196,10 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
        return pmd;
 }
 
+void __weak __meminit pmd_init(void *addr)
+{
+}
+
 pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
 {
        pud_t *pud = pud_offset(p4d, addr);
@@ -203,11 +207,16 @@ pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
+               pmd_init(p);
                pud_populate(&init_mm, pud, p);
        }
        return pud;
 }
 
+void __weak __meminit pud_init(void *addr)
+{
+}
+
 p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
 {
        p4d_t *p4d = p4d_offset(pgd, addr);
@@ -215,6 +224,7 @@ p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
+               pud_init(p);
                p4d_populate(&init_mm, p4d, p);
        }
        return p4d;
@@ -285,6 +295,69 @@ int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
        return vmemmap_populate_range(start, end, node, altmap, NULL);
 }
 
+void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+                                     unsigned long addr, unsigned long next)
+{
+}
+
+int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
+                                      unsigned long addr, unsigned long next)
+{
+       return 0;
+}
+
+int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
+                                        int node, struct vmem_altmap *altmap)
+{
+       unsigned long addr;
+       unsigned long next;
+       pgd_t *pgd;
+       p4d_t *p4d;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       for (addr = start; addr < end; addr = next) {
+               next = pmd_addr_end(addr, end);
+
+               pgd = vmemmap_pgd_populate(addr, node);
+               if (!pgd)
+                       return -ENOMEM;
+
+               p4d = vmemmap_p4d_populate(pgd, addr, node);
+               if (!p4d)
+                       return -ENOMEM;
+
+               pud = vmemmap_pud_populate(p4d, addr, node);
+               if (!pud)
+                       return -ENOMEM;
+
+               pmd = pmd_offset(pud, addr);
+               if (pmd_none(READ_ONCE(*pmd))) {
+                       void *p;
+
+                       p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
+                       if (p) {
+                               vmemmap_set_pmd(pmd, p, node, addr, next);
+                               continue;
+                       } else if (altmap) {
+                               /*
+                                * No fallback: In any case we care about, the
+                                * altmap should be reasonably sized and aligned
+                                * such that vmemmap_alloc_block_buf() will always
+                                * succeed. For consistency with the PTE case,
+                                * return an error here as failure could indicate
+                                * a configuration issue with the size of the altmap.
+                                */
+                               return -ENOMEM;
+                       }
+               } else if (vmemmap_check_pmd(pmd, node, addr, next))
+                       continue;
+               if (vmemmap_populate_basepages(addr, next, node, altmap))
+                       return -ENOMEM;
+       }
+       return 0;
+}
+
 /*
  * For compound pages bigger than section size (e.g. x86 1G compound
  * pages with 2M subsection size) fill the rest of sections as tail
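
For illustration only: the two weak PMD hooks above are meant to be
overridden by a strong per-architecture definition. A minimal sketch of such
an override is below; it is not taken from any in-tree architecture, and
pfn_pmd(), pmd_mkhuge(), pmd_leaf() and set_pmd() stand in for whatever
huge-mapping helpers the architecture actually provides. vmemmap_verify() is
the existing helper in mm/sparse-vmemmap.c.

/* Sketch of an arch override: map the PMD_SIZE block as one huge page. */
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry = pmd_mkhuge(pfn_pmd(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL));

	set_pmd(pmd, entry);
}

/* Sketch of an arch override: reuse an already-present huge mapping. */
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	if (!pmd_leaf(*pmd))
		return 0;	/* PTE table: fall back to basepages */

	vmemmap_verify((pte_t *)pmd, node, addr, next);
	return 1;		/* huge mapping already in place */
}

With hooks like these in place, an architecture's vmemmap_populate() can
simply call vmemmap_populate_hugepages(), which falls back to
vmemmap_populate_basepages() per PMD when a huge allocation fails.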