Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author		Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 30 Apr 2013 15:40:35 +0000 (08:40 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 30 Apr 2013 15:40:35 +0000 (08:40 -0700)
Pull x86 mm changes from Ingo Molnar:
 "Misc smaller changes all over the map"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/iommu/dmar: Remove warning for HPET scope type
  x86/mm/gart: Drop unnecessary check
  x86/mm/hotplug: Put kernel_physical_mapping_remove() declaration in CONFIG_MEMORY_HOTREMOVE
  x86/mm/fixmap: Remove unused FIX_CYCLONE_TIMER
  x86/mm/numa: Simplify some bit mangling
  x86/mm: Re-enable DEBUG_TLBFLUSH for X86_32
  x86/mm/cpa: Cleanup split_large_page() and its callee
  x86: Drop always empty .text..page_aligned section

arch/x86/include/asm/fixmap.h
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c

diff --combined arch/x86/include/asm/fixmap.h
@@@ -104,10 -104,9 +104,7 @@@ enum fixed_addresses 
        FIX_LI_PCIA,    /* Lithium PCI Bridge A */
        FIX_LI_PCIB,    /* Lithium PCI Bridge B */
  #endif
 -#ifdef CONFIG_X86_F00F_BUG
 -      FIX_F00F_IDT,   /* Virtual mapping for IDT */
 -#endif
 +      FIX_RO_IDT,     /* Virtual mapping for read-only IDT */
- #ifdef CONFIG_X86_CYCLONE_TIMER
-       FIX_CYCLONE_TIMER, /*cyclone timer register*/
- #endif
  #ifdef CONFIG_X86_32
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
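
Aside (not part of the diff): slots in enum fixed_addresses become
compile-time-constant virtual addresses counted down from FIXADDR_TOP, so
removing the dead FIX_CYCLONE_TIMER slot merely compacts the fixmap region.
A minimal sketch of the mechanism, using the real __fix_to_virt() and
fix_to_virt() names from fixmap.h; the exact form of the bounds check
varies by kernel version:

	/* Each fixmap slot is one page, allocated downward from
	 * FIXADDR_TOP, so a slot index translates directly to a
	 * fixed virtual address. */
	#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

	static __always_inline unsigned long fix_to_virt(const unsigned int idx)
	{
		BUILD_BUG_ON(idx >= __end_of_fixed_addresses); /* unknown slot */
		return __fix_to_virt(idx);
	}
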
diff --combined arch/x86/mm/init_64.c
@@@ -1011,11 -1011,15 +1011,12 @@@ remove_pagetable(unsigned long start, u
        flush_tlb_all();
  }
  
 -void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages)
 +void __ref vmemmap_free(unsigned long start, unsigned long end)
  {
 -      unsigned long start = (unsigned long)memmap;
 -      unsigned long end = (unsigned long)(memmap + nr_pages);
 -
        remove_pagetable(start, end, false);
  }
  
+ #ifdef CONFIG_MEMORY_HOTREMOVE
  static void __meminit
  kernel_physical_mapping_remove(unsigned long start, unsigned long end)
  {
        remove_pagetable(start, end, true);
  }
  
- #ifdef CONFIG_MEMORY_HOTREMOVE
  int __ref arch_remove_memory(u64 start, u64 size)
  {
        unsigned long start_pfn = start >> PAGE_SHIFT;
@@@ -1064,9 -1067,10 +1064,9 @@@ void __init mem_init(void
  
        /* clear_bss() already clear the empty_zero_page */
  
 -      reservedpages = 0;
 -
 -      /* this will put all low memory onto the freelists */
        register_page_bootmem_info();
 +
 +      /* this will put all memory onto the freelists */
        totalram_pages = free_all_bootmem();
  
        absent_pages = absent_pages_in_range(0, max_pfn);
@@@ -1281,17 -1285,18 +1281,17 @@@ static long __meminitdata addr_start, a
  static void __meminitdata *p_start, *p_end;
  static int __meminitdata node_start;
  
 -int __meminit
 -vmemmap_populate(struct page *start_page, unsigned long size, int node)
 +static int __meminit vmemmap_populate_hugepages(unsigned long start,
 +                                              unsigned long end, int node)
  {
 -      unsigned long addr = (unsigned long)start_page;
 -      unsigned long end = (unsigned long)(start_page + size);
 +      unsigned long addr;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
  
 -      for (; addr < end; addr = next) {
 -              void *p = NULL;
 +      for (addr = start; addr < end; addr = next) {
 +              next = pmd_addr_end(addr, end);
  
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
  
                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;
  
 -              if (!cpu_has_pse) {
 -                      next = (addr + PAGE_SIZE) & PAGE_MASK;
 -                      pmd = vmemmap_pmd_populate(pud, addr, node);
 -
 -                      if (!pmd)
 -                              return -ENOMEM;
 -
 -                      p = vmemmap_pte_populate(pmd, addr, node);
 -
 -                      if (!p)
 -                              return -ENOMEM;
 -
 -                      addr_end = addr + PAGE_SIZE;
 -                      p_end = p + PAGE_SIZE;
 -              } else {
 -                      next = pmd_addr_end(addr, end);
 +              pmd = pmd_offset(pud, addr);
 +              if (pmd_none(*pmd)) {
 +                      void *p;
  
 -                      pmd = pmd_offset(pud, addr);
 -                      if (pmd_none(*pmd)) {
 +                      p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 +                      if (p) {
                                pte_t entry;
  
 -                              p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 -                              if (!p)
 -                                      return -ENOMEM;
 -
                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));
  
                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
 -                      } else
 -                              vmemmap_verify((pte_t *)pmd, node, addr, next);
 +                              continue;
 +                      }
 +              } else if (pmd_large(*pmd)) {
 +                      vmemmap_verify((pte_t *)pmd, node, addr, next);
 +                      continue;
                }
 -
 +              pr_warn_once("vmemmap: falling back to regular page backing\n");
 +              if (vmemmap_populate_basepages(addr, next, node))
 +                      return -ENOMEM;
        }
 -      sync_global_pgds((unsigned long)start_page, end - 1);
        return 0;
  }
  
 +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 +{
 +      int err;
 +
 +      if (cpu_has_pse)
 +              err = vmemmap_populate_hugepages(start, end, node);
 +      else
 +              err = vmemmap_populate_basepages(start, end, node);
 +      if (!err)
 +              sync_global_pgds(start, end - 1);
 +      return err;
 +}
 +
  #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
  void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
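
Aside (not part of the diff): the vmemmap_populate() signature change above
moves the struct page-to-address conversion out of the arch code. Judging
from the removed conversion lines, the call shape changes roughly as
follows; the map/PAGES_PER_SECTION caller context is illustrative, not the
exact mm/sparse-vmemmap.c code:

	/* Before: the arch turned the memmap pointer into a range itself. */
	vmemmap_populate(map, PAGES_PER_SECTION, nid);

	/* After: the caller passes a plain virtual address range. */
	vmemmap_populate((unsigned long)map,
			 (unsigned long)(map + PAGES_PER_SECTION), nid);
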
diff --combined arch/x86/mm/pageattr.c
@@@ -467,7 -467,7 +467,7 @@@ try_preserve_large_page(pte_t *kpte, un
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
 -      old_prot = new_prot = req_prot = pte_pgprot(old_pte);
 +      old_prot = req_prot = pte_pgprot(old_pte);
  
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
  
  	/*
  	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
  	 * set otherwise pmd_present/pmd_huge will return true even on
  	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
 -      if (pgprot_val(new_prot) & _PAGE_PRESENT)
 -              pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
 +      if (pgprot_val(req_prot) & _PAGE_PRESENT)
 +              pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
        else
 -              pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
 +              pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
  
 -      new_prot = canon_pgprot(new_prot);
 +      req_prot = canon_pgprot(req_prot);
  
        /*
         * old_pte points to the large page base address. So we need
@@@ -542,13 -542,14 +542,14 @@@ out_unlock
        return do_split;
  }
  
- int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
+ static int
+ __split_large_page(pte_t *kpte, unsigned long address, struct page *base)
  {
+       pte_t *pbase = (pte_t *)page_address(base);
        unsigned long pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *tmp;
        pgprot_t ref_prot;
-       struct page *base = virt_to_page(pbase);
  
        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
  
  static int split_large_page(pte_t *kpte, unsigned long address)
  {
-       pte_t *pbase;
        struct page *base;
  
        if (!debug_pagealloc)
        if (!base)
                return -ENOMEM;
  
-       pbase = (pte_t *)page_address(base);
-       if (__split_large_page(kpte, address, pbase))
+       if (__split_large_page(kpte, address, base))
                __free_page(base);
  
        return 0;
@@@ -1413,8 -1412,6 +1412,8 @@@ void kernel_map_pages(struct page *page
         * but that can deadlock->flush only current cpu:
         */
        __flush_tlb_all();
 +
 +      arch_flush_lazy_mmu_mode();
  }
  
  #ifdef CONFIG_HIBERNATION
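
Aside (not part of the diff): on paravirtualized guests, PTE updates made
between arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() may be
queued rather than applied immediately, so the arch_flush_lazy_mmu_mode()
call added to kernel_map_pages() forces any pending batch out before the
just-(un)mapped pages are touched. A schematic of the API; the hook names
are the real paravirt ones, the call site is illustrative:

	arch_enter_lazy_mmu_mode();      /* start batching PTE updates */
	set_pte_at(mm, addr, ptep, pte); /* may be queued, not applied */
	arch_leave_lazy_mmu_mode();      /* apply the queued batch     */

	arch_flush_lazy_mmu_mode();      /* force out anything pending */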