Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 10 Dec 2014 21:59:34 +0000 (13:59 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 10 Dec 2014 21:59:34 +0000 (13:59 -0800)
Pull x86 mm tree changes from Ingo Molnar:
 "The biggest change is full PAT support from Jürgen Gross:

     The x86 architecture offers via the PAT (Page Attribute Table) a
     way to specify different caching modes in page table entries.  The
     PAT MSR contains 8 entries each specifying one of 6 possible cache
     modes.  A pte references one of those entries via 3 bits:
     _PAGE_PAT, _PAGE_PWT and _PAGE_PCD.
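     For illustration, the index into the PAT MSR is just the
     concatenation of those three pte bits.  A minimal stand-alone
     sketch (the SK_* values mirror the kernel's bit positions but are
     redefined here so the snippet compiles on its own):

        /*
         * Sketch only: how the CPU derives the PAT index from a pte.
         * _PAGE_PWT supplies bit 0, _PAGE_PCD bit 1 and _PAGE_PAT bit 2
         * of the index, which selects one of the 8 PAT MSR entries.
         */
        #define SK_PAGE_PWT  (1UL << 3)   /* mirrors _PAGE_PWT */
        #define SK_PAGE_PCD  (1UL << 4)   /* mirrors _PAGE_PCD */
        #define SK_PAGE_PAT  (1UL << 7)   /* mirrors _PAGE_PAT (4k pte) */

        static unsigned int pte_to_pat_index(unsigned long pteval)
        {
                unsigned int idx = 0;

                if (pteval & SK_PAGE_PWT)
                        idx |= 1;
                if (pteval & SK_PAGE_PCD)
                        idx |= 2;
                if (pteval & SK_PAGE_PAT)
                        idx |= 4;
                return idx;               /* 0..7 */
        }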

     The Linux kernel currently supports only 4 different cache modes.
     The PAT MSR is set up in a way that the setting of _PAGE_PAT in a
     pte doesn't matter: the top 4 entries in the PAT MSR are the same
     as the 4 lower entries.
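     Concretely, that symmetric layout means pat_init() programs the
     MSR with entries 4-7 duplicating entries 0-3, roughly as below
     (a sketch, not a verbatim quote of the kernel; it reuses the
     PAT(x, y) macro visible in the arch/x86/mm/pat.c hunk further
     down):

        /*
         * Entries 4-7 mirror entries 0-3, so whether _PAGE_PAT selects
         * the upper or the lower half of the table makes no difference.
         */
        pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
              PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
        wrmsrl(MSR_IA32_CR_PAT, pat);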

     As a result, the kernel does not support e.g. write-through mode,
     even though this cache mode in particular would speed up drivers
     of video cards, which currently have to use uncached accesses.

     On the other hand, some old processors (Pentium) don't support PAT
     correctly, and the Xen hypervisor has been using a different PAT
     MSR configuration for some time now and can't change it, as the
     setting is part of its ABI.

     This patch set abstracts the cache mode from the pte and introduces
     tables to translate between cache mode and pte bits (the default
     cache mode "write back" is hard-wired to PAT entry 0).  The tables
     are statically initialized with values compatible with old
     processors and current usage.  As soon as the PAT MSR is changed
     (or - in case of Xen - is read at boot time) the tables are updated
     accordingly.  Mappings with special cache modes can now always be
     requested; if a mode is not supported, there is a fallback to a
     compatible but slower mode.
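     The idea behind those tables, in rough stand-alone form (names and
     fallback values here are illustrative; the real tables live in
     arch/x86/mm/init.c and back the cachemode2protval() /
     pgprot2cachemode() helpers used throughout the diffs below):

        enum pcm_sketch { WB, WC, UC_MINUS, UC, WT, WP, PCM_NUM };

        #define SK_PWT 0x08                     /* _PAGE_PWT */
        #define SK_PCD 0x10                     /* _PAGE_PCD */

        /*
         * cache mode -> pte cache bits; rewritten once the PAT MSR is
         * set up (or, under Xen, once the hypervisor's MSR is read).
         */
        static unsigned short cachemode2bits[PCM_NUM] = {
                [WB]       = 0,                 /* hard-wired to PAT entry 0 */
                [WC]       = SK_PWT,
                [UC_MINUS] = SK_PCD,
                [UC]       = SK_PCD | SK_PWT,
                [WT]       = SK_PCD,            /* fallback: UC- until WT works */
                [WP]       = SK_PCD,            /* fallback: UC- until WP works */
        };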

     Summing it up, this patch set adds the following features:

      - capability to support WT and WP cache modes on processors with
        full PAT support

      - processors with no or incorrect PAT support keep working as they
        do today, even if WT or WP cache modes are selected by drivers
        for some pages

      - reduction of Xen special handling regarding cache mode

  Another change is a boot speedup on ridiculously large RAM systems,
  plus other smaller fixes"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86: mm: Move PAT only functions to mm/pat.c
  xen: Support Xen pv-domains using PAT
  x86: Enable PAT to use cache mode translation tables
  x86: Respect PAT bit when copying pte values between large and normal pages
  x86: Support PAT bit in pagetable dump for lower levels
  x86: Clean up pgtable_types.h
  x86: Use new cache mode type in memtype related functions
  x86: Use new cache mode type in mm/ioremap.c
  x86: Use new cache mode type in setting page attributes
  x86: Remove looking for setting of _PAGE_PAT_LARGE in pageattr.c
  x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
  x86: Use new cache mode type in mm/iomap_32.c
  x86: Use new cache mode type in asm/pgtable.h
  x86: Use new cache mode type in arch/x86/mm/init_64.c
  x86: Use new cache mode type in arch/x86/pci
  x86: Use new cache mode type in drivers/video/fbdev/vermilion
  x86: Use new cache mode type in drivers/video/fbdev/gbefb.c
  x86: Use new cache mode type in include/asm/fb.h
  x86: Make page cache mode a real type
  x86: mm: Use 2GB memory block size on large-memory x86-64 systems
  ...

arch/x86/include/asm/io.h
arch/x86/mm/dump_pagetables.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c

@@@ -74,9 -74,6 +74,9 @@@ build_mmio_write(__writel, "l", unsigne
  #define __raw_readw __readw
  #define __raw_readl __readl
  
 +#define writeb_relaxed(v, a) __writeb(v, a)
 +#define writew_relaxed(v, a) __writew(v, a)
 +#define writel_relaxed(v, a) __writel(v, a)
  #define __raw_writeb __writeb
  #define __raw_writew __writew
  #define __raw_writel __writel
@@@ -89,7 -86,6 +89,7 @@@ build_mmio_read(readq, "q", unsigned lo
  build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
  
  #define readq_relaxed(a)      readq(a)
 +#define writeq_relaxed(v, a)  writeq(v, a)
  
  #define __raw_readq(a)                readq(a)
  #define __raw_writeq(val, addr)       writeq(val, addr)
@@@ -314,11 -310,11 +314,11 @@@ BUILDIO(b, b, char
  BUILDIO(w, w, short)
  BUILDIO(l, , int)
  
 -extern void *xlate_dev_mem_ptr(unsigned long phys);
 -extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 +extern void *xlate_dev_mem_ptr(phys_addr_t phys);
 +extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
  
  extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-                               unsigned long prot_val);
+                               enum page_cache_mode pcm);
  extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  
  extern bool is_early_ioremap_ptep(pte_t *ptep);
@@@ -76,9 -76,6 +76,9 @@@ static struct addr_marker address_marke
  # ifdef CONFIG_X86_ESPFIX64
        { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
  # endif
 +# ifdef CONFIG_EFI
 +      { EFI_VA_END,           "EFI Runtime Services" },
 +# endif
        { __START_KERNEL_map,   "High Kernel Mapping" },
        { MODULES_VADDR,        "Modules" },
        { MODULES_END,          "End Modules" },
@@@ -129,7 -126,7 +129,7 @@@ static void printk_prot(struct seq_fil
  
        if (!pgprot_val(prot)) {
                /* Not present */
-               pt_dump_cont_printf(m, dmsg, "                          ");
+               pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
  
-               /* Bit 9 has a different meaning on level 3 vs 4 */
-               if (level <= 3) {
-                       if (pr & _PAGE_PSE)
-                               pt_dump_cont_printf(m, dmsg, "PSE ");
-                       else
-                               pt_dump_cont_printf(m, dmsg, "    ");
-               } else {
-                       if (pr & _PAGE_PAT)
-                               pt_dump_cont_printf(m, dmsg, "pat ");
-                       else
-                               pt_dump_cont_printf(m, dmsg, "    ");
-               }
+               /* Bit 7 has a different meaning on level 3 vs 4 */
+               if (level <= 3 && pr & _PAGE_PSE)
+                       pt_dump_cont_printf(m, dmsg, "PSE ");
+               else
+                       pt_dump_cont_printf(m, dmsg, "    ");
+               if ((level == 4 && pr & _PAGE_PAT) ||
+                   ((level == 3 || level == 2) && pr & _PAGE_PAT_LARGE))
+                       pt_dump_cont_printf(m, dmsg, "pat ");
+               else
+                       pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
diff --combined arch/x86/mm/init_64.c
@@@ -52,7 -52,6 +52,6 @@@
  #include <asm/numa.h>
  #include <asm/cacheflush.h>
  #include <asm/init.h>
- #include <asm/uv/uv.h>
  #include <asm/setup.h>
  
  #include "mm_internal.h"
@@@ -338,12 -337,15 +337,15 @@@ pte_t * __init populate_extra_pte(unsig
   * Create large page table mappings for a range of physical addresses.
   */
  static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
-                                               pgprot_t prot)
+                                       enum page_cache_mode cache)
  {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
+       pgprot_t prot;
  
+       pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
+               pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
  
  void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
  {
-       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
+       __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
  }
  
  void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
  {
-       __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
+       __init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
  }
  
  /*
@@@ -1123,7 -1125,7 +1125,7 @@@ void mark_rodata_ro(void
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
        unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
 -      unsigned long all_end = PFN_ALIGN(&_end);
 +      unsigned long all_end;
  
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        /*
         * The rodata/data/bss/brk section (but not the kernel text!)
         * should also be not-executable.
 +       *
 +       * We align all_end to PMD_SIZE because the existing mapping
 +       * is a full PMD. If we would align _brk_end to PAGE_SIZE we
 +       * split the PMD and the reminder between _brk_end and the end
 +       * of the PMD will remain mapped executable.
 +       *
 +       * Any PMD which was setup after the one which covers _brk_end
 +       * has been zapped already via cleanup_highmem().
         */
 +      all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
        set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
  
        rodata_test();
@@@ -1256,12 -1249,10 +1258,10 @@@ static unsigned long probe_memory_block
        /* start from 2g */
        unsigned long bz = 1UL<<31;
  
- #ifdef CONFIG_X86_UV
-       if (is_uv_system()) {
-               printk(KERN_INFO "UV: memory block size 2GB\n");
+       if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
+               pr_info("Using 2GB memory block size for large-memory system\n");
                return 2UL * 1024 * 1024 * 1024;
        }
- #endif
  
        /* less than 64g installed */
        if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
diff --combined arch/x86/mm/ioremap.c
   * conflicts.
   */
  int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-                              unsigned long prot_val)
+                       enum page_cache_mode pcm)
  {
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;
  
-       switch (prot_val) {
-       case _PAGE_CACHE_UC:
+       switch (pcm) {
+       case _PAGE_CACHE_MODE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
-       case _PAGE_CACHE_WC:
+       case _PAGE_CACHE_MODE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
-       case _PAGE_CACHE_WB:
+       case _PAGE_CACHE_MODE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }
@@@ -75,14 -75,14 +75,14 @@@ static int __ioremap_check_ram(unsigne
   * caller shouldn't need to know that small detail.
   */
  static void __iomem *__ioremap_caller(resource_size_t phys_addr,
-               unsigned long size, unsigned long prot_val, void *caller)
+               unsigned long size, enum page_cache_mode pcm, void *caller)
  {
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
-       unsigned long new_prot_val;
+       enum page_cache_mode new_pcm;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;
  
        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
-                                               prot_val, &new_prot_val);
+                                               pcm, &new_pcm);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }
  
-       if (prot_val != new_prot_val) {
-               if (!is_new_memtype_allowed(phys_addr, size,
-                                           prot_val, new_prot_val)) {
+       if (pcm != new_pcm) {
+               if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
                        printk(KERN_ERR
-               "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
+               "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
-                               prot_val, new_prot_val);
+                               pcm, new_pcm);
                        goto err_free_memtype;
                }
-               prot_val = new_prot_val;
+               pcm = new_pcm;
        }
  
-       switch (prot_val) {
-       case _PAGE_CACHE_UC:
+       prot = PAGE_KERNEL_IO;
+       switch (pcm) {
+       case _PAGE_CACHE_MODE_UC:
        default:
-               prot = PAGE_KERNEL_IO_NOCACHE;
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_UC));
                break;
-       case _PAGE_CACHE_UC_MINUS:
-               prot = PAGE_KERNEL_IO_UC_MINUS;
+       case _PAGE_CACHE_MODE_UC_MINUS:
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
                break;
-       case _PAGE_CACHE_WC:
-               prot = PAGE_KERNEL_IO_WC;
+       case _PAGE_CACHE_MODE_WC:
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_WC));
                break;
-       case _PAGE_CACHE_WB:
-               prot = PAGE_KERNEL_IO;
+       case _PAGE_CACHE_MODE_WB:
                break;
        }
  
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
  
-       if (kernel_map_sync_memtype(phys_addr, size, prot_val))
+       if (kernel_map_sync_memtype(phys_addr, size, pcm))
                goto err_free_area;
  
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
@@@ -227,14 -229,14 +229,14 @@@ void __iomem *ioremap_nocache(resource_
  {
        /*
         * Ideally, this should be:
-        *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *      pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
-       unsigned long val = _PAGE_CACHE_UC_MINUS;
+       enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
  
-       return __ioremap_caller(phys_addr, size, val,
+       return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0));
  }
  EXPORT_SYMBOL(ioremap_nocache);
  void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
  {
        if (pat_enabled)
-               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
@@@ -261,7 -263,7 +263,7 @@@ EXPORT_SYMBOL(ioremap_wc)
  
  void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
  {
-       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0));
  }
  EXPORT_SYMBOL(ioremap_cache);
  void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
  {
-       return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
+       return __ioremap_caller(phys_addr, size,
+                               pgprot2cachemode(__pgprot(prot_val)),
                                __builtin_return_address(0));
  }
  EXPORT_SYMBOL(ioremap_prot);
@@@ -327,7 -330,7 +330,7 @@@ EXPORT_SYMBOL(iounmap)
   * Convert a physical pointer to a virtual kernel pointer for /dev/mem
   * access
   */
 -void *xlate_dev_mem_ptr(unsigned long phys)
 +void *xlate_dev_mem_ptr(phys_addr_t phys)
  {
        void *addr;
        unsigned long start = phys & PAGE_MASK;
        return addr;
  }
  
 -void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 +void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
  {
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;
diff --combined arch/x86/mm/pageattr.c
@@@ -409,7 -409,7 +409,7 @@@ phys_addr_t slow_virt_to_phys(void *__v
        psize = page_level_size(level);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
 -      phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
 +      phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
        return (phys_addr | offset);
  }
  EXPORT_SYMBOL_GPL(slow_virt_to_phys);
@@@ -485,14 -485,23 +485,23 @@@ try_preserve_large_page(pte_t *kpte, un
  
        /*
         * We are safe now. Check whether the new pgprot is the same:
+        * Convert protection attributes to 4k-format, as cpa->mask* are set
+        * up accordingly.
         */
        old_pte = *kpte;
-       old_prot = req_prot = pte_pgprot(old_pte);
+       old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
  
        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
  
        /*
+        * req_prot is in format of 4k pages. It must be converted to large
+        * page format: the caching mode includes the PAT bit located at
+        * different bit positions in the two formats.
+        */
+       req_prot = pgprot_4k_2_large(req_prot);
+       /*
         * Set the PSE and GLOBAL flags only if the PRESENT flag is
         * set otherwise pmd_present/pmd_huge will return true even on
         * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
@@@ -585,13 -594,10 +594,10 @@@ __split_large_page(struct cpa_data *cpa
  
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
-       /*
-        * If we ever want to utilize the PAT bit, we need to
-        * update this function to make sure it's converted from
-        * bit 12 to bit 7 when we cross from the 2MB level to
-        * the 4K level:
-        */
-       WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
+       /* promote PAT bit to correct position */
+       if (level == PG_LEVEL_2M)
+               ref_prot = pgprot_large_2_4k(ref_prot);
  
  #ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
@@@ -879,6 -885,7 +885,7 @@@ static int populate_pmd(struct cpa_dat
  {
        unsigned int cur_pages = 0;
        pmd_t *pmd;
+       pgprot_t pmd_pgprot;
  
        /*
         * Not on a 2M boundary?
        if (num_pages == cur_pages)
                return cur_pages;
  
+       pmd_pgprot = pgprot_4k_2_large(pgprot);
        while (end - start >= PMD_SIZE) {
  
                /*
  
                pmd = pmd_offset(pud, start);
  
-               set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+               set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+                                  massage_pgprot(pmd_pgprot)));
  
                start     += PMD_SIZE;
                cpa->pfn  += PMD_SIZE;
@@@ -949,6 -959,7 +959,7 @@@ static int populate_pud(struct cpa_dat
        pud_t *pud;
        unsigned long end;
        int cur_pages = 0;
+       pgprot_t pud_pgprot;
  
        end = start + (cpa->numpages << PAGE_SHIFT);
  
                return cur_pages;
  
        pud = pud_offset(pgd, start);
+       pud_pgprot = pgprot_4k_2_large(pgprot);
  
        /*
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (end - start >= PUD_SIZE) {
-               set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+               set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+                                  massage_pgprot(pud_pgprot)));
  
                start     += PUD_SIZE;
                cpa->pfn  += PUD_SIZE;
@@@ -1304,12 -1317,6 +1317,6 @@@ static int __change_page_attr_set_clr(s
        return 0;
  }
  
- static inline int cache_attr(pgprot_t attr)
- {
-       return pgprot_val(attr) &
-               (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
- }
  static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
-       cache = cache_attr(mask_set);
+       cache = !!pgprot2cachemode(mask_set);
  
        /*
         * On success we use CLFLUSH, when the CPU supports it to
@@@ -1445,7 -1452,8 +1452,8 @@@ int _set_memory_uc(unsigned long addr, 
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        return change_page_attr_set(&addr, numpages,
-                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+                                   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
+                                   0);
  }
  
  int set_memory_uc(unsigned long addr, int numpages)
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-                           _PAGE_CACHE_UC_MINUS, NULL);
+                             _PAGE_CACHE_MODE_UC_MINUS, NULL);
        if (ret)
                goto out_err;
  
@@@ -1474,7 -1482,7 +1482,7 @@@ out_err
  EXPORT_SYMBOL(set_memory_uc);
  
  static int _set_memory_array(unsigned long *addr, int addrinarray,
-               unsigned long new_type)
+               enum page_cache_mode new_type)
  {
        int i, j;
        int ret;
        }
  
        ret = change_page_attr_set(addr, addrinarray,
-                                   __pgprot(_PAGE_CACHE_UC_MINUS), 1);
+                                  cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
+                                  1);
  
-       if (!ret && new_type == _PAGE_CACHE_WC)
+       if (!ret && new_type == _PAGE_CACHE_MODE_WC)
                ret = change_page_attr_set_clr(addr, addrinarray,
-                                              __pgprot(_PAGE_CACHE_WC),
+                                              cachemode2pgprot(
+                                               _PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_ARRAY, NULL);
        if (ret)
@@@ -1511,13 -1521,13 +1521,13 @@@ out_free
  
  int set_memory_array_uc(unsigned long *addr, int addrinarray)
  {
-       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
+       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
  }
  EXPORT_SYMBOL(set_memory_array_uc);
  
  int set_memory_array_wc(unsigned long *addr, int addrinarray)
  {
-       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
+       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
  }
  EXPORT_SYMBOL(set_memory_array_wc);
  
@@@ -1527,10 -1537,12 +1537,12 @@@ int _set_memory_wc(unsigned long addr, 
        unsigned long addr_copy = addr;
  
        ret = change_page_attr_set(&addr, numpages,
-                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+                                  cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
+                                  0);
        if (!ret) {
                ret = change_page_attr_set_clr(&addr_copy, numpages,
-                                              __pgprot(_PAGE_CACHE_WC),
+                                              cachemode2pgprot(
+                                               _PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, 0, NULL);
        }
@@@ -1545,7 -1557,7 +1557,7 @@@ int set_memory_wc(unsigned long addr, i
                return set_memory_uc(addr, numpages);
  
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-               _PAGE_CACHE_WC, NULL);
+               _PAGE_CACHE_MODE_WC, NULL);
        if (ret)
                goto out_err;
  
@@@ -1564,6 -1576,7 +1576,7 @@@ EXPORT_SYMBOL(set_memory_wc)
  
  int _set_memory_wb(unsigned long addr, int numpages)
  {
+       /* WB cache mode is hard wired to all cache attribute bits being 0 */
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK), 0);
  }
@@@ -1586,6 -1599,7 +1599,7 @@@ int set_memory_array_wb(unsigned long *
        int i;
        int ret;
  
+       /* WB cache mode is hard wired to all cache attribute bits being 0 */
        ret = change_page_attr_clear(addr, addrinarray,
                                      __pgprot(_PAGE_CACHE_MASK), 1);
        if (ret)
@@@ -1648,7 -1662,7 +1662,7 @@@ int set_pages_uc(struct page *page, in
  EXPORT_SYMBOL(set_pages_uc);
  
  static int _set_pages_array(struct page **pages, int addrinarray,
-               unsigned long new_type)
+               enum page_cache_mode new_type)
  {
        unsigned long start;
        unsigned long end;
        }
  
        ret = cpa_set_pages_array(pages, addrinarray,
-                       __pgprot(_PAGE_CACHE_UC_MINUS));
-       if (!ret && new_type == _PAGE_CACHE_WC)
+                       cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS));
+       if (!ret && new_type == _PAGE_CACHE_MODE_WC)
                ret = change_page_attr_set_clr(NULL, addrinarray,
-                                              __pgprot(_PAGE_CACHE_WC),
+                                              cachemode2pgprot(
+                                               _PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_PAGES_ARRAY, pages);
        if (ret)
@@@ -1689,13 -1704,13 +1704,13 @@@ err_out
  
  int set_pages_array_uc(struct page **pages, int addrinarray)
  {
-       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
+       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
  }
  EXPORT_SYMBOL(set_pages_array_uc);
  
  int set_pages_array_wc(struct page **pages, int addrinarray)
  {
-       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
+       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
  }
  EXPORT_SYMBOL(set_pages_array_wc);
  
@@@ -1714,6 -1729,7 +1729,7 @@@ int set_pages_array_wb(struct page **pa
        unsigned long end;
        int i;
  
+       /* WB cache mode is hard wired to all cache attribute bits being 0 */
        retval = cpa_clear_pages_array(pages, addrinarray,
                        __pgprot(_PAGE_CACHE_MASK));
        if (retval)
diff --combined arch/x86/mm/pat.c
@@@ -31,6 -31,7 +31,7 @@@
  #include <asm/io.h>
  
  #include "pat_internal.h"
+ #include "mm_internal.h"
  
  #ifdef CONFIG_X86_PAT
  int __read_mostly pat_enabled = 1;
@@@ -66,6 -67,75 +67,75 @@@ __setup("debugpat", pat_debug_setup)
  
  static u64 __read_mostly boot_pat_state;
  
+ #ifdef CONFIG_X86_PAT
+ /*
+  * X86 PAT uses page flags WC and Uncached together to keep track of
+  * memory type of pages that have backing page struct. X86 PAT supports 3
+  * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
+  * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
+  * been changed from its default (value of -1 used to denote this).
+  * Note we do not support _PAGE_CACHE_MODE_UC here.
+  */
+ #define _PGMT_DEFAULT         0
+ #define _PGMT_WC              (1UL << PG_arch_1)
+ #define _PGMT_UC_MINUS                (1UL << PG_uncached)
+ #define _PGMT_WB              (1UL << PG_uncached | 1UL << PG_arch_1)
+ #define _PGMT_MASK            (1UL << PG_uncached | 1UL << PG_arch_1)
+ #define _PGMT_CLEAR_MASK      (~_PGMT_MASK)
+ static inline enum page_cache_mode get_page_memtype(struct page *pg)
+ {
+       unsigned long pg_flags = pg->flags & _PGMT_MASK;
+       if (pg_flags == _PGMT_DEFAULT)
+               return -1;
+       else if (pg_flags == _PGMT_WC)
+               return _PAGE_CACHE_MODE_WC;
+       else if (pg_flags == _PGMT_UC_MINUS)
+               return _PAGE_CACHE_MODE_UC_MINUS;
+       else
+               return _PAGE_CACHE_MODE_WB;
+ }
+ static inline void set_page_memtype(struct page *pg,
+                                   enum page_cache_mode memtype)
+ {
+       unsigned long memtype_flags;
+       unsigned long old_flags;
+       unsigned long new_flags;
+       switch (memtype) {
+       case _PAGE_CACHE_MODE_WC:
+               memtype_flags = _PGMT_WC;
+               break;
+       case _PAGE_CACHE_MODE_UC_MINUS:
+               memtype_flags = _PGMT_UC_MINUS;
+               break;
+       case _PAGE_CACHE_MODE_WB:
+               memtype_flags = _PGMT_WB;
+               break;
+       default:
+               memtype_flags = _PGMT_DEFAULT;
+               break;
+       }
+       do {
+               old_flags = pg->flags;
+               new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
+       } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
+ }
+ #else
+ static inline enum page_cache_mode get_page_memtype(struct page *pg)
+ {
+       return -1;
+ }
+ static inline void set_page_memtype(struct page *pg,
+                                   enum page_cache_mode memtype)
+ {
+ }
+ #endif
  enum {
        PAT_UC = 0,             /* uncached */
        PAT_WC = 1,             /* Write combining */
        PAT_UC_MINUS = 7,       /* UC, but can be overriden by MTRR */
  };
  
+ #define CM(c) (_PAGE_CACHE_MODE_ ## c)
+ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
+ {
+       enum page_cache_mode cache;
+       char *cache_mode;
+       switch (pat_val) {
+       case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
+       case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
+       case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
+       case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
+       case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
+       case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
+       default:           cache = CM(WB);       cache_mode = "WB  "; break;
+       }
+       memcpy(msg, cache_mode, 4);
+       return cache;
+ }
+ #undef CM
+ /*
+  * Update the cache mode to pgprot translation tables according to PAT
+  * configuration.
+  * Using lower indices is preferred, so we start with highest index.
+  */
+ void pat_init_cache_modes(void)
+ {
+       int i;
+       enum page_cache_mode cache;
+       char pat_msg[33];
+       u64 pat;
+       rdmsrl(MSR_IA32_CR_PAT, pat);
+       pat_msg[32] = 0;
+       for (i = 7; i >= 0; i--) {
+               cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
+                                          pat_msg + 4 * i);
+               update_cache_mode_entry(i, cache);
+       }
+       pr_info("PAT configuration [0-7]: %s\n", pat_msg);
+ }
  #define PAT(x, y)     ((u64)PAT_ ## y << ((x)*8))
  
  void pat_init(void)
        wrmsrl(MSR_IA32_CR_PAT, pat);
  
        if (boot_cpu)
-               printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
-                      smp_processor_id(), boot_pat_state, pat);
+               pat_init_cache_modes();
  }
  
  #undef PAT
@@@ -139,20 -254,21 +254,21 @@@ static DEFINE_SPINLOCK(memtype_lock);   /
   * The intersection is based on "Effective Memory Type" tables in IA-32
   * SDM vol 3a
   */
- static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
+ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
+                                    enum page_cache_mode req_type)
  {
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
-       if (req_type == _PAGE_CACHE_WB) {
+       if (req_type == _PAGE_CACHE_MODE_WB) {
                u8 mtrr_type;
  
                mtrr_type = mtrr_type_lookup(start, end);
                if (mtrr_type != MTRR_TYPE_WRBACK)
-                       return _PAGE_CACHE_UC_MINUS;
+                       return _PAGE_CACHE_MODE_UC_MINUS;
  
-               return _PAGE_CACHE_WB;
+               return _PAGE_CACHE_MODE_WB;
        }
  
        return req_type;
@@@ -207,25 -323,26 +323,26 @@@ static int pat_pagerange_is_ram(resourc
   * - Find the memtype of all the pages in the range, look for any conflicts
   * - In case of no conflicts, set the new memtype for pages in the range
   */
- static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
-                                 unsigned long *new_type)
+ static int reserve_ram_pages_type(u64 start, u64 end,
+                                 enum page_cache_mode req_type,
+                                 enum page_cache_mode *new_type)
  {
        struct page *page;
        u64 pfn;
  
-       if (req_type == _PAGE_CACHE_UC) {
+       if (req_type == _PAGE_CACHE_MODE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
-               req_type = _PAGE_CACHE_UC_MINUS;
+               req_type = _PAGE_CACHE_MODE_UC_MINUS;
        }
  
        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-               unsigned long type;
+               enum page_cache_mode type;
  
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
-                       printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+                       pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;
@@@ -258,21 -375,21 +375,21 @@@ static int free_ram_pages_type(u64 star
  
  /*
   * req_type typically has one of the:
-  * - _PAGE_CACHE_WB
-  * - _PAGE_CACHE_WC
-  * - _PAGE_CACHE_UC_MINUS
-  * - _PAGE_CACHE_UC
+  * - _PAGE_CACHE_MODE_WB
+  * - _PAGE_CACHE_MODE_WC
+  * - _PAGE_CACHE_MODE_UC_MINUS
+  * - _PAGE_CACHE_MODE_UC
   *
   * If new_type is NULL, function will return an error if it cannot reserve the
   * region with req_type. If new_type is non-NULL, function will return
   * available type in new_type in case of no error. In case of any error
   * it will return a negative return value.
   */
- int reserve_memtype(u64 start, u64 end, unsigned long req_type,
-                   unsigned long *new_type)
+ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
+                   enum page_cache_mode *new_type)
  {
        struct memtype *new;
-       unsigned long actual_type;
+       enum page_cache_mode actual_type;
        int is_range_ram;
        int err = 0;
  
        if (!pat_enabled) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
-                       if (req_type == _PAGE_CACHE_WC)
-                               *new_type = _PAGE_CACHE_UC_MINUS;
+                       if (req_type == _PAGE_CACHE_MODE_WC)
+                               *new_type = _PAGE_CACHE_MODE_UC_MINUS;
                        else
-                               *new_type = req_type & _PAGE_CACHE_MASK;
+                               *new_type = req_type;
                }
                return 0;
        }
        /* Low ISA region is always mapped WB in page table. No need to track */
        if (x86_platform.is_untracked_pat_range(start, end)) {
                if (new_type)
-                       *new_type = _PAGE_CACHE_WB;
+                       *new_type = _PAGE_CACHE_MODE_WB;
                return 0;
        }
  
         * tools and ACPI tools). Use WB request for WB memory and use
         * UC_MINUS otherwise.
         */
-       actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
+       actual_type = pat_x_mtrr_type(start, end, req_type);
  
        if (new_type)
                *new_type = actual_type;
@@@ -394,12 -511,12 +511,12 @@@ int free_memtype(u64 start, u64 end
   *
   * Only to be called when PAT is enabled
   *
-  * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
-  * _PAGE_CACHE_UC
+  * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
+  * or _PAGE_CACHE_MODE_UC
   */
- static unsigned long lookup_memtype(u64 paddr)
+ static enum page_cache_mode lookup_memtype(u64 paddr)
  {
-       int rettype = _PAGE_CACHE_WB;
+       enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
        struct memtype *entry;
  
        if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
                 * default state and not reserved, and hence of type WB
                 */
                if (rettype == -1)
-                       rettype = _PAGE_CACHE_WB;
+                       rettype = _PAGE_CACHE_MODE_WB;
  
                return rettype;
        }
        if (entry != NULL)
                rettype = entry->type;
        else
-               rettype = _PAGE_CACHE_UC_MINUS;
+               rettype = _PAGE_CACHE_MODE_UC_MINUS;
  
        spin_unlock(&memtype_lock);
        return rettype;
   * On failure, returns non-zero
   */
  int io_reserve_memtype(resource_size_t start, resource_size_t end,
-                       unsigned long *type)
+                       enum page_cache_mode *type)
  {
        resource_size_t size = end - start;
-       unsigned long req_type = *type;
-       unsigned long new_type;
+       enum page_cache_mode req_type = *type;
+       enum page_cache_mode new_type;
        int ret;
  
        WARN_ON_ONCE(iomem_map_sanity_check(start, size));
@@@ -520,13 -637,13 +637,13 @@@ static inline int range_is_allowed(unsi
  int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
  {
-       unsigned long flags = _PAGE_CACHE_WB;
+       enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;
  
        if (!range_is_allowed(pfn, size))
                return 0;
  
        if (file->f_flags & O_DSYNC)
-               flags = _PAGE_CACHE_UC_MINUS;
+               pcm = _PAGE_CACHE_MODE_UC_MINUS;
  
  #ifdef CONFIG_X86_32
        /*
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
              boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
            (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
-               flags = _PAGE_CACHE_UC;
+               pcm = _PAGE_CACHE_MODE_UC;
        }
  #endif
  
        *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
-                            flags);
+                            cachemode2protval(pcm));
        return 1;
  }
  
   * Change the memory type for the physial address range in kernel identity
   * mapping space if that range is a part of identity map.
   */
- int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+ int kernel_map_sync_memtype(u64 base, unsigned long size,
+                           enum page_cache_mode pcm)
  {
        unsigned long id_sz;
  
                                __pa(high_memory) - base :
                                size;
  
-       if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+       if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
                printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
                        "for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
-                       cattr_name(flags),
+                       cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
@@@ -595,8 -713,8 +713,8 @@@ static int reserve_pfn_range(u64 paddr
  {
        int is_ram = 0;
        int ret;
-       unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
-       unsigned long flags = want_flags;
+       enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
+       enum page_cache_mode pcm = want_pcm;
  
        is_ram = pat_pagerange_is_ram(paddr, paddr + size);
  
                if (!pat_enabled)
                        return 0;
  
-               flags = lookup_memtype(paddr);
-               if (want_flags != flags) {
+               pcm = lookup_memtype(paddr);
+               if (want_pcm != pcm) {
                        printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
-                               cattr_name(want_flags),
+                               cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
-                               cattr_name(flags));
+                               cattr_name(pcm));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
-                                             (~_PAGE_CACHE_MASK)) |
-                                            flags);
+                                            (~_PAGE_CACHE_MASK)) |
+                                            cachemode2protval(pcm));
                }
                return 0;
        }
  
-       ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+       ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
        if (ret)
                return ret;
  
-       if (flags != want_flags) {
+       if (pcm != want_pcm) {
                if (strict_prot ||
-                   !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
+                   !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
                                " for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
-                               cattr_name(want_flags),
+                               cattr_name(want_pcm),
                                (unsigned long long)paddr,
                                (unsigned long long)(paddr + size - 1),
-                               cattr_name(flags));
+                               cattr_name(pcm));
                        return -EINVAL;
                }
                /*
                 */
                *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                      (~_PAGE_CACHE_MASK)) |
-                                    flags);
+                                    cachemode2protval(pcm));
        }
  
-       if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
+       if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
                free_memtype(paddr, paddr + size);
                return -EINVAL;
        }
@@@ -709,7 -827,7 +827,7 @@@ int track_pfn_remap(struct vm_area_stru
                    unsigned long pfn, unsigned long addr, unsigned long size)
  {
        resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
-       unsigned long flags;
+       enum page_cache_mode pcm;
  
        /* reserve the whole chunk starting from paddr */
        if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
         * For anything smaller than the vma size we set prot based on the
         * lookup.
         */
-       flags = lookup_memtype(paddr);
+       pcm = lookup_memtype(paddr);
  
        /* Check memtype for the remaining pages */
        while (size > PAGE_SIZE) {
                size -= PAGE_SIZE;
                paddr += PAGE_SIZE;
-               if (flags != lookup_memtype(paddr))
+               if (pcm != lookup_memtype(paddr))
                        return -EINVAL;
        }
  
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-                        flags);
+                        cachemode2protval(pcm));
  
        return 0;
  }
  int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
                     unsigned long pfn)
  {
-       unsigned long flags;
+       enum page_cache_mode pcm;
  
        if (!pat_enabled)
                return 0;
  
        /* Set prot based on lookup */
-       flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
+       pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
        *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
-                        flags);
+                        cachemode2protval(pcm));
  
        return 0;
  }
@@@ -791,7 -909,8 +909,8 @@@ void untrack_pfn(struct vm_area_struct 
  pgprot_t pgprot_writecombine(pgprot_t prot)
  {
        if (pat_enabled)
-               return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+               return __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_WC));
        else
                return pgprot_noncached(prot);
  }
@@@ -824,7 -943,7 +943,7 @@@ static void *memtype_seq_start(struct s
  {
        if (*pos == 0) {
                ++*pos;
 -              seq_printf(seq, "PAT memtype list:\n");
 +              seq_puts(seq, "PAT memtype list:\n");
        }
  
        return memtype_get_idx(*pos);