x86, mm: use pfn_range_is_mapped() with CPA
author: Yinghai Lu <yinghai@kernel.org>
Sat, 17 Nov 2012 03:38:49 +0000 (19:38 -0800)
committer: H. Peter Anvin <hpa@linux.intel.com>
Sat, 17 Nov 2012 19:59:09 +0000 (11:59 -0800)
We are going to map RAM only, so a pfn being below max_low_pfn_mapped,
or between 4G and max_pfn_mapped, does not necessarily mean it is mapped at all.

Use pfn_range_is_mapped() directly.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-13-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/mm/pageattr.c

index a718e0d..44acfcd 100644 (file)
@@ -551,16 +551,10 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
 
-       if (address >= (unsigned long)__va(0) &&
-               address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
+       if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
+                               PFN_DOWN(__pa(address)) + 1))
                split_page_count(level);
 
-#ifdef CONFIG_X86_64
-       if (address >= (unsigned long)__va(1UL<<32) &&
-               address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
-               split_page_count(level);
-#endif
-
        /*
         * Install the new, split up pagetable.
         *
@@ -729,13 +723,9 @@ static int cpa_process_alias(struct cpa_data *cpa)
        unsigned long vaddr;
        int ret;
 
-       if (cpa->pfn >= max_pfn_mapped)
+       if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
                return 0;
 
-#ifdef CONFIG_X86_64
-       if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
-               return 0;
-#endif
        /*
         * No need to redo, when the primary call touched the direct
         * mapping already: