arm64: decouple check whether pfn is in linear map from pfn_valid()
authorMike Rapoport <rppt@linux.ibm.com>
Thu, 1 Jul 2021 01:51:19 +0000 (18:51 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 1 Jul 2021 03:47:29 +0000 (20:47 -0700)
The intended semantics of pfn_valid() is to verify whether there is a
struct page for the pfn in question and nothing else.

Yet, on arm64 it is used to distinguish memory areas that are mapped in
the linear map vs those that require ioremap() to access them.

Introduce a dedicated pfn_is_map_memory() wrapper for
memblock_is_map_memory() to perform such a check and use it where
appropriate.

Using a wrapper makes it possible to avoid cyclic include dependencies.

While here, also update the style of pfn_valid() so that both the
pfn_valid() and pfn_is_map_memory() declarations are consistent.

Link: https://lkml.kernel.org/r/20210511100550.28178-4-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/page.h
arch/arm64/kvm/mmu.c
arch/arm64/mm/init.c
arch/arm64/mm/ioremap.c
arch/arm64/mm/mmu.c

index 87b90dc27a4327426466f73213fafea2de51aedf..9027b7e16c4c3b20d57a4aa8c6d2d3bb19412215 100644 (file)
@@ -369,7 +369,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 
 #define virt_addr_valid(addr)  ({                                      \
        __typeof__(addr) __addr = __tag_reset(addr);                    \
-       __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));      \
+       __is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr));      \
 })
 
 void dump_mem_limit(void);
index 012cffc574e890fc417dafc43563cfcfbcd2642b..75ddfe67139394c67d5857ee6bb00ea0e9d2b9c3 100644 (file)
@@ -37,7 +37,8 @@ void copy_highpage(struct page *to, struct page *from);
 
 typedef struct page *pgtable_t;
 
-extern int pfn_valid(unsigned long);
+int pfn_valid(unsigned long pfn);
+int pfn_is_map_memory(unsigned long pfn);
 
 #include <asm/memory.h>
 
index 74b3c1a3ff5a2e90ce9060669d1402c91e5490f5..5e0d40f9fb867816c4cfdb141717ea56b9a944e7 100644 (file)
@@ -85,7 +85,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 
 static bool kvm_is_device_pfn(unsigned long pfn)
 {
-       return !pfn_valid(pfn);
+       return !pfn_is_map_memory(pfn);
 }
 
 static void *stage2_memcache_zalloc_page(void *arg)
index e55409caaee340a30011d8ca3e3f684102031fc3..148e752a70f731889d435a86f303c422771ff171 100644 (file)
@@ -256,6 +256,18 @@ int pfn_valid(unsigned long pfn)
 }
 EXPORT_SYMBOL(pfn_valid);
 
+int pfn_is_map_memory(unsigned long pfn)
+{
+       phys_addr_t addr = PFN_PHYS(pfn);
+
+       /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
+       if (PHYS_PFN(addr) != pfn)
+               return 0;
+
+       return memblock_is_map_memory(addr);
+}
+EXPORT_SYMBOL(pfn_is_map_memory);
+
 static phys_addr_t memory_limit = PHYS_ADDR_MAX;
 
 /*
index b5e83c46b23e7cf9cf5c3883e67527eb90dc60cd..b7c81dacabf079f50d6d6225a43b15a2b4e621ab 100644 (file)
@@ -43,7 +43,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
        /*
         * Don't allow RAM to be mapped.
         */
-       if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
+       if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
                return NULL;
 
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(iounmap);
 void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 {
        /* For normal memory we already have a cacheable mapping. */
-       if (pfn_valid(__phys_to_pfn(phys_addr)))
+       if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
                return (void __iomem *)__phys_to_virt(phys_addr);
 
        return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
index add67deb491080d3de30785b478a0ceca8ce79f3..7553a7eab3db1d55e418bdfd73504901c8b570c3 100644 (file)
@@ -82,7 +82,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
 {
-       if (!pfn_valid(pfn))
+       if (!pfn_is_map_memory(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);