s390/mm: make virt_to_pfn() a static inline
author Linus Walleij <linus.walleij@linaro.org>
Sat, 12 Aug 2023 15:12:54 +0000 (17:12 +0200)
committer Heiko Carstens <hca@linux.ibm.com>
Wed, 16 Aug 2023 13:13:03 +0000 (15:13 +0200)
Making virt_to_pfn() a static inline taking a strongly typed
(const void *) makes the contract of passing a pointer of that
type to the function explicit, and exposes any misuse of the
macro virt_to_pfn() acting polymorphically and accepting many
types such as (void *), (uintptr_t) or (unsigned long) as
arguments without warnings.
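
For example (an illustrative sketch, not part of the patch), a
caller that keeps the kernel virtual address in an unsigned long
used to slip through the macro silently; with the static inline
the implicit integer-to-pointer conversion is diagnosed and the
cast has to be spelled out:

    unsigned long addr = __get_free_page(GFP_KERNEL);

    /* old macro: compiled without complaint for an integer    */
    /* new inline: warns unless the caller casts explicitly    */
    unsigned long pfn = virt_to_pfn((void *)addr);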

For symmetry, do the same with pfn_to_virt(), reflecting the
current layout in asm-generic/page.h.
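
With both helpers typed (see the asm/page.h hunk below), a round
trip through pfn space is now checked end to end; a minimal
sketch:

    /* pfn is any valid page frame number */
    void *kaddr = pfn_to_virt(pfn);          /* returns void *       */
    unsigned long same = virt_to_pfn(kaddr); /* takes const void *   */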

Doing this reveals a number of offenders in the arch code and
the S390-specific drivers, so just bite the bullet and fix up
all of those as well.
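
The typical offender (a sketch of the pattern fixed below) holds
the address as an unsigned long, so the call site now needs an
explicit cast:

    unsigned long addr = __get_free_page(GFP_KERNEL);

    /* was: virt_to_page(addr) */
    struct page *page = virt_to_page((void *)addr);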

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Link: https://lore.kernel.org/r/20230812-virt-to-phys-s390-v2-1-6c40f31fe36f@linaro.org
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/include/asm/kfence.h
arch/s390/include/asm/page.h
arch/s390/mm/cmm.c
arch/s390/mm/vmem.c
drivers/s390/block/scm_blk.c
drivers/s390/char/vmcp.c

diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
index d55ba87..e47fd8c 100644
--- a/arch/s390/include/asm/kfence.h
+++ b/arch/s390/include/asm/kfence.h
@@ -35,7 +35,7 @@ static __always_inline void kfence_split_mapping(void)
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
-       __kernel_map_pages(virt_to_page(addr), 1, !protect);
+       __kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
        return true;
 }
 
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a9c138f..cfec074 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -191,8 +191,16 @@ int arch_make_page_accessible(struct page *page);
 #define phys_to_page(phys)     pfn_to_page(phys_to_pfn(phys))
 #define page_to_phys(page)     pfn_to_phys(page_to_pfn(page))
 
-#define pfn_to_virt(pfn)       __va(pfn_to_phys(pfn))
-#define virt_to_pfn(kaddr)     (phys_to_pfn(__pa(kaddr)))
+static inline void *pfn_to_virt(unsigned long pfn)
+{
+       return __va(pfn_to_phys(pfn));
+}
+
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+       return phys_to_pfn(__pa(kaddr));
+}
+
 #define pfn_to_kaddr(pfn)      pfn_to_virt(pfn)
 
 #define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 5300c68..f475153 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
                        } else
                                free_page((unsigned long) npa);
                }
-               diag10_range(virt_to_pfn(addr), 1);
+               diag10_range(virt_to_pfn((void *)addr), 1);
                pa->pages[pa->index++] = addr;
                (*counter)++;
                spin_unlock(&cmm_lock);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index be69cb2..3391efb 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -36,7 +36,7 @@ static void vmem_free_pages(unsigned long addr, int order)
 {
        /* We don't expect boot memory to be removed ever. */
        if (!slab_is_available() ||
-           WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
+           WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
                return;
        free_pages(addr, order);
 }
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 0c1df1d..3a9cc8a 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -134,7 +134,7 @@ static void scm_request_done(struct scm_request *scmrq)
 
                if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
                    IS_ALIGNED(aidaw, PAGE_SIZE))
-                       mempool_free(virt_to_page(aidaw), aidaw_pool);
+                       mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
        }
 
        spin_lock_irqsave(&list_lock, flags);
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 4cebfaa..eb0520a 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
        order = get_order(session->bufsize);
        nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
        if (session->cma_alloc) {
-               page = virt_to_page((unsigned long)session->response);
+               page = virt_to_page(session->response);
                cma_release(vmcp_cma, page, nr_pages);
                session->cma_alloc = 0;
        } else {