Merge tag 'riscv-for-linus-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Jun 2023 16:36:17 +0000 (09:36 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Jun 2023 16:36:17 +0000 (09:36 -0700)
Pull RISC-V fixes from Palmer Dabbelt:

 - A fix to avoid ISA-disallowed privilege mappings that can result from
   WRITE+EXEC mmap requests from userspace: the RISC-V ISA reserves the
   write-without-read PTE permission, so such requests are now backed by
   a readable protection (see the first sketch after this list).

 - A fix for kfence now that the linear mapping can be backed by
   PUD/P4D/PGD-level huge pages.

 - A fix to check the virtual alignment, in addition to the physical
   one, before choosing a map size, so misaligned VAs are no longer
   mapped with huge pages (see the second sketch after this list).

 - ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE has been selected so kprobes
   can tell user pointers from kernel pointers by address alone (see the
   third sketch after this list).
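
The first sketch is a minimal userspace program, illustrative only and
not part of the patch, showing the kind of request the W+X fix covers:

/*
 * Illustrative only, not from the patch: request a private anonymous
 * WRITE+EXEC mapping without PROT_READ.  The RISC-V PTE encoding
 * reserves W=1/R=0, so the protection_map change below backs this
 * request with PAGE_COPY_EXEC == PAGE_READ_EXEC (readable) rather
 * than an ISA-disallowed permission.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_WRITE | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, 4096);	/* writing faults the page in via COW */
	printf("write+exec mapping at %p\n", p);
	munmap(p, 4096);
	return 0;
}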
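The second sketch restates the alignment check the map-size fix adds: a
leaf PTE maps a naturally aligned block in both address spaces, so a
huge mapping is only valid when the PA and the VA are both aligned to
its size.  The EX_* constants and ex_best_map_size() are illustrative
stand-ins for the kernel's PAGE_SIZE/PMD_SIZE/PUD_SIZE and
best_map_size():

/*
 * Standalone sketch, not kernel code.  Checking pa alone (the old
 * behaviour) let a va that is offset within the huge page pick a
 * mapping that covers the wrong virtual range.
 */
#include <stdint.h>

#define EX_PAGE_SIZE	(1UL << 12)	/* 4 KiB */
#define EX_PMD_SIZE	(1UL << 21)	/* 2 MiB */
#define EX_PUD_SIZE	(1UL << 30)	/* 1 GiB */

static uint64_t ex_best_map_size(uint64_t pa, uint64_t va, uint64_t size)
{
	if (!(pa & (EX_PUD_SIZE - 1)) && !(va & (EX_PUD_SIZE - 1)) &&
	    size >= EX_PUD_SIZE)
		return EX_PUD_SIZE;

	if (!(pa & (EX_PMD_SIZE - 1)) && !(va & (EX_PMD_SIZE - 1)) &&
	    size >= EX_PMD_SIZE)
		return EX_PMD_SIZE;

	return EX_PAGE_SIZE;
}

For example, a 2 MiB-aligned pa paired with a va offset by 4 KiB now
falls back to 4 KiB pages, where the PA-only check would have returned
a 2 MiB mapping for a misaligned virtual range.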
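The third sketch shows the mechanism ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
enables: since user and kernel addresses never overlap, generic code can
route a probe read by address range alone.  This is a hedged sketch
modeled on the probe_mem_read() helper in kernel/trace/trace_kprobe.c;
ex_probe_read() itself is hypothetical, not a kernel API:

/*
 * Hedged sketch: with non-overlapping address spaces, an address
 * below TASK_SIZE can only be a user pointer, so the fetch code can
 * pick the user-space nofault accessor instead of faulting when it
 * prints a __user string argument.
 */
#include <linux/uaccess.h>	/* copy_from_{user,kernel}_nofault() */
#include <asm/processor.h>	/* TASK_SIZE */

static long ex_probe_read(void *dst, const void *src, size_t size)
{
	if ((unsigned long)src < TASK_SIZE)	/* user half of the split */
		return copy_from_user_nofault(dst,
					      (const void __user *)src, size);
	return copy_from_kernel_nofault(dst, src, size);
}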

* tag 'riscv-for-linus-6.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: fix kprobe __user string arg print fault issue
  riscv: Check the virtual alignment before choosing a map size
  riscv: Fix kfence now that the linear mapping can be backed by PUD/P4D/PGD
  riscv: mm: Ensure prot of VM_WRITE and VM_EXEC must be readable

arch/riscv/Kconfig
arch/riscv/include/asm/kfence.h
arch/riscv/include/asm/pgtable.h
arch/riscv/mm/init.c

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 2bb0c38..5966ad9 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -26,6 +26,7 @@ config RISCV
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_KCOV
        select ARCH_HAS_MMIOWB
+       select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        select ARCH_HAS_PMEM_API
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SET_DIRECT_MAP if MMU
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
index d887a54..0bbffd5 100644
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -8,41 +8,8 @@
 #include <asm-generic/pgalloc.h>
 #include <asm/pgtable.h>
 
-static inline int split_pmd_page(unsigned long addr)
-{
-       int i;
-       unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK)));
-       pmd_t *pmd = pmd_off_k(addr);
-       pte_t *pte = pte_alloc_one_kernel(&init_mm);
-
-       if (!pte)
-               return -ENOMEM;
-
-       for (i = 0; i < PTRS_PER_PTE; i++)
-               set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
-       set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
-
-       flush_tlb_kernel_range(addr, addr + PMD_SIZE);
-       return 0;
-}
-
 static inline bool arch_kfence_init_pool(void)
 {
-       int ret;
-       unsigned long addr;
-       pmd_t *pmd;
-
-       for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
-            addr += PAGE_SIZE) {
-               pmd = pmd_off_k(addr);
-
-               if (pmd_leaf(*pmd)) {
-                       ret = split_pmd_page(addr);
-                       if (ret)
-                               return false;
-               }
-       }
-
        return true;
 }
 
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2258b27..75970ee 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -165,8 +165,7 @@ extern struct pt_alloc_ops pt_ops __initdata;
                                         _PAGE_EXEC | _PAGE_WRITE)
 
 #define PAGE_COPY              PAGE_READ
-#define PAGE_COPY_EXEC         PAGE_EXEC
-#define PAGE_COPY_READ_EXEC    PAGE_READ_EXEC
+#define PAGE_COPY_EXEC         PAGE_READ_EXEC
 #define PAGE_SHARED            PAGE_WRITE
 #define PAGE_SHARED_EXEC       PAGE_WRITE_EXEC
 
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c6bb966..4fa420f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -23,6 +23,7 @@
 #ifdef CONFIG_RELOCATABLE
 #include <linux/elf.h>
 #endif
+#include <linux/kfence.h>
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -293,7 +294,7 @@ static const pgprot_t protection_map[16] = {
        [VM_EXEC]                                       = PAGE_EXEC,
        [VM_EXEC | VM_READ]                             = PAGE_READ_EXEC,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY_EXEC,
-       [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_READ_EXEC,
+       [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_EXEC,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READ,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
@@ -659,18 +660,19 @@ void __init create_pgd_mapping(pgd_t *pgdp,
        create_pgd_next_mapping(nextp, va, pa, sz, prot);
 }
 
-static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
+static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
+                                     phys_addr_t size)
 {
-       if (!(base & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
+       if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
                return PGDIR_SIZE;
 
-       if (!(base & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+       if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
                return P4D_SIZE;
 
-       if (!(base & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+       if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
                return PUD_SIZE;
 
-       if (!(base & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+       if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
                return PMD_SIZE;
 
        return PAGE_SIZE;
@@ -1167,14 +1169,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 }
 
 static void __init create_linear_mapping_range(phys_addr_t start,
-                                              phys_addr_t end)
+                                              phys_addr_t end,
+                                              uintptr_t fixed_map_size)
 {
        phys_addr_t pa;
        uintptr_t va, map_size;
 
        for (pa = start; pa < end; pa += map_size) {
                va = (uintptr_t)__va(pa);
-               map_size = best_map_size(pa, end - pa);
+               map_size = fixed_map_size ? fixed_map_size :
+                                           best_map_size(pa, va, end - pa);
 
                create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
                                   pgprot_from_va(va));
@@ -1184,6 +1188,7 @@ static void __init create_linear_mapping_range(phys_addr_t start,
 static void __init create_linear_mapping_page_table(void)
 {
        phys_addr_t start, end;
+       phys_addr_t kfence_pool __maybe_unused;
        u64 i;
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
@@ -1197,6 +1202,19 @@ static void __init create_linear_mapping_page_table(void)
        memblock_mark_nomap(krodata_start, krodata_size);
 #endif
 
+#ifdef CONFIG_KFENCE
+       /*
+        *  kfence pool must be backed by PAGE_SIZE mappings, so allocate it
+        *  before we setup the linear mapping so that we avoid using hugepages
+        *  for this region.
+        */
+       kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+       BUG_ON(!kfence_pool);
+
+       memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+       __kfence_pool = __va(kfence_pool);
+#endif
+
        /* Map all memory banks in the linear mapping */
        for_each_mem_range(i, &start, &end) {
                if (start >= end)
@@ -1207,17 +1225,25 @@ static void __init create_linear_mapping_page_table(void)
                if (end >= __pa(PAGE_OFFSET) + memory_limit)
                        end = __pa(PAGE_OFFSET) + memory_limit;
 
-               create_linear_mapping_range(start, end);
+               create_linear_mapping_range(start, end, 0);
        }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-       create_linear_mapping_range(ktext_start, ktext_start + ktext_size);
+       create_linear_mapping_range(ktext_start, ktext_start + ktext_size, 0);
        create_linear_mapping_range(krodata_start,
-                                   krodata_start + krodata_size);
+                                   krodata_start + krodata_size, 0);
 
        memblock_clear_nomap(ktext_start,  ktext_size);
        memblock_clear_nomap(krodata_start, krodata_size);
 #endif
+
+#ifdef CONFIG_KFENCE
+       create_linear_mapping_range(kfence_pool,
+                                   kfence_pool + KFENCE_POOL_SIZE,
+                                   PAGE_SIZE);
+
+       memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+#endif
 }
 
 static void __init setup_vm_final(void)