ARM: mm: Provide set_memory_valid()
author    Kefeng Wang <wangkefeng.wang@huawei.com>
Mon, 15 Nov 2021 13:48:46 +0000 (21:48 +0800)
committer Seung-Woo Kim <sw0312.kim@samsung.com>
Tue, 20 Sep 2022 02:48:33 +0000 (11:48 +0900)
This function validates and invalidates PTE entries; it will be used
in a later patch (see the usage sketch after the tags below).

Acked-by: Marco Elver <elver@google.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Link: https://lore.kernel.org/linux-arm-kernel/20211115134848.171098-2-wangkefeng.wang@huawei.com/
[port for kfence feature to rpi-5.10.95]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Seung-Woo Kim <sw0312.kim@samsung.com>
Change-Id: I2da9a932f8d0961d72afb49f23f574784ffa2b36
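
For context, a minimal sketch of the kind of caller the later patch is
expected to introduce: a KFENCE-style helper that makes a guard page fault
on access by clearing its valid bit. The name kfence_protect_page() and its
exact shape are assumptions here, not part of this change.

    #include <linux/mm.h>
    #include <asm/set_memory.h>

    /*
     * Hypothetical caller (illustrative only, not from this patch):
     * toggle a single guard page between accessible and faulting.
     */
    static bool kfence_protect_page(unsigned long addr, bool protect)
    {
            if (WARN_ON(!PAGE_ALIGNED(addr)))
                    return false;

            /* enable == !protect: clearing the valid bit makes accesses fault */
            set_memory_valid(addr, 1, !protect);
            return true;
    }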

arch/arm/include/asm/set_memory.h
arch/arm/mm/pageattr.c

diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
index a1ceff4..1e65ecb 100644
--- a/arch/arm/include/asm/set_memory.h
+++ b/arch/arm/include/asm/set_memory.h
@@ -11,6 +11,7 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
 #else
 static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index 9790ae3..c3c34fe 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -32,14 +32,31 @@ static bool in_range(unsigned long start, unsigned long size,
                size <= range_end - start;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       struct page_change_data data;
+       int ret;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                 &data);
+
+       flush_tlb_kernel_range(start, start + size);
+       return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr & PAGE_MASK;
        unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
        unsigned long size = end - start;
-       int ret;
-       struct page_change_data data;
 
        WARN_ON_ONCE(start != addr);
 
@@ -50,14 +67,7 @@ static int change_memory_common(unsigned long addr, int numpages,
            !in_range(start, size, VMALLOC_START, VMALLOC_END))
                return -EINVAL;
 
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                       &data);
-
-       flush_tlb_kernel_range(start, end);
-       return ret;
+       return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -87,3 +97,15 @@ int set_memory_x(unsigned long addr, int numpages)
                                        __pgprot(0),
                                        __pgprot(L_PTE_XN));
 }
+
+int set_memory_valid(unsigned long addr, int numpages, int enable)
+{
+       if (enable)
+               return __change_memory_common(addr, PAGE_SIZE * numpages,
+                                             __pgprot(L_PTE_VALID),
+                                             __pgprot(0));
+       else
+               return __change_memory_common(addr, PAGE_SIZE * numpages,
+                                             __pgprot(0),
+                                             __pgprot(L_PTE_VALID));
+}
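
For reference, the new helper's contract: enable != 0 sets L_PTE_VALID on
every PTE in the range, enable == 0 clears it, so subsequent accesses take
a translation fault. A minimal usage sketch, assuming a page-aligned kernel
address mapped with PAGE_SIZE entries as __change_memory_common() requires
(toggle_one_page() is an invented name):

    #include <asm/set_memory.h>

    /* Illustrative only: briefly make one kernel page inaccessible,
     * then restore it. */
    static void toggle_one_page(unsigned long addr)
    {
            set_memory_valid(addr, 1, 0);   /* clear L_PTE_VALID: accesses fault */
            /* ... any access to the page here would fault ... */
            set_memory_valid(addr, 1, 1);   /* set L_PTE_VALID: page usable again */
    }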