ARM: 9164/1: mm: Provide set_memory_valid()
author: Wang Kefeng <wangkefeng.wang@huawei.com>
Fri, 3 Dec 2021 09:26:31 +0000 (10:26 +0100)
committer: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Fri, 17 Dec 2021 11:34:36 +0000 (11:34 +0000)
This function validates and invalidates PTE entries; it will be used
in a later patch.

Acked-by: Marco Elver <elver@google.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
arch/arm/include/asm/set_memory.h
arch/arm/mm/pageattr.c

index ec17fc0..0211b9c 100644 (file)
@@ -11,6 +11,7 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_valid(unsigned long addr, int numpages, int enable);
 #else
 static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
index 9790ae3..c3c34fe 100644 (file)
@@ -32,14 +32,31 @@ static bool in_range(unsigned long start, unsigned long size,
                size <= range_end - start;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       struct page_change_data data;
+       int ret;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                 &data);
+
+       flush_tlb_kernel_range(start, start + size);
+       return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr & PAGE_MASK;
        unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
        unsigned long size = end - start;
-       int ret;
-       struct page_change_data data;
 
        WARN_ON_ONCE(start != addr);
 
@@ -50,14 +67,7 @@ static int change_memory_common(unsigned long addr, int numpages,
            !in_range(start, size, VMALLOC_START, VMALLOC_END))
                return -EINVAL;
 
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                       &data);
-
-       flush_tlb_kernel_range(start, end);
-       return ret;
+       return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -87,3 +97,15 @@ int set_memory_x(unsigned long addr, int numpages)
                                        __pgprot(0),
                                        __pgprot(L_PTE_XN));
 }
+
+int set_memory_valid(unsigned long addr, int numpages, int enable)
+{
+       if (enable)
+               return __change_memory_common(addr, PAGE_SIZE * numpages,
+                                             __pgprot(L_PTE_VALID),
+                                             __pgprot(0));
+       else
+               return __change_memory_common(addr, PAGE_SIZE * numpages,
+                                             __pgprot(0),
+                                             __pgprot(L_PTE_VALID));
+}