Revert "x86: kmsan: sync metadata pages on page fault"
author Alexander Potapenko <glider@google.com>
Wed, 11 Jan 2023 10:18:06 +0000 (11:18 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:01 +0000 (22:33 -0800)
This reverts commit 3f1e2c7a9099c1ed32c67f12cdf432ba782cf51f.

As noticed by Qun-Wei Lin, arch_sync_kernel_mappings() in
arch/x86/mm/fault.c is only used with CONFIG_X86_32, whereas KMSAN is only
supported on x86_64, where this code is not compiled.

The patch in question dates back to the downstream KMSAN branch based on
v5.8-rc5; it sneaked into upstream unnoticed in v6.1.

Link: https://lkml.kernel.org/r/20230111101806.3236991-1-glider@google.com
Signed-off-by: Alexander Potapenko <glider@google.com>
Reported-by: Qun-Wei Lin <qun-wei.lin@mediatek.com>
Link: https://github.com/google/kmsan/issues/91
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Marco Elver <elver@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/x86/mm/fault.c

index 7b0d4ab..a498ae1 100644 (file)
@@ -260,7 +260,7 @@ static noinline int vmalloc_fault(unsigned long address)
 }
 NOKPROBE_SYMBOL(vmalloc_fault);
 
-static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
 {
        unsigned long addr;
 
@@ -284,27 +284,6 @@ static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
        }
 }
 
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
-       __arch_sync_kernel_mappings(start, end);
-#ifdef CONFIG_KMSAN
-       /*
-        * KMSAN maintains two additional metadata page mappings for the
-        * [VMALLOC_START, VMALLOC_END) range. These mappings start at
-        * KMSAN_VMALLOC_SHADOW_START and KMSAN_VMALLOC_ORIGIN_START and
-        * have to be synced together with the vmalloc memory mapping.
-        */
-       if (start >= VMALLOC_START && end < VMALLOC_END) {
-               __arch_sync_kernel_mappings(
-                       start - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START,
-                       end - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START);
-               __arch_sync_kernel_mappings(
-                       start - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START,
-                       end - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START);
-       }
-#endif
-}
-
 static bool low_pfn(unsigned long pfn)
 {
        return pfn < max_low_pfn;