arm64: kasan: remove !KASAN_VMALLOC remnants
authorMark Rutland <mark.rutland@arm.com>
Tue, 30 May 2023 11:03:24 +0000 (12:03 +0100)
committerCatalin Marinas <catalin.marinas@arm.com>
Tue, 6 Jun 2023 16:39:05 +0000 (17:39 +0100)
Historically, KASAN could be selected with or without KASAN_VMALLOC, but
since commit:

  f6f37d9320a11e90 ("arm64: select KASAN_VMALLOC for SW/HW_TAGS modes")

... we can never select KASAN without KASAN_VMALLOC on arm64, and thus
arm64 code for KASAN && !KASAN_VMALLOC is redundant and can be removed.

Remove the redundant code from kasan_init.c.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Will Deacon <will@kernel.org>
Tested-by: Shanker Donthineni <sdonthineni@nvidia.com>
Link: https://lore.kernel.org/r/20230530110328.2213762-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/mm/kasan_init.c

index e969e68..f17d066 100644 (file)
@@ -214,7 +214,7 @@ static void __init clear_pgds(unsigned long start,
 static void __init kasan_init_shadow(void)
 {
        u64 kimg_shadow_start, kimg_shadow_end;
-       u64 mod_shadow_start, mod_shadow_end;
+       u64 mod_shadow_start;
        u64 vmalloc_shadow_end;
        phys_addr_t pa_start, pa_end;
        u64 i;
@@ -223,7 +223,6 @@ static void __init kasan_init_shadow(void)
        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
 
        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
-       mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
 
        vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
 
@@ -246,17 +245,9 @@ static void __init kasan_init_shadow(void)
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                   (void *)mod_shadow_start);
 
-       if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
-               BUILD_BUG_ON(VMALLOC_START != MODULES_END);
-               kasan_populate_early_shadow((void *)vmalloc_shadow_end,
-                                           (void *)KASAN_SHADOW_END);
-       } else {
-               kasan_populate_early_shadow((void *)kimg_shadow_end,
-                                           (void *)KASAN_SHADOW_END);
-               if (kimg_shadow_start > mod_shadow_end)
-                       kasan_populate_early_shadow((void *)mod_shadow_end,
-                                                   (void *)kimg_shadow_start);
-       }
+       BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+       kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+                                   (void *)KASAN_SHADOW_END);
 
        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)__phys_to_virt(pa_start);