From 26ced8124a118e8c59d524da9d9f8d5e30502e60 Mon Sep 17 00:00:00 2001 From: Vasily Gorbik Date: Mon, 30 Jan 2023 02:11:39 +0100 Subject: [PATCH] s390/kasan: avoid mapping KASAN shadow for standby memory KASAN common code is able to handle memory hotplug and create KASAN shadow memory on the fly. Online memory ranges are available from mem_detect, use this information to avoid mapping KASAN shadow for standby memory. Reviewed-by: Alexander Gordeev Signed-off-by: Vasily Gorbik Signed-off-by: Heiko Carstens --- arch/s390/boot/kaslr.c | 2 +- arch/s390/mm/kasan_init.c | 26 +++++--------------------- 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c index 9cab7bb..70ff68d 100644 --- a/arch/s390/boot/kaslr.c +++ b/arch/s390/boot/kaslr.c @@ -182,7 +182,7 @@ unsigned long get_random_base(unsigned long safe_addr) * which vmem and kasan code will use for shadow memory and * pgtable mapping allocations. */ - memory_limit -= kasan_estimate_memory_needs(memory_limit); + memory_limit -= kasan_estimate_memory_needs(online_mem_total); memory_limit -= vmem_estimate_memory_needs(online_mem_total); safe_addr = ALIGN(safe_addr, THREAD_SIZE); diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c index 1aaea71..4f667828 100644 --- a/arch/s390/mm/kasan_init.c +++ b/arch/s390/mm/kasan_init.c @@ -227,26 +227,13 @@ void __init kasan_early_init(void) p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY); unsigned long untracked_end = MODULES_VADDR; unsigned long shadow_alloc_size; - unsigned long memsize; + unsigned long start, end; + int i; kasan_early_detect_facilities(); if (!has_nx) pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC)); - memsize = get_mem_detect_end(); - if (!memsize) - kasan_early_panic("cannot detect physical memory size\n"); - /* - * Kasan currently supports standby memory but only if it follows - * online memory (default allocation), i.e. no memory holes. 
- * - memsize represents end of online memory - * - ident_map_size represents online + standby and memory limits - * accounted. - * Kasan maps "memsize" right away. - * [__sha(0), __sha(memsize)] - shadow memory for identity mapping - * The rest [memsize, ident_map_size] if memsize < ident_map_size - * could be mapped/unmapped dynamically later during memory hotplug. - */ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE)); @@ -256,12 +243,8 @@ void __init kasan_early_init(void) crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z)); memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE); - shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT; - - if (pgalloc_low + shadow_alloc_size > memsize) - kasan_early_panic("out of memory during initialisation\n"); - if (has_edat) { + shadow_alloc_size = get_mem_detect_online_total() >> KASAN_SHADOW_SCALE_SHIFT; segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE); segment_low = segment_pos - shadow_alloc_size; segment_low = round_down(segment_low, _SEGMENT_SIZE); @@ -299,7 +282,8 @@ void __init kasan_early_init(void) * +- shadow end ----+---------+- shadow end ---+ */ /* populate kasan shadow (for identity mapping and zero page mapping) */ - kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP); + for_each_mem_detect_block(i, &start, &end) + kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP); if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { untracked_end = VMALLOC_START; /* shallowly populate kasan shadow for vmalloc and modules */ -- 2.7.4