powerpc/64s: Allow double call of kernel_[un]map_linear_page()
author	Christophe Leroy <christophe.leroy@csgroup.eu>	Mon, 26 Sep 2022 07:57:25 +0000 (07:57 +0000)
committer	Michael Ellerman <mpe@ellerman.id.au>	Wed, 28 Sep 2022 09:22:10 +0000 (19:22 +1000)
If the page is already mapped (or, respectively, already unmapped), bail out, so that kernel_map_linear_page() and kernel_unmap_linear_page() can safely be called twice for the same page.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926075726.2846-3-nicholas@linux.ibm.com
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 4ec003d..6d985ac 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1997,6 +1997,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
        if (!vsid)
                return;
 
+       if (linear_map_hash_slots[lmi] & 0x80)
+               return;
+
        ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
                                    HPTE_V_BOLTED,
                                    mmu_linear_psize, mmu_kernel_ssize);
@@ -2016,7 +2019,10 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 
        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
        spin_lock(&linear_map_hash_lock);
-       BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+       if (!(linear_map_hash_slots[lmi] & 0x80)) {
+               spin_unlock(&linear_map_hash_lock);
+               return;
+       }
        hidx = linear_map_hash_slots[lmi] & 0x7f;
        linear_map_hash_slots[lmi] = 0;
        spin_unlock(&linear_map_hash_lock);
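
For reference, here is a minimal user-space sketch of the idempotent map/unmap pattern this patch introduces. The linear_map_hash_slots array, the lmi index and the 0x80 "mapped" bit mirror the patch; toy_insert(), toy_invalidate() and main() are hypothetical stand-ins for hpte_insert_repeating() and the real HPTE invalidation, and the linear_map_hash_lock spinlock is omitted, so this models the control flow only, not the real hash-table handling.

```c
#include <assert.h>
#include <stdio.h>

#define NR_LINEAR_PAGES 16

/* One byte per linear-map page: bit 0x80 = mapped, low 7 bits = slot index. */
static unsigned char linear_map_hash_slots[NR_LINEAR_PAGES];

static int toy_insert(unsigned long lmi)
{
	/* Pretend the hash insert picked slot 3 for this page. */
	(void)lmi;
	return 3;
}

static void toy_invalidate(unsigned long lmi, unsigned char hidx)
{
	/* Real code would invalidate the HPTE selected by hidx here. */
	(void)lmi;
	(void)hidx;
}

static void kernel_map_linear_page(unsigned long lmi)
{
	/* New early exit: mapping an already-mapped page is now a no-op. */
	if (linear_map_hash_slots[lmi] & 0x80)
		return;

	linear_map_hash_slots[lmi] = toy_insert(lmi) | 0x80;
}

static void kernel_unmap_linear_page(unsigned long lmi)
{
	unsigned char hidx;

	/* Was BUG_ON(!(... & 0x80)); an already-unmapped page is now skipped. */
	if (!(linear_map_hash_slots[lmi] & 0x80))
		return;

	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	toy_invalidate(lmi, hidx);
}

int main(void)
{
	/* A double map followed by a double unmap no longer trips a BUG_ON. */
	kernel_map_linear_page(5);
	kernel_map_linear_page(5);
	kernel_unmap_linear_page(5);
	kernel_unmap_linear_page(5);
	assert(linear_map_hash_slots[5] == 0);
	puts("double map/unmap handled");
	return 0;
}
```

The design choice is simply to make both functions idempotent: a second call for the same page sees the 0x80 flag (set or clear) and returns early instead of inserting a duplicate HPTE or hitting the old BUG_ON.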