arm64/mm: Directly use ID_AA64MMFR2_EL1_VARange_MASK
Author:     Anshuman Khandual <anshuman.khandual@arm.com>
AuthorDate: Tue, 11 Jul 2023 09:20:55 +0000 (14:50 +0530)
Commit:     Will Deacon <will@kernel.org>
CommitDate: Thu, 27 Jul 2023 10:11:44 +0000 (11:11 +0100)
The tools-generated register field definitions provide in-place mask macros
which can be used directly, instead of shifting the older right-aligned field
masks into position. A standalone sketch of the equivalence follows the tags
below.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20230711092055.245756-1-anshuman.khandual@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
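
As a quick illustration of why the two forms are equivalent, below is a
minimal standalone C sketch (not kernel code). The GENMASK_ULL() stand-in and
the VARange field position at bits [19:16] are assumptions chosen to mirror
the tools-generated ID_AA64MMFR2_EL1_VARange_* definitions, not copied from
the generated header.

/* Standalone sketch, not kernel code: GENMASK_ULL() and the VARange
 * field position (bits [19:16]) are local stand-ins assumed to mirror
 * the tools-generated ID_AA64MMFR2_EL1_VARange_* definitions. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define ID_AA64MMFR2_EL1_VARange_SHIFT  16
#define ID_AA64MMFR2_EL1_VARange_MASK   GENMASK_ULL(19, 16)

int main(void)
{
        /* Old pattern: shift a right-aligned 4-bit mask into position. */
        uint64_t shifted = (uint64_t)0xf << ID_AA64MMFR2_EL1_VARange_SHIFT;

        /* The generated _MASK macro already encodes the in-place mask. */
        assert(shifted == ID_AA64MMFR2_EL1_VARange_MASK);
        printf("VARange mask: 0x%llx\n",
               (unsigned long long)ID_AA64MMFR2_EL1_VARange_MASK);
        return 0;
}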
arch/arm64/kernel/head.S

index 757a0de..7b23699 100644
@@ -113,7 +113,7 @@ SYM_CODE_START(primary_entry)
         */
 #if VA_BITS > 48
        mrs_s   x0, SYS_ID_AA64MMFR2_EL1
-       tst     x0, #0xf << ID_AA64MMFR2_EL1_VARange_SHIFT
+       tst     x0, ID_AA64MMFR2_EL1_VARange_MASK
        mov     x0, #VA_BITS
        mov     x25, #VA_BITS_MIN
        csel    x25, x25, x0, eq
@@ -756,7 +756,7 @@ SYM_FUNC_START(__cpu_secondary_check52bitva)
        b.ne    2f
 
        mrs_s   x0, SYS_ID_AA64MMFR2_EL1
-       and     x0, x0, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
+       and     x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
        cbnz    x0, 2f
 
        update_early_cpu_boot_status \
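
For readers less familiar with the assembly, here is a minimal standalone C
sketch (not kernel code) of the two checks touched above: primary_entry falls
back to VA_BITS_MIN when the VARange field reads as zero, and
__cpu_secondary_check52bitva only needs a yes/no answer before reporting a
boot failure. The field position (bits [19:16]) and the VA_BITS/VA_BITS_MIN
values are assumptions for a 52-bit VA configuration, not taken from this
patch.

/* Minimal standalone sketch, not kernel code: the field position and
 * VA_BITS values below are assumptions for illustration only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ID_AA64MMFR2_EL1_VARange_MASK   (0xfULL << 16)  /* assumed: bits [19:16] */

#define VA_BITS         52      /* assumed: kernel configured for 52-bit VAs */
#define VA_BITS_MIN     48

/* primary_entry: "tst x0, MASK" followed by csel picks the usable VA size. */
static unsigned int effective_va_bits(uint64_t id_aa64mmfr2)
{
        return (id_aa64mmfr2 & ID_AA64MMFR2_EL1_VARange_MASK) ? VA_BITS
                                                              : VA_BITS_MIN;
}

/* __cpu_secondary_check52bitva: "and" + "cbnz" is a plain yes/no test. */
static bool cpu_supports_52bit_va(uint64_t id_aa64mmfr2)
{
        return (id_aa64mmfr2 & ID_AA64MMFR2_EL1_VARange_MASK) != 0;
}

int main(void)
{
        printf("VARange=0 -> %u-bit VAs, 52-bit capable: %d\n",
               effective_va_bits(0), cpu_supports_52bit_va(0));
        printf("VARange=1 -> %u-bit VAs, 52-bit capable: %d\n",
               effective_va_bits(1ULL << 16), cpu_supports_52bit_va(1ULL << 16));
        return 0;
}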