arm64/sysreg: Add _EL1 into ID_AA64MMFR0_EL1 definition names
author Mark Brown <broonie@kernel.org>
Mon, 5 Sep 2022 22:54:01 +0000 (23:54 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 9 Sep 2022 09:59:02 +0000 (10:59 +0100)
Normally we include the full register name in the defines for fields within
registers, but this has not been followed for ID registers. In preparation
for automatic generation of defines, add the _EL1s into the defines for
ID_AA64MMFR0_EL1 to follow the convention. No functional changes.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Kristina Martsenko <kristina.martsenko@arm.com>
Link: https://lore.kernel.org/r/20220905225425.1871461-5-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
15 files changed:
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/head.S
arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
arch/arm64/kvm/hyp/nvhe/pkvm.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/reset.c
arch/arm64/mm/context.c
arch/arm64/mm/init.c
drivers/firmware/efi/libstub/arm64-stub.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c

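For reference, the renamed defines are used exactly as before; the following is a minimal usage sketch (an illustration only, not part of the patch), built from the helpers that appear in the diff below:

	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	unsigned int parange = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/* Field layout is unchanged; only the define names gain _EL1. */
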
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5846145..a6e7061 100644
@@ -384,8 +384,8 @@ alternative_cb_end
        .macro  tcr_compute_pa_size, tcr, pos, tmp0, tmp1
        mrs     \tmp0, ID_AA64MMFR0_EL1
        // Narrow PARange to fit the PS field in TCR_ELx
-       ubfx    \tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
-       mov     \tmp1, #ID_AA64MMFR0_PARANGE_MAX
+       ubfx    \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
+       mov     \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
        cmp     \tmp0, \tmp1
        csel    \tmp0, \tmp1, \tmp0, hi
        bfi     \tcr, \tmp0, \pos, #3
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index fd7d75a..96ccf82 100644
@@ -597,8 +597,8 @@ static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 {
-       return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
-               cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
+       return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT) == 0x1 ||
+               cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT) == 0x1;
 }
 
 static inline bool id_aa64pfr0_32bit_el1(u64 pfr0)
@@ -694,10 +694,10 @@ static inline bool system_supports_4kb_granule(void)
 
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                               ID_AA64MMFR0_TGRAN4_SHIFT);
+                                               ID_AA64MMFR0_EL1_TGRAN4_SHIFT);
 
-       return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
-              (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
+       return (val >= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN) &&
+              (val <= ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_64kb_granule(void)
@@ -707,10 +707,10 @@ static inline bool system_supports_64kb_granule(void)
 
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                               ID_AA64MMFR0_TGRAN64_SHIFT);
+                                               ID_AA64MMFR0_EL1_TGRAN64_SHIFT);
 
-       return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
-              (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
+       return (val >= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN) &&
+              (val <= ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_16kb_granule(void)
@@ -720,10 +720,10 @@ static inline bool system_supports_16kb_granule(void)
 
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                               ID_AA64MMFR0_TGRAN16_SHIFT);
+                                               ID_AA64MMFR0_EL1_TGRAN16_SHIFT);
 
-       return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
-              (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
+       return (val >= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN) &&
+              (val <= ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
@@ -738,7 +738,7 @@ static inline bool system_supports_mixed_endian(void)
 
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        val = cpuid_feature_extract_unsigned_field(mmfr0,
-                                               ID_AA64MMFR0_BIGENDEL_SHIFT);
+                                               ID_AA64MMFR0_EL1_BIGENDEL_SHIFT);
 
        return val == 0x1;
 }
@@ -840,13 +840,13 @@ extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
 {
        switch (parange) {
-       case ID_AA64MMFR0_PARANGE_32: return 32;
-       case ID_AA64MMFR0_PARANGE_36: return 36;
-       case ID_AA64MMFR0_PARANGE_40: return 40;
-       case ID_AA64MMFR0_PARANGE_42: return 42;
-       case ID_AA64MMFR0_PARANGE_44: return 44;
-       case ID_AA64MMFR0_PARANGE_48: return 48;
-       case ID_AA64MMFR0_PARANGE_52: return 52;
+       case ID_AA64MMFR0_EL1_PARANGE_32: return 32;
+       case ID_AA64MMFR0_EL1_PARANGE_36: return 36;
+       case ID_AA64MMFR0_EL1_PARANGE_40: return 40;
+       case ID_AA64MMFR0_EL1_PARANGE_42: return 42;
+       case ID_AA64MMFR0_EL1_PARANGE_44: return 44;
+       case ID_AA64MMFR0_EL1_PARANGE_48: return 48;
+       case ID_AA64MMFR0_EL1_PARANGE_52: return 52;
        /*
         * A future PE could use a value unknown to the kernel.
         * However, by the "D10.1.4 Principles of the ID scheme
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 2630faa..faad9e0 100644
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
        mrs     x1, id_aa64mmfr0_el1
-       ubfx    x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+       ubfx    x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
        cbz     x1, .Lskip_fgt_\@
 
        mov     x0, xzr
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 9f339df..1b098bd 100644
@@ -16,9 +16,9 @@
 static inline u64 kvm_get_parange(u64 mmfr0)
 {
        u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
-                               ID_AA64MMFR0_PARANGE_SHIFT);
-       if (parange > ID_AA64MMFR0_PARANGE_MAX)
-               parange = ID_AA64MMFR0_PARANGE_MAX;
+                               ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+       if (parange > ID_AA64MMFR0_EL1_PARANGE_MAX)
+               parange = ID_AA64MMFR0_EL1_PARANGE_MAX;
 
        return parange;
 }
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 62376ef..f9af77a 100644
 #define ID_AA64PFR1_MTE_ASYMM          0x3
 
 /* id_aa64mmfr0 */
-#define ID_AA64MMFR0_ECV_SHIFT         60
-#define ID_AA64MMFR0_FGT_SHIFT         56
-#define ID_AA64MMFR0_EXS_SHIFT         44
-#define ID_AA64MMFR0_TGRAN4_2_SHIFT    40
-#define ID_AA64MMFR0_TGRAN64_2_SHIFT   36
-#define ID_AA64MMFR0_TGRAN16_2_SHIFT   32
-#define ID_AA64MMFR0_TGRAN4_SHIFT      28
-#define ID_AA64MMFR0_TGRAN64_SHIFT     24
-#define ID_AA64MMFR0_TGRAN16_SHIFT     20
-#define ID_AA64MMFR0_BIGENDEL0_SHIFT   16
-#define ID_AA64MMFR0_SNSMEM_SHIFT      12
-#define ID_AA64MMFR0_BIGENDEL_SHIFT    8
-#define ID_AA64MMFR0_ASID_SHIFT                4
-#define ID_AA64MMFR0_PARANGE_SHIFT     0
-
-#define ID_AA64MMFR0_ASID_8            0x0
-#define ID_AA64MMFR0_ASID_16           0x2
-
-#define ID_AA64MMFR0_TGRAN4_NI                 0xf
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN      0x0
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX      0x7
-#define ID_AA64MMFR0_TGRAN64_NI                        0xf
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN     0x0
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX     0x7
-#define ID_AA64MMFR0_TGRAN16_NI                        0x0
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN     0x1
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX     0xf
-
-#define ID_AA64MMFR0_PARANGE_32                0x0
-#define ID_AA64MMFR0_PARANGE_36                0x1
-#define ID_AA64MMFR0_PARANGE_40                0x2
-#define ID_AA64MMFR0_PARANGE_42                0x3
-#define ID_AA64MMFR0_PARANGE_44                0x4
-#define ID_AA64MMFR0_PARANGE_48                0x5
-#define ID_AA64MMFR0_PARANGE_52                0x6
+#define ID_AA64MMFR0_EL1_ECV_SHIFT             60
+#define ID_AA64MMFR0_EL1_FGT_SHIFT             56
+#define ID_AA64MMFR0_EL1_EXS_SHIFT             44
+#define ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT                40
+#define ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT       36
+#define ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT       32
+#define ID_AA64MMFR0_EL1_TGRAN4_SHIFT          28
+#define ID_AA64MMFR0_EL1_TGRAN64_SHIFT         24
+#define ID_AA64MMFR0_EL1_TGRAN16_SHIFT         20
+#define ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT       16
+#define ID_AA64MMFR0_EL1_SNSMEM_SHIFT          12
+#define ID_AA64MMFR0_EL1_BIGENDEL_SHIFT                8
+#define ID_AA64MMFR0_EL1_ASID_SHIFT            4
+#define ID_AA64MMFR0_EL1_PARANGE_SHIFT         0
+
+#define ID_AA64MMFR0_EL1_ASID_8                        0x0
+#define ID_AA64MMFR0_EL1_ASID_16               0x2
+
+#define ID_AA64MMFR0_EL1_TGRAN4_NI             0xf
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN  0x0
+#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX  0x7
+#define ID_AA64MMFR0_EL1_TGRAN64_NI            0xf
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_EL1_TGRAN16_NI            0x0
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf
+
+#define ID_AA64MMFR0_EL1_PARANGE_32            0x0
+#define ID_AA64MMFR0_EL1_PARANGE_36            0x1
+#define ID_AA64MMFR0_EL1_PARANGE_40            0x2
+#define ID_AA64MMFR0_EL1_PARANGE_42            0x3
+#define ID_AA64MMFR0_EL1_PARANGE_44            0x4
+#define ID_AA64MMFR0_EL1_PARANGE_48            0x5
+#define ID_AA64MMFR0_EL1_PARANGE_52            0x6
 
 #define ARM64_MIN_PARANGE_BITS         32
 
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE    0x1
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN     0x2
-#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX     0x7
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT     0x0
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE                0x1
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN         0x2
+#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX         0x7
 
 #ifdef CONFIG_ARM64_PA_BITS_52
-#define ID_AA64MMFR0_PARANGE_MAX       ID_AA64MMFR0_PARANGE_52
+#define ID_AA64MMFR0_EL1_PARANGE_MAX   ID_AA64MMFR0_EL1_PARANGE_52
 #else
-#define ID_AA64MMFR0_PARANGE_MAX       ID_AA64MMFR0_PARANGE_48
+#define ID_AA64MMFR0_EL1_PARANGE_MAX   ID_AA64MMFR0_EL1_PARANGE_48
 #endif
 
 /* id_aa64mmfr1 */
 #define ID_PFR1_PROGMOD_SHIFT          0
 
 #if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT               ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN       ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX       ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT             ID_AA64MMFR0_TGRAN4_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT           ID_AA64MMFR0_EL1_TGRAN4_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN   ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX   ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT         ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT
 #elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT               ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN       ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX       ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT             ID_AA64MMFR0_TGRAN16_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT           ID_AA64MMFR0_EL1_TGRAN16_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN   ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX   ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT         ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT
 #elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT               ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN       ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX       ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
-#define ID_AA64MMFR0_TGRAN_2_SHIFT             ID_AA64MMFR0_TGRAN64_2_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SHIFT           ID_AA64MMFR0_EL1_TGRAN64_SHIFT
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN   ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX   ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX
+#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT         ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT
 #endif
 
 #define MVFR2_FPMISC_SHIFT             4
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index af4de81..3f45122 100644
@@ -316,9 +316,9 @@ static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
        /*
         * Page size not being supported at Stage-2 is not fatal. You
         * just give up KVM if PAGE_SIZE isn't supported there. Go fix
@@ -334,9 +334,9 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
         * fields are inconsistent across vCPUs, then it isn't worth
         * trying to bring KVM up.
         */
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_2_SHIFT, 4, 1),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_2_SHIFT, 4, 1),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_2_SHIFT, 4, 1),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
        /*
         * We already refuse to boot CPUs that don't support our configured
         * page size, so we can only detect mismatches for a page size other
@@ -344,20 +344,20 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
         * exist in the wild so, even though we don't like it, we'll have to go
         * along with it and treat them as non-strict.
         */
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
-       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
+       S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),
 
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
        /* Linux shouldn't care about secure memory */
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASID_SHIFT, 4, 0),
        /*
         * Differing PARange is fine as long as all peripherals and memory are mapped
         * within the minimum PARange of all CPUs
         */
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
@@ -2104,7 +2104,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_cpuid_feature,
                .sys_reg = SYS_ID_AA64MMFR0_EL1,
-               .field_pos = ID_AA64MMFR0_ECV_SHIFT,
+               .field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
                .field_width = 4,
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
@@ -2751,7 +2751,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE, CAP_HWCAP, KERNEL_HWCAP_MTE),
        HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_MTE_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_MTE_ASYMM, CAP_HWCAP, KERNEL_HWCAP_MTE3),
 #endif /* CONFIG_ARM64_MTE */
-       HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
+       HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
        HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
        HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
        HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
@@ -3102,7 +3102,7 @@ static void verify_hyp_capabilities(void)
 
        /* Verify IPA range */
        parange = cpuid_feature_extract_unsigned_field(mmfr0,
-                               ID_AA64MMFR0_PARANGE_SHIFT);
+                               ID_AA64MMFR0_EL1_PARANGE_SHIFT);
        ipa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
        if (ipa_max < get_kvm_ipa_limit()) {
                pr_crit("CPU%d: IPA range mismatch\n", smp_processor_id());
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index cefe6a7..bffb034 100644
@@ -656,10 +656,10 @@ SYM_FUNC_END(__secondary_too_slow)
  */
 SYM_FUNC_START(__enable_mmu)
        mrs     x3, ID_AA64MMFR0_EL1
-       ubfx    x3, x3, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-       cmp     x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+       ubfx    x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
+       cmp     x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
        b.lt    __no_granule_support
-       cmp     x3, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+       cmp     x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
        b.gt    __no_granule_support
        phys_to_ttbr x2, x2
        msr     ttbr0_el1, x2                   // load TTBR0
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index fa6e466..aac538c 100644
  * - Non-context synchronizing exception entry and exit
  */
 #define PVM_ID_AA64MMFR0_ALLOW (\
-       ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
-       ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
-       ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
-       ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
+       ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL) | \
+       ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_SNSMEM) | \
+       ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_BIGENDEL0) | \
+       ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_EXS) \
        )
 
 /*
@@ -86,8 +86,8 @@
  * - 16-bit ASID
  */
 #define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
-       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
-       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_PARANGE), ID_AA64MMFR0_EL1_PARANGE_40) | \
+       FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_ASID), ID_AA64MMFR0_EL1_ASID_16) \
        )
 
 /*
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 99c8d8b..823eb4d 100644
@@ -128,7 +128,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
        u64 mdcr_set = 0;
 
        /* Trap Debug Communications Channel registers */
-       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
+       if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
                mdcr_set |= MDCR_EL2_TDCC;
 
        vcpu->arch.mdcr_el2 |= mdcr_set;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 2cb3867..cdf8e76 100644
@@ -61,7 +61,7 @@ struct kvm_pgtable_walk_data {
 
 static bool kvm_phys_is_valid(u64 phys)
 {
-       return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
+       return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_EL1_PARANGE_MAX));
 }
 
 static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 0e08fbe..5ae1847 100644
@@ -359,7 +359,7 @@ int kvm_set_ipa_limit(void)
 
        mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
        parange = cpuid_feature_extract_unsigned_field(mmfr0,
-                               ID_AA64MMFR0_PARANGE_SHIFT);
+                               ID_AA64MMFR0_EL1_PARANGE_SHIFT);
        /*
         * IPA size beyond 48 bits could not be supported
         * on either 4K or 16K page size. Hence let's cap
@@ -367,20 +367,20 @@ int kvm_set_ipa_limit(void)
         * on the system.
         */
        if (PAGE_SIZE != SZ_64K)
-               parange = min(parange, (unsigned int)ID_AA64MMFR0_PARANGE_48);
+               parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);
 
        /*
         * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
         * Stage-2. If not, things will stop very quickly.
         */
-       switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_TGRAN_2_SHIFT)) {
-       case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
+       switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
+       case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
                kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
                return -EINVAL;
-       case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
+       case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
                kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
                break;
-       case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
+       case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
                kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
                break;
        default:
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b8b4cf0..8f38a54 100644
@@ -43,17 +43,17 @@ static u32 get_cpu_asid_bits(void)
 {
        u32 asid;
        int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
-                                               ID_AA64MMFR0_ASID_SHIFT);
+                                               ID_AA64MMFR0_EL1_ASID_SHIFT);
 
        switch (fld) {
        default:
                pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
                                        smp_processor_id(),  fld);
                fallthrough;
-       case ID_AA64MMFR0_ASID_8:
+       case ID_AA64MMFR0_EL1_ASID_8:
                asid = 8;
                break;
-       case ID_AA64MMFR0_ASID_16:
+       case ID_AA64MMFR0_EL1_ASID_16:
                asid = 16;
        }
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b9af30b..4b4651e 100644
@@ -360,7 +360,7 @@ void __init arm64_memblock_init(void)
                extern u16 memstart_offset_seed;
                u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
                int parange = cpuid_feature_extract_unsigned_field(
-                                       mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+                                       mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
                s64 range = linear_region_size -
                            BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
 
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 577173e..60973e8 100644
@@ -23,8 +23,8 @@ efi_status_t check_platform_features(void)
        if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
                return EFI_SUCCESS;
 
-       tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
-       if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
+       tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_EL1_TGRAN_SHIFT) & 0xf;
+       if (tg < ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX) {
                if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
                        efi_err("This 64 KB granular kernel is not supported by your CPU\n");
                else
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 1ef7bbb..da67a75 100644
@@ -150,7 +150,7 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
        }
 
        reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-       par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+       par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
        tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
 
        cd->ttbr = virt_to_phys(mm->pgd);
@@ -425,13 +425,13 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
         * addresses larger than what we support.
         */
        reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
-       fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_PARANGE_SHIFT);
+       fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
        oas = id_aa64mmfr0_parange_to_phys_shift(fld);
        if (smmu->oas < oas)
                return false;
 
        /* We can support bigger ASIDs than the CPU, but not smaller */
-       fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_ASID_SHIFT);
+       fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_ASID_SHIFT);
        asid_bits = fld ? 16 : 8;
        if (smmu->asid_bits < asid_bits)
                return false;