1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
127 DP_VECTOR_ACROSS_LANES,
/* For each data pattern, the index of the operand whose qualifier
   carries the size:Q information; indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
either cache the calculated result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
/* NOTE(review): each entry appears to be { least-significant bit position,
   field width in bits } for one named instruction field; presumably indexed
   by an instruction-field enumeration declared in a header -- confirm the
   entry order against that enum before relying on it.  */
const aarch64_field fields[] =
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst. */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field. */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate. */
    { 5, 19 },	/* imm19: e.g. in CBZ. */
    { 5, 19 },	/* immhi: e.g. in ADRP. */
    { 29, 2 },	/* immlo: e.g. in ADRP. */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions. */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst. */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions. */
    { 30, 1 },	/* Q: in most AdvSIMD instructions. */
    { 0, 5 },	/* Rt: in load/store instructions. */
    { 0, 5 },	/* Rd: in many integer instructions. */
    { 5, 5 },	/* Rn: in many integer instructions. */
    { 10, 5 },	/* Rt2: in load/store pair instructions. */
    { 10, 5 },	/* Ra: in fp instructions. */
    { 5, 3 },	/* op2: in the system instructions. */
    { 8, 4 },	/* CRm: in the system instructions. */
    { 12, 4 },	/* CRn: in the system instructions. */
    { 16, 3 },	/* op1: in the system instructions. */
    { 19, 2 },	/* op0: in the system instructions. */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions. */
    { 12, 4 },	/* cond: condition flags as a source operand. */
    { 12, 4 },	/* opcode: in advsimd load/store instructions. */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions. */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element. */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions. */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst. */
    { 16, 5 },	/* Rs: in load/store exclusive instructions. */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst. */
    { 12, 1 },	/* S: in load/store reg offset instructions. */
    { 21, 2 },	/* hw: in move wide constant instructions. */
    { 22, 2 },	/* opc: in load/store reg offset instructions. */
    { 23, 1 },	/* opc1: in load/store reg offset instructions. */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions. */
    { 22, 2 },	/* type: floating point type field in fp data inst. */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst. */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions. */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions. */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions. */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions. */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst. */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions. */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst. */
    { 5, 14 },	/* imm14: in test bit and branch instructions. */
    { 5, 16 },	/* imm16: in exception instructions. */
    { 0, 26 },	/* imm26: in unconditional branch instructions. */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions. */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions. */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions. */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions. */
    { 22, 1 },	/* N: in logical (immediate) instructions. */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index. */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index. */
    { 31, 1 },	/* sf: in integer data processing instructions. */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions. */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions. */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions. */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions. */
    { 31, 1 },	/* b5: in the test bit and branch instructions. */
    { 19, 5 },	/* b40: in the test bit and branch instructions. */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst. */
    { 17, 1 },	/* SVE_N: SVE equivalent of N. */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0]. */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10]. */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5]. */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10]. */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16]. */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16]. */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5]. */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0]. */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm. */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn. */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5]. */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16]. */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0]. */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5]. */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16]. */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5]. */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0]. */
    { 5, 1 },	/* SVE_i1: single-bit immediate. */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field. */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field. */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field. */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field. */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field. */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field. */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field. */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field. */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr. */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms. */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR. */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration. */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22]. */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14). */
    { 22, 1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22). */
307 enum aarch64_operand_class
308 aarch64_get_operand_class (enum aarch64_opnd type)
310 return aarch64_operands[type].op_class;
314 aarch64_get_operand_name (enum aarch64_opnd type)
316 return aarch64_operands[type].name;
319 /* Get operand description string.
320 This is usually for the diagnosis purpose. */
322 aarch64_get_operand_desc (enum aarch64_opnd type)
324 return aarch64_operands[type].desc;
327 /* Table of all conditional affixes. */
328 const aarch64_cond aarch64_conds[16] =
333 {{"cc", "lo", "ul"}, 0x3},
349 get_cond_from_value (aarch64_insn value)
352 return &aarch64_conds[(unsigned int) value];
356 get_inverted_cond (const aarch64_cond *cond)
358 return &aarch64_conds[cond->value ^ 0x1];
361 /* Table describing the operand extension/shifting operators; indexed by
362 enum aarch64_modifier_kind.
364 The value column provides the most common values for encoding modifiers,
365 which enables table-driven encoding/decoding for the modifiers. */
366 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
387 enum aarch64_modifier_kind
388 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
390 return desc - aarch64_operand_modifiers;
394 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
396 return aarch64_operand_modifiers[kind].value;
399 enum aarch64_modifier_kind
400 aarch64_get_operand_modifier_from_value (aarch64_insn value,
401 bfd_boolean extend_p)
403 if (extend_p == TRUE)
404 return AARCH64_MOD_UXTB + value;
406 return AARCH64_MOD_LSL - value;
410 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
412 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
416 static inline bfd_boolean
417 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
419 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
423 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
443 /* Table describing the operands supported by the aliases of the HINT
446 The name column is the operand that is accepted for the alias. The value
447 column is the hint number of the alias. The list of operands is terminated
448 by NULL in the name column. */
450 const struct aarch64_name_value_pair aarch64_hint_options[] =
452 { "csync", 0x11 }, /* PSB CSYNC. */
456 /* op -> op: load = 0 instruction = 1 store = 2
458 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
459 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
460 const struct aarch64_name_value_pair aarch64_prfops[32] =
462 { "pldl1keep", B(0, 1, 0) },
463 { "pldl1strm", B(0, 1, 1) },
464 { "pldl2keep", B(0, 2, 0) },
465 { "pldl2strm", B(0, 2, 1) },
466 { "pldl3keep", B(0, 3, 0) },
467 { "pldl3strm", B(0, 3, 1) },
470 { "plil1keep", B(1, 1, 0) },
471 { "plil1strm", B(1, 1, 1) },
472 { "plil2keep", B(1, 2, 0) },
473 { "plil2strm", B(1, 2, 1) },
474 { "plil3keep", B(1, 3, 0) },
475 { "plil3strm", B(1, 3, 1) },
478 { "pstl1keep", B(2, 1, 0) },
479 { "pstl1strm", B(2, 1, 1) },
480 { "pstl2keep", B(2, 2, 0) },
481 { "pstl2strm", B(2, 2, 1) },
482 { "pstl3keep", B(2, 3, 0) },
483 { "pstl3strm", B(2, 3, 1) },
497 /* Utilities on value constraint. */
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], else 0.  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* Return 1 if the signed VALUE fits in a two's-complement field of
   WIDTH bits (WIDTH must be less than 32), else 0.  */
static int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* Return 1 if the non-negative VALUE fits in an unsigned field of
   WIDTH bits (WIDTH must be less than 32), else 0.  */
static int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
540 /* Return 1 if OPERAND is SP or WSP. */
542 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
544 return ((aarch64_get_operand_class (operand->type)
545 == AARCH64_OPND_CLASS_INT_REG)
546 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
547 && operand->reg.regno == 31);
550 /* Return 1 if OPERAND is XZR or WZP. */
552 aarch64_zero_register_p (const aarch64_opnd_info *operand)
554 return ((aarch64_get_operand_class (operand->type)
555 == AARCH64_OPND_CLASS_INT_REG)
556 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
557 && operand->reg.regno == 31);
560 /* Return true if the operand *OPERAND that has the operand code
561 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
562 qualified by the qualifier TARGET. */
565 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
566 aarch64_opnd_qualifier_t target)
568 switch (operand->qualifier)
570 case AARCH64_OPND_QLF_W:
571 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
574 case AARCH64_OPND_QLF_X:
575 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
578 case AARCH64_OPND_QLF_WSP:
579 if (target == AARCH64_OPND_QLF_W
580 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
583 case AARCH64_OPND_QLF_SP:
584 if (target == AARCH64_OPND_QLF_X
585 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
595 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
596 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
598 Return NIL if more than one expected qualifiers are found. */
600 aarch64_opnd_qualifier_t
601 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
603 const aarch64_opnd_qualifier_t known_qlf,
610 When the known qualifier is NIL, we have to assume that there is only
611 one qualifier sequence in the *QSEQ_LIST and return the corresponding
612 qualifier directly. One scenario is that for instruction
613 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
614 which has only one possible valid qualifier sequence
616 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
617 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
619 Because the qualifier NIL has dual roles in the qualifier sequence:
620 it can mean no qualifier for the operand, or the qualifer sequence is
621 not in use (when all qualifiers in the sequence are NILs), we have to
622 handle this special case here. */
623 if (known_qlf == AARCH64_OPND_NIL)
625 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
626 return qseq_list[0][idx];
629 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
631 if (qseq_list[i][known_idx] == known_qlf)
634 /* More than one sequences are found to have KNOWN_QLF at
636 return AARCH64_OPND_NIL;
641 return qseq_list[saved_i][idx];
/* The kind of an operand qualifier; selects how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Qualifier name for diagnostics.  */
  const char *desc;
  /* What sort of qualifier this entry describes.  */
  enum operand_qualifier_kind kind;
};
/* Indexed by the operand qualifier enumerators.  */
/* NOTE(review): for OQK_OPD_VARIANT entries the three numbers appear to be
   element size in bytes, number of elements, and the common encoding value
   (see aarch64_get_qualifier_esize/nelem/standard_value); for
   OQK_VALUE_IN_RANGE entries they are lower bound, upper bound, unused.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* SVE predication qualifiers (zeroing / merging).  */
  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.

     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.

     unused, unused and unused.  */

  {0, 0, 0, "retrieving", 0},
720 static inline bfd_boolean
721 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
723 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
727 static inline bfd_boolean
728 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
730 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
735 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
737 return aarch64_opnd_qualifiers[qualifier].desc;
740 /* Given an operand qualifier, return the expected data element size
741 of a qualified operand. */
743 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
745 assert (operand_variant_qualifier_p (qualifier) == TRUE);
746 return aarch64_opnd_qualifiers[qualifier].data0;
750 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
752 assert (operand_variant_qualifier_p (qualifier) == TRUE);
753 return aarch64_opnd_qualifiers[qualifier].data1;
757 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
759 assert (operand_variant_qualifier_p (qualifier) == TRUE);
760 return aarch64_opnd_qualifiers[qualifier].data2;
764 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
766 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
767 return aarch64_opnd_qualifiers[qualifier].data0;
771 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
773 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
774 return aarch64_opnd_qualifiers[qualifier].data1;
779 aarch64_verbose (const char *str, ...)
790 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
794 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
795 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
800 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
801 const aarch64_opnd_qualifier_t *qualifier)
804 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
806 aarch64_verbose ("dump_match_qualifiers:");
807 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
808 curr[i] = opnd[i].qualifier;
809 dump_qualifier_sequence (curr);
810 aarch64_verbose ("against");
811 dump_qualifier_sequence (qualifier);
813 #endif /* DEBUG_AARCH64 */
815 /* TODO improve this, we can have an extra field at the runtime to
816 store the number of operands rather than calculating it every time. */
819 aarch64_num_of_operands (const aarch64_opcode *opcode)
822 const enum aarch64_opnd *opnds = opcode->operands;
823 while (opnds[i++] != AARCH64_OPND_NIL)
826 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
830 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
831 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
833 N.B. on the entry, it is very likely that only some operands in *INST
834 have had their qualifiers been established.
836 If STOP_AT is not -1, the function will only try to match
837 the qualifier sequence for operands before and including the operand
838 of index STOP_AT; and on success *RET will only be filled with the first
839 (STOP_AT+1) qualifiers.
841 A couple examples of the matching algorithm:
849 Apart from serving the main encoding routine, this can also be called
850 during or after the operand decoding. */
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
  const aarch64_opnd_qualifier_t *qualifiers;
  num_opnds = aarch64_num_of_operands (inst->opcode);
      DEBUG_TRACE ("SUCCEED: no operand");
  /* Clamp STOP_AT so the scan below covers at most all operands.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;
  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
      qualifiers = *qualifiers_list;
      /* Start as positive.  */
      DEBUG_TRACE ("%d", i);
      dump_match_qualifiers (inst->operands, qualifiers);
      /* Most opcodes have much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
      /* Compare each established operand qualifier against the
	 candidate sequence.  */
      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	  else if (*qualifiers != inst->operands[j].qualifier)
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
	    continue;	/* Equal qualifiers are certainly matched.  */
      /* Qualifiers established.  */
  /* Fill the result in *RET.  */
      qualifiers = *qualifiers_list;
      DEBUG_TRACE ("complete qualifiers using list %d", i);
      dump_qualifier_sequence (qualifiers);
      /* Copy the matched sequence up to STOP_AT; pad the rest with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;
      DEBUG_TRACE ("SUCCESS");
  DEBUG_TRACE ("FAIL");
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
  aarch64_opnd_qualifier_seq_t qualifiers;
  /* Find the best-matching qualifier sequence for all operands.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
      DEBUG_TRACE ("matching FAIL");
  if (inst->opcode->flags & F_STRICT)
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
  DEBUG_TRACE ("matching SUCCESS");
1002 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1005 IS32 indicates whether value is a 32-bit immediate or not.
1006 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1007 amount will be returned in *SHIFT_AMOUNT. */
1010 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1014 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1018 /* Allow all zeros or all ones in top 32-bits, so that
1019 32-bit constant expressions like ~0x80000000 are
1021 uint64_t ext = value;
1022 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1023 /* Immediate out of range. */
1025 value &= (int64_t) 0xffffffff;
1028 /* first, try movz then movn */
1030 if ((value & ((int64_t) 0xffff << 0)) == value)
1032 else if ((value & ((int64_t) 0xffff << 16)) == value)
1034 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1036 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1041 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1045 if (shift_amount != NULL)
1046 *shift_amount = amount;
1048 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1053 /* Build the accepted values for immediate logical SIMD instructions.
1055 The standard encodings of the immediate value are:
1056 N imms immr SIMD size R S
1057 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1058 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1059 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1060 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1061 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1062 0 11110s 00000r 2 UInt(r) UInt(s)
1063 where all-ones value of S is reserved.
1065 Let's call E the SIMD size.
1067 The immediate value is: S+1 bits '1' rotated to the right by R.
1069 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1070 (remember S != E - 1). */
1072 #define TOTAL_IMM_NB 5334
1077 aarch64_insn encoding;
1078 } simd_imm_encoding;
1080 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1083 simd_imm_encoding_cmp(const void *i1, const void *i2)
1085 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1086 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1088 if (imm1->imm < imm2->imm)
1090 if (imm1->imm > imm2->imm)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1110 build_immediate_table (void)
1112 uint32_t log_e, e, s, r, s_mask;
1118 for (log_e = 1; log_e <= 6; log_e++)
1120 /* Get element size. */
1125 mask = 0xffffffffffffffffull;
1131 mask = (1ull << e) - 1;
1133 1 ((1 << 4) - 1) << 2 = 111100
1134 2 ((1 << 3) - 1) << 3 = 111000
1135 3 ((1 << 2) - 1) << 4 = 110000
1136 4 ((1 << 1) - 1) << 5 = 100000
1137 5 ((1 << 0) - 1) << 6 = 000000 */
1138 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1140 for (s = 0; s < e - 1; s++)
1141 for (r = 0; r < e; r++)
1143 /* s+1 consecutive bits to 1 (s < 63) */
1144 imm = (1ull << (s + 1)) - 1;
1145 /* rotate right by r */
1147 imm = (imm >> r) | ((imm << (e - r)) & mask);
1148 /* replicate the constant depending on SIMD size */
1151 case 1: imm = (imm << 2) | imm;
1152 case 2: imm = (imm << 4) | imm;
1153 case 3: imm = (imm << 8) | imm;
1154 case 4: imm = (imm << 16) | imm;
1155 case 5: imm = (imm << 32) | imm;
1159 simd_immediates[nb_imms].imm = imm;
1160 simd_immediates[nb_imms].encoding =
1161 encode_immediate_bitfield(is64, s | s_mask, r);
1165 assert (nb_imms == TOTAL_IMM_NB);
1166 qsort(simd_immediates, nb_imms,
1167 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1170 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1171 be accepted by logical (immediate) instructions
1172 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1174 ESIZE is the number of bytes in the decoded immediate value.
1175 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1176 VALUE will be returned in *ENCODING. */
1179 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1181 simd_imm_encoding imm_enc;
1182 const simd_imm_encoding *imm_encoding;
1183 static bfd_boolean initialized = FALSE;
1187 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1190 if (initialized == FALSE)
1192 build_immediate_table ();
1196 /* Allow all zeros or all ones in top bits, so that
1197 constant expressions like ~1 are permitted. */
1198 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1199 if ((value & ~upper) != value && (value | upper) != value)
1202 /* Replicate to a full 64-bit value. */
1204 for (i = esize * 8; i < 64; i *= 2)
1205 value |= (value << i);
1207 imm_enc.imm = value;
1208 imm_encoding = (const simd_imm_encoding *)
1209 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1210 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1211 if (imm_encoding == NULL)
1213 DEBUG_TRACE ("exit with FALSE");
1216 if (encoding != NULL)
1217 *encoding = imm_encoding->encoding;
1218 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
static int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros: not shrinkable.  */
	return -1;
    }
  return ret;
}
1244 /* Utility inline functions for operand_general_constraint_met_p. */
1247 set_error (aarch64_operand_error *mismatch_detail,
1248 enum aarch64_operand_error_kind kind, int idx,
1251 if (mismatch_detail == NULL)
1253 mismatch_detail->kind = kind;
1254 mismatch_detail->index = idx;
1255 mismatch_detail->error = error;
1259 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1262 if (mismatch_detail == NULL)
1264 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1268 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1269 int idx, int lower_bound, int upper_bound,
1272 if (mismatch_detail == NULL)
1274 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1275 mismatch_detail->data[0] = lower_bound;
1276 mismatch_detail->data[1] = upper_bound;
1280 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1281 int idx, int lower_bound, int upper_bound)
1283 if (mismatch_detail == NULL)
1285 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1286 _("immediate value"));
1290 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1291 int idx, int lower_bound, int upper_bound)
1293 if (mismatch_detail == NULL)
1295 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1296 _("immediate offset"));
1300 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1301 int idx, int lower_bound, int upper_bound)
1303 if (mismatch_detail == NULL)
1305 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1306 _("register number"));
1310 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1311 int idx, int lower_bound, int upper_bound)
1313 if (mismatch_detail == NULL)
1315 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1316 _("register element index"));
1320 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1321 int idx, int lower_bound, int upper_bound)
1323 if (mismatch_detail == NULL)
1325 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1329 /* Report that the MUL modifier in operand IDX should be in the range
1330 [LOWER_BOUND, UPPER_BOUND]. */
1332 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1333 int idx, int lower_bound, int upper_bound)
1335 if (mismatch_detail == NULL)
1337 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1342 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1345 if (mismatch_detail == NULL)
1347 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1348 mismatch_detail->data[0] = alignment;
1352 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1355 if (mismatch_detail == NULL)
1357 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1358 mismatch_detail->data[0] = expected_num;
1362 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1365 if (mismatch_detail == NULL)
1367 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1370 /* General constraint checking based on operand code.
1372 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1373 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1375 This function has to be called after the qualifiers for all operands
1378 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1379 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1380 of error message during the disassembling where error message is not
1381 wanted. We avoid the dynamic construction of strings of error messages
1382 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1383 use a combination of error code, static string and some integer data to
1384 represent an error. */
1387 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1388 enum aarch64_opnd type,
1389 const aarch64_opcode *opcode,
1390 aarch64_operand_error *mismatch_detail)
1392 unsigned num, modifiers, shift;
1394 int64_t imm, min_value, max_value;
1395 uint64_t uvalue, mask;
1396 const aarch64_opnd_info *opnd = opnds + idx;
1397 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1399 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1401 switch (aarch64_operands[type].op_class)
1403 case AARCH64_OPND_CLASS_INT_REG:
1404 /* Check pair reg constraints for cas* instructions. */
1405 if (type == AARCH64_OPND_PAIRREG)
1407 assert (idx == 1 || idx == 3);
1408 if (opnds[idx - 1].reg.regno % 2 != 0)
1410 set_syntax_error (mismatch_detail, idx - 1,
1411 _("reg pair must start from even reg"));
1414 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1416 set_syntax_error (mismatch_detail, idx,
1417 _("reg pair must be contiguous"));
1423 /* <Xt> may be optional in some IC and TLBI instructions. */
1424 if (type == AARCH64_OPND_Rt_SYS)
1426 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1427 == AARCH64_OPND_CLASS_SYSTEM));
1428 if (opnds[1].present
1429 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1431 set_other_error (mismatch_detail, idx, _("extraneous register"));
1434 if (!opnds[1].present
1435 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1437 set_other_error (mismatch_detail, idx, _("missing register"));
1443 case AARCH64_OPND_QLF_WSP:
1444 case AARCH64_OPND_QLF_SP:
1445 if (!aarch64_stack_pointer_p (opnd))
1447 set_other_error (mismatch_detail, idx,
1448 _("stack pointer register expected"));
1457 case AARCH64_OPND_CLASS_SVE_REG:
1460 case AARCH64_OPND_SVE_Zn_INDEX:
1461 size = aarch64_get_qualifier_esize (opnd->qualifier);
1462 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1464 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1470 case AARCH64_OPND_SVE_ZnxN:
1471 case AARCH64_OPND_SVE_ZtxN:
1472 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1474 set_other_error (mismatch_detail, idx,
1475 _("invalid register list"));
1485 case AARCH64_OPND_CLASS_PRED_REG:
1486 if (opnd->reg.regno >= 8
1487 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1489 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1494 case AARCH64_OPND_CLASS_COND:
1495 if (type == AARCH64_OPND_COND1
1496 && (opnds[idx].cond->value & 0xe) == 0xe)
1498 /* Not allow AL or NV. */
1499 set_syntax_error (mismatch_detail, idx, NULL);
1503 case AARCH64_OPND_CLASS_ADDRESS:
1504 /* Check writeback. */
1505 switch (opcode->iclass)
1509 case ldstnapair_offs:
1512 if (opnd->addr.writeback == 1)
1514 set_syntax_error (mismatch_detail, idx,
1515 _("unexpected address writeback"));
1520 case ldstpair_indexed:
1523 if (opnd->addr.writeback == 0)
1525 set_syntax_error (mismatch_detail, idx,
1526 _("address writeback expected"));
1531 assert (opnd->addr.writeback == 0);
1536 case AARCH64_OPND_ADDR_SIMM7:
1537 /* Scaled signed 7 bits immediate offset. */
1538 /* Get the size of the data element that is accessed, which may be
1539 different from that of the source register size,
1540 e.g. in strb/ldrb. */
1541 size = aarch64_get_qualifier_esize (opnd->qualifier);
1542 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1544 set_offset_out_of_range_error (mismatch_detail, idx,
1545 -64 * size, 63 * size);
1548 if (!value_aligned_p (opnd->addr.offset.imm, size))
1550 set_unaligned_error (mismatch_detail, idx, size);
1554 case AARCH64_OPND_ADDR_SIMM9:
1555 /* Unscaled signed 9 bits immediate offset. */
1556 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1558 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1563 case AARCH64_OPND_ADDR_SIMM9_2:
1564 /* Unscaled signed 9 bits immediate offset, which has to be negative
1566 size = aarch64_get_qualifier_esize (qualifier);
1567 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1568 && !value_aligned_p (opnd->addr.offset.imm, size))
1569 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1571 set_other_error (mismatch_detail, idx,
1572 _("negative or unaligned offset expected"));
1575 case AARCH64_OPND_SIMD_ADDR_POST:
1576 /* AdvSIMD load/store multiple structures, post-index. */
1578 if (opnd->addr.offset.is_reg)
1580 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1584 set_other_error (mismatch_detail, idx,
1585 _("invalid register offset"));
1591 const aarch64_opnd_info *prev = &opnds[idx-1];
1592 unsigned num_bytes; /* total number of bytes transferred. */
1593 /* The opcode dependent area stores the number of elements in
1594 each structure to be loaded/stored. */
1595 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1596 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1597 /* Special handling of loading single structure to all lane. */
1598 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1599 * aarch64_get_qualifier_esize (prev->qualifier);
1601 num_bytes = prev->reglist.num_regs
1602 * aarch64_get_qualifier_esize (prev->qualifier)
1603 * aarch64_get_qualifier_nelem (prev->qualifier);
1604 if ((int) num_bytes != opnd->addr.offset.imm)
1606 set_other_error (mismatch_detail, idx,
1607 _("invalid post-increment amount"));
1613 case AARCH64_OPND_ADDR_REGOFF:
1614 /* Get the size of the data element that is accessed, which may be
1615 different from that of the source register size,
1616 e.g. in strb/ldrb. */
1617 size = aarch64_get_qualifier_esize (opnd->qualifier);
1618 /* It is either no shift or shift by the binary logarithm of SIZE. */
1619 if (opnd->shifter.amount != 0
1620 && opnd->shifter.amount != (int)get_logsz (size))
1622 set_other_error (mismatch_detail, idx,
1623 _("invalid shift amount"));
1626 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1628 switch (opnd->shifter.kind)
1630 case AARCH64_MOD_UXTW:
1631 case AARCH64_MOD_LSL:
1632 case AARCH64_MOD_SXTW:
1633 case AARCH64_MOD_SXTX: break;
1635 set_other_error (mismatch_detail, idx,
1636 _("invalid extend/shift operator"));
1641 case AARCH64_OPND_ADDR_UIMM12:
1642 imm = opnd->addr.offset.imm;
1643 /* Get the size of the data element that is accessed, which may be
1644 different from that of the source register size,
1645 e.g. in strb/ldrb. */
1646 size = aarch64_get_qualifier_esize (qualifier);
1647 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1649 set_offset_out_of_range_error (mismatch_detail, idx,
1653 if (!value_aligned_p (opnd->addr.offset.imm, size))
1655 set_unaligned_error (mismatch_detail, idx, size);
1660 case AARCH64_OPND_ADDR_PCREL14:
1661 case AARCH64_OPND_ADDR_PCREL19:
1662 case AARCH64_OPND_ADDR_PCREL21:
1663 case AARCH64_OPND_ADDR_PCREL26:
1664 imm = opnd->imm.value;
1665 if (operand_need_shift_by_two (get_operand_from_code (type)))
1667 /* The offset value in a PC-relative branch instruction is always
1668 4-byte aligned and is encoded without the lowest 2 bits. */
1669 if (!value_aligned_p (imm, 4))
1671 set_unaligned_error (mismatch_detail, idx, 4);
1674 /* Right shift by 2 so that we can carry out the following check
1678 size = get_operand_fields_width (get_operand_from_code (type));
1679 if (!value_fit_signed_field_p (imm, size))
1681 set_other_error (mismatch_detail, idx,
1682 _("immediate out of range"));
1687 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1688 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1689 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1690 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1694 assert (!opnd->addr.offset.is_reg);
1695 assert (opnd->addr.preind);
1696 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1699 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1700 || (opnd->shifter.operator_present
1701 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1703 set_other_error (mismatch_detail, idx,
1704 _("invalid addressing mode"));
1707 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1709 set_offset_out_of_range_error (mismatch_detail, idx,
1710 min_value, max_value);
1713 if (!value_aligned_p (opnd->addr.offset.imm, num))
1715 set_unaligned_error (mismatch_detail, idx, num);
1720 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1723 goto sve_imm_offset_vl;
1725 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1728 goto sve_imm_offset_vl;
1730 case AARCH64_OPND_SVE_ADDR_RI_U6:
1731 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1732 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1733 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1737 assert (!opnd->addr.offset.is_reg);
1738 assert (opnd->addr.preind);
1739 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1742 if (opnd->shifter.operator_present
1743 || opnd->shifter.amount_present)
1745 set_other_error (mismatch_detail, idx,
1746 _("invalid addressing mode"));
1749 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1751 set_offset_out_of_range_error (mismatch_detail, idx,
1752 min_value, max_value);
1755 if (!value_aligned_p (opnd->addr.offset.imm, num))
1757 set_unaligned_error (mismatch_detail, idx, num);
1762 case AARCH64_OPND_SVE_ADDR_RR:
1763 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1764 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1765 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1766 case AARCH64_OPND_SVE_ADDR_RX:
1767 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1768 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1769 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1770 case AARCH64_OPND_SVE_ADDR_RZ:
1771 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1772 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1773 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1774 modifiers = 1 << AARCH64_MOD_LSL;
1776 assert (opnd->addr.offset.is_reg);
1777 assert (opnd->addr.preind);
1778 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1779 && opnd->addr.offset.regno == 31)
1781 set_other_error (mismatch_detail, idx,
1782 _("index register xzr is not allowed"));
1785 if (((1 << opnd->shifter.kind) & modifiers) == 0
1786 || (opnd->shifter.amount
1787 != get_operand_specific_data (&aarch64_operands[type])))
1789 set_other_error (mismatch_detail, idx,
1790 _("invalid addressing mode"));
1795 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1796 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1797 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1798 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1799 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1800 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1801 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1802 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1803 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1804 goto sve_rr_operand;
1806 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1807 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1808 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1809 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1812 goto sve_imm_offset;
1814 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1815 modifiers = 1 << AARCH64_MOD_LSL;
1817 assert (opnd->addr.offset.is_reg);
1818 assert (opnd->addr.preind);
1819 if (((1 << opnd->shifter.kind) & modifiers) == 0
1820 || opnd->shifter.amount < 0
1821 || opnd->shifter.amount > 3)
1823 set_other_error (mismatch_detail, idx,
1824 _("invalid addressing mode"));
1829 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1830 modifiers = (1 << AARCH64_MOD_SXTW);
1831 goto sve_zz_operand;
1833 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1834 modifiers = 1 << AARCH64_MOD_UXTW;
1835 goto sve_zz_operand;
1842 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1843 if (type == AARCH64_OPND_LEt)
1845 /* Get the upper bound for the element index. */
1846 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1847 if (!value_in_range_p (opnd->reglist.index, 0, num))
1849 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1853 /* The opcode dependent area stores the number of elements in
1854 each structure to be loaded/stored. */
1855 num = get_opcode_dependent_value (opcode);
1858 case AARCH64_OPND_LVt:
1859 assert (num >= 1 && num <= 4);
1860 /* Unless LD1/ST1, the number of registers should be equal to that
1861 of the structure elements. */
1862 if (num != 1 && opnd->reglist.num_regs != num)
1864 set_reg_list_error (mismatch_detail, idx, num);
1868 case AARCH64_OPND_LVt_AL:
1869 case AARCH64_OPND_LEt:
1870 assert (num >= 1 && num <= 4);
1871 /* The number of registers should be equal to that of the structure
1873 if (opnd->reglist.num_regs != num)
1875 set_reg_list_error (mismatch_detail, idx, num);
1884 case AARCH64_OPND_CLASS_IMMEDIATE:
1885 /* Constraint check on immediate operand. */
1886 imm = opnd->imm.value;
1887 /* E.g. imm_0_31 constrains value to be 0..31. */
1888 if (qualifier_value_in_range_constraint_p (qualifier)
1889 && !value_in_range_p (imm, get_lower_bound (qualifier),
1890 get_upper_bound (qualifier)))
1892 set_imm_out_of_range_error (mismatch_detail, idx,
1893 get_lower_bound (qualifier),
1894 get_upper_bound (qualifier));
1900 case AARCH64_OPND_AIMM:
1901 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1903 set_other_error (mismatch_detail, idx,
1904 _("invalid shift operator"));
1907 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1909 set_other_error (mismatch_detail, idx,
1910 _("shift amount expected to be 0 or 12"));
1913 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1915 set_other_error (mismatch_detail, idx,
1916 _("immediate out of range"));
1921 case AARCH64_OPND_HALF:
1922 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1923 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1925 set_other_error (mismatch_detail, idx,
1926 _("invalid shift operator"));
1929 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1930 if (!value_aligned_p (opnd->shifter.amount, 16))
1932 set_other_error (mismatch_detail, idx,
1933 _("shift amount should be a multiple of 16"));
1936 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1938 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1942 if (opnd->imm.value < 0)
1944 set_other_error (mismatch_detail, idx,
1945 _("negative immediate value not allowed"));
1948 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1950 set_other_error (mismatch_detail, idx,
1951 _("immediate out of range"));
1956 case AARCH64_OPND_IMM_MOV:
1958 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1959 imm = opnd->imm.value;
1963 case OP_MOV_IMM_WIDEN:
1965 /* Fall through... */
1966 case OP_MOV_IMM_WIDE:
1967 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1969 set_other_error (mismatch_detail, idx,
1970 _("immediate out of range"));
1974 case OP_MOV_IMM_LOG:
1975 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1977 set_other_error (mismatch_detail, idx,
1978 _("immediate out of range"));
1989 case AARCH64_OPND_NZCV:
1990 case AARCH64_OPND_CCMP_IMM:
1991 case AARCH64_OPND_EXCEPTION:
1992 case AARCH64_OPND_UIMM4:
1993 case AARCH64_OPND_UIMM7:
1994 case AARCH64_OPND_UIMM3_OP1:
1995 case AARCH64_OPND_UIMM3_OP2:
1996 case AARCH64_OPND_SVE_UIMM3:
1997 case AARCH64_OPND_SVE_UIMM7:
1998 case AARCH64_OPND_SVE_UIMM8:
1999 case AARCH64_OPND_SVE_UIMM8_53:
2000 size = get_operand_fields_width (get_operand_from_code (type));
2002 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2004 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2010 case AARCH64_OPND_SIMM5:
2011 case AARCH64_OPND_SVE_SIMM5:
2012 case AARCH64_OPND_SVE_SIMM5B:
2013 case AARCH64_OPND_SVE_SIMM6:
2014 case AARCH64_OPND_SVE_SIMM8:
2015 size = get_operand_fields_width (get_operand_from_code (type));
2017 if (!value_fit_signed_field_p (opnd->imm.value, size))
2019 set_imm_out_of_range_error (mismatch_detail, idx,
2021 (1 << (size - 1)) - 1);
2026 case AARCH64_OPND_WIDTH:
2027 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2028 && opnds[0].type == AARCH64_OPND_Rd);
2029 size = get_upper_bound (qualifier);
2030 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2031 /* lsb+width <= reg.size */
2033 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2034 size - opnds[idx-1].imm.value);
2039 case AARCH64_OPND_LIMM:
2040 case AARCH64_OPND_SVE_LIMM:
2042 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2043 uint64_t uimm = opnd->imm.value;
2044 if (opcode->op == OP_BIC)
2046 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2048 set_other_error (mismatch_detail, idx,
2049 _("immediate out of range"));
2055 case AARCH64_OPND_IMM0:
2056 case AARCH64_OPND_FPIMM0:
2057 if (opnd->imm.value != 0)
2059 set_other_error (mismatch_detail, idx,
2060 _("immediate zero expected"));
2065 case AARCH64_OPND_SHLL_IMM:
2067 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2068 if (opnd->imm.value != size)
2070 set_other_error (mismatch_detail, idx,
2071 _("invalid shift amount"));
2076 case AARCH64_OPND_IMM_VLSL:
2077 size = aarch64_get_qualifier_esize (qualifier);
2078 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2080 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2086 case AARCH64_OPND_IMM_VLSR:
2087 size = aarch64_get_qualifier_esize (qualifier);
2088 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2090 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2095 case AARCH64_OPND_SIMD_IMM:
2096 case AARCH64_OPND_SIMD_IMM_SFT:
2097 /* Qualifier check. */
2100 case AARCH64_OPND_QLF_LSL:
2101 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2103 set_other_error (mismatch_detail, idx,
2104 _("invalid shift operator"));
2108 case AARCH64_OPND_QLF_MSL:
2109 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2111 set_other_error (mismatch_detail, idx,
2112 _("invalid shift operator"));
2116 case AARCH64_OPND_QLF_NIL:
2117 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2119 set_other_error (mismatch_detail, idx,
2120 _("shift is not permitted"));
2128 /* Is the immediate valid? */
2130 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2132 /* uimm8 or simm8 */
2133 if (!value_in_range_p (opnd->imm.value, -128, 255))
2135 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2139 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2142 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2143 ffffffffgggggggghhhhhhhh'. */
2144 set_other_error (mismatch_detail, idx,
2145 _("invalid value for immediate"));
2148 /* Is the shift amount valid? */
2149 switch (opnd->shifter.kind)
2151 case AARCH64_MOD_LSL:
2152 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2153 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2155 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2159 if (!value_aligned_p (opnd->shifter.amount, 8))
2161 set_unaligned_error (mismatch_detail, idx, 8);
2165 case AARCH64_MOD_MSL:
2166 /* Only 8 and 16 are valid shift amount. */
2167 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2169 set_other_error (mismatch_detail, idx,
2170 _("shift amount expected to be 0 or 16"));
2175 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2177 set_other_error (mismatch_detail, idx,
2178 _("invalid shift operator"));
2185 case AARCH64_OPND_FPIMM:
2186 case AARCH64_OPND_SIMD_FPIMM:
2187 case AARCH64_OPND_SVE_FPIMM8:
2188 if (opnd->imm.is_fp == 0)
2190 set_other_error (mismatch_detail, idx,
2191 _("floating-point immediate expected"));
2194 /* The value is expected to be an 8-bit floating-point constant with
2195 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2196 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2198 if (!value_in_range_p (opnd->imm.value, 0, 255))
2200 set_other_error (mismatch_detail, idx,
2201 _("immediate out of range"));
2204 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2206 set_other_error (mismatch_detail, idx,
2207 _("invalid shift operator"));
2212 case AARCH64_OPND_SVE_AIMM:
2215 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2216 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2217 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2218 uvalue = opnd->imm.value;
2219 shift = opnd->shifter.amount;
2224 set_other_error (mismatch_detail, idx,
2225 _("no shift amount allowed for"
2226 " 8-bit constants"));
2232 if (shift != 0 && shift != 8)
2234 set_other_error (mismatch_detail, idx,
2235 _("shift amount must be 0 or 8"));
2238 if (shift == 0 && (uvalue & 0xff) == 0)
2241 uvalue = (int64_t) uvalue / 256;
2245 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2247 set_other_error (mismatch_detail, idx,
2248 _("immediate too big for element size"));
2251 uvalue = (uvalue - min_value) & mask;
2254 set_other_error (mismatch_detail, idx,
2255 _("invalid arithmetic immediate"));
2260 case AARCH64_OPND_SVE_ASIMM:
2264 case AARCH64_OPND_SVE_I1_HALF_ONE:
2265 assert (opnd->imm.is_fp);
2266 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2268 set_other_error (mismatch_detail, idx,
2269 _("floating-point value must be 0.5 or 1.0"));
2274 case AARCH64_OPND_SVE_I1_HALF_TWO:
2275 assert (opnd->imm.is_fp);
2276 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2278 set_other_error (mismatch_detail, idx,
2279 _("floating-point value must be 0.5 or 2.0"));
2284 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2285 assert (opnd->imm.is_fp);
2286 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2288 set_other_error (mismatch_detail, idx,
2289 _("floating-point value must be 0.0 or 1.0"));
2294 case AARCH64_OPND_SVE_INV_LIMM:
2296 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2297 uint64_t uimm = ~opnd->imm.value;
2298 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2300 set_other_error (mismatch_detail, idx,
2301 _("immediate out of range"));
2307 case AARCH64_OPND_SVE_LIMM_MOV:
2309 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2310 uint64_t uimm = opnd->imm.value;
2311 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2313 set_other_error (mismatch_detail, idx,
2314 _("immediate out of range"));
2317 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2319 set_other_error (mismatch_detail, idx,
2320 _("invalid replicated MOV immediate"));
2326 case AARCH64_OPND_SVE_PATTERN_SCALED:
2327 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2328 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2330 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2335 case AARCH64_OPND_SVE_SHLIMM_PRED:
2336 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2337 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2338 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2340 set_imm_out_of_range_error (mismatch_detail, idx,
2346 case AARCH64_OPND_SVE_SHRIMM_PRED:
2347 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2348 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2349 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2351 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2361 case AARCH64_OPND_CLASS_CP_REG:
2362 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2363 valid range: C0 - C15. */
2364 if (opnd->reg.regno > 15)
2366 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2371 case AARCH64_OPND_CLASS_SYSTEM:
2374 case AARCH64_OPND_PSTATEFIELD:
2375 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2378 The immediate must be #0 or #1. */
2379 if ((opnd->pstatefield == 0x03 /* UAO. */
2380 || opnd->pstatefield == 0x04) /* PAN. */
2381 && opnds[1].imm.value > 1)
2383 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2386 /* MSR SPSel, #uimm4
2387 Uses uimm4 as a control value to select the stack pointer: if
2388 bit 0 is set it selects the current exception level's stack
2389 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2390 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2391 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2393 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2402 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2403 /* Get the upper bound for the element index. */
2404 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2405 /* Index out-of-range. */
2406 if (!value_in_range_p (opnd->reglane.index, 0, num))
2408 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2411 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2412 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2413 number is encoded in "size:M:Rm":
2419 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2420 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2422 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2427 case AARCH64_OPND_CLASS_MODIFIED_REG:
2428 assert (idx == 1 || idx == 2);
2431 case AARCH64_OPND_Rm_EXT:
2432 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2433 && opnd->shifter.kind != AARCH64_MOD_LSL)
2435 set_other_error (mismatch_detail, idx,
2436 _("extend operator expected"));
2439 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2440 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2441 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2443 if (!aarch64_stack_pointer_p (opnds + 0)
2444 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2446 if (!opnd->shifter.operator_present)
2448 set_other_error (mismatch_detail, idx,
2449 _("missing extend operator"));
2452 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2454 set_other_error (mismatch_detail, idx,
2455 _("'LSL' operator not allowed"));
2459 assert (opnd->shifter.operator_present /* Default to LSL. */
2460 || opnd->shifter.kind == AARCH64_MOD_LSL);
2461 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2463 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2466 /* In the 64-bit form, the final register operand is written as Wm
2467 for all but the (possibly omitted) UXTX/LSL and SXTX
2469 N.B. GAS allows X register to be used with any operator as a
2470 programming convenience. */
2471 if (qualifier == AARCH64_OPND_QLF_X
2472 && opnd->shifter.kind != AARCH64_MOD_LSL
2473 && opnd->shifter.kind != AARCH64_MOD_UXTX
2474 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2476 set_other_error (mismatch_detail, idx, _("W register expected"));
2481 case AARCH64_OPND_Rm_SFT:
2482 /* ROR is not available to the shifted register operand in
2483 arithmetic instructions. */
2484 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2486 set_other_error (mismatch_detail, idx,
2487 _("shift operator expected"));
2490 if (opnd->shifter.kind == AARCH64_MOD_ROR
2491 && opcode->iclass != log_shift)
2493 set_other_error (mismatch_detail, idx,
2494 _("'ROR' operator not allowed"));
2497 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2498 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2500 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2517 /* Main entrypoint for the operand constraint checking.
2519 Return 1 if operands of *INST meet the constraint applied by the operand
2520 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2521 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2522 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2523 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2524 error kind when it is notified that an instruction does not pass the check).
2526 Un-determined operand qualifiers may get established during the process. */
2529 aarch64_match_operands_constraint (aarch64_inst *inst,
2530 aarch64_operand_error *mismatch_detail)
2534 DEBUG_TRACE ("enter");
2536 /* Check for cases where a source register needs to be the same as the
2537 destination register. Do this before matching qualifiers since if
2538 an instruction has both invalid tying and invalid qualifiers,
2539 the error about qualifiers would suggest several alternative
2540 instructions that also have invalid tying. */
/* TIED_OPERAND is 1-based; a value of 0 means the opcode has no tied
   operand, so the comparison below is skipped in that case.  */
2541 i = inst->opcode->tied_operand;
2542 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2544 if (mismatch_detail)
2546 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2547 mismatch_detail->index = i;
2548 mismatch_detail->error = NULL;
2553 /* Match operands' qualifier.
2554 *INST has already had qualifier establish for some, if not all, of
2555 its operands; we need to find out whether these established
2556 qualifiers match one of the qualifier sequence in
2557 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2558 with the corresponding qualifier in such a sequence.
2559 Only basic operand constraint checking is done here; the more thorough
2560 constraint checking will be carried out by operand_general_constraint_met_p,
2561 which has to be called after this in order to get all of the operands'
2562 qualifiers established. */
2563 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2565 DEBUG_TRACE ("FAIL on operand qualifier matching");
2566 if (mismatch_detail)
2568 /* Return an error type to indicate that it is the qualifier
2569 matching failure; we don't care about which operand as there
2570 is enough information in the opcode table to reproduce it. */
2571 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2572 mismatch_detail->index = -1;
2573 mismatch_detail->error = NULL;
2578 /* Match operands' constraint. */
/* Walk the (at most AARCH64_MAX_OPND_NUM) operands; AARCH64_OPND_NIL
   terminates the opcode's operand list.  */
2579 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2581 enum aarch64_opnd type = inst->opcode->operands[i];
2582 if (type == AARCH64_OPND_NIL)
2584 if (inst->operands[i].skip)
2586 DEBUG_TRACE ("skip the incomplete operand %d", i);
2589 if (operand_general_constraint_met_p (inst->operands, i, type,
2590 inst->opcode, mismatch_detail) == 0)
2592 DEBUG_TRACE ("FAIL on operand %d", i);
2597 DEBUG_TRACE ("PASS");
2602 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2603 Also updates the TYPE of each INST->OPERANDS with the corresponding
2604 value of OPCODE->OPERANDS.
2606 Note that some operand qualifiers may need to be manually cleared by
2607 the caller before it further calls the aarch64_opcode_encode; by
2608 doing this, it helps the qualifier matching facilities work
2611 const aarch64_opcode*
2612 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2615 const aarch64_opcode *old = inst->opcode;
2617 inst->opcode = opcode;
2619 /* Update the operand types. */
/* AARCH64_OPND_NIL marks the end of the new opcode's operand list;
   operands past it are left untouched.  */
2620 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2622 inst->operands[i].type = opcode->operands[i];
2623 if (opcode->operands[i] == AARCH64_OPND_NIL)
2627 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
/* Search OPERANDS (an opcode's operand-type list, at most
   AARCH64_MAX_OPND_NUM entries, terminated by AARCH64_OPND_NIL) for
   OPERAND.  NOTE(review): the return statements are elided in this
   listing; presumably the matching index is returned on success and -1
   when OPERAND is absent -- confirm against the full source.  */
2633 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2636 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2637 if (operands[i] == operand)
2639 else if (operands[i] == AARCH64_OPND_NIL)
2644 /* R0...R30, followed by FOR31. */
/* Expand to a 32-entry initializer: R(0)..R(30) plus the caller-chosen
   name for register 31 (SP/WSP or XZR/WZR depending on the bank).  */
2645 #define BANK(R, FOR31) \
2646 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2647 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2648 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2649 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2650 /* [0][0] 32-bit integer regs with sp Wn
2651 [0][1] 64-bit integer regs with sp Xn sf=1
2652 [1][0] 32-bit integer regs with #0 Wn
2653 [1][1] 64-bit integer regs with #0 Xn sf=1 */
/* Indexed as int_reg[has_zr][is_64][regno]; see get_int_reg_name.  */
2654 static const char *int_reg[2][2][32] = {
2655 #define R32(X) "w" #X
2656 #define R64(X) "x" #X
2657 { BANK (R32, "wsp"), BANK (R64, "sp") },
2658 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2663 /* Names of the SVE vector registers, first with .S suffixes,
2664 then with .D suffixes. */
2666 static const char *sve_reg[2][32] = {
2667 #define ZS(X) "z" #X ".s"
2668 #define ZD(X) "z" #X ".d"
2669 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2675 /* Return the integer register name.
2676 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2678 static inline const char *
2679 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2681 const int has_zr = sp_reg_p ? 0 : 1;
2682 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2683 return int_reg[has_zr][is_64][regno];
2686 /* Like get_int_reg_name, but IS_64 is always 1. */
2688 static inline const char *
2689 get_64bit_int_reg_name (int regno, int sp_reg_p)
2691 const int has_zr = sp_reg_p ? 0 : 1;
2692 return int_reg[has_zr][1][regno];
2695 /* Get the name of the integer offset register in OPND, using the shift type
2696 to decide whether it's a word or doubleword. */
2698 static inline const char *
2699 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2701 switch (opnd->shifter.kind)
/* UXTW/SXTW extend a 32-bit offset register, so print the W name.  */
2703 case AARCH64_MOD_UXTW:
2704 case AARCH64_MOD_SXTW:
2705 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
/* LSL/SXTX operate on a full 64-bit register, so print the X name.
   NOTE(review): the default case of this switch is elided in this
   listing -- confirm its handling against the full source.  */
2707 case AARCH64_MOD_LSL:
2708 case AARCH64_MOD_SXTX:
2709 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2716 /* Get the name of the SVE vector offset register in OPND, using the operand
2717 qualifier to decide whether the suffix should be .S or .D. */
2719 static inline const char *
2720 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2722 assert (qualifier == AARCH64_OPND_QLF_S_S
2723 || qualifier == AARCH64_OPND_QLF_S_D);
2724 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2727 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize so the value is defined even if the assert below is
     compiled out with NDEBUG.  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the top 32 bits of the IEEE double, then shift into place:
	 sign, NOT(imm8<6>), Replicate(imm8<6>,7), imm8<6:0> mantissa.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32))	/* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* IEEE single: sign, NOT(imm8<6>), Replicate(imm8<6>,4),
	 imm8<6:0> mantissa.  */
      imm = (imm8_7 << 31)	/* imm8<7>  */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4)  */
	| (imm8_6_0 << 19);	/* imm8<6:0>  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2791 /* Produce the string representation of the register list operand *OPND
2792 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2793 the register name that comes before the register number, such as "v". */
2795 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2798 const int num_regs = opnd->reglist.num_regs;
2799 const int first_reg = opnd->reglist.first_regno;
/* Register numbers wrap modulo 32, e.g. {v31.4s, v0.4s}.  */
2800 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2801 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2802 char tb[8]; /* Temporary buffer. */
2804 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2805 assert (num_regs >= 1 && num_regs <= 4);
2807 /* Prepare the index if any. */
2808 if (opnd->reglist.has_index)
2809 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2813 /* The hyphenated form is preferred for disassembly if there are
2814 more than two registers in the list, and the register numbers
2815 are monotonically increasing in increments of one. */
2816 if (num_regs > 2 && last_reg > first_reg)
2817 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2818 prefix, last_reg, qlf_name, tb);
/* Otherwise list each register explicitly; the switch on NUM_REGS that
   selects one of the four snprintf calls below is elided in this
   listing.  */
2821 const int reg0 = first_reg;
2822 const int reg1 = (first_reg + 1) & 0x1f;
2823 const int reg2 = (first_reg + 2) & 0x1f;
2824 const int reg3 = (first_reg + 3) & 0x1f;
2829 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2832 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2833 prefix, reg1, qlf_name, tb);
2836 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2837 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2838 prefix, reg2, qlf_name, tb);
2841 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2842 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2843 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2849 /* Print the register+immediate address in OPND to BUF, which has SIZE
2850 characters. BASE is the name of the base register. */
2853 print_immediate_offset_address (char *buf, size_t size,
2854 const aarch64_opnd_info *opnd,
/* Writeback: pre-indexed prints [base,#imm]!, post-indexed [base],#imm.  */
2857 if (opnd->addr.writeback)
2859 if (opnd->addr.preind)
2860 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2862 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
/* No writeback: an explicit operator means the SVE "mul vl" scaled
   form; otherwise the immediate is printed only when non-zero.  */
2866 if (opnd->shifter.operator_present)
2868 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2869 snprintf (buf, size, "[%s,#%d,mul vl]",
2870 base, opnd->addr.offset.imm);
2872 else if (opnd->addr.offset.imm)
2873 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2875 snprintf (buf, size, "[%s]", base);
2879 /* Produce the string representation of the register offset address operand
2880 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2881 the names of the base and offset registers. */
2883 print_register_offset_address (char *buf, size_t size,
2884 const aarch64_opnd_info *opnd,
2885 const char *base, const char *offset)
2887 char tb[16]; /* Temporary buffer. */
2888 bfd_boolean print_extend_p = TRUE;
2889 bfd_boolean print_amount_p = TRUE;
2890 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2892 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2893 || !opnd->shifter.amount_present))
2895 /* Don't print the shift/extend amount when the amount is zero and
2896 when it is not the special case of 8-bit load/store instruction. */
2897 print_amount_p = FALSE;
2898 /* Likewise, no need to print the shift operator LSL in such a
2900 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2901 print_extend_p = FALSE;
2904 /* Prepare for the extend/shift. */
2908 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2909 opnd->shifter.amount)
2911 snprintf (tb, sizeof (tb), ",%s", shift_name);
/* TB holds the optional ",<extend|shift> [#amount]" suffix (empty when
   neither is printed).  */
2916 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2919 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2920 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2921 PC, PCREL_P and ADDRESS are used to pass in and return information about
2922 the PC-relative address calculation, where the PC value is passed in
2923 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2924 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2925 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2927 The function serves both the disassembler and the assembler diagnostics
2928 issuer, which is the reason why it lives in this file. */
2931 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2932 const aarch64_opcode *opcode,
2933 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2937 const char *name = NULL;
2938 const aarch64_opnd_info *opnd = opnds + idx;
2939 enum aarch64_modifier_kind kind;
2940 uint64_t addr, enum_value;
/* NOTE(review): the body is one large switch on the operand type; the
   switch head and the per-case break/return statements are elided in
   this listing.  */
/* --- General-purpose register operands. --- */
2948 case AARCH64_OPND_Rd:
2949 case AARCH64_OPND_Rn:
2950 case AARCH64_OPND_Rm:
2951 case AARCH64_OPND_Rt:
2952 case AARCH64_OPND_Rt2:
2953 case AARCH64_OPND_Rs:
2954 case AARCH64_OPND_Ra:
2955 case AARCH64_OPND_Rt_SYS:
2956 case AARCH64_OPND_PAIRREG:
2957 case AARCH64_OPND_SVE_Rm:
2958 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2959 the <ic_op>, therefore we use opnd->present to override the
2960 generic optional-ness information. */
2961 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2963 /* Omit the operand, e.g. RET. */
2964 if (optional_operand_p (opcode, idx)
2965 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2967 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2968 || opnd->qualifier == AARCH64_OPND_QLF_X)
2969 snprintf (buf, size, "%s",
2970 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2973 case AARCH64_OPND_Rd_SP:
2974 case AARCH64_OPND_Rn_SP:
2975 case AARCH64_OPND_SVE_Rn_SP:
2976 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2977 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2978 || opnd->qualifier == AARCH64_OPND_QLF_X
2979 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2980 snprintf (buf, size, "%s",
2981 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2984 case AARCH64_OPND_Rm_EXT:
2985 kind = opnd->shifter.kind;
2986 assert (idx == 1 || idx == 2);
2987 if ((aarch64_stack_pointer_p (opnds)
2988 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2989 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2990 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2991 && kind == AARCH64_MOD_UXTW)
2992 || (opnd->qualifier == AARCH64_OPND_QLF_X
2993 && kind == AARCH64_MOD_UXTX)))
2995 /* 'LSL' is the preferred form in this case. */
2996 kind = AARCH64_MOD_LSL;
2997 if (opnd->shifter.amount == 0)
2999 /* Shifter omitted. */
3000 snprintf (buf, size, "%s",
3001 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3005 if (opnd->shifter.amount)
3006 snprintf (buf, size, "%s, %s #%" PRIi64,
3007 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3008 aarch64_operand_modifiers[kind].name,
3009 opnd->shifter.amount);
3011 snprintf (buf, size, "%s, %s",
3012 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3013 aarch64_operand_modifiers[kind].name);
3016 case AARCH64_OPND_Rm_SFT:
3017 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3018 || opnd->qualifier == AARCH64_OPND_QLF_X);
3019 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3020 snprintf (buf, size, "%s",
3021 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3023 snprintf (buf, size, "%s, %s #%" PRIi64,
3024 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3025 aarch64_operand_modifiers[opnd->shifter.kind].name,
3026 opnd->shifter.amount);
/* --- FP/SIMD scalar and vector register operands. --- */
3029 case AARCH64_OPND_Fd:
3030 case AARCH64_OPND_Fn:
3031 case AARCH64_OPND_Fm:
3032 case AARCH64_OPND_Fa:
3033 case AARCH64_OPND_Ft:
3034 case AARCH64_OPND_Ft2:
3035 case AARCH64_OPND_Sd:
3036 case AARCH64_OPND_Sn:
3037 case AARCH64_OPND_Sm:
3038 case AARCH64_OPND_SVE_VZn:
3039 case AARCH64_OPND_SVE_Vd:
3040 case AARCH64_OPND_SVE_Vm:
3041 case AARCH64_OPND_SVE_Vn:
3042 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3046 case AARCH64_OPND_Vd:
3047 case AARCH64_OPND_Vn:
3048 case AARCH64_OPND_Vm:
3049 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3050 aarch64_get_qualifier_name (opnd->qualifier));
3053 case AARCH64_OPND_Ed:
3054 case AARCH64_OPND_En:
3055 case AARCH64_OPND_Em:
3056 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3057 aarch64_get_qualifier_name (opnd->qualifier),
3058 opnd->reglane.index);
3061 case AARCH64_OPND_VdD1:
3062 case AARCH64_OPND_VnD1:
3063 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3066 case AARCH64_OPND_LVn:
3067 case AARCH64_OPND_LVt:
3068 case AARCH64_OPND_LVt_AL:
3069 case AARCH64_OPND_LEt:
3070 print_register_list (buf, size, opnd, "v");
/* --- SVE predicate and vector register operands. --- */
3073 case AARCH64_OPND_SVE_Pd:
3074 case AARCH64_OPND_SVE_Pg3:
3075 case AARCH64_OPND_SVE_Pg4_5:
3076 case AARCH64_OPND_SVE_Pg4_10:
3077 case AARCH64_OPND_SVE_Pg4_16:
3078 case AARCH64_OPND_SVE_Pm:
3079 case AARCH64_OPND_SVE_Pn:
3080 case AARCH64_OPND_SVE_Pt:
3081 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3082 snprintf (buf, size, "p%d", opnd->reg.regno);
3083 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3084 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3085 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3086 aarch64_get_qualifier_name (opnd->qualifier));
3088 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3089 aarch64_get_qualifier_name (opnd->qualifier));
3092 case AARCH64_OPND_SVE_Za_5:
3093 case AARCH64_OPND_SVE_Za_16:
3094 case AARCH64_OPND_SVE_Zd:
3095 case AARCH64_OPND_SVE_Zm_5:
3096 case AARCH64_OPND_SVE_Zm_16:
3097 case AARCH64_OPND_SVE_Zn:
3098 case AARCH64_OPND_SVE_Zt:
3099 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3100 snprintf (buf, size, "z%d", opnd->reg.regno);
3102 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3103 aarch64_get_qualifier_name (opnd->qualifier));
3106 case AARCH64_OPND_SVE_ZnxN:
3107 case AARCH64_OPND_SVE_ZtxN:
3108 print_register_list (buf, size, opnd, "z");
3111 case AARCH64_OPND_SVE_Zn_INDEX:
3112 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3113 aarch64_get_qualifier_name (opnd->qualifier),
3114 opnd->reglane.index);
3117 case AARCH64_OPND_Cn:
3118 case AARCH64_OPND_Cm:
3119 snprintf (buf, size, "C%d", opnd->reg.regno);
/* --- Immediate operands. --- */
3122 case AARCH64_OPND_IDX:
3123 case AARCH64_OPND_IMM:
3124 case AARCH64_OPND_WIDTH:
3125 case AARCH64_OPND_UIMM3_OP1:
3126 case AARCH64_OPND_UIMM3_OP2:
3127 case AARCH64_OPND_BIT_NUM:
3128 case AARCH64_OPND_IMM_VLSL:
3129 case AARCH64_OPND_IMM_VLSR:
3130 case AARCH64_OPND_SHLL_IMM:
3131 case AARCH64_OPND_IMM0:
3132 case AARCH64_OPND_IMMR:
3133 case AARCH64_OPND_IMMS:
3134 case AARCH64_OPND_FBITS:
3135 case AARCH64_OPND_SIMM5:
3136 case AARCH64_OPND_SVE_SHLIMM_PRED:
3137 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3138 case AARCH64_OPND_SVE_SHRIMM_PRED:
3139 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3140 case AARCH64_OPND_SVE_SIMM5:
3141 case AARCH64_OPND_SVE_SIMM5B:
3142 case AARCH64_OPND_SVE_SIMM6:
3143 case AARCH64_OPND_SVE_SIMM8:
3144 case AARCH64_OPND_SVE_UIMM3:
3145 case AARCH64_OPND_SVE_UIMM7:
3146 case AARCH64_OPND_SVE_UIMM8:
3147 case AARCH64_OPND_SVE_UIMM8_53:
3148 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3151 case AARCH64_OPND_SVE_I1_HALF_ONE:
3152 case AARCH64_OPND_SVE_I1_HALF_TWO:
3153 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3156 c.i = opnd->imm.value;
3157 snprintf (buf, size, "#%.1f", c.f);
3161 case AARCH64_OPND_SVE_PATTERN:
3162 if (optional_operand_p (opcode, idx)
3163 && opnd->imm.value == get_optional_operand_default_value (opcode))
3165 enum_value = opnd->imm.value;
3166 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
/* A null entry in the pattern array means a reserved encoding, which
   is printed as a raw immediate instead.  */
3167 if (aarch64_sve_pattern_array[enum_value])
3168 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3170 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3173 case AARCH64_OPND_SVE_PATTERN_SCALED:
3174 if (optional_operand_p (opcode, idx)
3175 && !opnd->shifter.operator_present
3176 && opnd->imm.value == get_optional_operand_default_value (opcode))
3178 enum_value = opnd->imm.value;
3179 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3180 if (aarch64_sve_pattern_array[opnd->imm.value])
3181 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3183 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3184 if (opnd->shifter.operator_present)
3186 size_t len = strlen (buf);
3187 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3188 aarch64_operand_modifiers[opnd->shifter.kind].name,
3189 opnd->shifter.amount);
3193 case AARCH64_OPND_SVE_PRFOP:
3194 enum_value = opnd->imm.value;
3195 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3196 if (aarch64_sve_prfop_array[enum_value])
3197 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3199 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3202 case AARCH64_OPND_IMM_MOV:
3203 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3205 case 4: /* e.g. MOV Wd, #<imm32>. */
3207 int imm32 = opnd->imm.value;
3208 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3211 case 8: /* e.g. MOV Xd, #<imm64>. */
3212 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3213 opnd->imm.value, opnd->imm.value);
3215 default: assert (0);
3219 case AARCH64_OPND_FPIMM0:
3220 snprintf (buf, size, "#0.0");
3223 case AARCH64_OPND_LIMM:
3224 case AARCH64_OPND_AIMM:
3225 case AARCH64_OPND_HALF:
3226 case AARCH64_OPND_SVE_INV_LIMM:
3227 case AARCH64_OPND_SVE_LIMM:
3228 case AARCH64_OPND_SVE_LIMM_MOV:
3229 if (opnd->shifter.amount)
3230 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3231 opnd->shifter.amount);
3233 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3236 case AARCH64_OPND_SIMD_IMM:
3237 case AARCH64_OPND_SIMD_IMM_SFT:
3238 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3239 || opnd->shifter.kind == AARCH64_MOD_NONE)
3240 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3242 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3243 aarch64_operand_modifiers[opnd->shifter.kind].name,
3244 opnd->shifter.amount);
3247 case AARCH64_OPND_SVE_AIMM:
3248 case AARCH64_OPND_SVE_ASIMM:
3249 if (opnd->shifter.amount)
3250 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3251 opnd->shifter.amount);
3253 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3256 case AARCH64_OPND_FPIMM:
3257 case AARCH64_OPND_SIMD_FPIMM:
3258 case AARCH64_OPND_SVE_FPIMM8:
3259 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3261 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3264 c.i = expand_fp_imm (2, opnd->imm.value);
3265 snprintf (buf, size, "#%.18e", c.f);
3268 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3271 c.i = expand_fp_imm (4, opnd->imm.value);
3272 snprintf (buf, size, "#%.18e", c.f);
3275 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3278 c.i = expand_fp_imm (8, opnd->imm.value);
3279 snprintf (buf, size, "#%.18e", c.d);
3282 default: assert (0);
3286 case AARCH64_OPND_CCMP_IMM:
3287 case AARCH64_OPND_NZCV:
3288 case AARCH64_OPND_EXCEPTION:
3289 case AARCH64_OPND_UIMM4:
3290 case AARCH64_OPND_UIMM7:
3291 if (optional_operand_p (opcode, idx) == TRUE
3292 && (opnd->imm.value ==
3293 (int64_t) get_optional_operand_default_value (opcode)))
3294 /* Omit the operand, e.g. DCPS1. */
3296 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3299 case AARCH64_OPND_COND:
3300 case AARCH64_OPND_COND1:
3301 snprintf (buf, size, "%s", opnd->cond->names[0]);
/* --- PC-relative address operands. --- */
3304 case AARCH64_OPND_ADDR_ADRP:
3305 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3311 /* This is not necessary during the disassembling, as print_address_func
3312 in the disassemble_info will take care of the printing. But some
3313 other callers may be still interested in getting the string in *STR,
3314 so here we do snprintf regardless. */
3315 snprintf (buf, size, "#0x%" PRIx64, addr);
3318 case AARCH64_OPND_ADDR_PCREL14:
3319 case AARCH64_OPND_ADDR_PCREL19:
3320 case AARCH64_OPND_ADDR_PCREL21:
3321 case AARCH64_OPND_ADDR_PCREL26:
3322 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3327 /* This is not necessary during the disassembling, as print_address_func
3328 in the disassemble_info will take care of the printing. But some
3329 other callers may be still interested in getting the string in *STR,
3330 so here we do snprintf regardless. */
3331 snprintf (buf, size, "#0x%" PRIx64, addr);
/* --- Memory address operands. --- */
3334 case AARCH64_OPND_ADDR_SIMPLE:
3335 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3336 case AARCH64_OPND_SIMD_ADDR_POST:
3337 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3338 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3340 if (opnd->addr.offset.is_reg)
3341 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3343 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3346 snprintf (buf, size, "[%s]", name);
3349 case AARCH64_OPND_ADDR_REGOFF:
3350 case AARCH64_OPND_SVE_ADDR_RR:
3351 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3352 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3353 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3354 case AARCH64_OPND_SVE_ADDR_RX:
3355 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3356 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3357 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3358 print_register_offset_address
3359 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3360 get_offset_int_reg_name (opnd));
3363 case AARCH64_OPND_SVE_ADDR_RZ:
3364 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3365 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3366 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3367 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3368 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3369 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3370 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3371 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3372 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3373 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3374 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3375 print_register_offset_address
3376 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3377 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3380 case AARCH64_OPND_ADDR_SIMM7:
3381 case AARCH64_OPND_ADDR_SIMM9:
3382 case AARCH64_OPND_ADDR_SIMM9_2:
3383 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3384 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3385 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3386 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3387 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3388 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3389 case AARCH64_OPND_SVE_ADDR_RI_U6:
3390 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3391 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3392 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3393 print_immediate_offset_address
3394 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3397 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3398 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3399 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3400 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3401 print_immediate_offset_address
3403 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3406 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3407 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3408 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3409 print_register_offset_address
3411 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3412 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3415 case AARCH64_OPND_ADDR_UIMM12:
3416 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3417 if (opnd->addr.offset.imm)
3418 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3420 snprintf (buf, size, "[%s]", name);
/* --- System register and system operands. --- */
3423 case AARCH64_OPND_SYSREG:
3424 for (i = 0; aarch64_sys_regs[i].name; ++i)
3425 if (aarch64_sys_regs[i].value == opnd->sysreg
3426 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3428 if (aarch64_sys_regs[i].name)
3429 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3432 /* Implementation defined system register. */
3433 unsigned int value = opnd->sysreg;
3434 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3435 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3440 case AARCH64_OPND_PSTATEFIELD:
3441 for (i = 0; aarch64_pstatefields[i].name; ++i)
3442 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3444 assert (aarch64_pstatefields[i].name);
3445 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3448 case AARCH64_OPND_SYSREG_AT:
3449 case AARCH64_OPND_SYSREG_DC:
3450 case AARCH64_OPND_SYSREG_IC:
3451 case AARCH64_OPND_SYSREG_TLBI:
3452 snprintf (buf, size, "%s", opnd->sysins_op->name);
3455 case AARCH64_OPND_BARRIER:
3456 snprintf (buf, size, "%s", opnd->barrier->name);
3459 case AARCH64_OPND_BARRIER_ISB:
3460 /* Operand can be omitted, e.g. in DCPS1. */
3461 if (! optional_operand_p (opcode, idx)
3462 || (opnd->barrier->value
3463 != get_optional_operand_default_value (opcode)))
3464 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3467 case AARCH64_OPND_PRFOP:
3468 if (opnd->prfop->name != NULL)
3469 snprintf (buf, size, "%s", opnd->prfop->name);
3471 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3474 case AARCH64_OPND_BARRIER_PSB:
3475 snprintf (buf, size, "%s", opnd->hint_option->name);
/* Pack a system-register encoding (op0, op1, CRn, CRm, op2) into the
   single value stored in the aarch64_sys_regs[] table below.  */
3483 #define CPENC(op0,op1,crn,crm,op2) \
3484 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3485 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3486 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3487 /* for 3.9.10 System Instructions */
3488 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
/* Flag bits for the FLAGS field of aarch64_sys_reg entries.  */
3510 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3515 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3520 #define F_HASXT 0x4 /* System instruction register <Xt>
3524 /* TODO there are two more issues that need to be resolved
3525 1. handle read-only and write-only system registers
3526 2. handle cpu-implementation-defined system registers. */
3527 const aarch64_sys_reg aarch64_sys_regs [] =
3529 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3530 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3531 { "elr_el1", CPEN_(0,C0,1), 0 },
3532 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3533 { "sp_el0", CPEN_(0,C1,0), 0 },
3534 { "spsel", CPEN_(0,C2,0), 0 },
3535 { "daif", CPEN_(3,C2,1), 0 },
3536 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3537 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3538 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3539 { "nzcv", CPEN_(3,C2,0), 0 },
3540 { "fpcr", CPEN_(3,C4,0), 0 },
3541 { "fpsr", CPEN_(3,C4,1), 0 },
3542 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3543 { "dlr_el0", CPEN_(3,C5,1), 0 },
3544 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3545 { "elr_el2", CPEN_(4,C0,1), 0 },
3546 { "sp_el1", CPEN_(4,C1,0), 0 },
3547 { "spsr_irq", CPEN_(4,C3,0), 0 },
3548 { "spsr_abt", CPEN_(4,C3,1), 0 },
3549 { "spsr_und", CPEN_(4,C3,2), 0 },
3550 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3551 { "spsr_el3", CPEN_(6,C0,0), 0 },
3552 { "elr_el3", CPEN_(6,C0,1), 0 },
3553 { "sp_el2", CPEN_(6,C1,0), 0 },
3554 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3555 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3556 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3557 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3558 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3559 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3560 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3561 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3562 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3563 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3564 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3565 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3566 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3567 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3568 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3569 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3570 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3571 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3572 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3573 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3574 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3575 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3576 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3577 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3578 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3579 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3580 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3581 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3582 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3583 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3584 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3585 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3586 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3587 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3588 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3589 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3590 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3591 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3592 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3593 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3594 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3595 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3596 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3597 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3598 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3599 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3600 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3601 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3602 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3603 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3604 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3605 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3606 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3607 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3608 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3609 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3610 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3611 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3612 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3613 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3614 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3615 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3616 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3617 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3618 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3619 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3620 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3621 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3622 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3623 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3624 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3625 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3626 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3627 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3628 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3629 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3630 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3631 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3632 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3633 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3634 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3635 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3636 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3637 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3638 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3639 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3640 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3641 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3642 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3643 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3644 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3645 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3646 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3647 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3648 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3649 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3650 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3651 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3652 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3653 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3654 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3655 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3656 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3657 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3658 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3659 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3660 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3661 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3662 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3663 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3664 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3665 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3666 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3667 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3668 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3669 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3670 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3671 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3672 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3673 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3674 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3675 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3676 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3677 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3678 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3679 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3680 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3681 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3682 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3683 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3684 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3685 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3686 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3687 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3688 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3689 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3690 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3691 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3692 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3693 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3694 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3695 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3696 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3697 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3698 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3699 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3700 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3701 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3702 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3703 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3704 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3705 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3706 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3707 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3708 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3709 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3710 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3711 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3712 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3713 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3714 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3715 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3716 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3717 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3718 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3719 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3720 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3721 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3722 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3723 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3724 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3725 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3726 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3727 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3728 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3729 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3730 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3731 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3732 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3733 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3734 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3735 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3736 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3737 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3738 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3739 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3740 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3741 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3742 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3743 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3744 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3745 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3746 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3747 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3748 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3749 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3750 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3751 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3752 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3753 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3754 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3755 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3756 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3757 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3758 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3759 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3760 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3761 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3762 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3763 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3764 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3765 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3766 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3767 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3768 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3769 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3770 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3771 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3772 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3773 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3774 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3775 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3776 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3777 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3778 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3779 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3780 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3781 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3782 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3783 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3784 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3785 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3786 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3787 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3788 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3789 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3790 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3791 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3792 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3793 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3794 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3795 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3796 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3797 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3798 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3799 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3800 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3801 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3802 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3803 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3804 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3805 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3806 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3807 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3808 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3809 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3810 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3811 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3812 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3813 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3814 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3815 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3816 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3817 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3818 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3819 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3820 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3821 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3822 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3823 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3824 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3825 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3826 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3827 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3828 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3829 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3830 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3831 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3832 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3833 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3834 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3835 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3836 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3837 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3838 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3839 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3840 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3841 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3842 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3843 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3844 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3845 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3846 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3847 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3848 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3849 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3850 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3851 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3852 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3853 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3854 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3855 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3856 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3857 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3858 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3859 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3860 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3861 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3862 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3863 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3864 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3865 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3866 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3867 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3868 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3869 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3870 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3871 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3872 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3873 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3874 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3875 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3876 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3877 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3878 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3879 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3880 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3881 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3882 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3883 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3884 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3885 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3886 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3887 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3888 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3889 { 0, CPENC(0,0,0,0,0), 0 },
3893 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3895 return (reg->flags & F_DEPRECATED) != 0;
3899 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3900 const aarch64_sys_reg *reg)
3902 if (!(reg->flags & F_ARCHEXT))
3905 /* PAN. Values are from aarch64_sys_regs. */
3906 if (reg->value == CPEN_(0,C2,3)
3907 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3910 /* Virtualization host extensions: system registers. */
3911 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3912 || reg->value == CPENC (3, 4, C13, C0, 1)
3913 || reg->value == CPENC (3, 4, C14, C3, 0)
3914 || reg->value == CPENC (3, 4, C14, C3, 1)
3915 || reg->value == CPENC (3, 4, C14, C3, 2))
3916 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3919 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3920 if ((reg->value == CPEN_ (5, C0, 0)
3921 || reg->value == CPEN_ (5, C0, 1)
3922 || reg->value == CPENC (3, 5, C1, C0, 0)
3923 || reg->value == CPENC (3, 5, C1, C0, 2)
3924 || reg->value == CPENC (3, 5, C2, C0, 0)
3925 || reg->value == CPENC (3, 5, C2, C0, 1)
3926 || reg->value == CPENC (3, 5, C2, C0, 2)
3927 || reg->value == CPENC (3, 5, C5, C1, 0)
3928 || reg->value == CPENC (3, 5, C5, C1, 1)
3929 || reg->value == CPENC (3, 5, C5, C2, 0)
3930 || reg->value == CPENC (3, 5, C6, C0, 0)
3931 || reg->value == CPENC (3, 5, C10, C2, 0)
3932 || reg->value == CPENC (3, 5, C10, C3, 0)
3933 || reg->value == CPENC (3, 5, C12, C0, 0)
3934 || reg->value == CPENC (3, 5, C13, C0, 1)
3935 || reg->value == CPENC (3, 5, C14, C1, 0))
3936 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3939 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3940 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3941 || reg->value == CPENC (3, 5, C14, C2, 1)
3942 || reg->value == CPENC (3, 5, C14, C2, 2)
3943 || reg->value == CPENC (3, 5, C14, C3, 0)
3944 || reg->value == CPENC (3, 5, C14, C3, 1)
3945 || reg->value == CPENC (3, 5, C14, C3, 2))
3946 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3949 /* ARMv8.2 features. */
3951 /* ID_AA64MMFR2_EL1. */
3952 if (reg->value == CPENC (3, 0, C0, C7, 2)
3953 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3957 if (reg->value == CPEN_ (0, C2, 4)
3958 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3961 /* RAS extension. */
3963 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3964 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3965 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3966 || reg->value == CPENC (3, 0, C5, C3, 1)
3967 || reg->value == CPENC (3, 0, C5, C3, 2)
3968 || reg->value == CPENC (3, 0, C5, C3, 3)
3969 || reg->value == CPENC (3, 0, C5, C4, 0)
3970 || reg->value == CPENC (3, 0, C5, C4, 1)
3971 || reg->value == CPENC (3, 0, C5, C4, 2)
3972 || reg->value == CPENC (3, 0, C5, C4, 3)
3973 || reg->value == CPENC (3, 0, C5, C5, 0)
3974 || reg->value == CPENC (3, 0, C5, C5, 1))
3975 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3978 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3979 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3980 || reg->value == CPENC (3, 0, C12, C1, 1)
3981 || reg->value == CPENC (3, 4, C12, C1, 1))
3982 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3985 /* Statistical Profiling extension. */
3986 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3987 || reg->value == CPENC (3, 0, C9, C10, 1)
3988 || reg->value == CPENC (3, 0, C9, C10, 3)
3989 || reg->value == CPENC (3, 0, C9, C10, 7)
3990 || reg->value == CPENC (3, 0, C9, C9, 0)
3991 || reg->value == CPENC (3, 0, C9, C9, 2)
3992 || reg->value == CPENC (3, 0, C9, C9, 3)
3993 || reg->value == CPENC (3, 0, C9, C9, 4)
3994 || reg->value == CPENC (3, 0, C9, C9, 5)
3995 || reg->value == CPENC (3, 0, C9, C9, 6)
3996 || reg->value == CPENC (3, 0, C9, C9, 7)
3997 || reg->value == CPENC (3, 4, C9, C9, 0)
3998 || reg->value == CPENC (3, 5, C9, C9, 0))
3999 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4005 const aarch64_sys_reg aarch64_pstatefields [] =
4007 { "spsel", 0x05, 0 },
4008 { "daifset", 0x1e, 0 },
4009 { "daifclr", 0x1f, 0 },
4010 { "pan", 0x04, F_ARCHEXT },
4011 { "uao", 0x03, F_ARCHEXT },
4012 { 0, CPENC(0,0,0,0,0), 0 },
4016 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4017 const aarch64_sys_reg *reg)
4019 if (!(reg->flags & F_ARCHEXT))
4022 /* PAN. Values are from aarch64_pstatefields. */
4023 if (reg->value == 0x04
4024 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4027 /* UAO. Values are from aarch64_pstatefields. */
4028 if (reg->value == 0x03
4029 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4035 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4037 { "ialluis", CPENS(0,C7,C1,0), 0 },
4038 { "iallu", CPENS(0,C7,C5,0), 0 },
4039 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4040 { 0, CPENS(0,0,0,0), 0 }
4043 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4045 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4046 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4047 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4048 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4049 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4050 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4051 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4052 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4053 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4054 { 0, CPENS(0,0,0,0), 0 }
4057 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4059 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4060 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4061 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4062 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4063 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4064 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4065 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4066 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4067 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4068 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4069 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4070 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4071 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4072 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4073 { 0, CPENS(0,0,0,0), 0 }
4076 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4078 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4079 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4080 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4081 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4082 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4083 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4084 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4085 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4086 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4087 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4088 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4089 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4090 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4091 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4092 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4093 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4094 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4095 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4096 { "alle2", CPENS(4,C8,C7,0), 0 },
4097 { "alle2is", CPENS(4,C8,C3,0), 0 },
4098 { "alle1", CPENS(4,C8,C7,4), 0 },
4099 { "alle1is", CPENS(4,C8,C3,4), 0 },
4100 { "alle3", CPENS(6,C8,C7,0), 0 },
4101 { "alle3is", CPENS(6,C8,C3,0), 0 },
4102 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4103 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4104 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4105 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4106 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4107 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4108 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4109 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4110 { 0, CPENS(0,0,0,0), 0 }
4114 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4116 return (sys_ins_reg->flags & F_HASXT) != 0;
4120 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4121 const aarch64_sys_ins_reg *reg)
4123 if (!(reg->flags & F_ARCHEXT))
4126 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4127 if (reg->value == CPENS (3, C7, C12, 1)
4128 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4131 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4132 if ((reg->value == CPENS (0, C7, C9, 0)
4133 || reg->value == CPENS (0, C7, C9, 1))
4134 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4157 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4158 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4161 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4162 const aarch64_insn insn)
4164 int t = BITS (insn, 4, 0);
4165 int n = BITS (insn, 9, 5);
4166 int t2 = BITS (insn, 14, 10);
4170 /* Write back enabled. */
4171 if ((t == n || t2 == n) && n != 31)
4185 /* Return true if VALUE cannot be moved into an SVE register using DUP
4186 (with any element size, not just ESIZE) and if using DUPM would
4187 therefore be OK. ESIZE is the number of bytes in the immediate. */
4190 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4192 int64_t svalue = uvalue;
4193 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4195 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4197 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4199 svalue = (int32_t) uvalue;
4200 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4202 svalue = (int16_t) uvalue;
4203 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4207 if ((svalue & 0xff) == 0)
4209 return svalue < -128 || svalue >= 128;
4212 /* Include the opcode description table as well as the operand description
4214 #define VERIFIER(x) verify_##x
4215 #include "aarch64-tbl.h"