1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   NOTE(review): the 32 initializer rows and the table braces are elided
   in this copy of the file; restore them from the master sources.  */
const char *const aarch64_sve_pattern_array[32] = {
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   NOTE(review): the 16 initializer rows and the table braces are elided
   in this copy of the file; restore them from the master sources.  */
const char *const aarch64_sve_prfop_array[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
127 DP_VECTOR_ACROSS_LANES,
/* Map each enum data_pattern value to the index of the operand that is
   significant for encoding/decoding the size:Q fields; must stay in the
   same order as the enumerators of enum data_pattern.
   (Definition was incomplete in this copy: table braces restored.)  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern (enum data_pattern) that the sequence describes.

   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.

   NOTE(review): the function braces and the final fall-through return
   (presumably DP_UNKNOWN) are elided in this copy of the file; restore
   them from the master sources before building.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
    /* First operand is a vector register with an arrangement.  */
    if (vector_qualifier_p (qualifiers[0]) == TRUE)
	/* All three operands share one element size,
	   e.g. v.4s, v.4s, v.4s
	   or   v.4h, v.4h, v.h[3].  */
	if (qualifiers[0] == qualifiers[1]
	    && vector_qualifier_p (qualifiers[2]) == TRUE
	    && (aarch64_get_qualifier_esize (qualifiers[0])
		== aarch64_get_qualifier_esize (qualifiers[1]))
	    && (aarch64_get_qualifier_esize (qualifiers[0])
		== aarch64_get_qualifier_esize (qualifiers[2])))
	  return DP_VECTOR_3SAME;
	/* Destination elements are twice the source width ("long" form),
	   e.g. v.8h, v.8b, v.8b
	   or   v.4s, v.4h, v.h[2].  */
	if (vector_qualifier_p (qualifiers[1]) == TRUE
	    && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	    && (aarch64_get_qualifier_esize (qualifiers[0])
		== aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	  return DP_VECTOR_LONG;
	/* Second source has half-width elements ("wide" form),
	   e.g. v.8h, v.8h, v.8b.  */
	if (qualifiers[0] == qualifiers[1]
	    && vector_qualifier_p (qualifiers[2]) == TRUE
	    && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	    && (aarch64_get_qualifier_esize (qualifiers[0])
		== aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	    && (aarch64_get_qualifier_esize (qualifiers[0])
		== aarch64_get_qualifier_esize (qualifiers[1])))
	  return DP_VECTOR_WIDE;
    /* First operand is a scalar FP/SIMD register.  */
    else if (fp_qualifier_p (qualifiers[0]) == TRUE)
	/* e.g. SADDLV <V><d>, <Vn>.<T>.  */
	if (vector_qualifier_p (qualifiers[1]) == TRUE
	    && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	  return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
/* Bit-field extents used by the operand encoders/decoders.  Each entry is
   { lsb, width } within the 32-bit instruction word, and the order must
   match the enum that indexes this table.
   NOTE(review): the opening '{', the closing '};' and possibly some
   leading rows are elided in this copy of the file; restore them from the
   master sources before building.  */
const aarch64_field fields[] =
  { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
  { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
  { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
  { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
  { 5, 19 },	/* imm19: e.g. in CBZ.  */
  { 5, 19 },	/* immhi: e.g. in ADRP.  */
  { 29, 2 },	/* immlo: e.g. in ADRP.  */
  { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
  { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
  { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
  { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
  { 0, 5 },	/* Rt: in load/store instructions.  */
  { 0, 5 },	/* Rd: in many integer instructions.  */
  { 5, 5 },	/* Rn: in many integer instructions.  */
  { 10, 5 },	/* Rt2: in load/store pair instructions.  */
  { 10, 5 },	/* Ra: in fp instructions.  */
  { 5, 3 },	/* op2: in the system instructions.  */
  { 8, 4 },	/* CRm: in the system instructions.  */
  { 12, 4 },	/* CRn: in the system instructions.  */
  { 16, 3 },	/* op1: in the system instructions.  */
  { 19, 2 },	/* op0: in the system instructions.  */
  { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
  { 12, 4 },	/* cond: condition flags as a source operand.  */
  { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
  { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
  { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
  { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
  { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
  { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
  { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
  { 12, 1 },	/* S: in load/store reg offset instructions.  */
  { 21, 2 },	/* hw: in move wide constant instructions.  */
  { 22, 2 },	/* opc: in load/store reg offset instructions.  */
  { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
  { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
  { 22, 2 },	/* type: floating point type field in fp data inst.  */
  { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
  { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
  { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
  { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
  { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
  { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
  { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
  { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
  { 5, 14 },	/* imm14: in test bit and branch instructions.  */
  { 5, 16 },	/* imm16: in exception instructions.  */
  { 0, 26 },	/* imm26: in unconditional branch instructions.  */
  { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
  { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
  { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
  { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
  { 22, 1 },	/* N: in logical (immediate) instructions.  */
  { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
  { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
  { 31, 1 },	/* sf: in integer data processing instructions.  */
  { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
  { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
  { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
  { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
  { 31, 1 },	/* b5: in the test bit and branch instructions.  */
  { 19, 5 },	/* b40: in the test bit and branch instructions.  */
  { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
  { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
  { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
  { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
  { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
  { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
  { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
  { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
  { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
  { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
  { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
  { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
  { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
  { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
  { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
  { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
  { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
  { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
  { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
  { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
  { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
  { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
  { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
  { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
  { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
  { 5, 1 },	/* SVE_i1: single-bit immediate.  */
  { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
  { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
  { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
  { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
  { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
  { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
  { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
  { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
  { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
  { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
  { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
  { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
  { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
  { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
  { 16, 4 },	/* SVE_tsz: triangular size select.  */
  { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
  { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
  { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
  { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
  { 22, 1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
314 enum aarch64_operand_class
315 aarch64_get_operand_class (enum aarch64_opnd type)
317 return aarch64_operands[type].op_class;
321 aarch64_get_operand_name (enum aarch64_opnd type)
323 return aarch64_operands[type].name;
326 /* Get operand description string.
327 This is usually for the diagnosis purpose. */
329 aarch64_get_operand_desc (enum aarch64_opnd type)
331 return aarch64_operands[type].desc;
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   Each entry lists the base mnemonic plus its aliases (the extra names,
   e.g. "none"/"any"/"nlast", are the SVE condition aliases).
   NOTE(review): the table braces and the rows for values 0x6/0x7 and
   0xc-0xf are elided in this copy of the file; restore them from the
   master sources before building.  */
const aarch64_cond aarch64_conds[16] =
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
356 get_cond_from_value (aarch64_insn value)
359 return &aarch64_conds[(unsigned int) value];
363 get_inverted_cond (const aarch64_cond *cond)
365 return &aarch64_conds[cond->value ^ 0x1];
368 /* Table describing the operand extension/shifting operators; indexed by
369 enum aarch64_modifier_kind.
371 The value column provides the most common values for encoding modifiers,
372 which enables table-driven encoding/decoding for the modifiers. */
373 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
394 enum aarch64_modifier_kind
395 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
397 return desc - aarch64_operand_modifiers;
401 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
403 return aarch64_operand_modifiers[kind].value;
406 enum aarch64_modifier_kind
407 aarch64_get_operand_modifier_from_value (aarch64_insn value,
408 bfd_boolean extend_p)
410 if (extend_p == TRUE)
411 return AARCH64_MOD_UXTB + value;
413 return AARCH64_MOD_LSL - value;
417 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
419 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
423 static inline bfd_boolean
424 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
426 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
430 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.
   NOTE(review): the table braces and the terminating NULL row are elided
   in this copy of the file; restore them from the master sources.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
  { "csync", 0x11 }, /* PSB CSYNC.  */
/* PRFM <prfop> operand encoding:
   op -> op: load = 0 instruction = 1 store = 2
   l  -> cache level (1-3), stored biased by one
   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))

/* Prefetch-operation name/value table, indexed by the 5-bit prfop field.
   NOTE(review): the table braces and the reserved-slot rows between the
   pld/pli/pst groups are elided in this copy of the file; restore them
   from the master sources before building.  */
const struct aarch64_name_value_pair aarch64_prfops[32] =
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
504 /* Utilities on value constraint. */
/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], else 0.
   (Definition was incomplete in this copy: return type and braces
   restored.)  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true (non-zero) if VALUE is a multiple of ALIGN; ALIGN must be
   non-zero.  C99 truncating division makes the remainder test exact for
   negative VALUEs too.
   (Definition was incomplete in this copy: return type and braces
   restored.)  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* Return non-zero if the signed VALUE fits in a two's-complement field
   of WIDTH bits, i.e. -(2^(WIDTH-1)) <= VALUE < 2^(WIDTH-1).
   For WIDTH >= 64 the answer falls through to 0; callers only pass
   sub-register-size widths.
   (Definition was incomplete in this copy: return type, braces and the
   success/failure returns restored per the visible logic.)  */
static int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* Return non-zero if VALUE is a non-negative integer that fits in an
   unsigned field of WIDTH bits, i.e. 0 <= VALUE < 2^WIDTH.
   For WIDTH >= 64 the answer falls through to 0; callers only pass
   sub-register-size widths.
   (Definition was incomplete in this copy: return type, braces and the
   success/failure returns restored per the visible logic.)  */
static int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
547 /* Return 1 if OPERAND is SP or WSP. */
549 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
551 return ((aarch64_get_operand_class (operand->type)
552 == AARCH64_OPND_CLASS_INT_REG)
553 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
554 && operand->reg.regno == 31);
557 /* Return 1 if OPERAND is XZR or WZP. */
559 aarch64_zero_register_p (const aarch64_opnd_info *operand)
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
   qualified by the qualifier TARGET.

   NOTE(review): the return-type line, the `return TRUE;'/`break;'
   statements inside each case, the default case, the final return and
   the braces are elided in this copy of the file; restore them from the
   master sources before building.  */

operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
  switch (operand->qualifier)
    /* A W register is also WSP when it is the stack pointer.  */
    case AARCH64_OPND_QLF_W:
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
    /* An X register is also SP when it is the stack pointer.  */
    case AARCH64_OPND_QLF_X:
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
    /* WSP/SP can also be viewed as W/X when the operand type allows SP.  */
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
   for operand KNOWN_IDX, return the expected qualifier for operand IDX.

   Return NIL if more than one expected qualifiers are found.

   NOTE(review): the KNOWN_IDX/IDX parameter lines, the local
   declarations of I and SAVED_I, the braces and the loop bookkeeping
   that records the single matching sequence are elided in this copy of
   the file; restore them from the master sources before building.  */

aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
				const aarch64_opnd_qualifier_t known_qlf,
  /* When the known qualifier is NIL, we have to assume that there is only
     one qualifier sequence in the *QSEQ_LIST and return the corresponding
     qualifier directly.  One scenario is that for instruction
	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
     which has only one possible valid qualifier sequence,
     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.

     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or the qualifer sequence is
     not in use (when all qualifiers in the sequence are NILs), we have to
     handle this special case here.  */
  if (known_qlf == AARCH64_OPND_NIL)
      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
      return qseq_list[0][idx];

  /* Scan every candidate sequence for one whose KNOWN_IDX entry matches
     KNOWN_QLF, remembering its index in SAVED_I.  */
  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    if (qseq_list[i][known_idx] == known_qlf)
	/* More than one sequences are found to have KNOWN_QLF at
	   KNOWN_IDX; the expected qualifier for IDX is ambiguous.  */
	return AARCH64_OPND_NIL;

  /* Exactly one matching sequence: return its IDX-th qualifier.  */
  return qseq_list[saved_i][idx];
651 enum operand_qualifier_kind
659 /* Operand qualifier description. */
660 struct operand_qualifier_data
662 /* The usage of the three data fields depends on the qualifier kind. */
669 enum operand_qualifier_kind kind;
/* Indexed by the operand qualifier enumerators.  The meaning of the three
   numeric fields depends on the entry's kind (see struct
   operand_qualifier_data above).
   NOTE(review): the table braces and possibly some trailing rows are
   elided in this copy of the file; restore them from the master
   sources before building.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* SVE merge/zero predication suffixes.  */
  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  {0, 0, 0, "retrieving", 0},
727 static inline bfd_boolean
728 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
730 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
734 static inline bfd_boolean
735 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
737 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
742 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
744 return aarch64_opnd_qualifiers[qualifier].desc;
747 /* Given an operand qualifier, return the expected data element size
748 of a qualified operand. */
750 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
752 assert (operand_variant_qualifier_p (qualifier) == TRUE);
753 return aarch64_opnd_qualifiers[qualifier].data0;
757 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
759 assert (operand_variant_qualifier_p (qualifier) == TRUE);
760 return aarch64_opnd_qualifiers[qualifier].data1;
764 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
766 assert (operand_variant_qualifier_p (qualifier) == TRUE);
767 return aarch64_opnd_qualifiers[qualifier].data2;
771 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
773 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
774 return aarch64_opnd_qualifiers[qualifier].data0;
778 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
780 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
781 return aarch64_opnd_qualifiers[qualifier].data1;
786 aarch64_verbose (const char *str, ...)
/* Print the up-to-AARCH64_MAX_OPND_NUM qualifier names in QUALIFIER,
   comma-separated, to stdout (debug builds only).
   NOTE(review): the return-type line, the declaration of I, the braces
   and any leading/trailing printf are elided in this copy of the file;
   restore them from the master sources before building.  */
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
807 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
808 const aarch64_opnd_qualifier_t *qualifier)
811 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
813 aarch64_verbose ("dump_match_qualifiers:");
814 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
815 curr[i] = opnd[i].qualifier;
816 dump_qualifier_sequence (curr);
817 aarch64_verbose ("against");
818 dump_qualifier_sequence (qualifier);
820 #endif /* DEBUG_AARCH64 */
822 /* TODO improve this, we can have an extra field at the runtime to
823 store the number of operands rather than calculating it every time. */
826 aarch64_num_of_operands (const aarch64_opcode *opcode)
829 const enum aarch64_opnd *opnds = opcode->operands;
830 while (opnds[i++] != AARCH64_OPND_NIL)
833 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.

   NOTE(review): the return-type line, the declarations of I/J/NUM_OPNDS,
   the `found' bookkeeping, several braces, returns and debug-guard
   #ifdefs are elided in this copy of the file; restore them from the
   master sources before building.  */

aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
      DEBUG_TRACE ("SUCCEED: no operand");

  /* Clamp STOP_AT so the inner loop covers at most all operands.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
      qualifiers = *qualifiers_list;

      /* Start as positive.  */

      DEBUG_TRACE ("%d", i);
      dump_match_qualifiers (inst->operands, qualifiers);

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	  else if (*qualifiers != inst->operands[j].qualifier)
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
	    continue;	/* Equal qualifiers are certainly matched.  */

  /* Qualifiers established.  */

  /* Fill the result in *RET.  */
  qualifiers = *qualifiers_list;

  DEBUG_TRACE ("complete qualifiers using list %d", i);
  dump_qualifier_sequence (qualifiers);

  /* Copy the winning sequence, NIL-padding the remainder of *RET.  */
  for (j = 0; j <= stop_at; ++j, ++qualifiers)
    ret[j] = *qualifiers;
  for (; j < AARCH64_MAX_OPND_NUM; ++j)
    ret[j] = AARCH64_OPND_QLF_NIL;

  DEBUG_TRACE ("SUCCESS");

  DEBUG_TRACE ("FAIL");
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.

   NOTE(review): the return-type line, declarations of I/NOPS, several
   braces, the failure/success returns and the loop `break' are elided in
   this copy of the file; restore them from the master sources before
   building.  */

match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Delegate the search over all candidate sequences.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
      DEBUG_TRACE ("matching FAIL");

  if (inst->opcode->flags & F_STRICT)
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];

  DEBUG_TRACE ("matching SUCCESS");
/* Return TRUE if VALUE is a wide constant that can be moved into a general
   purpose register with a single MOVZ/MOVN instruction.

   IS32 indicates whether VALUE is a 32-bit immediate or not.
   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   amount will be returned in *SHIFT_AMOUNT.

   NOTE(review): the return-type line, the declaration of AMOUNT, the
   per-case amount assignments, the braces and the TRUE/FALSE returns are
   elided in this copy of the file; restore them from the master sources
   before building.  */

aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
      value &= (int64_t) 0xffffffff;

  /* first, try movz then movn */
  if ((value & ((int64_t) 0xffff << 0)) == value)
  else if ((value & ((int64_t) 0xffff << 16)) == value)
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)

      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);
1060 /* Build the accepted values for immediate logical SIMD instructions.
1062 The standard encodings of the immediate value are:
1063 N imms immr SIMD size R S
1064 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1065 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1066 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1067 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1068 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1069 0 11110s 00000r 2 UInt(r) UInt(s)
1070 where all-ones value of S is reserved.
1072 Let's call E the SIMD size.
1074 The immediate value is: S+1 bits '1' rotated to the right by R.
1076 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1077 (remember S != E - 1). */
/* Number of distinct valid logical-immediate encodings:
   64*63 + 32*31 + ... + 2*1 = 5334 (S = E-1 is reserved).  */
#define TOTAL_IMM_NB  5334

/* One replicated immediate value and its 13-bit bitfield encoding.
   NOTE(review): the `typedef struct {' head and the `uint64_t imm;'
   member are elided in this copy of the file; restore them from the
   master sources before building.  */
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical immediates, sorted by value for bsearch;
   populated lazily by build_immediate_table.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1090 simd_imm_encoding_cmp(const void *i1, const void *i2)
1092 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1093 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1095 if (imm1->imm < imm2->imm)
1097 if (imm1->imm > imm2->imm)
/* Immediate bitfield standard encoding:
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s

   Pack IS64 (imm13<12>), rotation R (imm13<11:6>) and size/length field S
   (imm13<5:0>) into the 13-bit immediate bitfield encoding.
   (Definition was incomplete in this copy: return type and braces
   restored.)  */
static uint64_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
/* Fill simd_immediates[] with every representable logical-immediate
   bitmask value together with its 13-bit encoding, then sort the table by
   value so aarch64_logical_immediate_p can bsearch it.

   NOTE(review): the return-type line, the declarations of IMM/MASK/
   IS64/NB_IMMS, the computation of E from LOG_E, the `switch (log_e)'
   header with its braces, the NB_IMMS increment and several other braces
   are elided in this copy of the file; restore them from the master
   sources before building.  */
build_immediate_table (void)
  uint32_t log_e, e, s, r, s_mask;

  /* Iterate over element sizes E = 2^LOG_E, LOG_E in [1,6].  */
  for (log_e = 1; log_e <= 6; log_e++)
      /* Get element size.  */
      mask = 0xffffffffffffffffull;
      mask = (1ull << e) - 1;
      /* log_e  s_mask
	 1     ((1 << 4) - 1) << 2 = 111100
	 2     ((1 << 3) - 1) << 3 = 111000
	 3     ((1 << 2) - 1) << 4 = 110000
	 4     ((1 << 1) - 1) << 5 = 100000
	 5     ((1 << 0) - 1) << 6 = 000000 */
      s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    case 1: imm = (imm << 2) | imm;
	    case 2: imm = (imm << 4) | imm;
	    case 3: imm = (imm << 8) | imm;
	    case 4: imm = (imm << 16) | imm;
	    case 5: imm = (imm << 32) | imm;
	    /* Record the replicated value and its encoding.  */
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by value so lookups can use bsearch.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.

   NOTE(review): the return-type line, the declarations of I/UPPER, the
   braces, the initialized=TRUE assignment and the FALSE/TRUE returns are
   elided in this copy of the file; restore them from the master sources
   before building.  */

aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* Table is built once, on first use.  */
  static bfd_boolean initialized = FALSE;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,

  if (initialized == FALSE)
      build_immediate_table ();

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)

  /* Replicate to a full 64-bit value.  */
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* Binary-search the sorted immediate table for VALUE.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
	    sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
      DEBUG_TRACE ("exit with FALSE");

  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.
   (Definition was incomplete in this copy: return type, braces, the
   declarations/initialisation of I/RET/BYTE and the returns restored per
   the contract stated above.)  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	/* Byte I is all-ones: set the corresponding result bit.  */
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come from
	   an expanded 8-bit immediate.  */
	return -1;
    }
  return ret;
}
1256 /* Utility inline functions for operand_general_constraint_met_p. */
/* Record an operand error of kind KIND for operand IDX in
   *MISMATCH_DETAIL, with optional static message ERROR.  A no-op when
   MISMATCH_DETAIL is NULL (e.g. during disassembly, when no error
   reporting is wanted).  */
1259 set_error (aarch64_operand_error *mismatch_detail,
1260 enum aarch64_operand_error_kind kind, int idx,
1263   if (mismatch_detail == NULL)
1265   mismatch_detail->kind = kind;
1266   mismatch_detail->index = idx;
1267   mismatch_detail->error = error;
/* Record a syntax error for operand IDX; thin wrapper around set_error
   with kind AARCH64_OPDE_SYNTAX_ERROR.  */
1271 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1274   if (mismatch_detail == NULL)
1276   set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
/* Record an out-of-range error for operand IDX; the violated range
   [LOWER_BOUND, UPPER_BOUND] is stored in the detail's data[0]/data[1]
   so the caller can format a message later without building strings
   here in libopcodes.  */
1280 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1281 int idx, int lower_bound, int upper_bound,
1284   if (mismatch_detail == NULL)
1286   set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1287   mismatch_detail->data[0] = lower_bound;
1288   mismatch_detail->data[1] = upper_bound;
/* Out-of-range error for an immediate value in operand IDX.  */
1292 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1293 int idx, int lower_bound, int upper_bound)
1295   if (mismatch_detail == NULL)
1297   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1298 _("immediate value"));
/* Out-of-range error for an immediate address offset in operand IDX.  */
1302 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1303 int idx, int lower_bound, int upper_bound)
1305   if (mismatch_detail == NULL)
1307   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1308 _("immediate offset"));
/* Out-of-range error for a register number in operand IDX.  */
1312 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1313 int idx, int lower_bound, int upper_bound)
1315   if (mismatch_detail == NULL)
1317   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1318 _("register number"));
/* Out-of-range error for a vector register element index in operand IDX.  */
1322 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1323 int idx, int lower_bound, int upper_bound)
1325   if (mismatch_detail == NULL)
1327   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1328 _("register element index"));
/* Out-of-range error for a shift amount in operand IDX.  */
1332 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1333 int idx, int lower_bound, int upper_bound)
1335   if (mismatch_detail == NULL)
1337   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1341 /* Report that the MUL modifier in operand IDX should be in the range
1342 [LOWER_BOUND, UPPER_BOUND]. */
/* Out-of-range error for a MUL multiplier in operand IDX (see the
   comment immediately above this function).  */
1344 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1345 int idx, int lower_bound, int upper_bound)
1347   if (mismatch_detail == NULL)
1349   set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
/* Record an alignment error for operand IDX; the required ALIGNMENT is
   stored in data[0] for later message formatting.  */
1354 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1357   if (mismatch_detail == NULL)
1359   set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1360   mismatch_detail->data[0] = alignment;
/* Record a register-list error for operand IDX; the EXPECTED_NUM of
   registers is stored in data[0] for later message formatting.  */
1364 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1367   if (mismatch_detail == NULL)
1369   set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1370   mismatch_detail->data[0] = expected_num;
/* Record a miscellaneous error for operand IDX with static message ERROR.  */
1374 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1377   if (mismatch_detail == NULL)
1379   set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1382 /* General constraint checking based on operand code.
1384 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1385 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1387 This function has to be called after the qualifiers for all operands
1390 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1391 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating
1392 error messages during disassembly, where they are not
1393 wanted. We avoid the dynamic construction of strings of error messages
1394 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1395 use a combination of error code, static string and some integer data to
1396 represent an error. */
1399 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1400 enum aarch64_opnd type,
1401 const aarch64_opcode *opcode,
1402 aarch64_operand_error *mismatch_detail)
1404   unsigned num, modifiers, shift;
1406   int64_t imm, min_value, max_value;
1407   uint64_t uvalue, mask;
1408   const aarch64_opnd_info *opnd = opnds + idx;
1409   aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1411   assert (opcode->operands[idx] == opnd->type && opnd->type == type);
  /* Dispatch on the operand class first; within each class, a nested
     switch on the specific operand code applies the detailed checks.  */
1413   switch (aarch64_operands[type].op_class)
1415     case AARCH64_OPND_CLASS_INT_REG:
1416       /* Check pair reg constraints for cas* instructions.  */
1417       if (type == AARCH64_OPND_PAIRREG)
1419           assert (idx == 1 || idx == 3);
1420           if (opnds[idx - 1].reg.regno % 2 != 0)
1422               set_syntax_error (mismatch_detail, idx - 1,
1423 _("reg pair must start from even reg"));
1426           if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1428               set_syntax_error (mismatch_detail, idx,
1429 _("reg pair must be contiguous"));
1435       /* <Xt> may be optional in some IC and TLBI instructions.  */
1436       if (type == AARCH64_OPND_Rt_SYS)
1438           assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1439 == AARCH64_OPND_CLASS_SYSTEM));
1440           if (opnds[1].present
1441 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1443               set_other_error (mismatch_detail, idx, _("extraneous register"));
1446           if (!opnds[1].present
1447 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1449               set_other_error (mismatch_detail, idx, _("missing register"));
1455         case AARCH64_OPND_QLF_WSP:
1456         case AARCH64_OPND_QLF_SP:
1457           if (!aarch64_stack_pointer_p (opnd))
1459               set_other_error (mismatch_detail, idx,
1460 _("stack pointer register expected"));
1469     case AARCH64_OPND_CLASS_SVE_REG:
1472         case AARCH64_OPND_SVE_Zn_INDEX:
1473           size = aarch64_get_qualifier_esize (opnd->qualifier);
1474           if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1476               set_elem_idx_out_of_range_error (mismatch_detail, idx,
1482         case AARCH64_OPND_SVE_ZnxN:
1483         case AARCH64_OPND_SVE_ZtxN:
1484           if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1486               set_other_error (mismatch_detail, idx,
1487 _("invalid register list"));
1497     case AARCH64_OPND_CLASS_PRED_REG:
  /* A 3-bit predicate field can only encode P0-P7.  */
1498       if (opnd->reg.regno >= 8
1499 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1501           set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1506     case AARCH64_OPND_CLASS_COND:
1507       if (type == AARCH64_OPND_COND1
1508 && (opnds[idx].cond->value & 0xe) == 0xe)
1510           /* Don't allow AL or NV, i.e. condition values 0b111x.  */
1511           set_syntax_error (mismatch_detail, idx, NULL);
1515     case AARCH64_OPND_CLASS_ADDRESS:
1516       /* Check writeback.  */
1517       switch (opcode->iclass)
1521         case ldstnapair_offs:
1524           if (opnd->addr.writeback == 1)
1526               set_syntax_error (mismatch_detail, idx,
1527 _("unexpected address writeback"));
1532         case ldstpair_indexed:
1535           if (opnd->addr.writeback == 0)
1537               set_syntax_error (mismatch_detail, idx,
1538 _("address writeback expected"));
1543           assert (opnd->addr.writeback == 0);
1548         case AARCH64_OPND_ADDR_SIMM7:
1549           /* Scaled signed 7 bits immediate offset.  */
1550           /* Get the size of the data element that is accessed, which may be
1551              different from that of the source register size,
1552              e.g. in strb/ldrb.  */
1553           size = aarch64_get_qualifier_esize (opnd->qualifier);
1554           if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1556               set_offset_out_of_range_error (mismatch_detail, idx,
1557 -64 * size, 63 * size);
1560           if (!value_aligned_p (opnd->addr.offset.imm, size))
1562               set_unaligned_error (mismatch_detail, idx, size);
1566         case AARCH64_OPND_ADDR_SIMM9:
1567           /* Unscaled signed 9 bits immediate offset.  */
1568           if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1570               set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1575         case AARCH64_OPND_ADDR_SIMM9_2:
1576           /* Unscaled signed 9 bits immediate offset, which has to be negative
1578           size = aarch64_get_qualifier_esize (qualifier);
1579           if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1580 && !value_aligned_p (opnd->addr.offset.imm, size))
1581 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1583           set_other_error (mismatch_detail, idx,
1584 _("negative or unaligned offset expected"));
1587         case AARCH64_OPND_SIMD_ADDR_POST:
1588           /* AdvSIMD load/store multiple structures, post-index.  */
1590           if (opnd->addr.offset.is_reg)
  /* Post-index by register: any GPR X0-X30 is acceptable.  */
1592               if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1596                   set_other_error (mismatch_detail, idx,
1597 _("invalid register offset"));
  /* Post-index by immediate: the amount must equal the total number of
     bytes transferred by the previous (register-list) operand.  */
1603               const aarch64_opnd_info *prev = &opnds[idx-1];
1604               unsigned num_bytes; /* total number of bytes transferred.  */
1605               /* The opcode dependent area stores the number of elements in
1606                  each structure to be loaded/stored.  */
1607               int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1608               if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1609                 /* Special handling of loading single structure to all lane.  */
1610                 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1611 * aarch64_get_qualifier_esize (prev->qualifier);
1613                 num_bytes = prev->reglist.num_regs
1614 * aarch64_get_qualifier_esize (prev->qualifier)
1615 * aarch64_get_qualifier_nelem (prev->qualifier);
1616               if ((int) num_bytes != opnd->addr.offset.imm)
1618                   set_other_error (mismatch_detail, idx,
1619 _("invalid post-increment amount"));
1625         case AARCH64_OPND_ADDR_REGOFF:
1626           /* Get the size of the data element that is accessed, which may be
1627              different from that of the source register size,
1628              e.g. in strb/ldrb.  */
1629           size = aarch64_get_qualifier_esize (opnd->qualifier);
1630           /* It is either no shift or shift by the binary logarithm of SIZE.  */
1631           if (opnd->shifter.amount != 0
1632 && opnd->shifter.amount != (int)get_logsz (size))
1634               set_other_error (mismatch_detail, idx,
1635 _("invalid shift amount"));
1638           /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1640           switch (opnd->shifter.kind)
1642             case AARCH64_MOD_UXTW:
1643             case AARCH64_MOD_LSL:
1644             case AARCH64_MOD_SXTW:
1645             case AARCH64_MOD_SXTX: break;
1647               set_other_error (mismatch_detail, idx,
1648 _("invalid extend/shift operator"));
1653         case AARCH64_OPND_ADDR_UIMM12:
1654           imm = opnd->addr.offset.imm;
1655           /* Get the size of the data element that is accessed, which may be
1656              different from that of the source register size,
1657              e.g. in strb/ldrb.  */
1658           size = aarch64_get_qualifier_esize (qualifier);
1659           if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1661               set_offset_out_of_range_error (mismatch_detail, idx,
1665           if (!value_aligned_p (opnd->addr.offset.imm, size))
1667               set_unaligned_error (mismatch_detail, idx, size);
1672         case AARCH64_OPND_ADDR_PCREL14:
1673         case AARCH64_OPND_ADDR_PCREL19:
1674         case AARCH64_OPND_ADDR_PCREL21:
1675         case AARCH64_OPND_ADDR_PCREL26:
1676           imm = opnd->imm.value;
1677           if (operand_need_shift_by_two (get_operand_from_code (type)))
1679               /* The offset value in a PC-relative branch instruction is always
1680                  4-byte aligned and is encoded without the lowest 2 bits.  */
1681               if (!value_aligned_p (imm, 4))
1683                   set_unaligned_error (mismatch_detail, idx, 4);
1686               /* Right shift by 2 so that we can carry out the following check
1690           size = get_operand_fields_width (get_operand_from_code (type));
1691           if (!value_fit_signed_field_p (imm, size))
1693               set_other_error (mismatch_detail, idx,
1694 _("immediate out of range"));
1699         case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1700         case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1701         case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1702         case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1706           assert (!opnd->addr.offset.is_reg);
1707           assert (opnd->addr.preind);
1708           num = 1 + get_operand_specific_data (&aarch64_operands[type]);
  /* A non-zero offset must be written with an explicit MUL VL modifier,
     and MUL VL is the only modifier accepted here.  */
1711           if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1712 || (opnd->shifter.operator_present
1713 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1715               set_other_error (mismatch_detail, idx,
1716 _("invalid addressing mode"));
1719           if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1721               set_offset_out_of_range_error (mismatch_detail, idx,
1722 min_value, max_value);
1725           if (!value_aligned_p (opnd->addr.offset.imm, num))
1727               set_unaligned_error (mismatch_detail, idx, num);
1732         case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1735           goto sve_imm_offset_vl;
1737         case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1740           goto sve_imm_offset_vl;
1742         case AARCH64_OPND_SVE_ADDR_RI_U6:
1743         case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1744         case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1745         case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1749           assert (!opnd->addr.offset.is_reg);
1750           assert (opnd->addr.preind);
1751           num = 1 << get_operand_specific_data (&aarch64_operands[type]);
  /* No shift/extend modifier of any kind is accepted for these forms.  */
1754           if (opnd->shifter.operator_present
1755 || opnd->shifter.amount_present)
1757               set_other_error (mismatch_detail, idx,
1758 _("invalid addressing mode"));
1761           if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1763               set_offset_out_of_range_error (mismatch_detail, idx,
1764 min_value, max_value);
1767           if (!value_aligned_p (opnd->addr.offset.imm, num))
1769               set_unaligned_error (mismatch_detail, idx, num);
1774         case AARCH64_OPND_SVE_ADDR_RR:
1775         case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1776         case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1777         case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1778         case AARCH64_OPND_SVE_ADDR_RX:
1779         case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1780         case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1781         case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1782         case AARCH64_OPND_SVE_ADDR_RZ:
1783         case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1784         case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1785         case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1786           modifiers = 1 << AARCH64_MOD_LSL;
1788           assert (opnd->addr.offset.is_reg);
1789           assert (opnd->addr.preind);
1790           if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1791 && opnd->addr.offset.regno == 31)
1793               set_other_error (mismatch_detail, idx,
1794 _("index register xzr is not allowed"));
  /* The modifier must be one of those allowed in MODIFIERS and the
     shift amount must match the operand-specific data exactly.  */
1797           if (((1 << opnd->shifter.kind) & modifiers) == 0
1798 || (opnd->shifter.amount
1799 != get_operand_specific_data (&aarch64_operands[type])))
1801               set_other_error (mismatch_detail, idx,
1802 _("invalid addressing mode"));
1807         case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1808         case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1809         case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1810         case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1811         case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1812         case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1813         case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1814         case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1815           modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1816           goto sve_rr_operand;
1818         case AARCH64_OPND_SVE_ADDR_ZI_U5:
1819         case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1820         case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1821         case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1824           goto sve_imm_offset;
1826         case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1827           modifiers = 1 << AARCH64_MOD_LSL;
1829           assert (opnd->addr.offset.is_reg);
1830           assert (opnd->addr.preind);
1831           if (((1 << opnd->shifter.kind) & modifiers) == 0
1832 || opnd->shifter.amount < 0
1833 || opnd->shifter.amount > 3)
1835               set_other_error (mismatch_detail, idx,
1836 _("invalid addressing mode"));
1841         case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1842           modifiers = (1 << AARCH64_MOD_SXTW);
1843           goto sve_zz_operand;
1845         case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1846           modifiers = 1 << AARCH64_MOD_UXTW;
1847           goto sve_zz_operand;
1854     case AARCH64_OPND_CLASS_SIMD_REGLIST:
1855       if (type == AARCH64_OPND_LEt)
1857           /* Get the upper bound for the element index.  */
1858           num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1859           if (!value_in_range_p (opnd->reglist.index, 0, num))
1861               set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1865       /* The opcode dependent area stores the number of elements in
1866          each structure to be loaded/stored.  */
1867       num = get_opcode_dependent_value (opcode);
1870         case AARCH64_OPND_LVt:
1871           assert (num >= 1 && num <= 4);
1872           /* Unless LD1/ST1, the number of registers should be equal to that
1873              of the structure elements.  */
1874           if (num != 1 && opnd->reglist.num_regs != num)
1876               set_reg_list_error (mismatch_detail, idx, num);
1880         case AARCH64_OPND_LVt_AL:
1881         case AARCH64_OPND_LEt:
1882           assert (num >= 1 && num <= 4);
1883           /* The number of registers should be equal to that of the structure
1885           if (opnd->reglist.num_regs != num)
1887               set_reg_list_error (mismatch_detail, idx, num);
1896     case AARCH64_OPND_CLASS_IMMEDIATE:
1897       /* Constraint check on immediate operand.  */
1898       imm = opnd->imm.value;
1899       /* E.g. imm_0_31 constrains value to be 0..31.  */
1900       if (qualifier_value_in_range_constraint_p (qualifier)
1901 && !value_in_range_p (imm, get_lower_bound (qualifier),
1902 get_upper_bound (qualifier)))
1904           set_imm_out_of_range_error (mismatch_detail, idx,
1905 get_lower_bound (qualifier),
1906 get_upper_bound (qualifier));
1912         case AARCH64_OPND_AIMM:
1913           if (opnd->shifter.kind != AARCH64_MOD_LSL)
1915               set_other_error (mismatch_detail, idx,
1916 _("invalid shift operator"));
1919           if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1921               set_other_error (mismatch_detail, idx,
1922 _("shift amount must be 0 or 12"));
1925           if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1927               set_other_error (mismatch_detail, idx,
1928 _("immediate out of range"));
1933         case AARCH64_OPND_HALF:
1934           assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1935           if (opnd->shifter.kind != AARCH64_MOD_LSL)
1937               set_other_error (mismatch_detail, idx,
1938 _("invalid shift operator"));
1941           size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1942           if (!value_aligned_p (opnd->shifter.amount, 16))
1944               set_other_error (mismatch_detail, idx,
1945 _("shift amount must be a multiple of 16"));
1948           if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1950               set_sft_amount_out_of_range_error (mismatch_detail, idx,
1954           if (opnd->imm.value < 0)
1956               set_other_error (mismatch_detail, idx,
1957 _("negative immediate value not allowed"));
1960           if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1962               set_other_error (mismatch_detail, idx,
1963 _("immediate out of range"));
1968         case AARCH64_OPND_IMM_MOV:
1970             int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1971             imm = opnd->imm.value;
1975               case OP_MOV_IMM_WIDEN:
1978               case OP_MOV_IMM_WIDE:
1979                 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1981                     set_other_error (mismatch_detail, idx,
1982 _("immediate out of range"));
1986               case OP_MOV_IMM_LOG:
1987                 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1989                     set_other_error (mismatch_detail, idx,
1990 _("immediate out of range"));
2001         case AARCH64_OPND_NZCV:
2002         case AARCH64_OPND_CCMP_IMM:
2003         case AARCH64_OPND_EXCEPTION:
2004         case AARCH64_OPND_UIMM4:
2005         case AARCH64_OPND_UIMM7:
2006         case AARCH64_OPND_UIMM3_OP1:
2007         case AARCH64_OPND_UIMM3_OP2:
2008         case AARCH64_OPND_SVE_UIMM3:
2009         case AARCH64_OPND_SVE_UIMM7:
2010         case AARCH64_OPND_SVE_UIMM8:
2011         case AARCH64_OPND_SVE_UIMM8_53:
  /* Generic unsigned immediates: the bound is the encoding field width.  */
2012           size = get_operand_fields_width (get_operand_from_code (type));
2014           if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2016               set_imm_out_of_range_error (mismatch_detail, idx, 0,
2022         case AARCH64_OPND_SIMM5:
2023         case AARCH64_OPND_SVE_SIMM5:
2024         case AARCH64_OPND_SVE_SIMM5B:
2025         case AARCH64_OPND_SVE_SIMM6:
2026         case AARCH64_OPND_SVE_SIMM8:
  /* Generic signed immediates: two's-complement range of the field.  */
2027           size = get_operand_fields_width (get_operand_from_code (type));
2029           if (!value_fit_signed_field_p (opnd->imm.value, size))
2031               set_imm_out_of_range_error (mismatch_detail, idx,
2033 (1 << (size - 1)) - 1);
2038         case AARCH64_OPND_WIDTH:
2039           assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2040 && opnds[0].type == AARCH64_OPND_Rd);
2041           size = get_upper_bound (qualifier);
2042           if (opnd->imm.value + opnds[idx-1].imm.value > size)
2043             /* lsb+width <= reg.size  */
2045               set_imm_out_of_range_error (mismatch_detail, idx, 1,
2046 size - opnds[idx-1].imm.value);
2051         case AARCH64_OPND_LIMM:
2052         case AARCH64_OPND_SVE_LIMM:
2054             int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2055             uint64_t uimm = opnd->imm.value;
2056             if (opcode->op == OP_BIC)
2058             if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2060                 set_other_error (mismatch_detail, idx,
2061 _("immediate out of range"));
2067         case AARCH64_OPND_IMM0:
2068         case AARCH64_OPND_FPIMM0:
2069           if (opnd->imm.value != 0)
2071               set_other_error (mismatch_detail, idx,
2072 _("immediate zero expected"));
2077         case AARCH64_OPND_SHLL_IMM:
2079           size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2080           if (opnd->imm.value != size)
2082               set_other_error (mismatch_detail, idx,
2083 _("invalid shift amount"));
2088         case AARCH64_OPND_IMM_VLSL:
2089           size = aarch64_get_qualifier_esize (qualifier);
2090           if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2092               set_imm_out_of_range_error (mismatch_detail, idx, 0,
2098         case AARCH64_OPND_IMM_VLSR:
2099           size = aarch64_get_qualifier_esize (qualifier);
2100           if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2102               set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2107         case AARCH64_OPND_SIMD_IMM:
2108         case AARCH64_OPND_SIMD_IMM_SFT:
2109           /* Qualifier check.  */
2112             case AARCH64_OPND_QLF_LSL:
2113               if (opnd->shifter.kind != AARCH64_MOD_LSL)
2115                   set_other_error (mismatch_detail, idx,
2116 _("invalid shift operator"));
2120             case AARCH64_OPND_QLF_MSL:
2121               if (opnd->shifter.kind != AARCH64_MOD_MSL)
2123                   set_other_error (mismatch_detail, idx,
2124 _("invalid shift operator"));
2128             case AARCH64_OPND_QLF_NIL:
2129               if (opnd->shifter.kind != AARCH64_MOD_NONE)
2131                   set_other_error (mismatch_detail, idx,
2132 _("shift is not permitted"));
2140           /* Is the immediate valid?  */
2142           if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2144               /* uimm8 or simm8  */
2145               if (!value_in_range_p (opnd->imm.value, -128, 255))
2147                   set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2151           else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2154                  'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2155                  ffffffffgggggggghhhhhhhh'.  */
2156               set_other_error (mismatch_detail, idx,
2157 _("invalid value for immediate"));
2160           /* Is the shift amount valid?  */
2161           switch (opnd->shifter.kind)
2163             case AARCH64_MOD_LSL:
2164               size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2165               if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2167                   set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2171               if (!value_aligned_p (opnd->shifter.amount, 8))
2173                   set_unaligned_error (mismatch_detail, idx, 8);
2177             case AARCH64_MOD_MSL:
2178               /* Only 8 and 16 are valid shift amount.  */
2179               if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
  /* NOTE(review): the check above accepts 8 or 16, but the message below
     says "0 or 16"; the message text looks wrong — confirm whether it
     should read "8 or 16".  */
2181                   set_other_error (mismatch_detail, idx,
2182 _("shift amount must be 0 or 16"));
2187               if (opnd->shifter.kind != AARCH64_MOD_NONE)
2189                   set_other_error (mismatch_detail, idx,
2190 _("invalid shift operator"));
2197         case AARCH64_OPND_FPIMM:
2198         case AARCH64_OPND_SIMD_FPIMM:
2199         case AARCH64_OPND_SVE_FPIMM8:
2200           if (opnd->imm.is_fp == 0)
2202               set_other_error (mismatch_detail, idx,
2203 _("floating-point immediate expected"));
2206           /* The value is expected to be an 8-bit floating-point constant with
2207              sign, 3-bit exponent and normalized 4 bits of precision, encoded
2208              in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2210           if (!value_in_range_p (opnd->imm.value, 0, 255))
2212               set_other_error (mismatch_detail, idx,
2213 _("immediate out of range"));
2216           if (opnd->shifter.kind != AARCH64_MOD_NONE)
2218               set_other_error (mismatch_detail, idx,
2219 _("invalid shift operator"));
2224         case AARCH64_OPND_SVE_AIMM:
2227           assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2228           size = aarch64_get_qualifier_esize (opnds[0].qualifier);
  /* Double shift by (size * 4) avoids UB when size == 8; MASK covers the
     element's value bits.  */
2229           mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2230           uvalue = opnd->imm.value;
2231           shift = opnd->shifter.amount;
2236               set_other_error (mismatch_detail, idx,
2237 _("no shift amount allowed for"
2238 " 8-bit constants"));
2244               if (shift != 0 && shift != 8)
2246                   set_other_error (mismatch_detail, idx,
2247 _("shift amount must be 0 or 8"));
2250               if (shift == 0 && (uvalue & 0xff) == 0)
2253                   uvalue = (int64_t) uvalue / 256;
2257           if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2259               set_other_error (mismatch_detail, idx,
2260 _("immediate too big for element size"));
2263           uvalue = (uvalue - min_value) & mask;
2266               set_other_error (mismatch_detail, idx,
2267 _("invalid arithmetic immediate"));
2272         case AARCH64_OPND_SVE_ASIMM:
2276         case AARCH64_OPND_SVE_I1_HALF_ONE:
  /* 0x3f000000 / 0x3f800000 are the IEEE single-precision bit patterns
     of 0.5 and 1.0 respectively.  */
2277           assert (opnd->imm.is_fp);
2278           if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2280               set_other_error (mismatch_detail, idx,
2281 _("floating-point value must be 0.5 or 1.0"));
2286         case AARCH64_OPND_SVE_I1_HALF_TWO:
2287           assert (opnd->imm.is_fp);
2288           if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2290               set_other_error (mismatch_detail, idx,
2291 _("floating-point value must be 0.5 or 2.0"));
2296         case AARCH64_OPND_SVE_I1_ZERO_ONE:
2297           assert (opnd->imm.is_fp);
2298           if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2300               set_other_error (mismatch_detail, idx,
2301 _("floating-point value must be 0.0 or 1.0"));
2306         case AARCH64_OPND_SVE_INV_LIMM:
2308             int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
  /* The encoded value is the bitwise inverse of the given immediate.  */
2309             uint64_t uimm = ~opnd->imm.value;
2310             if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2312                 set_other_error (mismatch_detail, idx,
2313 _("immediate out of range"));
2319         case AARCH64_OPND_SVE_LIMM_MOV:
2321             int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2322             uint64_t uimm = opnd->imm.value;
2323             if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2325                 set_other_error (mismatch_detail, idx,
2326 _("immediate out of range"));
2329             if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2331                 set_other_error (mismatch_detail, idx,
2332 _("invalid replicated MOV immediate"));
2338         case AARCH64_OPND_SVE_PATTERN_SCALED:
2339           assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2340           if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2342               set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2347         case AARCH64_OPND_SVE_SHLIMM_PRED:
2348         case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2349           size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2350           if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2352               set_imm_out_of_range_error (mismatch_detail, idx,
2358         case AARCH64_OPND_SVE_SHRIMM_PRED:
2359         case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2360           size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2361           if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2363               set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2373     case AARCH64_OPND_CLASS_CP_REG:
2374       /* Cn or Cm: 4-bit opcode field named for historical reasons.
2375          valid range: C0 - C15.  */
2376       if (opnd->reg.regno > 15)
2378           set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2383     case AARCH64_OPND_CLASS_SYSTEM:
2386         case AARCH64_OPND_PSTATEFIELD:
2387           assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2390              The immediate must be #0 or #1.  */
2391           if ((opnd->pstatefield == 0x03        /* UAO.  */
2392 || opnd->pstatefield == 0x04) /* PAN.  */
2393 && opnds[1].imm.value > 1)
2395               set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2398           /* MSR SPSel, #uimm4
2399              Uses uimm4 as a control value to select the stack pointer: if
2400              bit 0 is set it selects the current exception level's stack
2401              pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2402              Bits 1 to 3 of uimm4 are reserved and should be zero.  */
2403           if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2405               set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2414     case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2415       /* Get the upper bound for the element index.  */
2416       num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2417       /* Index out-of-range.  */
2418       if (!value_in_range_p (opnd->reglane.index, 0, num))
2420           set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2423       /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2424          <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2425          number is encoded in "size:M:Rm":
2431       if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2432 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2434           set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2439     case AARCH64_OPND_CLASS_MODIFIED_REG:
2440       assert (idx == 1 || idx == 2);
2443         case AARCH64_OPND_Rm_EXT:
2444           if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2445 && opnd->shifter.kind != AARCH64_MOD_LSL)
2447               set_other_error (mismatch_detail, idx,
2448 _("extend operator expected"));
2451           /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2452              (i.e. SP), in which case it defaults to LSL. The LSL alias is
2453              only valid when "Rd" or "Rn" is '11111', and is preferred in that
2455           if (!aarch64_stack_pointer_p (opnds + 0)
2456 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2458               if (!opnd->shifter.operator_present)
2460                   set_other_error (mismatch_detail, idx,
2461 _("missing extend operator"));
2464               else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2466                   set_other_error (mismatch_detail, idx,
2467 _("'LSL' operator not allowed"));
2471           assert (opnd->shifter.operator_present        /* Default to LSL.  */
2472 || opnd->shifter.kind == AARCH64_MOD_LSL);
2473           if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2475               set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2478           /* In the 64-bit form, the final register operand is written as Wm
2479              for all but the (possibly omitted) UXTX/LSL and SXTX
2481              N.B. GAS allows X register to be used with any operator as a
2482              programming convenience.  */
2483           if (qualifier == AARCH64_OPND_QLF_X
2484 && opnd->shifter.kind != AARCH64_MOD_LSL
2485 && opnd->shifter.kind != AARCH64_MOD_UXTX
2486 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2488             set_other_error (mismatch_detail, idx, _("W register expected"));
2493         case AARCH64_OPND_Rm_SFT:
2494           /* ROR is not available to the shifted register operand in
2495              arithmetic instructions.  */
2496           if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2498               set_other_error (mismatch_detail, idx,
2499 _("shift operator expected"));
2502           if (opnd->shifter.kind == AARCH64_MOD_ROR
2503 && opcode->iclass != log_shift)
2505               set_other_error (mismatch_detail, idx,
2506 _("'ROR' operator not allowed"));
2509           num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2510           if (!value_in_range_p (opnd->shifter.amount, 0, num))
2512               set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2529 /* Main entrypoint for the operand constraint checking.
2531 Return 1 if operands of *INST meet the constraint applied by the operand
2532 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2533 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2534 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2535 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2536 error kind when it is notified that an instruction does not pass the check).
2538 Un-determined operand qualifiers may get established during the process. */
2541 aarch64_match_operands_constraint (aarch64_inst *inst,
2542 aarch64_operand_error *mismatch_detail)
2546   DEBUG_TRACE ("enter");
2548   /* Check for cases where a source register needs to be the same as the
2549      destination register.  Do this before matching qualifiers since if
2550      an instruction has both invalid tying and invalid qualifiers,
2551      the error about qualifiers would suggest several alternative
2552      instructions that also have invalid tying.  */
2553   i = inst->opcode->tied_operand;
2554   if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2556       if (mismatch_detail)
2558           mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2559           mismatch_detail->index = i;
2560           mismatch_detail->error = NULL;
2565   /* Match operands' qualifier.
2566      *INST has already had qualifiers established for some, if not all, of
2567      its operands; we need to find out whether these established
2568      qualifiers match one of the qualifier sequences in
2569      INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2570      the corresponding qualifier in such a sequence.
2571      Only basic operand constraint checking is done here; the more thorough
2572      constraint checking will be carried out by operand_general_constraint_met_p,
2573      which has to be called after this in order to get all of the operands'
2574      qualifiers established.  */
2575   if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2577       DEBUG_TRACE ("FAIL on operand qualifier matching");
2578       if (mismatch_detail)
2580           /* Return an error type to indicate that it is the qualifier
2581              matching failure; we don't care about which operand as there
2582              is enough information in the opcode table to reproduce it.  */
2583           mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2584           mismatch_detail->index = -1;
2585           mismatch_detail->error = NULL;
2590   /* Match operands' constraint.  */
2591   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2593       enum aarch64_opnd type = inst->opcode->operands[i];
2594       if (type == AARCH64_OPND_NIL)
2596       if (inst->operands[i].skip)
2598           DEBUG_TRACE ("skip the incomplete operand %d", i);
2601       if (operand_general_constraint_met_p (inst->operands, i, type,
2602 inst->opcode, mismatch_detail) == 0)
2604           DEBUG_TRACE ("FAIL on operand %d", i);
2609   DEBUG_TRACE ("PASS");
/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   Also updates the TYPE of each INST->OPERANDS with the corresponding
   value of OPCODE->OPERANDS.

   Note that some operand qualifiers may need to be manually cleared by
   the caller before it further calls the aarch64_opcode_encode; by
   doing this, it helps the qualifier matching facilities work
   properly.  */

const aarch64_opcode*
aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
  const aarch64_opcode *old = inst->opcode;

  inst->opcode = opcode;

  /* Update the operand types.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      inst->operands[i].type = opcode->operands[i];
      /* AARCH64_OPND_NIL marks the end of the operand list.  */
      if (opcode->operands[i] == AARCH64_OPND_NIL)

  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
/* Search OPERANDS for OPERAND, stopping at the AARCH64_OPND_NIL
   terminator or after AARCH64_MAX_OPND_NUM entries.  */
aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    if (operands[i] == operand)
    else if (operands[i] == AARCH64_OPND_NIL)
/* Expand to the 32 names of a register bank: R(0)...R(30) for registers
   0-30, followed by FOR31 as the name of register number 31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),	\
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15),	\
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23),	\
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
/* Names of the SVE vector registers, first with .S suffixes (row 0),
   then with .D suffixes (row 1); indexed [is_d][regno].  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2687 /* Return the integer register name.
2688 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2690 static inline const char *
2691 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2693 const int has_zr = sp_reg_p ? 0 : 1;
2694 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2695 return int_reg[has_zr][is_64][regno];
2698 /* Like get_int_reg_name, but IS_64 is always 1. */
2700 static inline const char *
2701 get_64bit_int_reg_name (int regno, int sp_reg_p)
2703 const int has_zr = sp_reg_p ? 0 : 1;
2704 return int_reg[has_zr][1][regno];
/* Get the name of the integer offset register in OPND, using the shift type
   to decide whether it's a word or doubleword.  */

static inline const char *
get_offset_int_reg_name (const aarch64_opnd_info *opnd)
  switch (opnd->shifter.kind)
    case AARCH64_MOD_UXTW:
    case AARCH64_MOD_SXTW:
      /* W-extended offsets use the 32-bit (Wn) register names.  */
      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);

    case AARCH64_MOD_LSL:
    case AARCH64_MOD_SXTX:
      /* LSL and X-extend imply a 64-bit (Xn) offset register.  */
      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2728 /* Get the name of the SVE vector offset register in OPND, using the operand
2729 qualifier to decide whether the suffix should be .S or .D. */
2731 static inline const char *
2732 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2734 assert (qualifier == AARCH64_OPND_QLF_S_S
2735 || qualifier == AARCH64_OPND_QLF_S_D);
2736 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2739 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize IMM so that an unsupported SIZE cannot return an
     uninitialized value when assertions are compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the top 32 bits of the double, then shift them into place.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap around modulo 32.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
      /* Otherwise list every register explicitly, wrapping at 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
      snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		prefix, reg1, qlf_name, tb);
      snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		prefix, reg2, qlf_name, tb);
      snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */

print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
  if (opnd->addr.writeback)
      if (opnd->addr.preind)
	/* Pre-indexed with writeback, e.g. [x0, #8]!.  */
	snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
	/* Post-indexed, e.g. [x0], #8.  */
	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);

      if (opnd->shifter.operator_present)
	  /* SVE vector-length-scaled form, e.g. [x0, #2, mul vl].  */
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
	  snprintf (buf, size, "[%s, #%d, mul vl]",
		    base, opnd->addr.offset.imm);
      else if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
	/* A zero offset is omitted entirely.  */
	snprintf (buf, size, "[%s]", base);
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
  char tb[16];			/* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
      /* Not print the shift/extend amount when the amount is zero and
	 when it is not the special case of 8-bit load/store instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 case.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;

  /* Prepare for the extend/shift.  */
      snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
		opnd->shifter.amount);
	snprintf (tb, sizeof (tb), ", %s", shift_name);

  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
2931 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2932 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2933 PC, PCREL_P and ADDRESS are used to pass in and return information about
2934 the PC-relative address calculation, where the PC value is passed in
2935 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2936 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2937 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2939 The function serves both the disassembler and the assembler diagnostics
2940 issuer, which is the reason why it lives in this file. */
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
  unsigned int i, num_conds;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr, enum_value;

      /* General-purpose registers; number 31 is the zero register.  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
    case AARCH64_OPND_SVE_Rm:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));

      /* General-purpose registers where number 31 is SP/WSP.  */
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
    case AARCH64_OPND_SVE_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      /* LSL #0 is the canonical "no shift" form and is not printed.  */
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);

      /* FP/SIMD scalar registers; the qualifier supplies the b/h/s/d/q
	 prefix.  */
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
    case AARCH64_OPND_SVE_VZn:
    case AARCH64_OPND_SVE_Vd:
    case AARCH64_OPND_SVE_Vm:
    case AARCH64_OPND_SVE_Vn:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),

      /* AdvSIMD vector registers with arrangement, e.g. v0.4s.  */
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));

      /* AdvSIMD vector elements, e.g. v0.s[1].  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);

      /* AdvSIMD register lists.  */
    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd, "v");

      /* SVE predicate registers, plain, with /z or /m, or with an element
	 size suffix.  */
    case AARCH64_OPND_SVE_Pd:
    case AARCH64_OPND_SVE_Pg3:
    case AARCH64_OPND_SVE_Pg4_5:
    case AARCH64_OPND_SVE_Pg4_10:
    case AARCH64_OPND_SVE_Pg4_16:
    case AARCH64_OPND_SVE_Pm:
    case AARCH64_OPND_SVE_Pn:
    case AARCH64_OPND_SVE_Pt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "p%d", opnd->reg.regno);
      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));

      /* SVE vector registers, optionally with an element size suffix.  */
    case AARCH64_OPND_SVE_Za_5:
    case AARCH64_OPND_SVE_Za_16:
    case AARCH64_OPND_SVE_Zd:
    case AARCH64_OPND_SVE_Zm_5:
    case AARCH64_OPND_SVE_Zm_16:
    case AARCH64_OPND_SVE_Zn:
    case AARCH64_OPND_SVE_Zt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "z%d", opnd->reg.regno);
	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));

    case AARCH64_OPND_SVE_ZnxN:
    case AARCH64_OPND_SVE_ZtxN:
      print_register_list (buf, size, opnd, "z");

    case AARCH64_OPND_SVE_Zn_INDEX:
      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);

      /* Cn/Cm operands of SYS-class instructions.  */
    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);

      /* Immediates printed as plain decimal #<imm>.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
    case AARCH64_OPND_SIMM5:
    case AARCH64_OPND_SVE_SHLIMM_PRED:
    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
    case AARCH64_OPND_SVE_SHRIMM_PRED:
    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
    case AARCH64_OPND_SVE_SIMM5:
    case AARCH64_OPND_SVE_SIMM5B:
    case AARCH64_OPND_SVE_SIMM6:
    case AARCH64_OPND_SVE_SIMM8:
    case AARCH64_OPND_SVE_UIMM3:
    case AARCH64_OPND_SVE_UIMM7:
    case AARCH64_OPND_SVE_UIMM8:
    case AARCH64_OPND_SVE_UIMM8_53:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);

    case AARCH64_OPND_SVE_I1_HALF_ONE:
    case AARCH64_OPND_SVE_I1_HALF_TWO:
    case AARCH64_OPND_SVE_I1_ZERO_ONE:
	c.i = opnd->imm.value;
	snprintf (buf, size, "#%.1f", c.f);

    case AARCH64_OPND_SVE_PATTERN:
      if (optional_operand_p (opcode, idx)
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      /* Use the symbolic pattern name if there is one; reserved values
	 fall back to a raw immediate.  */
      if (aarch64_sve_pattern_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      if (optional_operand_p (opcode, idx)
	  && !opnd->shifter.operator_present
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      if (aarch64_sve_pattern_array[opnd->imm.value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      /* Append the scaling modifier, e.g. ", mul #4".  */
      if (opnd->shifter.operator_present)
	  size_t len = strlen (buf);
	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
		    aarch64_operand_modifiers[opnd->shifter.kind].name,
		    opnd->shifter.amount);

    case AARCH64_OPND_SVE_PRFOP:
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
      if (aarch64_sve_prfop_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);

    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	default: assert (0);

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");

      /* Hex immediates with an optional LSL shifter.  */
    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
    case AARCH64_OPND_SVE_INV_LIMM:
    case AARCH64_OPND_SVE_LIMM:
    case AARCH64_OPND_SVE_LIMM_MOV:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
		  opnd->shifter.amount);
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);

    case AARCH64_OPND_SVE_AIMM:
    case AARCH64_OPND_SVE_ASIMM:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
		  opnd->shifter.amount);
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);

      /* 8-bit FP immediates, expanded before printing.  */
    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
    case AARCH64_OPND_SVE_FPIMM8:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	    c.i = expand_fp_imm (2, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	    c.i = expand_fp_imm (4, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
	    c.i = expand_fp_imm (8, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	default: assert (0);

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      /* Also list any alias spellings of the condition.  */
      num_conds = ARRAY_SIZE (opnd->cond->names);
      for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
	  size_t len = strlen (buf);
	    snprintf (buf + len, size - len, " // %s = %s",
		      opnd->cond->names[0], opnd->cond->names[i]);
	    snprintf (buf + len, size - len, ", %s",
		      opnd->cond->names[i]);

    case AARCH64_OPND_ADDR_ADRP:
      /* ADRP targets are page (4KiB) aligned.  */
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	snprintf (buf, size, "[%s]", name);

      /* [base, Xm/Wm{, extend/shift}] addressing.  */
    case AARCH64_OPND_ADDR_REGOFF:
    case AARCH64_OPND_SVE_ADDR_RR:
    case AARCH64_OPND_SVE_ADDR_RR_LSL1:
    case AARCH64_OPND_SVE_ADDR_RR_LSL2:
    case AARCH64_OPND_SVE_ADDR_RR_LSL3:
    case AARCH64_OPND_SVE_ADDR_RX:
    case AARCH64_OPND_SVE_ADDR_RX_LSL1:
    case AARCH64_OPND_SVE_ADDR_RX_LSL2:
    case AARCH64_OPND_SVE_ADDR_RX_LSL3:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_offset_int_reg_name (opnd));

      /* [base, Zm.s/.d{, extend/shift}] addressing.  */
    case AARCH64_OPND_SVE_ADDR_RZ:
    case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
    case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
    case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));

      /* [base, #imm] addressing.  */
    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
    case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
    case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
    case AARCH64_OPND_SVE_ADDR_RI_U6:
    case AARCH64_OPND_SVE_ADDR_RI_U6x2:
    case AARCH64_OPND_SVE_ADDR_RI_U6x4:
    case AARCH64_OPND_SVE_ADDR_RI_U6x8:
      print_immediate_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));

      /* [Zn.s/.d, #imm] addressing.  */
    case AARCH64_OPND_SVE_ADDR_ZI_U5:
    case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
    case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
    case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
      print_immediate_offset_address
	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));

      /* [Zn, Zm{, extend/shift}] addressing.  */
    case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
    case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
    case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
      print_register_offset_address
	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
	snprintf (buf, size, "[%s]", name);

    case AARCH64_OPND_SYSREG:
      /* Look up a non-deprecated symbolic name for the register.  */
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);

      /* AT/DC/IC/TLBI system-instruction operands.  */
    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);

    case AARCH64_OPND_PRFOP:
      /* Reserved prefetch operations have no name and print as raw hex.  */
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
/* Pack a system register encoding op0:op1:CRn:CRm:op2 into a single
   integer value.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Flags for the entries of aarch64_sys_regs[] below.  */
#define F_DEPRECATED 0x1 /* Deprecated system register.  */
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */
#define F_HASXT 0x4 /* System instruction register <Xt>
/* TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
3550 const aarch64_sys_reg aarch64_sys_regs [] =
3552 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3553 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3554 { "elr_el1", CPEN_(0,C0,1), 0 },
3555 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3556 { "sp_el0", CPEN_(0,C1,0), 0 },
3557 { "spsel", CPEN_(0,C2,0), 0 },
3558 { "daif", CPEN_(3,C2,1), 0 },
3559 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3560 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3561 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3562 { "nzcv", CPEN_(3,C2,0), 0 },
3563 { "fpcr", CPEN_(3,C4,0), 0 },
3564 { "fpsr", CPEN_(3,C4,1), 0 },
3565 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3566 { "dlr_el0", CPEN_(3,C5,1), 0 },
3567 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3568 { "elr_el2", CPEN_(4,C0,1), 0 },
3569 { "sp_el1", CPEN_(4,C1,0), 0 },
3570 { "spsr_irq", CPEN_(4,C3,0), 0 },
3571 { "spsr_abt", CPEN_(4,C3,1), 0 },
3572 { "spsr_und", CPEN_(4,C3,2), 0 },
3573 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3574 { "spsr_el3", CPEN_(6,C0,0), 0 },
3575 { "elr_el3", CPEN_(6,C0,1), 0 },
3576 { "sp_el2", CPEN_(6,C1,0), 0 },
3577 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3578 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3579 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3580 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3581 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3582 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3583 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3584 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3585 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3586 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3587 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3588 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3589 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3590 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3591 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3592 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3593 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3594 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3595 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3596 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3597 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3598 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3599 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3600 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3601 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3602 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3603 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3604 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3605 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3606 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3607 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3608 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3609 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3610 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3611 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3612 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3613 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3614 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3615 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3616 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3617 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3618 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3619 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3620 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3621 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3622 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3623 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3624 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3625 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3626 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3627 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3628 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3629 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3630 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3631 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3632 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3633 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3634 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3635 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3636 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3637 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3638 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3639 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3640 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3641 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3642 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3643 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3644 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3645 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3646 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3647 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3648 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3649 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3650 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3651 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3652 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3653 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3654 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3655 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3656 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3657 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3658 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3659 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3660 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3661 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3662 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3663 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3664 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3665 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3666 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3667 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3668 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3669 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3670 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3671 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3672 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3673 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3674 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3675 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3676 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3677 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3678 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3679 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3680 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3681 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3682 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3683 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3684 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3685 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3686 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3687 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3688 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3689 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3690 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3691 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3692 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3693 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3694 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3695 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3696 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3697 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3698 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3699 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3700 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3701 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3702 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3703 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3704 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3705 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3706 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3707 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3708 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3709 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3710 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3711 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3712 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3713 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3714 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3715 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3716 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3717 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3718 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3719 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3720 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3721 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3722 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3723 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3724 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3725 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3726 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3727 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3728 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3729 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3730 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3731 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3732 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3733 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3734 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3735 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3736 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3737 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3738 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3739 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3740 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3741 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3742 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3743 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3744 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3745 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3746 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3747 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3748 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3749 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3750 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3751 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3752 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3753 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3754 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3755 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3756 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3757 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3758 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3759 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3760 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3761 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3762 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3763 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3764 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3765 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3766 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3767 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3768 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3769 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3770 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3771 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3772 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3773 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3774 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3775 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3776 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3777 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3778 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3779 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3780 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3781 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3782 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3783 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3784 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3785 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3786 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3787 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3788 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3789 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3790 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3791 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3792 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3793 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3794 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3795 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3796 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3797 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3798 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3799 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3800 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3801 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3802 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3803 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3804 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3805 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3806 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3807 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3808 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3809 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3810 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3811 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3812 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3813 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3814 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3815 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3816 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3817 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3818 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3819 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3820 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3821 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3822 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3823 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3824 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3825 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3826 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3827 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3828 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3829 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3830 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3831 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3832 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3833 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3834 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3835 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3836 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3837 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3838 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3839 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3840 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3841 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3842 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3843 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3844 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3845 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3846 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3847 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3848 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3849 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3850 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3851 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3852 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3853 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3854 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3855 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3856 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3857 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3858 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3859 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3860 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3861 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3862 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3863 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3864 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3865 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3866 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3867 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3868 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3869 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3870 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3871 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3872 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3873 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3874 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3875 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3876 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3877 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3878 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3879 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3880 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3881 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3882 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3883 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3884 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3885 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3886 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3887 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3888 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3889 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3890 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3891 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3892 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3893 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3894 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3895 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3896 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3897 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3898 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3899 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3900 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3901 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3902 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3903 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3904 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3905 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3906 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3907 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3908 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3909 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3910 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3911 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3912 { 0, CPENC(0,0,0,0,0), 0 },
/* Return non-zero if system register REG is marked deprecated
   (F_DEPRECATED set in its flags).  Used by the assembler to warn on
   deprecated register names.
   NOTE(review): the return type line and braces are missing from this
   excerpt (gaps in the embedded line numbering).  */
3916 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3918 return (reg->flags & F_DEPRECATED) != 0;
/* Return whether system register REG is supported under the feature
   set FEATURES.  Registers without F_ARCHEXT are unconditionally
   supported; F_ARCHEXT registers are matched by their CPENC/CPEN_
   encoding value against the architecture extension that introduced
   them (values mirror the aarch64_sys_regs table above).
   NOTE(review): the return statements between the checks are missing
   from this excerpt (gaps in the embedded line numbering); compare
   with the complete source before editing.  */
3922 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3923 const aarch64_sys_reg *reg)
3925 if (!(reg->flags & F_ARCHEXT))
3928 /* PAN. Values are from aarch64_sys_regs. */
3929 if (reg->value == CPEN_(0,C2,3)
3930 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3933 /* Virtualization host extensions: system registers. */
3934 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3935 || reg->value == CPENC (3, 4, C13, C0, 1)
3936 || reg->value == CPENC (3, 4, C14, C3, 0)
3937 || reg->value == CPENC (3, 4, C14, C3, 1)
3938 || reg->value == CPENC (3, 4, C14, C3, 2))
3939 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3942 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3943 if ((reg->value == CPEN_ (5, C0, 0)
3944 || reg->value == CPEN_ (5, C0, 1)
3945 || reg->value == CPENC (3, 5, C1, C0, 0)
3946 || reg->value == CPENC (3, 5, C1, C0, 2)
3947 || reg->value == CPENC (3, 5, C2, C0, 0)
3948 || reg->value == CPENC (3, 5, C2, C0, 1)
3949 || reg->value == CPENC (3, 5, C2, C0, 2)
3950 || reg->value == CPENC (3, 5, C5, C1, 0)
3951 || reg->value == CPENC (3, 5, C5, C1, 1)
3952 || reg->value == CPENC (3, 5, C5, C2, 0)
3953 || reg->value == CPENC (3, 5, C6, C0, 0)
3954 || reg->value == CPENC (3, 5, C10, C2, 0)
3955 || reg->value == CPENC (3, 5, C10, C3, 0)
3956 || reg->value == CPENC (3, 5, C12, C0, 0)
3957 || reg->value == CPENC (3, 5, C13, C0, 1)
3958 || reg->value == CPENC (3, 5, C14, C1, 0))
3959 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3962 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3963 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3964 || reg->value == CPENC (3, 5, C14, C2, 1)
3965 || reg->value == CPENC (3, 5, C14, C2, 2)
3966 || reg->value == CPENC (3, 5, C14, C3, 0)
3967 || reg->value == CPENC (3, 5, C14, C3, 1)
3968 || reg->value == CPENC (3, 5, C14, C3, 2))
3969 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3972 /* ARMv8.2 features. */
3974 /* ID_AA64MMFR2_EL1. */
3975 if (reg->value == CPENC (3, 0, C0, C7, 2)
3976 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* UAO (user access override) PSTATE access register.  */
3980 if (reg->value == CPEN_ (0, C2, 4)
3981 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3984 /* RAS extension. */
3986 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3987 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3988 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3989 || reg->value == CPENC (3, 0, C5, C3, 1)
3990 || reg->value == CPENC (3, 0, C5, C3, 2)
3991 || reg->value == CPENC (3, 0, C5, C3, 3)
3992 || reg->value == CPENC (3, 0, C5, C4, 0)
3993 || reg->value == CPENC (3, 0, C5, C4, 1)
3994 || reg->value == CPENC (3, 0, C5, C4, 2)
3995 || reg->value == CPENC (3, 0, C5, C4, 3)
3996 || reg->value == CPENC (3, 0, C5, C5, 0)
3997 || reg->value == CPENC (3, 0, C5, C5, 1))
3998 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4001 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4002 if ((reg->value == CPENC (3, 4, C5, C2, 3)
4003 || reg->value == CPENC (3, 0, C12, C1, 1)
4004 || reg->value == CPENC (3, 4, C12, C1, 1))
4005 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4008 /* Statistical Profiling extension. */
4009 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4010 || reg->value == CPENC (3, 0, C9, C10, 1)
4011 || reg->value == CPENC (3, 0, C9, C10, 3)
4012 || reg->value == CPENC (3, 0, C9, C10, 7)
4013 || reg->value == CPENC (3, 0, C9, C9, 0)
4014 || reg->value == CPENC (3, 0, C9, C9, 2)
4015 || reg->value == CPENC (3, 0, C9, C9, 3)
4016 || reg->value == CPENC (3, 0, C9, C9, 4)
4017 || reg->value == CPENC (3, 0, C9, C9, 5)
4018 || reg->value == CPENC (3, 0, C9, C9, 6)
4019 || reg->value == CPENC (3, 0, C9, C9, 7)
4020 || reg->value == CPENC (3, 4, C9, C9, 0)
4021 || reg->value == CPENC (3, 5, C9, C9, 0))
4022 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
/* PSTATE field names accepted by MSR (immediate): field name, its
   pstatefield encoding, and flags.  F_ARCHEXT entries are only valid
   when the corresponding architecture extension is enabled.  A null
   name terminates the table.  */
4028 const aarch64_sys_reg aarch64_pstatefields [] =
4030 { "spsel", 0x05, 0 },
4031 { "daifset", 0x1e, 0 },
4032 { "daifclr", 0x1f, 0 },
4033 { "pan", 0x04, F_ARCHEXT },
4034 { "uao", 0x03, F_ARCHEXT },
4035 { 0, CPENC(0,0,0,0,0), 0 },
/* Return whether PSTATE field REG (from aarch64_pstatefields) is
   supported under feature set FEATURES.  Fields without F_ARCHEXT are
   unconditionally supported; "pan" (0x04) needs the PAN extension and
   "uao" (0x03) needs ARMv8.2.
   NOTE(review): the return statements are missing from this excerpt
   (gaps in the embedded line numbering).  */
4039 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4040 const aarch64_sys_reg *reg)
4042 if (!(reg->flags & F_ARCHEXT))
4045 /* PAN. Values are from aarch64_pstatefields. */
4046 if (reg->value == 0x04
4047 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4050 /* UAO. Values are from aarch64_pstatefields. */
4051 if (reg->value == 0x03
4052 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* Operands of the IC (instruction-cache maintenance) instruction:
   operation name, CPENS encoding, and flags (F_HASXT => the operation
   takes an Xt register operand).  A null name terminates the table.  */
4058 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4060 { "ialluis", CPENS(0,C7,C1,0), 0 },
4061 { "iallu", CPENS(0,C7,C5,0), 0 },
4062 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4063 { 0, CPENS(0,0,0,0), 0 }
/* Operands of the DC (data-cache maintenance) instruction.  All take
   an Xt register (F_HASXT); "cvap" is additionally gated on an
   architecture extension (F_ARCHEXT, checked in
   aarch64_sys_ins_reg_supported_p).  A null name terminates the
   table.  */
4066 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4068 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4069 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4070 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4071 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4072 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4073 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4074 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4075 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4076 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4077 { 0, CPENS(0,0,0,0), 0 }
/* Operands of the AT (address translation) instruction.  All take an
   Xt register (F_HASXT); "s1e1rp"/"s1e1wp" are additionally gated on
   an architecture extension (F_ARCHEXT).  A null name terminates the
   table.  */
4080 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4082 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4083 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4084 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4085 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4086 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4087 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4088 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4089 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4090 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4091 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4092 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4093 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4094 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4095 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4096 { 0, CPENS(0,0,0,0), 0 }
/* Operands of the TLBI (TLB invalidate) instruction: operation name,
   CPENS encoding, and flags (F_HASXT => the operation takes an Xt
   register operand).  A null name terminates the table.  */
4099 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4101 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4102 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4103 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4104 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4105 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4106 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4107 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4108 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4109 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4110 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4111 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4112 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4113 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4114 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4115 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4116 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4117 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4118 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4119 { "alle2", CPENS(4,C8,C7,0), 0 },
4120 { "alle2is", CPENS(4,C8,C3,0), 0 },
4121 { "alle1", CPENS(4,C8,C7,4), 0 },
4122 { "alle1is", CPENS(4,C8,C3,4), 0 },
4123 { "alle3", CPENS(6,C8,C7,0), 0 },
4124 { "alle3is", CPENS(6,C8,C3,0), 0 },
4125 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4126 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4127 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4128 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4129 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4130 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4131 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4132 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4133 { 0, CPENS(0,0,0,0), 0 }
/* Return non-zero if system-instruction operand SYS_INS_REG takes an
   Xt register operand (F_HASXT set in its flags).
   NOTE(review): the return type line and braces are missing from this
   excerpt (gaps in the embedded line numbering).  */
4137 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4139 return (sys_ins_reg->flags & F_HASXT) != 0;
/* Return whether system-instruction operand REG (IC/DC/AT/TLBI) is
   supported under feature set FEATURES.  Operands without F_ARCHEXT
   are unconditionally supported; F_ARCHEXT operands (DC CVAP,
   AT S1E1RP/S1E1WP) require ARMv8.2.
   NOTE(review): the return statements are missing from this excerpt
   (gaps in the embedded line numbering).  */
4143 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4144 const aarch64_sys_ins_reg *reg)
4146 if (!(reg->flags & F_ARCHEXT))
4149 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4150 if (reg->value == CPENS (3, C7, C12, 1)
4151 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4154 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4155 if ((reg->value == CPENS (0, C7, C9, 0)
4156 || reg->value == CPENS (0, C7, C9, 1))
4157 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* Extract bit BT of INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  The mask constant
   is unsigned (1u) so that a 31-bit-wide field does not left-shift a
   signed int into its sign bit, which is undefined behavior in C.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
/* Verifier for LDPSW (referenced from the opcode table).  Extracts
   the destination registers t (bits 4:0) and t2 (bits 14:10) and the
   base register n (bits 9:5) from INSN, and checks for the hazard
   where a writeback base register equals a destination register.
   The n != 31 test excludes SP/XZR encoding 31, to which the hazard
   check does not apply.
   NOTE(review): the branch bodies and return statements are missing
   from this excerpt (gaps in the embedded line numbering); consult
   the complete source before modifying.  */
4184 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4185 const aarch64_insn insn)
4187 int t = BITS (insn, 4, 0);
4188 int n = BITS (insn, 9, 5);
4189 int t2 = BITS (insn, 14, 10);
4193 /* Write back enabled. */
4194 if ((t == n || t2 == n) && n != 31)
4208 /* Return true if VALUE cannot be moved into an SVE register using DUP
4209 (with any element size, not just ESIZE) and if using DUPM would
4210 therefore be OK. ESIZE is the number of bytes in the immediate. */
/* NOTE(review): braces and several return statements are missing from
   this excerpt (gaps in the embedded line numbering); consult the
   complete source before modifying.  */
4213 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4215 int64_t svalue = uvalue;
/* UPPER masks off everything above the (esize * 8)-bit element.  Two
   shifts of esize * 4 each are used because a single shift by 64
   (esize == 8) would be undefined behavior.  */
4216 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4218 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
/* Narrow SVALUE step by step while the value is replicated across
   the wider halves, ending at the smallest replicated element.  */
4220 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4222 svalue = (int32_t) uvalue;
4223 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4225 svalue = (int16_t) uvalue;
4226 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4230 if ((svalue & 0xff) == 0)
/* DUP covers signed 8-bit immediates; anything outside [-128, 128)
   is DUPM territory.  */
4232 return svalue < -128 || svalue >= 128;
4235 /* Include the opcode description table as well as the operand description
/* VERIFIER(x) expands to the name of a verify_* routine (such as
   verify_ldpsw above) referenced from the generated opcode table.  */
4237 #define VERIFIER(x) verify_##x
4238 #include "aarch64-tbl.h"