1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
102 /* Helper functions to determine which operand to use to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
127 DP_VECTOR_ACROSS_LANES,
130 static const char significant_operand_index [] =
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the corresponding data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
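/* For example (illustrative): the qualifier sequence V_8H, V_8B, V_8B, as in
   a lengthening instruction such as SADDL Vd.8H, Vn.8B, Vm.8B, is classified
   as DP_VECTOR_LONG, so significant_operand_index selects operand 1 for the
   size:Q encoding.  */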
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time we need to select an operand.  We can
191 either buffer the calculated result or statically generate the data;
192 however, it is not obvious that the optimization will bring a
193 significant benefit.  */
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
202 const aarch64_field fields[] =
205 { 0, 4 }, /* cond2: condition in truly conditionally-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
247 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
248 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
249 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
250 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
251 { 5, 14 }, /* imm14: in test bit and branch instructions. */
252 { 5, 16 }, /* imm16: in exception instructions. */
253 { 0, 26 }, /* imm26: in unconditional branch instructions. */
254 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
255 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
256 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
257 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
258 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
259 { 22, 1 }, /* N: in logical (immediate) instructions. */
260 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
261 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
262 { 31, 1 }, /* sf: in integer data processing instructions. */
263 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
264 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
265 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
266 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
267 { 31, 1 }, /* b5: in the test bit and branch instructions. */
268 { 19, 5 }, /* b40: in the test bit and branch instructions. */
269 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register, bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
296 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
297 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
298 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
299 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
300 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
301 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
302 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
303 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
304 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
305 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
306 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
307 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
308 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
309 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
310 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
311 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
312 { 16, 4 }, /* SVE_tsz: triangular size select. */
313 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
314 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
315 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
316 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
317 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
318 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
319 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
320 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
321 { 12, 2 }, /* SM3: 2-bit index immediate in indexed-element SM3 instructions. */
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type)
327 return aarch64_operands[type].op_class;
331 aarch64_get_operand_name (enum aarch64_opnd type)
333 return aarch64_operands[type].name;
336 /* Get the operand description string.
337 This is usually for diagnostic purposes. */
339 aarch64_get_operand_desc (enum aarch64_opnd type)
341 return aarch64_operands[type].desc;
344 /* Table of all conditional affixes. */
345 const aarch64_cond aarch64_conds[16] =
347 {{"eq", "none"}, 0x0},
348 {{"ne", "any"}, 0x1},
349 {{"cs", "hs", "nlast"}, 0x2},
350 {{"cc", "lo", "ul", "last"}, 0x3},
351 {{"mi", "first"}, 0x4},
352 {{"pl", "nfrst"}, 0x5},
355 {{"hi", "pmore"}, 0x8},
356 {{"ls", "plast"}, 0x9},
357 {{"ge", "tcont"}, 0xa},
358 {{"lt", "tstop"}, 0xb},
366 get_cond_from_value (aarch64_insn value)
369 return &aarch64_conds[(unsigned int) value];
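/* In the A64 condition encoding, each even/odd pair of codes are inverses of
   each other: eq/ne = 0x0/0x1, cs/cc = 0x2/0x3, ..., ge/lt = 0xa/0xb.
   Flipping the low bit of the value therefore yields the inverted condition,
   e.g. (illustrative) inverting "ge" (0xa) gives "lt" (0xb).  */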
373 get_inverted_cond (const aarch64_cond *cond)
375 return &aarch64_conds[cond->value ^ 0x1];
378 /* Table describing the operand extension/shifting operators; indexed by
379 enum aarch64_modifier_kind.
381 The value column provides the most common values for encoding modifiers,
382 which enables table-driven encoding/decoding for the modifiers. */
383 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
407 return desc - aarch64_operand_modifiers;
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
413 return aarch64_operand_modifiers[kind].value;
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value,
418 bfd_boolean extend_p)
420 if (extend_p == TRUE)
421 return AARCH64_MOD_UXTB + value;
423 return AARCH64_MOD_LSL - value;
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
440 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
460 /* Table describing the operands supported by the aliases of the HINT
463 The name column is the operand that is accepted for the alias. The value
464 column is the hint number of the alias. The list of operands is terminated
465 by NULL in the name column. */
467 const struct aarch64_name_value_pair aarch64_hint_options[] =
469 { "csync", 0x11 }, /* PSB CSYNC. */
473 /* op -> op: load = 0 instruction = 1 store = 2
474 l -> level: 1-3
475 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
476 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
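/* For example (illustrative): "pstl2strm" is B (2, 2, 1)
   = (2 << 3) | ((2 - 1) << 1) | 1 = 0b10011 = 19, i.e. a prefetch for
   store, targeting the L2 cache, with the streaming (non-temporal) hint.  */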
477 const struct aarch64_name_value_pair aarch64_prfops[32] =
479 { "pldl1keep", B(0, 1, 0) },
480 { "pldl1strm", B(0, 1, 1) },
481 { "pldl2keep", B(0, 2, 0) },
482 { "pldl2strm", B(0, 2, 1) },
483 { "pldl3keep", B(0, 3, 0) },
484 { "pldl3strm", B(0, 3, 1) },
487 { "plil1keep", B(1, 1, 0) },
488 { "plil1strm", B(1, 1, 1) },
489 { "plil2keep", B(1, 2, 0) },
490 { "plil2strm", B(1, 2, 1) },
491 { "plil3keep", B(1, 3, 0) },
492 { "plil3strm", B(1, 3, 1) },
495 { "pstl1keep", B(2, 1, 0) },
496 { "pstl1strm", B(2, 1, 1) },
497 { "pstl2keep", B(2, 2, 0) },
498 { "pstl2strm", B(2, 2, 1) },
499 { "pstl3keep", B(2, 3, 0) },
500 { "pstl3strm", B(2, 3, 1) },
514 /* Utilities on value constraint. */
517 value_in_range_p (int64_t value, int low, int high)
519 return (value >= low && value <= high) ? 1 : 0;
522 /* Return true if VALUE is a multiple of ALIGN. */
524 value_aligned_p (int64_t value, int align)
526 return (value % align) == 0;
529 /* A signed value fits in a field. */
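/* For example (illustrative): with WIDTH == 14, as used for the imm14 field
   of the test-bit-and-branch instructions, the accepted range is
   [-8192, 8191].  */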
531 value_fit_signed_field_p (int64_t value, unsigned width)
534 if (width < sizeof (value) * 8)
536 int64_t lim = (int64_t)1 << (width - 1);
537 if (value >= -lim && value < lim)
543 /* An unsigned value fits in a field. */
545 value_fit_unsigned_field_p (int64_t value, unsigned width)
548 if (width < sizeof (value) * 8)
550 int64_t lim = (int64_t)1 << width;
551 if (value >= 0 && value < lim)
557 /* Return 1 if OPERAND is SP or WSP. */
559 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
567 /* Return 1 if OPERAND is XZR or WZR. */
569 aarch64_zero_register_p (const aarch64_opnd_info *operand)
571 return ((aarch64_get_operand_class (operand->type)
572 == AARCH64_OPND_CLASS_INT_REG)
573 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
574 && operand->reg.regno == 31);
577 /* Return true if the operand *OPERAND, which has the operand code
578 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
579 qualified by the qualifier TARGET. */
582 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
583 aarch64_opnd_qualifier_t target)
585 switch (operand->qualifier)
587 case AARCH64_OPND_QLF_W:
588 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
591 case AARCH64_OPND_QLF_X:
592 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
595 case AARCH64_OPND_QLF_WSP:
596 if (target == AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
600 case AARCH64_OPND_QLF_SP:
601 if (target == AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
615 Return NIL if more than one expected qualifier is found. */
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
620 const aarch64_opnd_qualifier_t known_qlf,
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or that the qualifier sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf == AARCH64_OPND_NIL)
642 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
643 return qseq_list[0][idx];
646 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
648 if (qseq_list[i][known_idx] == known_qlf)
651 /* More than one sequence is found to have KNOWN_QLF at
652 KNOWN_IDX. */
653 return AARCH64_OPND_NIL;
658 return qseq_list[saved_i][idx];
661 enum operand_qualifier_kind
669 /* Operand qualifier description. */
670 struct operand_qualifier_data
672 /* The usage of the three data fields depends on the qualifier kind. */
679 enum operand_qualifier_kind kind;
682 /* Indexed by the operand qualifier enumerators. */
683 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
685 {0, 0, 0, "NIL", OQK_NIL},
687 /* Operand variant qualifiers.
689 element size, number of elements and common value for encoding. */
691 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
692 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
693 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
694 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
696 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
697 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
698 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
699 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
700 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
701 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
703 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
704 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
705 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
706 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
707 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
708 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
709 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
710 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
711 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
712 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
713 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
715 {0, 0, 0, "z", OQK_OPD_VARIANT},
716 {0, 0, 0, "m", OQK_OPD_VARIANT},
718 /* Qualifiers constraining the value range.
720 Lower bound, higher bound, unused. */
722 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
723 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
724 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
725 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
726 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
727 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
728 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
730 /* Qualifiers for miscellaneous purpose.
732 unused, unused and unused. */
737 {0, 0, 0, "retrieving", 0},
740 static inline bfd_boolean
741 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
743 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
747 static inline bfd_boolean
748 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
750 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
755 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
757 return aarch64_opnd_qualifiers[qualifier].desc;
760 /* Given an operand qualifier, return the expected data element size
761 of a qualified operand. */
763 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
765 assert (operand_variant_qualifier_p (qualifier) == TRUE);
766 return aarch64_opnd_qualifiers[qualifier].data0;
770 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
772 assert (operand_variant_qualifier_p (qualifier) == TRUE);
773 return aarch64_opnd_qualifiers[qualifier].data1;
777 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
779 assert (operand_variant_qualifier_p (qualifier) == TRUE);
780 return aarch64_opnd_qualifiers[qualifier].data2;
784 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
786 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
787 return aarch64_opnd_qualifiers[qualifier].data0;
791 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
793 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
794 return aarch64_opnd_qualifiers[qualifier].data1;
799 aarch64_verbose (const char *str, ...)
810 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
814 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
815 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
820 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
821 const aarch64_opnd_qualifier_t *qualifier)
824 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
826 aarch64_verbose ("dump_match_qualifiers:");
827 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
828 curr[i] = opnd[i].qualifier;
829 dump_qualifier_sequence (curr);
830 aarch64_verbose ("against");
831 dump_qualifier_sequence (qualifier);
833 #endif /* DEBUG_AARCH64 */
835 /* TODO: improve this; we could have an extra field at run time to
836 store the number of operands rather than calculating it every time. */
839 aarch64_num_of_operands (const aarch64_opcode *opcode)
842 const enum aarch64_opnd *opnds = opcode->operands;
843 while (opnds[i++] != AARCH64_OPND_NIL)
846 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
850 /* Find the best-matching qualifier sequence in *QUALIFIERS_LIST for INST.
851 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
853 N.B. on entry, it is very likely that only some operands in *INST
854 have had their qualifiers established.
856 If STOP_AT is not -1, the function will only try to match
857 the qualifier sequence for operands before and including the operand
858 of index STOP_AT; and on success *RET will only be filled with the first
859 (STOP_AT+1) qualifiers.
861 A couple of examples of the matching algorithm:
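For instance (illustrative): if the operand qualifiers established so far
are X, NIL, NIL, they match a candidate sequence X, W, NIL, with W being
filled in for operand 1; they would not match a candidate sequence
W, W, NIL, because the non-NIL qualifier X differs from W.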
869 Apart from serving the main encoding routine, this can also be called
870 during or after the operand decoding. */
873 aarch64_find_best_match (const aarch64_inst *inst,
874 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
875 int stop_at, aarch64_opnd_qualifier_t *ret)
879 const aarch64_opnd_qualifier_t *qualifiers;
881 num_opnds = aarch64_num_of_operands (inst->opcode);
884 DEBUG_TRACE ("SUCCEED: no operand");
888 if (stop_at < 0 || stop_at >= num_opnds)
889 stop_at = num_opnds - 1;
891 /* For each pattern. */
892 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
895 qualifiers = *qualifiers_list;
897 /* Start as positive. */
900 DEBUG_TRACE ("%d", i);
903 dump_match_qualifiers (inst->operands, qualifiers);
906 /* Most opcodes have far fewer patterns in the list.
907 The first NIL qualifier indicates the end of the list. */
908 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
910 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
916 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
918 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
920 /* Either the operand does not have a qualifier, or the qualifier
921 for the operand needs to be deduced from the qualifier
922 sequence.
923 In the latter case, any constraint checking related to
924 the obtained qualifier should be done later in
925 operand_general_constraint_met_p. */
928 else if (*qualifiers != inst->operands[j].qualifier)
930 /* Unless the target qualifier can also qualify the operand
931 (which already has a non-nil qualifier), non-equal
932 qualifiers generally do not match. */
933 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
942 continue; /* Equal qualifiers are certainly matched. */
945 /* Qualifiers established. */
952 /* Fill the result in *RET. */
954 qualifiers = *qualifiers_list;
956 DEBUG_TRACE ("complete qualifiers using list %d", i);
959 dump_qualifier_sequence (qualifiers);
962 for (j = 0; j <= stop_at; ++j, ++qualifiers)
963 ret[j] = *qualifiers;
964 for (; j < AARCH64_MAX_OPND_NUM; ++j)
965 ret[j] = AARCH64_OPND_QLF_NIL;
967 DEBUG_TRACE ("SUCCESS");
971 DEBUG_TRACE ("FAIL");
975 /* Operand qualifier matching and resolving.
977 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
978 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
980 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
984 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
987 aarch64_opnd_qualifier_seq_t qualifiers;
989 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
992 DEBUG_TRACE ("matching FAIL");
996 if (inst->opcode->flags & F_STRICT)
998 /* Require an exact qualifier match, even for NIL qualifiers. */
999 nops = aarch64_num_of_operands (inst->opcode);
1000 for (i = 0; i < nops; ++i)
1001 if (inst->operands[i].qualifier != qualifiers[i])
1005 /* Update the qualifiers. */
1006 if (update_p == TRUE)
1007 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1009 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1011 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1012 "update %s with %s for operand %d",
1013 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1014 aarch64_get_qualifier_name (qualifiers[i]), i);
1015 inst->operands[i].qualifier = qualifiers[i];
1018 DEBUG_TRACE ("matching SUCCESS");
1022 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1023 purpose register.
1025 IS32 indicates whether value is a 32-bit immediate or not.
1026 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1027 amount will be returned in *SHIFT_AMOUNT. */
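/* For example (illustrative): 0x12340000 is a wide constant (a single MOVZ
   of 0x1234 shifted left by 16), so this function would return TRUE with
   *SHIFT_AMOUNT set to 16, whereas 0x12345678 is not and would return
   FALSE.  */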
1030 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1034 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1038 /* Allow all zeros or all ones in top 32-bits, so that
1039 32-bit constant expressions like ~0x80000000 are permitted. */
1041 uint64_t ext = value;
1042 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1043 /* Immediate out of range. */
1045 value &= (int64_t) 0xffffffff;
1048 /* First, try MOVZ, then MOVN. */
1050 if ((value & ((int64_t) 0xffff << 0)) == value)
1052 else if ((value & ((int64_t) 0xffff << 16)) == value)
1054 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1056 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1061 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1065 if (shift_amount != NULL)
1066 *shift_amount = amount;
1068 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1073 /* Build the accepted values for immediate logical SIMD instructions.
1075 The standard encodings of the immediate value are:
1076 N imms immr SIMD size R S
1077 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1078 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1079 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1080 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1081 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1082 0 11110s 00000r 2 UInt(r) UInt(s)
1083 where the all-ones value of S is reserved.
1085 Let's call E the SIMD size.
1087 The immediate value is: S+1 bits '1' rotated to the right by R.
1089 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1090 (remember S != E - 1). */
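/* For example (illustrative): 0x0000ffff0000ffff is a valid bitmask
   immediate with E = 32, S = 15 (sixteen consecutive ones) and R = 0,
   replicated twice to fill 64 bits; its standard encoding is N = 0,
   immr = 0b000000, imms = 0b001111.  A value such as 0x0000fff10000fff1 is
   not valid, because its set bits do not form a single rotated run.  */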
1092 #define TOTAL_IMM_NB 5334
1097 aarch64_insn encoding;
1098 } simd_imm_encoding;
1100 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1103 simd_imm_encoding_cmp(const void *i1, const void *i2)
1105 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1106 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1108 if (imm1->imm < imm2->imm)
1110 if (imm1->imm > imm2->imm)
1115 /* immediate bitfield standard encoding
1116 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1117 1 ssssss rrrrrr 64 rrrrrr ssssss
1118 0 0sssss 0rrrrr 32 rrrrr sssss
1119 0 10ssss 00rrrr 16 rrrr ssss
1120 0 110sss 000rrr 8 rrr sss
1121 0 1110ss 0000rr 4 rr ss
1122 0 11110s 00000r 2 r s */
1124 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1126 return (is64 << 12) | (r << 6) | s;
1130 build_immediate_table (void)
1132 uint32_t log_e, e, s, r, s_mask;
1138 for (log_e = 1; log_e <= 6; log_e++)
1140 /* Get element size. */
1145 mask = 0xffffffffffffffffull;
1151 mask = (1ull << e) - 1;
1153 1 ((1 << 4) - 1) << 2 = 111100
1154 2 ((1 << 3) - 1) << 3 = 111000
1155 3 ((1 << 2) - 1) << 4 = 110000
1156 4 ((1 << 1) - 1) << 5 = 100000
1157 5 ((1 << 0) - 1) << 6 = 000000 */
1158 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1160 for (s = 0; s < e - 1; s++)
1161 for (r = 0; r < e; r++)
1163 /* s+1 consecutive bits to 1 (s < 63) */
1164 imm = (1ull << (s + 1)) - 1;
1165 /* rotate right by r */
1167 imm = (imm >> r) | ((imm << (e - r)) & mask);
1168 /* replicate the constant depending on SIMD size */
1171 case 1: imm = (imm << 2) | imm;
1173 case 2: imm = (imm << 4) | imm;
1175 case 3: imm = (imm << 8) | imm;
1177 case 4: imm = (imm << 16) | imm;
1179 case 5: imm = (imm << 32) | imm;
1184 simd_immediates[nb_imms].imm = imm;
1185 simd_immediates[nb_imms].encoding =
1186 encode_immediate_bitfield(is64, s | s_mask, r);
1190 assert (nb_imms == TOTAL_IMM_NB);
1191 qsort(simd_immediates, nb_imms,
1192 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1195 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1196 be accepted by logical (immediate) instructions
1197 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1199 ESIZE is the number of bytes in the decoded immediate value.
1200 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1201 VALUE will be returned in *ENCODING. */
1204 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1206 simd_imm_encoding imm_enc;
1207 const simd_imm_encoding *imm_encoding;
1208 static bfd_boolean initialized = FALSE;
1212 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1217 build_immediate_table ();
1221 /* Allow all zeros or all ones in top bits, so that
1222 constant expressions like ~1 are permitted. */
1223 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1224 if ((value & ~upper) != value && (value | upper) != value)
1227 /* Replicate to a full 64-bit value. */
1229 for (i = esize * 8; i < 64; i *= 2)
1230 value |= (value << i);
1232 imm_enc.imm = value;
1233 imm_encoding = (const simd_imm_encoding *)
1234 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1235 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1236 if (imm_encoding == NULL)
1238 DEBUG_TRACE ("exit with FALSE");
1241 if (encoding != NULL)
1242 *encoding = imm_encoding->encoding;
1243 DEBUG_TRACE ("exit with TRUE");
1247 /* If 64-bit immediate IMM is in the format of
1248 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1249 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1250 of value "abcdefgh". Otherwise return -1. */
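/* For example (illustrative): 0x00ff00ff00ff00ff has the byte pattern
   a..h = 00,ff,00,ff,00,ff,00,ff and so yields 0b01010101 (0x55), whereas
   0x00ff00ff00ff00fe yields -1 because its least significant byte is
   neither 0x00 nor 0xff.  */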
1252 aarch64_shrink_expanded_imm8 (uint64_t imm)
1258 for (i = 0; i < 8; i++)
1260 byte = (imm >> (8 * i)) & 0xff;
1263 else if (byte != 0x00)
1269 /* Utility inline functions for operand_general_constraint_met_p. */
1272 set_error (aarch64_operand_error *mismatch_detail,
1273 enum aarch64_operand_error_kind kind, int idx,
1276 if (mismatch_detail == NULL)
1278 mismatch_detail->kind = kind;
1279 mismatch_detail->index = idx;
1280 mismatch_detail->error = error;
1284 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1287 if (mismatch_detail == NULL)
1289 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1293 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1294 int idx, int lower_bound, int upper_bound,
1297 if (mismatch_detail == NULL)
1299 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1300 mismatch_detail->data[0] = lower_bound;
1301 mismatch_detail->data[1] = upper_bound;
1305 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1308 if (mismatch_detail == NULL)
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("immediate value"));
1315 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1318 if (mismatch_detail == NULL)
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1321 _("immediate offset"));
1325 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1326 int idx, int lower_bound, int upper_bound)
1328 if (mismatch_detail == NULL)
1330 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1331 _("register number"));
1335 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1336 int idx, int lower_bound, int upper_bound)
1338 if (mismatch_detail == NULL)
1340 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1341 _("register element index"));
1345 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1346 int idx, int lower_bound, int upper_bound)
1348 if (mismatch_detail == NULL)
1350 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1354 /* Report that the MUL modifier in operand IDX should be in the range
1355 [LOWER_BOUND, UPPER_BOUND]. */
1357 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1358 int idx, int lower_bound, int upper_bound)
1360 if (mismatch_detail == NULL)
1362 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1367 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1370 if (mismatch_detail == NULL)
1372 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1373 mismatch_detail->data[0] = alignment;
1377 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1380 if (mismatch_detail == NULL)
1382 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1383 mismatch_detail->data[0] = expected_num;
1387 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1390 if (mismatch_detail == NULL)
1392 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1395 /* General constraint checking based on operand code.
1397 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1398 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1400 This function has to be called after the qualifiers for all operands
1401 have been resolved.
1403 The mismatch detail is returned in *MISMATCH_DETAIL upon request,
1404 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating
1405 error messages during disassembly, where they are not
1406 wanted. We avoid the dynamic construction of strings of error messages
1407 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1408 use a combination of error code, static string and some integer data to
1409 represent an error. */
1412 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1413 enum aarch64_opnd type,
1414 const aarch64_opcode *opcode,
1415 aarch64_operand_error *mismatch_detail)
1417 unsigned num, modifiers, shift;
1419 int64_t imm, min_value, max_value;
1420 uint64_t uvalue, mask;
1421 const aarch64_opnd_info *opnd = opnds + idx;
1422 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1424 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1426 switch (aarch64_operands[type].op_class)
1428 case AARCH64_OPND_CLASS_INT_REG:
1429 /* Check pair reg constraints for cas* instructions. */
1430 if (type == AARCH64_OPND_PAIRREG)
1432 assert (idx == 1 || idx == 3);
1433 if (opnds[idx - 1].reg.regno % 2 != 0)
1435 set_syntax_error (mismatch_detail, idx - 1,
1436 _("reg pair must start from even reg"));
1439 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1441 set_syntax_error (mismatch_detail, idx,
1442 _("reg pair must be contiguous"));
1448 /* <Xt> may be optional in some IC and TLBI instructions. */
1449 if (type == AARCH64_OPND_Rt_SYS)
1451 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1452 == AARCH64_OPND_CLASS_SYSTEM));
1453 if (opnds[1].present
1454 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1456 set_other_error (mismatch_detail, idx, _("extraneous register"));
1459 if (!opnds[1].present
1460 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1462 set_other_error (mismatch_detail, idx, _("missing register"));
1468 case AARCH64_OPND_QLF_WSP:
1469 case AARCH64_OPND_QLF_SP:
1470 if (!aarch64_stack_pointer_p (opnd))
1472 set_other_error (mismatch_detail, idx,
1473 _("stack pointer register expected"));
1482 case AARCH64_OPND_CLASS_SVE_REG:
1485 case AARCH64_OPND_SVE_Zm3_INDEX:
1486 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1487 case AARCH64_OPND_SVE_Zm4_INDEX:
1488 size = get_operand_fields_width (get_operand_from_code (type));
1489 shift = get_operand_specific_data (&aarch64_operands[type]);
1490 mask = (1 << shift) - 1;
1491 if (opnd->reg.regno > mask)
1493 assert (mask == 7 || mask == 15);
1494 set_other_error (mismatch_detail, idx,
1496 ? _("z0-z15 expected")
1497 : _("z0-z7 expected"));
1500 mask = (1 << (size - shift)) - 1;
1501 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1503 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1508 case AARCH64_OPND_SVE_Zn_INDEX:
1509 size = aarch64_get_qualifier_esize (opnd->qualifier);
1510 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1512 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1518 case AARCH64_OPND_SVE_ZnxN:
1519 case AARCH64_OPND_SVE_ZtxN:
1520 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1522 set_other_error (mismatch_detail, idx,
1523 _("invalid register list"));
1533 case AARCH64_OPND_CLASS_PRED_REG:
1534 if (opnd->reg.regno >= 8
1535 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1537 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1542 case AARCH64_OPND_CLASS_COND:
1543 if (type == AARCH64_OPND_COND1
1544 && (opnds[idx].cond->value & 0xe) == 0xe)
1546 /* AL and NV are not allowed. */
1547 set_syntax_error (mismatch_detail, idx, NULL);
1551 case AARCH64_OPND_CLASS_ADDRESS:
1552 /* Check writeback. */
1553 switch (opcode->iclass)
1557 case ldstnapair_offs:
1560 if (opnd->addr.writeback == 1)
1562 set_syntax_error (mismatch_detail, idx,
1563 _("unexpected address writeback"));
1568 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1570 set_syntax_error (mismatch_detail, idx,
1571 _("unexpected address writeback"));
1576 case ldstpair_indexed:
1579 if (opnd->addr.writeback == 0)
1581 set_syntax_error (mismatch_detail, idx,
1582 _("address writeback expected"));
1587 assert (opnd->addr.writeback == 0);
1592 case AARCH64_OPND_ADDR_SIMM7:
1593 /* Scaled signed 7-bit immediate offset. */
1594 /* Get the size of the data element that is accessed, which may be
1595 different from that of the source register size,
1596 e.g. in strb/ldrb. */
1597 size = aarch64_get_qualifier_esize (opnd->qualifier);
1598 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1600 set_offset_out_of_range_error (mismatch_detail, idx,
1601 -64 * size, 63 * size);
1604 if (!value_aligned_p (opnd->addr.offset.imm, size))
1606 set_unaligned_error (mismatch_detail, idx, size);
1610 case AARCH64_OPND_ADDR_OFFSET:
1611 case AARCH64_OPND_ADDR_SIMM9:
1612 /* Unscaled signed 9-bit immediate offset. */
1613 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1615 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1620 case AARCH64_OPND_ADDR_SIMM9_2:
1621 /* Unscaled signed 9-bit immediate offset, which has to be negative
1622 or unaligned. */
1623 size = aarch64_get_qualifier_esize (qualifier);
1624 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1625 && !value_aligned_p (opnd->addr.offset.imm, size))
1626 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1628 set_other_error (mismatch_detail, idx,
1629 _("negative or unaligned offset expected"));
1632 case AARCH64_OPND_ADDR_SIMM10:
1633 /* Scaled signed 10-bit immediate offset. */
1634 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1636 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1639 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1641 set_unaligned_error (mismatch_detail, idx, 8);
1646 case AARCH64_OPND_SIMD_ADDR_POST:
1647 /* AdvSIMD load/store multiple structures, post-index. */
1649 if (opnd->addr.offset.is_reg)
1651 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1655 set_other_error (mismatch_detail, idx,
1656 _("invalid register offset"));
1662 const aarch64_opnd_info *prev = &opnds[idx-1];
1663 unsigned num_bytes; /* total number of bytes transferred. */
1664 /* The opcode dependent area stores the number of elements in
1665 each structure to be loaded/stored. */
1666 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1667 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1668 /* Special handling of loading a single structure to all lanes. */
1669 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1670 * aarch64_get_qualifier_esize (prev->qualifier);
1672 num_bytes = prev->reglist.num_regs
1673 * aarch64_get_qualifier_esize (prev->qualifier)
1674 * aarch64_get_qualifier_nelem (prev->qualifier);
1675 if ((int) num_bytes != opnd->addr.offset.imm)
1677 set_other_error (mismatch_detail, idx,
1678 _("invalid post-increment amount"));
1684 case AARCH64_OPND_ADDR_REGOFF:
1685 /* Get the size of the data element that is accessed, which may be
1686 different from that of the source register size,
1687 e.g. in strb/ldrb. */
1688 size = aarch64_get_qualifier_esize (opnd->qualifier);
1689 /* The shift amount is either zero or the binary logarithm of SIZE. */
1690 if (opnd->shifter.amount != 0
1691 && opnd->shifter.amount != (int)get_logsz (size))
1693 set_other_error (mismatch_detail, idx,
1694 _("invalid shift amount"));
1697 /* Only UXTW, LSL, SXTW and SXTX are the accepted extend/shift operators. */
1699 switch (opnd->shifter.kind)
1701 case AARCH64_MOD_UXTW:
1702 case AARCH64_MOD_LSL:
1703 case AARCH64_MOD_SXTW:
1704 case AARCH64_MOD_SXTX: break;
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid extend/shift operator"));
1712 case AARCH64_OPND_ADDR_UIMM12:
1713 imm = opnd->addr.offset.imm;
1714 /* Get the size of the data element that is accessed, which may be
1715 different from that of the source register size,
1716 e.g. in strb/ldrb. */
1717 size = aarch64_get_qualifier_esize (qualifier);
1718 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1720 set_offset_out_of_range_error (mismatch_detail, idx,
1724 if (!value_aligned_p (opnd->addr.offset.imm, size))
1726 set_unaligned_error (mismatch_detail, idx, size);
1731 case AARCH64_OPND_ADDR_PCREL14:
1732 case AARCH64_OPND_ADDR_PCREL19:
1733 case AARCH64_OPND_ADDR_PCREL21:
1734 case AARCH64_OPND_ADDR_PCREL26:
1735 imm = opnd->imm.value;
1736 if (operand_need_shift_by_two (get_operand_from_code (type)))
1738 /* The offset value in a PC-relative branch instruction is always
1739 4-byte aligned and is encoded without the lowest 2 bits. */
1740 if (!value_aligned_p (imm, 4))
1742 set_unaligned_error (mismatch_detail, idx, 4);
1745 /* Right shift by 2 so that we can carry out the following check
1749 size = get_operand_fields_width (get_operand_from_code (type));
1750 if (!value_fit_signed_field_p (imm, size))
1752 set_other_error (mismatch_detail, idx,
1753 _("immediate out of range"));
1758 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1760 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1761 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1765 assert (!opnd->addr.offset.is_reg);
1766 assert (opnd->addr.preind);
1767 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1770 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1771 || (opnd->shifter.operator_present
1772 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1774 set_other_error (mismatch_detail, idx,
1775 _("invalid addressing mode"));
1778 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1780 set_offset_out_of_range_error (mismatch_detail, idx,
1781 min_value, max_value);
1784 if (!value_aligned_p (opnd->addr.offset.imm, num))
1786 set_unaligned_error (mismatch_detail, idx, num);
1791 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1794 goto sve_imm_offset_vl;
1796 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1799 goto sve_imm_offset_vl;
1801 case AARCH64_OPND_SVE_ADDR_RI_U6:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1803 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1804 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1808 assert (!opnd->addr.offset.is_reg);
1809 assert (opnd->addr.preind);
1810 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1813 if (opnd->shifter.operator_present
1814 || opnd->shifter.amount_present)
1816 set_other_error (mismatch_detail, idx,
1817 _("invalid addressing mode"));
1820 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1822 set_offset_out_of_range_error (mismatch_detail, idx,
1823 min_value, max_value);
1826 if (!value_aligned_p (opnd->addr.offset.imm, num))
1828 set_unaligned_error (mismatch_detail, idx, num);
1833 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1836 goto sve_imm_offset;
1838 case AARCH64_OPND_SVE_ADDR_R:
1839 case AARCH64_OPND_SVE_ADDR_RR:
1840 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1841 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1842 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1843 case AARCH64_OPND_SVE_ADDR_RX:
1844 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1845 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1846 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1847 case AARCH64_OPND_SVE_ADDR_RZ:
1848 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1849 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1850 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1851 modifiers = 1 << AARCH64_MOD_LSL;
1853 assert (opnd->addr.offset.is_reg);
1854 assert (opnd->addr.preind);
1855 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1856 && opnd->addr.offset.regno == 31)
1858 set_other_error (mismatch_detail, idx,
1859 _("index register xzr is not allowed"));
1862 if (((1 << opnd->shifter.kind) & modifiers) == 0
1863 || (opnd->shifter.amount
1864 != get_operand_specific_data (&aarch64_operands[type])))
1866 set_other_error (mismatch_detail, idx,
1867 _("invalid addressing mode"));
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1878 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1879 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1880 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1881 goto sve_rr_operand;
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1884 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1885 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1886 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1889 goto sve_imm_offset;
1891 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1892 modifiers = 1 << AARCH64_MOD_LSL;
1894 assert (opnd->addr.offset.is_reg);
1895 assert (opnd->addr.preind);
1896 if (((1 << opnd->shifter.kind) & modifiers) == 0
1897 || opnd->shifter.amount < 0
1898 || opnd->shifter.amount > 3)
1900 set_other_error (mismatch_detail, idx,
1901 _("invalid addressing mode"));
1906 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1907 modifiers = (1 << AARCH64_MOD_SXTW);
1908 goto sve_zz_operand;
1910 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1911 modifiers = 1 << AARCH64_MOD_UXTW;
1912 goto sve_zz_operand;
1919 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1920 if (type == AARCH64_OPND_LEt)
1922 /* Get the upper bound for the element index. */
1923 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1924 if (!value_in_range_p (opnd->reglist.index, 0, num))
1926 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1930 /* The opcode dependent area stores the number of elements in
1931 each structure to be loaded/stored. */
1932 num = get_opcode_dependent_value (opcode);
1935 case AARCH64_OPND_LVt:
1936 assert (num >= 1 && num <= 4);
1937 /* Unless it is LD1/ST1, the number of registers should equal the
1938 number of structure elements. */
1939 if (num != 1 && opnd->reglist.num_regs != num)
1941 set_reg_list_error (mismatch_detail, idx, num);
1945 case AARCH64_OPND_LVt_AL:
1946 case AARCH64_OPND_LEt:
1947 assert (num >= 1 && num <= 4);
1948 /* The number of registers should equal the number of structure
1949 elements. */
1950 if (opnd->reglist.num_regs != num)
1952 set_reg_list_error (mismatch_detail, idx, num);
1961 case AARCH64_OPND_CLASS_IMMEDIATE:
1962 /* Constraint check on immediate operand. */
1963 imm = opnd->imm.value;
1964 /* E.g. imm_0_31 constrains value to be 0..31. */
1965 if (qualifier_value_in_range_constraint_p (qualifier)
1966 && !value_in_range_p (imm, get_lower_bound (qualifier),
1967 get_upper_bound (qualifier)))
1969 set_imm_out_of_range_error (mismatch_detail, idx,
1970 get_lower_bound (qualifier),
1971 get_upper_bound (qualifier));
1977 case AARCH64_OPND_AIMM:
1978 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1980 set_other_error (mismatch_detail, idx,
1981 _("invalid shift operator"));
1984 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1986 set_other_error (mismatch_detail, idx,
1987 _("shift amount must be 0 or 12"));
1990 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1992 set_other_error (mismatch_detail, idx,
1993 _("immediate out of range"));
1998 case AARCH64_OPND_HALF:
1999 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2000 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2002 set_other_error (mismatch_detail, idx,
2003 _("invalid shift operator"));
2006 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2007 if (!value_aligned_p (opnd->shifter.amount, 16))
2009 set_other_error (mismatch_detail, idx,
2010 _("shift amount must be a multiple of 16"));
2013 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2015 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2019 if (opnd->imm.value < 0)
2021 set_other_error (mismatch_detail, idx,
2022 _("negative immediate value not allowed"));
2025 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2027 set_other_error (mismatch_detail, idx,
2028 _("immediate out of range"));
2033 case AARCH64_OPND_IMM_MOV:
2035 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2036 imm = opnd->imm.value;
2040 case OP_MOV_IMM_WIDEN:
2043 case OP_MOV_IMM_WIDE:
2044 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2046 set_other_error (mismatch_detail, idx,
2047 _("immediate out of range"));
2051 case OP_MOV_IMM_LOG:
2052 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2054 set_other_error (mismatch_detail, idx,
2055 _("immediate out of range"));
2066 case AARCH64_OPND_NZCV:
2067 case AARCH64_OPND_CCMP_IMM:
2068 case AARCH64_OPND_EXCEPTION:
2069 case AARCH64_OPND_UIMM4:
2070 case AARCH64_OPND_UIMM7:
2071 case AARCH64_OPND_UIMM3_OP1:
2072 case AARCH64_OPND_UIMM3_OP2:
2073 case AARCH64_OPND_SVE_UIMM3:
2074 case AARCH64_OPND_SVE_UIMM7:
2075 case AARCH64_OPND_SVE_UIMM8:
2076 case AARCH64_OPND_SVE_UIMM8_53:
2077 size = get_operand_fields_width (get_operand_from_code (type));
2079 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2081 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2087 case AARCH64_OPND_SIMM5:
2088 case AARCH64_OPND_SVE_SIMM5:
2089 case AARCH64_OPND_SVE_SIMM5B:
2090 case AARCH64_OPND_SVE_SIMM6:
2091 case AARCH64_OPND_SVE_SIMM8:
2092 size = get_operand_fields_width (get_operand_from_code (type));
2094 if (!value_fit_signed_field_p (opnd->imm.value, size))
2096 set_imm_out_of_range_error (mismatch_detail, idx,
2098 (1 << (size - 1)) - 1);
2103 case AARCH64_OPND_WIDTH:
2104 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2105 && opnds[0].type == AARCH64_OPND_Rd);
2106 size = get_upper_bound (qualifier);
2107 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2108 /* lsb+width <= reg.size */
2110 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2111 size - opnds[idx-1].imm.value);
2116 case AARCH64_OPND_LIMM:
2117 case AARCH64_OPND_SVE_LIMM:
2119 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2120 uint64_t uimm = opnd->imm.value;
2121 if (opcode->op == OP_BIC)
2123 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2125 set_other_error (mismatch_detail, idx,
2126 _("immediate out of range"));
2132 case AARCH64_OPND_IMM0:
2133 case AARCH64_OPND_FPIMM0:
2134 if (opnd->imm.value != 0)
2136 set_other_error (mismatch_detail, idx,
2137 _("immediate zero expected"));
2142 case AARCH64_OPND_IMM_ROT1:
2143 case AARCH64_OPND_IMM_ROT2:
2144 case AARCH64_OPND_SVE_IMM_ROT2:
2145 if (opnd->imm.value != 0
2146 && opnd->imm.value != 90
2147 && opnd->imm.value != 180
2148 && opnd->imm.value != 270)
2150 set_other_error (mismatch_detail, idx,
2151 _("rotate expected to be 0, 90, 180 or 270"));
2156 case AARCH64_OPND_IMM_ROT3:
2157 case AARCH64_OPND_SVE_IMM_ROT1:
2158 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2160 set_other_error (mismatch_detail, idx,
2161 _("rotate expected to be 90 or 270"));
2166 case AARCH64_OPND_SHLL_IMM:
2168 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2169 if (opnd->imm.value != size)
2171 set_other_error (mismatch_detail, idx,
2172 _("invalid shift amount"));
2177 case AARCH64_OPND_IMM_VLSL:
2178 size = aarch64_get_qualifier_esize (qualifier);
2179 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2181 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2187 case AARCH64_OPND_IMM_VLSR:
2188 size = aarch64_get_qualifier_esize (qualifier);
2189 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2191 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2196 case AARCH64_OPND_SIMD_IMM:
2197 case AARCH64_OPND_SIMD_IMM_SFT:
2198 /* Qualifier check. */
2201 case AARCH64_OPND_QLF_LSL:
2202 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2204 set_other_error (mismatch_detail, idx,
2205 _("invalid shift operator"));
2209 case AARCH64_OPND_QLF_MSL:
2210 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2212 set_other_error (mismatch_detail, idx,
2213 _("invalid shift operator"));
2217 case AARCH64_OPND_QLF_NIL:
2218 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2220 set_other_error (mismatch_detail, idx,
2221 _("shift is not permitted"));
2229 /* Is the immediate valid? */
2231 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2233 /* uimm8 or simm8 */
2234 if (!value_in_range_p (opnd->imm.value, -128, 255))
2236 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2240 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2243 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2244 ffffffffgggggggghhhhhhhh'. */
2245 set_other_error (mismatch_detail, idx,
2246 _("invalid value for immediate"));
2249 /* Is the shift amount valid? */
2250 switch (opnd->shifter.kind)
2252 case AARCH64_MOD_LSL:
2253 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2254 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2256 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2260 if (!value_aligned_p (opnd->shifter.amount, 8))
2262 set_unaligned_error (mismatch_detail, idx, 8);
2266 case AARCH64_MOD_MSL:
2267 /* Only 8 and 16 are valid shift amounts. */
2268 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2270 set_other_error (mismatch_detail, idx,
2271 _("shift amount must be 0 or 16"));
2276 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2278 set_other_error (mismatch_detail, idx,
2279 _("invalid shift operator"));
2286 case AARCH64_OPND_FPIMM:
2287 case AARCH64_OPND_SIMD_FPIMM:
2288 case AARCH64_OPND_SVE_FPIMM8:
2289 if (opnd->imm.is_fp == 0)
2291 set_other_error (mismatch_detail, idx,
2292 _("floating-point immediate expected"));
2295 /* The value is expected to be an 8-bit floating-point constant with
2296 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2297 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2299 if (!value_in_range_p (opnd->imm.value, 0, 255))
2301 set_other_error (mismatch_detail, idx,
2302 _("immediate out of range"));
2305 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2307 set_other_error (mismatch_detail, idx,
2308 _("invalid shift operator"));
2313 case AARCH64_OPND_SVE_AIMM:
2316 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2317 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2318 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2319 uvalue = opnd->imm.value;
2320 shift = opnd->shifter.amount;
2325 set_other_error (mismatch_detail, idx,
2326 _("no shift amount allowed for"
2327 " 8-bit constants"));
2333 if (shift != 0 && shift != 8)
2335 set_other_error (mismatch_detail, idx,
2336 _("shift amount must be 0 or 8"));
2339 if (shift == 0 && (uvalue & 0xff) == 0)
2342 uvalue = (int64_t) uvalue / 256;
2346 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2348 set_other_error (mismatch_detail, idx,
2349 _("immediate too big for element size"));
2352 uvalue = (uvalue - min_value) & mask;
2355 set_other_error (mismatch_detail, idx,
2356 _("invalid arithmetic immediate"));
2361 case AARCH64_OPND_SVE_ASIMM:
2365 case AARCH64_OPND_SVE_I1_HALF_ONE:
2366 assert (opnd->imm.is_fp);
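/* 0x3f000000 and 0x3f800000 are the single-precision bit patterns of
   0.5 and 1.0 respectively. */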
2367 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2369 set_other_error (mismatch_detail, idx,
2370 _("floating-point value must be 0.5 or 1.0"));
2375 case AARCH64_OPND_SVE_I1_HALF_TWO:
2376 assert (opnd->imm.is_fp);
2377 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2379 set_other_error (mismatch_detail, idx,
2380 _("floating-point value must be 0.5 or 2.0"));
2385 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2386 assert (opnd->imm.is_fp);
2387 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2389 set_other_error (mismatch_detail, idx,
2390 _("floating-point value must be 0.0 or 1.0"));
2395 case AARCH64_OPND_SVE_INV_LIMM:
2397 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2398 uint64_t uimm = ~opnd->imm.value;
2399 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2401 set_other_error (mismatch_detail, idx,
2402 _("immediate out of range"));
2408 case AARCH64_OPND_SVE_LIMM_MOV:
2410 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2411 uint64_t uimm = opnd->imm.value;
2412 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2414 set_other_error (mismatch_detail, idx,
2415 _("immediate out of range"));
2418 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2420 set_other_error (mismatch_detail, idx,
2421 _("invalid replicated MOV immediate"));
2427 case AARCH64_OPND_SVE_PATTERN_SCALED:
2428 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2429 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2431 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2436 case AARCH64_OPND_SVE_SHLIMM_PRED:
2437 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2438 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2439 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2441 set_imm_out_of_range_error (mismatch_detail, idx,
2447 case AARCH64_OPND_SVE_SHRIMM_PRED:
2448 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2449 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2450 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2452 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2462 case AARCH64_OPND_CLASS_SYSTEM:
2465 case AARCH64_OPND_PSTATEFIELD:
2466 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2469 The immediate must be #0 or #1. */
2470 if ((opnd->pstatefield == 0x03 /* UAO. */
2471 || opnd->pstatefield == 0x04 /* PAN. */
2472 || opnd->pstatefield == 0x1a) /* DIT. */
2473 && opnds[1].imm.value > 1)
2475 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2478 /* MSR SPSel, #uimm4
2479 Uses uimm4 as a control value to select the stack pointer: if
2480 bit 0 is set it selects the current exception level's stack
2481 pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
2482 Bits 1 to 3 of uimm4 are reserved and should be zero. */
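/* e.g. MSR SPSel, #1 selects SP_ELx and MSR SPSel, #0 selects SP_EL0. */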
2483 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2485 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2494 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2495 /* Get the upper bound for the element index. */
2496 if (opcode->op == OP_FCMLA_ELEM)
2497 /* FCMLA index range depends on the vector size of other operands
2498 and is halved because complex numbers take two elements. */
2499 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2500 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2503 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
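/* e.g. FCMLA <Vd>.4S, <Vn>.4S, <Vm>.S[<index>]: 4 elements * 4 bytes / 2
   gives 8, then 8 / 4 - 1 = 1, so the index range is 0..1. */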
2505 /* Index out-of-range. */
2506 if (!value_in_range_p (opnd->reglane.index, 0, num))
2508 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2511 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2512 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2513 number is encoded in "size:M:Rm":
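/* e.g. in the half-precision variant only V0-V15 can be named, since
   the register number is then a 4-bit field. */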
2519 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2520 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2522 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2527 case AARCH64_OPND_CLASS_MODIFIED_REG:
2528 assert (idx == 1 || idx == 2);
2531 case AARCH64_OPND_Rm_EXT:
2532 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2533 && opnd->shifter.kind != AARCH64_MOD_LSL)
2535 set_other_error (mismatch_detail, idx,
2536 _("extend operator expected"));
2539 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2540 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2541 only valid when "Rd" or "Rn" is '11111', and is preferred in that case. */
2543 if (!aarch64_stack_pointer_p (opnds + 0)
2544 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2546 if (!opnd->shifter.operator_present)
2548 set_other_error (mismatch_detail, idx,
2549 _("missing extend operator"));
2552 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2554 set_other_error (mismatch_detail, idx,
2555 _("'LSL' operator not allowed"));
2559 assert (opnd->shifter.operator_present /* Default to LSL. */
2560 || opnd->shifter.kind == AARCH64_MOD_LSL);
2561 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2563 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2566 /* In the 64-bit form, the final register operand is written as Wm
2567 for all but the (possibly omitted) UXTX/LSL and SXTX operators.
2569 N.B. GAS allows an X register to be used with any operator as a
2570 programming convenience. */
2571 if (qualifier == AARCH64_OPND_QLF_X
2572 && opnd->shifter.kind != AARCH64_MOD_LSL
2573 && opnd->shifter.kind != AARCH64_MOD_UXTX
2574 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2576 set_other_error (mismatch_detail, idx, _("W register expected"));
2581 case AARCH64_OPND_Rm_SFT:
2582 /* ROR is not available to the shifted register operand in
2583 arithmetic instructions. */
2584 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2586 set_other_error (mismatch_detail, idx,
2587 _("shift operator expected"));
2590 if (opnd->shifter.kind == AARCH64_MOD_ROR
2591 && opcode->iclass != log_shift)
2593 set_other_error (mismatch_detail, idx,
2594 _("'ROR' operator not allowed"));
2597 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2598 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2600 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2617 /* Main entrypoint for the operand constraint checking.
2619 Return 1 if operands of *INST meet the constraint applied by the operand
2620 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2621 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2622 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2623 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2624 error kind when it is notified that an instruction does not pass the check).
2626 Un-determined operand qualifiers may get established during the process. */
2629 aarch64_match_operands_constraint (aarch64_inst *inst,
2630 aarch64_operand_error *mismatch_detail)
2634 DEBUG_TRACE ("enter");
2636 /* Check for cases where a source register needs to be the same as the
2637 destination register. Do this before matching qualifiers since if
2638 an instruction has both invalid tying and invalid qualifiers,
2639 the error about qualifiers would suggest several alternative
2640 instructions that also have invalid tying. */
2641 i = inst->opcode->tied_operand;
2642 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2644 if (mismatch_detail)
2646 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2647 mismatch_detail->index = i;
2648 mismatch_detail->error = NULL;
2653 /* Match operands' qualifier.
2654 *INST has already had qualifiers established for some, if not all, of
2655 its operands; we need to find out whether these established
2656 qualifiers match one of the qualifier sequences in
2657 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2658 with the corresponding qualifier in such a sequence.
2659 Only basic operand constraint checking is done here; the more thorough
2660 constraint checking will be carried out by operand_general_constraint_met_p,
2661 which has to be called after this in order to get all of the operands'
2662 qualifiers established. */
2663 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2665 DEBUG_TRACE ("FAIL on operand qualifier matching");
2666 if (mismatch_detail)
2668 /* Return an error type to indicate that it is a qualifier-matching
2669 failure; we don't care which operand it is, as there is enough
2670 information in the opcode table to reproduce it. */
2671 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2672 mismatch_detail->index = -1;
2673 mismatch_detail->error = NULL;
2678 /* Match operands' constraint. */
2679 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2681 enum aarch64_opnd type = inst->opcode->operands[i];
2682 if (type == AARCH64_OPND_NIL)
2684 if (inst->operands[i].skip)
2686 DEBUG_TRACE ("skip the incomplete operand %d", i);
2689 if (operand_general_constraint_met_p (inst->operands, i, type,
2690 inst->opcode, mismatch_detail) == 0)
2692 DEBUG_TRACE ("FAIL on operand %d", i);
2697 DEBUG_TRACE ("PASS");
2702 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2703 Also updates the TYPE of each INST->OPERANDS with the corresponding
2704 value of OPCODE->OPERANDS.
2706 Note that some operand qualifiers may need to be manually cleared by
2707 the caller before it further calls aarch64_opcode_encode; doing this
2708 helps the qualifier matching facilities work properly. */
2711 const aarch64_opcode*
2712 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2715 const aarch64_opcode *old = inst->opcode;
2717 inst->opcode = opcode;
2719 /* Update the operand types. */
2720 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2722 inst->operands[i].type = opcode->operands[i];
2723 if (opcode->operands[i] == AARCH64_OPND_NIL)
2727 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2733 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2736 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2737 if (operands[i] == operand)
2739 else if (operands[i] == AARCH64_OPND_NIL)
2744 /* R0...R30, followed by FOR31. */
2745 #define BANK(R, FOR31) \
2746 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2747 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2748 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2749 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2750 /* [0][0] 32-bit integer regs with sp Wn
2751 [0][1] 64-bit integer regs with sp Xn sf=1
2752 [1][0] 32-bit integer regs with #0 Wn
2753 [1][1] 64-bit integer regs with #0 Xn sf=1 */
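/* e.g. int_reg[0][1][31] is "sp" while int_reg[1][0][31] is "wzr". */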
2754 static const char *int_reg[2][2][32] = {
2755 #define R32(X) "w" #X
2756 #define R64(X) "x" #X
2757 { BANK (R32, "wsp"), BANK (R64, "sp") },
2758 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2763 /* Names of the SVE vector registers, first with .S suffixes,
2764 then with .D suffixes. */
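/* e.g. sve_reg[0][5] is "z5.s" and sve_reg[1][5] is "z5.d". */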
2766 static const char *sve_reg[2][32] = {
2767 #define ZS(X) "z" #X ".s"
2768 #define ZD(X) "z" #X ".d"
2769 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2775 /* Return the integer register name.
2776 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2778 static inline const char *
2779 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2781 const int has_zr = sp_reg_p ? 0 : 1;
2782 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2783 return int_reg[has_zr][is_64][regno];
2786 /* Like get_int_reg_name, but IS_64 is always 1. */
2788 static inline const char *
2789 get_64bit_int_reg_name (int regno, int sp_reg_p)
2791 const int has_zr = sp_reg_p ? 0 : 1;
2792 return int_reg[has_zr][1][regno];
2795 /* Get the name of the integer offset register in OPND, using the shift type
2796 to decide whether it's a word or doubleword. */
2798 static inline const char *
2799 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2801 switch (opnd->shifter.kind)
2803 case AARCH64_MOD_UXTW:
2804 case AARCH64_MOD_SXTW:
2805 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2807 case AARCH64_MOD_LSL:
2808 case AARCH64_MOD_SXTX:
2809 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2816 /* Get the name of the SVE vector offset register in OPND, using the operand
2817 qualifier to decide whether the suffix should be .S or .D. */
2819 static inline const char *
2820 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2822 assert (qualifier == AARCH64_OPND_QLF_S_S
2823 || qualifier == AARCH64_OPND_QLF_S_D);
2824 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2827 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2847 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2848 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2849 (depending on the type of the instruction). IMM8 will be expanded to a
2850 single-precision floating-point value (SIZE == 4) or a double-precision
2851 floating-point value (SIZE == 8). A half-precision floating-point value
2852 (SIZE == 2) is expanded to a single-precision floating-point value. The
2853 expanded value is returned. */
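/* e.g. expand_fp_imm (4, 0x70) returns 0x3f800000, i.e. 1.0f
   (sign 0, biased exponent 0x7f, zero fraction). */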
2856 expand_fp_imm (int size, uint32_t imm8)
2859 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2861 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2862 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2863 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2864 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2865 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2868 imm = (imm8_7 << (63-32)) /* imm8<7> */
2869 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2870 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2871 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2872 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2875 else if (size == 4 || size == 2)
2877 imm = (imm8_7 << 31) /* imm8<7> */
2878 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2879 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2880 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2884 /* An unsupported size. */
2891 /* Produce the string representation of the register list operand *OPND
2892 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2893 the register name that comes before the register number, such as "v". */
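/* e.g. a four-register list starting at v4 with the 16B qualifier is
   printed in the hyphenated form "{v4.16b-v7.16b}". */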
2895 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2898 const int num_regs = opnd->reglist.num_regs;
2899 const int first_reg = opnd->reglist.first_regno;
2900 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2901 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2902 char tb[8]; /* Temporary buffer. */
2904 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2905 assert (num_regs >= 1 && num_regs <= 4);
2907 /* Prepare the index if any. */
2908 if (opnd->reglist.has_index)
2909 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2910 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2914 /* The hyphenated form is preferred for disassembly if there are
2915 more than two registers in the list, and the register numbers
2916 are monotonically increasing in increments of one. */
2917 if (num_regs > 2 && last_reg > first_reg)
2918 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2919 prefix, last_reg, qlf_name, tb);
2922 const int reg0 = first_reg;
2923 const int reg1 = (first_reg + 1) & 0x1f;
2924 const int reg2 = (first_reg + 2) & 0x1f;
2925 const int reg3 = (first_reg + 3) & 0x1f;
2930 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2933 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2934 prefix, reg1, qlf_name, tb);
2937 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2938 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2939 prefix, reg2, qlf_name, tb);
2942 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2943 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2944 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2950 /* Print the register+immediate address in OPND to BUF, which has SIZE
2951 characters. BASE is the name of the base register. */
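/* e.g. "[x0, #16]!" (pre-indexed), "[x0], #16" (post-indexed) or
   "[x0, #3, mul vl]" (SVE vector-length scaled offset). */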
2954 print_immediate_offset_address (char *buf, size_t size,
2955 const aarch64_opnd_info *opnd,
2958 if (opnd->addr.writeback)
2960 if (opnd->addr.preind)
2961 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2963 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2967 if (opnd->shifter.operator_present)
2969 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2970 snprintf (buf, size, "[%s, #%d, mul vl]",
2971 base, opnd->addr.offset.imm);
2973 else if (opnd->addr.offset.imm)
2974 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2976 snprintf (buf, size, "[%s]", base);
2980 /* Produce the string representation of the register offset address operand
2981 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2982 the names of the base and offset registers. */
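/* e.g. "[x0, w1, uxtw #2]", or simply "[x0, x1]" when the amount is
   zero and the operator is LSL. */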
2984 print_register_offset_address (char *buf, size_t size,
2985 const aarch64_opnd_info *opnd,
2986 const char *base, const char *offset)
2988 char tb[16]; /* Temporary buffer. */
2989 bfd_boolean print_extend_p = TRUE;
2990 bfd_boolean print_amount_p = TRUE;
2991 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2993 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2994 || !opnd->shifter.amount_present))
2996 /* Don't print the shift/extend amount when the amount is zero and
2997 this is not the special case of an 8-bit load/store instruction. */
2998 print_amount_p = FALSE;
2999 /* Likewise, no need to print the shift operator LSL in such a case. */
3001 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3002 print_extend_p = FALSE;
3005 /* Prepare for the extend/shift. */
3009 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3010 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3011 (opnd->shifter.amount % 100));
3013 snprintf (tb, sizeof (tb), ", %s", shift_name);
3018 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3021 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3022 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3023 PC, PCREL_P and ADDRESS are used to pass in and return information about
3024 the PC-relative address calculation, where the PC value is passed in
3025 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3026 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3027 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3029 The function serves both the disassembler and the assembler diagnostics
3030 issuer, which is the reason why it lives in this file. */
3033 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3034 const aarch64_opcode *opcode,
3035 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3036 bfd_vma *address, char** notes ATTRIBUTE_UNUSED)
3038 unsigned int i, num_conds;
3039 const char *name = NULL;
3040 const aarch64_opnd_info *opnd = opnds + idx;
3041 enum aarch64_modifier_kind kind;
3042 uint64_t addr, enum_value;
3050 case AARCH64_OPND_Rd:
3051 case AARCH64_OPND_Rn:
3052 case AARCH64_OPND_Rm:
3053 case AARCH64_OPND_Rt:
3054 case AARCH64_OPND_Rt2:
3055 case AARCH64_OPND_Rs:
3056 case AARCH64_OPND_Ra:
3057 case AARCH64_OPND_Rt_SYS:
3058 case AARCH64_OPND_PAIRREG:
3059 case AARCH64_OPND_SVE_Rm:
3060 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3061 the <ic_op>, therefore we use opnd->present to override the
3062 generic optional-ness information. */
3063 if (opnd->type == AARCH64_OPND_Rt_SYS)
3068 /* Omit the operand, e.g. RET. */
3069 else if (optional_operand_p (opcode, idx)
3071 == get_optional_operand_default_value (opcode)))
3073 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3074 || opnd->qualifier == AARCH64_OPND_QLF_X);
3075 snprintf (buf, size, "%s",
3076 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3079 case AARCH64_OPND_Rd_SP:
3080 case AARCH64_OPND_Rn_SP:
3081 case AARCH64_OPND_SVE_Rn_SP:
3082 case AARCH64_OPND_Rm_SP:
3083 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3084 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3085 || opnd->qualifier == AARCH64_OPND_QLF_X
3086 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3087 snprintf (buf, size, "%s",
3088 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3091 case AARCH64_OPND_Rm_EXT:
3092 kind = opnd->shifter.kind;
3093 assert (idx == 1 || idx == 2);
3094 if ((aarch64_stack_pointer_p (opnds)
3095 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3096 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3097 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3098 && kind == AARCH64_MOD_UXTW)
3099 || (opnd->qualifier == AARCH64_OPND_QLF_X
3100 && kind == AARCH64_MOD_UXTX)))
3102 /* 'LSL' is the preferred form in this case. */
3103 kind = AARCH64_MOD_LSL;
3104 if (opnd->shifter.amount == 0)
3106 /* Shifter omitted. */
3107 snprintf (buf, size, "%s",
3108 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3112 if (opnd->shifter.amount)
3113 snprintf (buf, size, "%s, %s #%" PRIi64,
3114 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3115 aarch64_operand_modifiers[kind].name,
3116 opnd->shifter.amount);
3118 snprintf (buf, size, "%s, %s",
3119 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3120 aarch64_operand_modifiers[kind].name);
3123 case AARCH64_OPND_Rm_SFT:
3124 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3125 || opnd->qualifier == AARCH64_OPND_QLF_X);
3126 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3127 snprintf (buf, size, "%s",
3128 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3130 snprintf (buf, size, "%s, %s #%" PRIi64,
3131 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3132 aarch64_operand_modifiers[opnd->shifter.kind].name,
3133 opnd->shifter.amount);
3136 case AARCH64_OPND_Fd:
3137 case AARCH64_OPND_Fn:
3138 case AARCH64_OPND_Fm:
3139 case AARCH64_OPND_Fa:
3140 case AARCH64_OPND_Ft:
3141 case AARCH64_OPND_Ft2:
3142 case AARCH64_OPND_Sd:
3143 case AARCH64_OPND_Sn:
3144 case AARCH64_OPND_Sm:
3145 case AARCH64_OPND_SVE_VZn:
3146 case AARCH64_OPND_SVE_Vd:
3147 case AARCH64_OPND_SVE_Vm:
3148 case AARCH64_OPND_SVE_Vn:
3149 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3153 case AARCH64_OPND_Va:
3154 case AARCH64_OPND_Vd:
3155 case AARCH64_OPND_Vn:
3156 case AARCH64_OPND_Vm:
3157 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3158 aarch64_get_qualifier_name (opnd->qualifier));
3161 case AARCH64_OPND_Ed:
3162 case AARCH64_OPND_En:
3163 case AARCH64_OPND_Em:
3164 case AARCH64_OPND_Em16:
3165 case AARCH64_OPND_SM3_IMM2:
3166 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3167 aarch64_get_qualifier_name (opnd->qualifier),
3168 opnd->reglane.index);
3171 case AARCH64_OPND_VdD1:
3172 case AARCH64_OPND_VnD1:
3173 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3176 case AARCH64_OPND_LVn:
3177 case AARCH64_OPND_LVt:
3178 case AARCH64_OPND_LVt_AL:
3179 case AARCH64_OPND_LEt:
3180 print_register_list (buf, size, opnd, "v");
3183 case AARCH64_OPND_SVE_Pd:
3184 case AARCH64_OPND_SVE_Pg3:
3185 case AARCH64_OPND_SVE_Pg4_5:
3186 case AARCH64_OPND_SVE_Pg4_10:
3187 case AARCH64_OPND_SVE_Pg4_16:
3188 case AARCH64_OPND_SVE_Pm:
3189 case AARCH64_OPND_SVE_Pn:
3190 case AARCH64_OPND_SVE_Pt:
3191 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3192 snprintf (buf, size, "p%d", opnd->reg.regno);
3193 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3194 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3195 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3196 aarch64_get_qualifier_name (opnd->qualifier));
3198 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3199 aarch64_get_qualifier_name (opnd->qualifier));
3202 case AARCH64_OPND_SVE_Za_5:
3203 case AARCH64_OPND_SVE_Za_16:
3204 case AARCH64_OPND_SVE_Zd:
3205 case AARCH64_OPND_SVE_Zm_5:
3206 case AARCH64_OPND_SVE_Zm_16:
3207 case AARCH64_OPND_SVE_Zn:
3208 case AARCH64_OPND_SVE_Zt:
3209 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3210 snprintf (buf, size, "z%d", opnd->reg.regno);
3212 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3213 aarch64_get_qualifier_name (opnd->qualifier));
3216 case AARCH64_OPND_SVE_ZnxN:
3217 case AARCH64_OPND_SVE_ZtxN:
3218 print_register_list (buf, size, opnd, "z");
3221 case AARCH64_OPND_SVE_Zm3_INDEX:
3222 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3223 case AARCH64_OPND_SVE_Zm4_INDEX:
3224 case AARCH64_OPND_SVE_Zn_INDEX:
3225 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3226 aarch64_get_qualifier_name (opnd->qualifier),
3227 opnd->reglane.index);
3230 case AARCH64_OPND_CRn:
3231 case AARCH64_OPND_CRm:
3232 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3235 case AARCH64_OPND_IDX:
3236 case AARCH64_OPND_MASK:
3237 case AARCH64_OPND_IMM:
3238 case AARCH64_OPND_IMM_2:
3239 case AARCH64_OPND_WIDTH:
3240 case AARCH64_OPND_UIMM3_OP1:
3241 case AARCH64_OPND_UIMM3_OP2:
3242 case AARCH64_OPND_BIT_NUM:
3243 case AARCH64_OPND_IMM_VLSL:
3244 case AARCH64_OPND_IMM_VLSR:
3245 case AARCH64_OPND_SHLL_IMM:
3246 case AARCH64_OPND_IMM0:
3247 case AARCH64_OPND_IMMR:
3248 case AARCH64_OPND_IMMS:
3249 case AARCH64_OPND_FBITS:
3250 case AARCH64_OPND_SIMM5:
3251 case AARCH64_OPND_SVE_SHLIMM_PRED:
3252 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3253 case AARCH64_OPND_SVE_SHRIMM_PRED:
3254 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3255 case AARCH64_OPND_SVE_SIMM5:
3256 case AARCH64_OPND_SVE_SIMM5B:
3257 case AARCH64_OPND_SVE_SIMM6:
3258 case AARCH64_OPND_SVE_SIMM8:
3259 case AARCH64_OPND_SVE_UIMM3:
3260 case AARCH64_OPND_SVE_UIMM7:
3261 case AARCH64_OPND_SVE_UIMM8:
3262 case AARCH64_OPND_SVE_UIMM8_53:
3263 case AARCH64_OPND_IMM_ROT1:
3264 case AARCH64_OPND_IMM_ROT2:
3265 case AARCH64_OPND_IMM_ROT3:
3266 case AARCH64_OPND_SVE_IMM_ROT1:
3267 case AARCH64_OPND_SVE_IMM_ROT2:
3268 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3271 case AARCH64_OPND_SVE_I1_HALF_ONE:
3272 case AARCH64_OPND_SVE_I1_HALF_TWO:
3273 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3276 c.i = opnd->imm.value;
3277 snprintf (buf, size, "#%.1f", c.f);
3281 case AARCH64_OPND_SVE_PATTERN:
3282 if (optional_operand_p (opcode, idx)
3283 && opnd->imm.value == get_optional_operand_default_value (opcode))
3285 enum_value = opnd->imm.value;
3286 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3287 if (aarch64_sve_pattern_array[enum_value])
3288 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3290 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3293 case AARCH64_OPND_SVE_PATTERN_SCALED:
3294 if (optional_operand_p (opcode, idx)
3295 && !opnd->shifter.operator_present
3296 && opnd->imm.value == get_optional_operand_default_value (opcode))
3298 enum_value = opnd->imm.value;
3299 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3300 if (aarch64_sve_pattern_array[opnd->imm.value])
3301 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3303 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3304 if (opnd->shifter.operator_present)
3306 size_t len = strlen (buf);
3307 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3308 aarch64_operand_modifiers[opnd->shifter.kind].name,
3309 opnd->shifter.amount);
3313 case AARCH64_OPND_SVE_PRFOP:
3314 enum_value = opnd->imm.value;
3315 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3316 if (aarch64_sve_prfop_array[enum_value])
3317 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3319 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3322 case AARCH64_OPND_IMM_MOV:
3323 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3325 case 4: /* e.g. MOV Wd, #<imm32>. */
3327 int imm32 = opnd->imm.value;
3328 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3331 case 8: /* e.g. MOV Xd, #<imm64>. */
3332 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3333 opnd->imm.value, opnd->imm.value);
3335 default: assert (0);
3339 case AARCH64_OPND_FPIMM0:
3340 snprintf (buf, size, "#0.0");
3343 case AARCH64_OPND_LIMM:
3344 case AARCH64_OPND_AIMM:
3345 case AARCH64_OPND_HALF:
3346 case AARCH64_OPND_SVE_INV_LIMM:
3347 case AARCH64_OPND_SVE_LIMM:
3348 case AARCH64_OPND_SVE_LIMM_MOV:
3349 if (opnd->shifter.amount)
3350 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3351 opnd->shifter.amount);
3353 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3356 case AARCH64_OPND_SIMD_IMM:
3357 case AARCH64_OPND_SIMD_IMM_SFT:
3358 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3359 || opnd->shifter.kind == AARCH64_MOD_NONE)
3360 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3362 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3363 aarch64_operand_modifiers[opnd->shifter.kind].name,
3364 opnd->shifter.amount);
3367 case AARCH64_OPND_SVE_AIMM:
3368 case AARCH64_OPND_SVE_ASIMM:
3369 if (opnd->shifter.amount)
3370 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3371 opnd->shifter.amount);
3373 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3376 case AARCH64_OPND_FPIMM:
3377 case AARCH64_OPND_SIMD_FPIMM:
3378 case AARCH64_OPND_SVE_FPIMM8:
3379 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3381 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3384 c.i = expand_fp_imm (2, opnd->imm.value);
3385 snprintf (buf, size, "#%.18e", c.f);
3388 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3391 c.i = expand_fp_imm (4, opnd->imm.value);
3392 snprintf (buf, size, "#%.18e", c.f);
3395 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3398 c.i = expand_fp_imm (8, opnd->imm.value);
3399 snprintf (buf, size, "#%.18e", c.d);
3402 default: assert (0);
3406 case AARCH64_OPND_CCMP_IMM:
3407 case AARCH64_OPND_NZCV:
3408 case AARCH64_OPND_EXCEPTION:
3409 case AARCH64_OPND_UIMM4:
3410 case AARCH64_OPND_UIMM7:
3411 if (optional_operand_p (opcode, idx) == TRUE
3412 && (opnd->imm.value ==
3413 (int64_t) get_optional_operand_default_value (opcode)))
3414 /* Omit the operand, e.g. DCPS1. */
3416 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3419 case AARCH64_OPND_COND:
3420 case AARCH64_OPND_COND1:
3421 snprintf (buf, size, "%s", opnd->cond->names[0]);
3422 num_conds = ARRAY_SIZE (opnd->cond->names);
3423 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3425 size_t len = strlen (buf);
3427 snprintf (buf + len, size - len, " // %s = %s",
3428 opnd->cond->names[0], opnd->cond->names[i]);
3430 snprintf (buf + len, size - len, ", %s",
3431 opnd->cond->names[i]);
3435 case AARCH64_OPND_ADDR_ADRP:
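/* The PC is rounded down to a 4KiB page boundary before the page-scaled
   immediate is added; e.g. an ADRP at pc 0x1234 targeting page offset
   0x3000 resolves to address 0x4000. */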
3436 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3442 /* This is not necessary during the disassembling, as print_address_func
3443 in the disassemble_info will take care of the printing. But some
3444 other callers may still be interested in getting the string in *BUF,
3445 so here we do snprintf regardless. */
3446 snprintf (buf, size, "#0x%" PRIx64, addr);
3449 case AARCH64_OPND_ADDR_PCREL14:
3450 case AARCH64_OPND_ADDR_PCREL19:
3451 case AARCH64_OPND_ADDR_PCREL21:
3452 case AARCH64_OPND_ADDR_PCREL26:
3453 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3458 /* This is not necessary during the disassembling, as print_address_func
3459 in the disassemble_info will take care of the printing. But some
3460 other callers may still be interested in getting the string in *BUF,
3461 so here we do snprintf regardless. */
3462 snprintf (buf, size, "#0x%" PRIx64, addr);
3465 case AARCH64_OPND_ADDR_SIMPLE:
3466 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3467 case AARCH64_OPND_SIMD_ADDR_POST:
3468 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3469 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3471 if (opnd->addr.offset.is_reg)
3472 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3474 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3477 snprintf (buf, size, "[%s]", name);
3480 case AARCH64_OPND_ADDR_REGOFF:
3481 case AARCH64_OPND_SVE_ADDR_R:
3482 case AARCH64_OPND_SVE_ADDR_RR:
3483 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3484 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3485 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3486 case AARCH64_OPND_SVE_ADDR_RX:
3487 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3488 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3489 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3490 print_register_offset_address
3491 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3492 get_offset_int_reg_name (opnd));
3495 case AARCH64_OPND_SVE_ADDR_RZ:
3496 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3497 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3498 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3501 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3503 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3504 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3505 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3506 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3507 print_register_offset_address
3508 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3509 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3512 case AARCH64_OPND_ADDR_SIMM7:
3513 case AARCH64_OPND_ADDR_SIMM9:
3514 case AARCH64_OPND_ADDR_SIMM9_2:
3515 case AARCH64_OPND_ADDR_SIMM10:
3516 case AARCH64_OPND_ADDR_OFFSET:
3517 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3518 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3519 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3520 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3521 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3522 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3523 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3524 case AARCH64_OPND_SVE_ADDR_RI_U6:
3525 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3526 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3527 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3528 print_immediate_offset_address
3529 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3532 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3533 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3534 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3535 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3536 print_immediate_offset_address
3538 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3541 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3542 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3543 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3544 print_register_offset_address
3546 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3547 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3550 case AARCH64_OPND_ADDR_UIMM12:
3551 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3552 if (opnd->addr.offset.imm)
3553 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3555 snprintf (buf, size, "[%s]", name);
3558 case AARCH64_OPND_SYSREG:
3559 for (i = 0; aarch64_sys_regs[i].name; ++i)
3561 bfd_boolean exact_match
3562 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3563 == opnd->sysreg.flags;
3565 /* Try to find an exact match, but if that fails, return the first
3566 partial match that was found. */
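/* e.g. dbgdtrrx_el0 and dbgdtrtx_el0 share an encoding and are only
   distinguished by their F_REG_READ/F_REG_WRITE flags. */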
3567 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3568 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3569 && (name == NULL || exact_match))
3571 name = aarch64_sys_regs[i].name;
3579 /* If we didn't match exactly, that means the presence of a flag
3580 indicates what we didn't want for this instruction. e.g. If
3581 F_REG_READ is there, that means we were looking for a write
3582 register. See aarch64_ext_sysreg. */
3583 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3584 *notes = _("reading from a write-only register.");
3585 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3586 *notes = _("writing to a read-only register.");
3591 snprintf (buf, size, "%s", name);
3594 /* Implementation defined system register. */
3595 unsigned int value = opnd->sysreg.value;
3596 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3597 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3602 case AARCH64_OPND_PSTATEFIELD:
3603 for (i = 0; aarch64_pstatefields[i].name; ++i)
3604 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3606 assert (aarch64_pstatefields[i].name);
3607 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3610 case AARCH64_OPND_SYSREG_AT:
3611 case AARCH64_OPND_SYSREG_DC:
3612 case AARCH64_OPND_SYSREG_IC:
3613 case AARCH64_OPND_SYSREG_TLBI:
3614 snprintf (buf, size, "%s", opnd->sysins_op->name);
3617 case AARCH64_OPND_BARRIER:
3618 snprintf (buf, size, "%s", opnd->barrier->name);
3621 case AARCH64_OPND_BARRIER_ISB:
3622 /* Operand can be omitted, e.g. in DCPS1. */
3623 if (! optional_operand_p (opcode, idx)
3624 || (opnd->barrier->value
3625 != get_optional_operand_default_value (opcode)))
3626 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3629 case AARCH64_OPND_PRFOP:
3630 if (opnd->prfop->name != NULL)
3631 snprintf (buf, size, "%s", opnd->prfop->name);
3633 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3636 case AARCH64_OPND_BARRIER_PSB:
3637 snprintf (buf, size, "%s", opnd->hint_option->name);
3645 #define CPENC(op0,op1,crn,crm,op2) \
3646 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3647 /* For 3.9.3 "Instructions for Accessing Special Purpose Registers". */
3648 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3649 /* for 3.9.10 System Instructions */
3650 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
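/* After the final >> 5, CPENC leaves op0 in bits 15:14, op1 in 13:11,
   CRn in 10:7, CRm in 6:3 and op2 in 2:0, matching the
   "s%u_%u_c%u_c%u_%u" decoding used for implementation-defined
   registers above. */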
3669 /* TODO: there is one more issue that needs to be resolved:
3670 1. handle cpu-implementation-defined system registers. */
3671 const aarch64_sys_reg aarch64_sys_regs [] =
3673 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3674 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3675 { "elr_el1", CPEN_(0,C0,1), 0 },
3676 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3677 { "sp_el0", CPEN_(0,C1,0), 0 },
3678 { "spsel", CPEN_(0,C2,0), 0 },
3679 { "daif", CPEN_(3,C2,1), 0 },
3680 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3681 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3682 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3683 { "nzcv", CPEN_(3,C2,0), 0 },
3684 { "fpcr", CPEN_(3,C4,0), 0 },
3685 { "fpsr", CPEN_(3,C4,1), 0 },
3686 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3687 { "dlr_el0", CPEN_(3,C5,1), 0 },
3688 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3689 { "elr_el2", CPEN_(4,C0,1), 0 },
3690 { "sp_el1", CPEN_(4,C1,0), 0 },
3691 { "spsr_irq", CPEN_(4,C3,0), 0 },
3692 { "spsr_abt", CPEN_(4,C3,1), 0 },
3693 { "spsr_und", CPEN_(4,C3,2), 0 },
3694 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3695 { "spsr_el3", CPEN_(6,C0,0), 0 },
3696 { "elr_el3", CPEN_(6,C0,1), 0 },
3697 { "sp_el2", CPEN_(6,C1,0), 0 },
3698 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3699 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3700 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3701 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3702 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3703 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3704 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3705 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3706 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3707 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3708 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3709 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3710 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3711 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3712 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3713 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3714 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3715 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3716 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3717 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3718 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3719 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3720 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3721 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3722 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3723 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3724 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3725 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3726 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3727 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3728 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3729 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3730 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3731 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3732 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3733 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3734 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3735 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3736 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3737 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3738 { "csselr_el1", CPENC(3,2,C0,C0,0), F_REG_READ }, /* RO */
3739 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3740 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3741 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3742 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3743 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3744 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3745 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3746 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3747 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3748 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3749 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3750 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3751 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3752 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3753 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3754 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3755 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3756 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3757 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3758 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3759 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3760 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3761 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3762 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3763 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3764 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3765 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3766 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3767 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3768 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3769 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3770 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3771 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3772 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3773 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3774 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3775 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3776 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3777 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3778 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3779 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3780 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3781 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3782 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3783 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3784 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3785 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3786 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3787 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3788 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3789 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3790 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3791 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3792 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3793 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3794 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3795 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3796 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3797 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3798 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT | F_REG_READ }, /* RO */
3799 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3800 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3801 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3802 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3803 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3804 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3805 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3806 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3807 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3808 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3809 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3810 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3811 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3812 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3813 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3814 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3815 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3816 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3817 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3818 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3819 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3820 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3821 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3822 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3823 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3824 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3825 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3826 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3827 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3828 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3829 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3830 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3831 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3832 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3833 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3834 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3835 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3836 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3837 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3838 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3839 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3840 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3841 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3842 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3843 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3844 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3845 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3846 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3847 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3848 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3849 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3850 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3851 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3852 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3853 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3854 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3855 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3856 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3857 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3858 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3859 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3860 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3861 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3862 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3863 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3864 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3865 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3866 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3867 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3868 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3869 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3870 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3871 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3872 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3873 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3874 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3875 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3876 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3877 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3878 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3879 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3880 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3881 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3882 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), F_REG_READ }, /* r */
3883 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), F_REG_WRITE }, /* w */
3884 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3885 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3886 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3887 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3888 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3889 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3890 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3891 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3892 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3893 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3894 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3895 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3896 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3897 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3898 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3899 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3900 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3901 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3902 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3903 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3904 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3905 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3906 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3907 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3908 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3909 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3910 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3911 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3912 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3913 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3914 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3915 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3916 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3917 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3918 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3919 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3920 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3921 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3922 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3923 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3924 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3925 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3926 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3927 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3928 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3929 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3930 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3931 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3932 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3933 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3934 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3935 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3936 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3937 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3938 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3939 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3940 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3941 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3942 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3943 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3944 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3945 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3946 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3947 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3948 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3949 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3950 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
3951 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
3952 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
3953 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3954 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3955 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3956 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3957 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
3958 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3959 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3960 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3961 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
3962 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3963 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3964 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3965 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3966 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3967 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3968 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT | F_REG_READ }, /* ro */
3969 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3970 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3971 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3972 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3973 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3974 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3975 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
3976 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3977 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
3978 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
3979 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3980 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3981 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3982 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3983 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3984 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3985 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
  { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
  { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
  { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
  { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
  { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
  { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
  { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
  { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
  { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
  { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
  { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};

bfd_boolean
aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
{
  return (reg->flags & F_DEPRECATED) != 0;
}

bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
                             const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;
  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;
  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;
  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;
  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;
  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;
  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;
  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;
  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;
  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;
  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;
  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;
  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;
  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  return TRUE;
}

/* The CPENC values below are fairly misleading: the fields here are not
   in CPENC form but in op2op1 form.  They are encoded by ins_pstatefield,
   which just shifts the value by the width of the fields in a loop, so
   running them through CPENC would set only the first field and mask the
   rest out to 0.  As an example, for op2 = 3 and op1 = 2, CPENC would
   produce the value 0b110000000001000000 (0x30040), while what is wanted
   here is 0b011010 (0x1a).  */
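
/* Illustrative check (not part of the original comment): following the
   op2op1 packing described above, the example's two 3-bit fields pack as
   (3 << 3) | 2 == 0x1a, which is exactly the value used for "dit" in the
   table below.  */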
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },
  { "uao", 0x03, F_ARCHEXT },
  { "dit", 0x1a, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};

bfd_boolean
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
                                 const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;
  /* PAN.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x04
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;
  /* UAO.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x03
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;
  /* DIT.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x1a
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  return TRUE;
}
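
/* Example (illustrative, not part of the original source): for a feature
   set describing plain ARMv8-A, the "pan" entry above has F_ARCHEXT set
   and value 0x04, so the PAN check returns FALSE and a directive such as
   "msr pan, #1" would be rejected as unsupported.  */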

const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
  { "ialluis", CPENS(0,C7,C1,0), 0 },
  { "iallu", CPENS(0,C7,C5,0), 0 },
  { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
  { 0, CPENS(0,0,0,0), 0 }
};

const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
  { "zva", CPENS (3, C7, C4, 1), F_HASXT },
  { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
  { "isw", CPENS (0, C7, C6, 2), F_HASXT },
  { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
  { "csw", CPENS (0, C7, C10, 2), F_HASXT },
  { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
  { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
  { "civac", CPENS (3, C7, C14, 1), F_HASXT },
  { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
  { 0, CPENS(0,0,0,0), 0 }
};

const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
  { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
  { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
  { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
  { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
  { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
  { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
  { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
  { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
  { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
  { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
  { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
  { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
  { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
  { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
  { 0, CPENS(0,0,0,0), 0 }
};

const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
  { "vmalle1", CPENS(0,C8,C7,0), 0 },
  { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
  { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
  { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
  { "vmalle1is", CPENS(0,C8,C3,0), 0 },
  { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
  { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
  { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
  { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
  { "ipas2le1is", CPENS (4, C8, C0, 5), F_HASXT },
  { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
  { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
  { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
  { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
  { "vmalls12e1", CPENS(4,C8,C7,6), 0 },
  { "vmalls12e1is", CPENS(4,C8,C3,6), 0 },
  { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
  { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
  { "alle2", CPENS(4,C8,C7,0), 0 },
  { "alle2is", CPENS(4,C8,C3,0), 0 },
  { "alle1", CPENS(4,C8,C7,4), 0 },
  { "alle1is", CPENS(4,C8,C3,4), 0 },
  { "alle3", CPENS(6,C8,C7,0), 0 },
  { "alle3is", CPENS(6,C8,C3,0), 0 },
  { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
  { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
  { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
  { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
  { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
  { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
  { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
  { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

  { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
  { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
  { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
  { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
  { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
  { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
  { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
  { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
  { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
  { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
  { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
  { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
  { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
  { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
  { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
  { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

  { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
  { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
  { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
  { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
  { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
  { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
  { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
  { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
  { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
  { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
  { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
  { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
  { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
  { "ripas2le1is", CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
  { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
  { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
  { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
  { "ripas2le1os", CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
  { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
  { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
  { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
  { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
  { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
  { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
  { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
  { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
  { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
  { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
  { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
  { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

  { 0, CPENS(0,0,0,0), 0 }
};

bfd_boolean
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
  return (sys_ins_reg->flags & F_HASXT) != 0;
}

bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
                                 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;
  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;
  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
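
/* Example (illustrative, not part of the original source): "dc cvap" has
   F_ARCHEXT set in aarch64_sys_regs_dc, so with a feature set that lacks
   ARMv8.2 the DC CVAP check above returns FALSE and the operation is
   treated as unsupported.  */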

#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))

static bfd_boolean
verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
              const aarch64_insn insn)
{
  int t  = BITS (insn, 4, 0);
  int n  = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  /* Write back enabled: the base register must not overlap a transfer
     register.  */
  if (BIT (insn, 23) && (t == n || t2 == n) && n != 31)
    return FALSE;

  return TRUE;
}
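
/* Example (illustrative, not part of the original source): the
   post-indexed form "ldpsw x0, x1, [x0], #8" has write-back enabled with
   Rn equal to Rt, so the check above returns FALSE and the encoding is
   flagged.  */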

/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
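
/* Worked example (illustrative, not part of the original source):

     aarch64_sve_dupm_mov_immediate_p (0x00ff00ff00ff00ffULL, 8)
       returns TRUE: 0x00ff repeats in every halfword but is not a
       sign-extended 8-bit value at any element size, so DUP cannot
       encode it and DUPM is needed.

     aarch64_sve_dupm_mov_immediate_p (0x50ULL, 8)
       returns FALSE: 0x50 fits in a signed 8-bit DUP immediate.  */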

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
        {
          svalue = (int16_t) uvalue;
          if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
            return FALSE;
        }
    }
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}

/* Include the opcode description table as well as the operand description
   table.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"