1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
/* When built with DEBUG_AARCH64, this flag enables verbose dump output;
   the matching #ifdef is not visible in this chunk.  */
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
/* NOTE(review): the 32 initializer entries (original lines 41-78) are
   elided from this chunk.  */
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
/* NOTE(review): the 16 initializer entries are elided from this chunk.  */
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
/* Return TRUE iff QUALIFIER lies in the AdvSIMD vector-arrangement range
   V_8B .. V_1Q.  NOTE(review): the FALSE arm and the closing brace are
   elided from this chunk.  */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
/* Return TRUE iff QUALIFIER lies in the scalar FP/SIMD range S_B .. S_Q.  */
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
/* Tail of the data-pattern enumeration; the earlier enumerators
   (DP_UNKNOWN, DP_VECTOR_3SAME, DP_VECTOR_LONG, DP_VECTOR_WIDE) are
   elided here but implied by the index table below.  */
127 DP_VECTOR_ACROSS_LANES,
/* Maps each data pattern to the index of the operand whose qualifier
   carries the size:Q information; must stay in sync with the
   data-pattern enumeration order.  */
130 static const char significant_operand_index [] =
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
/* Classify the qualifier sequence into one of the data patterns used to
   pick the size:Q-significant operand.  */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
/* Wide form: the third operand's element size is half that of the
   (equal-sized) first two.  */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
/* Scalar destination fed by a vector source, i.e. an across-lanes
   operation.  */
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the calculated result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
/* Return the index of the operand significant for size:Q encoding, based
   on the opcode's first (canonical) qualifier sequence.  */
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
/* {lsb, width} descriptors for the instruction bit-fields, indexed by the
   FLD_* enumerators; entry order must match that enumeration.  */
202 const aarch64_field fields[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
247 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
248 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
249 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
250 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
251 { 5, 14 }, /* imm14: in test bit and branch instructions. */
252 { 5, 16 }, /* imm16: in exception instructions. */
253 { 0, 26 }, /* imm26: in unconditional branch instructions. */
254 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
255 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
256 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
257 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
258 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
259 { 22, 1 }, /* N: in logical (immediate) instructions. */
260 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
261 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
262 { 31, 1 }, /* sf: in integer data processing instructions. */
263 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
264 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
265 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
266 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
267 { 31, 1 }, /* b5: in the test bit and branch instructions. */
268 { 19, 5 }, /* b40: in the test bit and branch instructions. */
269 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
270 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
271 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
272 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
273 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
274 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
275 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
276 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
277 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
278 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
279 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
280 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
281 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
282 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
283 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
284 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
285 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
286 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
289 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
290 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
291 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
292 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
293 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
294 { 5, 1 }, /* SVE_i1: single-bit immediate. */
295 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
296 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
297 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
298 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
299 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
300 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
301 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
302 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
303 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
304 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
305 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
306 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
307 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
308 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
309 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
310 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
311 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
312 { 16, 4 }, /* SVE_tsz: triangular size select. */
313 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
314 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
315 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
316 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
317 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
318 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
319 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
320 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
321 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
/* Return the operand class recorded for operand code TYPE.  */
324 enum aarch64_operand_class
325 aarch64_get_operand_class (enum aarch64_opnd type)
327 return aarch64_operands[type].op_class;
/* Return the name string recorded for operand code TYPE.  */
331 aarch64_get_operand_name (enum aarch64_opnd type)
333 return aarch64_operands[type].name;
336 /* Get operand description string.
337 This is usually for the diagnosis purpose. */
339 aarch64_get_operand_desc (enum aarch64_opnd type)
341 return aarch64_operands[type].desc;
344 /* Table of all conditional affixes. */
/* Indexed by the 4-bit AArch64 condition code; each entry lists the
   accepted spellings and the code value.  Entries for codes 0x6-0x7 and
   0xc-0xf are elided from this chunk.  */
345 const aarch64_cond aarch64_conds[16] =
347 {{"eq", "none"}, 0x0},
348 {{"ne", "any"}, 0x1},
349 {{"cs", "hs", "nlast"}, 0x2},
350 {{"cc", "lo", "ul", "last"}, 0x3},
351 {{"mi", "first"}, 0x4},
352 {{"pl", "nfrst"}, 0x5},
355 {{"hi", "pmore"}, 0x8},
356 {{"ls", "plast"}, 0x9},
357 {{"ge", "tcont"}, 0xa},
358 {{"lt", "tstop"}, 0xb},
/* Return the condition descriptor for the 4-bit code VALUE.  */
366 get_cond_from_value (aarch64_insn value)
369 return &aarch64_conds[(unsigned int) value];
/* Return the logical inverse of COND; AArch64 condition codes invert by
   toggling the least-significant bit.  */
373 get_inverted_cond (const aarch64_cond *cond)
375 return &aarch64_conds[cond->value ^ 0x1];
378 /* Table describing the operand extension/shifting operators; indexed by
379 enum aarch64_modifier_kind.
381 The value column provides the most common values for encoding modifiers,
382 which enables table-driven encoding/decoding for the modifiers. */
383 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
/* NOTE(review): the table entries are elided from this chunk.  */
/* Recover the modifier kind from a pointer into the table above, by
   pointer subtraction.  */
404 enum aarch64_modifier_kind
405 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
407 return desc - aarch64_operand_modifiers;
/* Return the encoding value of modifier KIND.  */
411 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
413 return aarch64_operand_modifiers[kind].value;
/* Map an encoded modifier VALUE back to its kind: extend operators count
   up from UXTB, shift operators count down from LSL.  */
416 enum aarch64_modifier_kind
417 aarch64_get_operand_modifier_from_value (aarch64_insn value,
418 bfd_boolean extend_p)
420 if (extend_p == TRUE)
421 return AARCH64_MOD_UXTB + value;
423 return AARCH64_MOD_LSL - value;
/* TRUE iff KIND is a register-extend operator (after LSL, up to SXTX).  */
427 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
429 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
/* TRUE iff KIND is a shift operator (ROR .. LSL).  */
433 static inline bfd_boolean
434 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
436 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
/* Names and encodings of the memory-barrier options; the 16 entries are
   elided from this chunk.  */
440 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
460 /* Table describing the operands supported by the aliases of the HINT
463 The name column is the operand that is accepted for the alias. The value
464 column is the hint number of the alias. The list of operands is terminated
465 by NULL in the name column. */
467 const struct aarch64_name_value_pair aarch64_hint_options[] =
469 { "csync", 0x11 }, /* PSB CSYNC. */
473 /* op -> op: load = 0 instruction = 1 store = 2
475 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
/* Pack the prefetch operand value: bits [4:3] = op, bits [2:1] = level-1,
   bit [0] = temporal hint.  */
476 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
477 const struct aarch64_name_value_pair aarch64_prfops[32] =
479 { "pldl1keep", B(0, 1, 0) },
480 { "pldl1strm", B(0, 1, 1) },
481 { "pldl2keep", B(0, 2, 0) },
482 { "pldl2strm", B(0, 2, 1) },
483 { "pldl3keep", B(0, 3, 0) },
484 { "pldl3strm", B(0, 3, 1) },
487 { "plil1keep", B(1, 1, 0) },
488 { "plil1strm", B(1, 1, 1) },
489 { "plil2keep", B(1, 2, 0) },
490 { "plil2strm", B(1, 2, 1) },
491 { "plil3keep", B(1, 3, 0) },
492 { "plil3strm", B(1, 3, 1) },
495 { "pstl1keep", B(2, 1, 0) },
496 { "pstl1strm", B(2, 1, 1) },
497 { "pstl2keep", B(2, 2, 0) },
498 { "pstl2strm", B(2, 2, 1) },
499 { "pstl3keep", B(2, 3, 0) },
500 { "pstl3strm", B(2, 3, 1) },
514 /* Utilities on value constraint. */
/* Return 1 iff LOW <= VALUE <= HIGH.  */
517 value_in_range_p (int64_t value, int low, int high)
519 return (value >= low && value <= high) ? 1 : 0;
522 /* Return true if VALUE is a multiple of ALIGN. */
524 value_aligned_p (int64_t value, int align)
526 return (value % align) == 0;
529 /* A signed value fits in a field. */
531 value_fit_signed_field_p (int64_t value, unsigned width)
/* Guard against shifting by >= the bit-width of int64_t, which would be
   undefined behaviour.  */
534 if (width < sizeof (value) * 8)
536 int64_t lim = (int64_t)1 << (width - 1);
537 if (value >= -lim && value < lim)
543 /* An unsigned value fits in a field. */
545 value_fit_unsigned_field_p (int64_t value, unsigned width)
548 if (width < sizeof (value) * 8)
550 int64_t lim = (int64_t)1 << width;
551 if (value >= 0 && value < lim)
557 /* Return 1 if OPERAND is SP or WSP. */
559 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
561 return ((aarch64_get_operand_class (operand->type)
562 == AARCH64_OPND_CLASS_INT_REG)
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
564 && operand->reg.regno == 31);
567 /* Return 1 if OPERAND is XZR or WZR: register number 31 in a context
   that cannot be the stack pointer. */
569 aarch64_zero_register_p (const aarch64_opnd_info *operand)
571 return ((aarch64_get_operand_class (operand->type)
572 == AARCH64_OPND_CLASS_INT_REG)
573 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
574 && operand->reg.regno == 31);
577 /* Return true if the operand *OPERAND that has the operand code
578 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
579 qualified by the qualifier TARGET. */
582 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
583 aarch64_opnd_qualifier_t target)
585 switch (operand->qualifier)
/* A W/X register that is in fact the stack pointer may also be qualified
   as WSP/SP, and a maybe-SP operand qualified WSP/SP may also be viewed
   as plain W/X.  NOTE(review): the return statements between the cases
   are elided from this chunk.  */
587 case AARCH64_OPND_QLF_W:
588 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
591 case AARCH64_OPND_QLF_X:
592 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
595 case AARCH64_OPND_QLF_WSP:
596 if (target == AARCH64_OPND_QLF_W
597 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
600 case AARCH64_OPND_QLF_SP:
601 if (target == AARCH64_OPND_QLF_X
602 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
612 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
613 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
615 Return NIL if more than one expected qualifiers are found. */
617 aarch64_opnd_qualifier_t
618 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
620 const aarch64_opnd_qualifier_t known_qlf,
627 When the known qualifier is NIL, we have to assume that there is only
628 one qualifier sequence in the *QSEQ_LIST and return the corresponding
629 qualifier directly. One scenario is that for instruction
630 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
631 which has only one possible valid qualifier sequence
633 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
634 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
636 Because the qualifier NIL has dual roles in the qualifier sequence:
637 it can mean no qualifier for the operand, or the qualifier sequence is
638 not in use (when all qualifiers in the sequence are NILs), we have to
639 handle this special case here. */
640 if (known_qlf == AARCH64_OPND_NIL)
642 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
643 return qseq_list[0][idx];
/* Scan all sequences; remember the single one whose KNOWN_IDX entry
   matches KNOWN_QLF.  */
646 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
648 if (qseq_list[i][known_idx] == known_qlf)
651 /* More than one sequences are found to have KNOWN_QLF at
653 return AARCH64_OPND_NIL;
658 return qseq_list[saved_i][idx];
/* Kinds of operand qualifier; the enumerator list is elided from this
   chunk.  */
661 enum operand_qualifier_kind
669 /* Operand qualifier description. */
670 struct operand_qualifier_data
672 /* The usage of the three data fields depends on the qualifier kind. */
679 enum operand_qualifier_kind kind;
682 /* Indexed by the operand qualifier enumerators. */
683 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
685 {0, 0, 0, "NIL", OQK_NIL},
687 /* Operand variant qualifiers.
689 element size, number of elements and common value for encoding. */
691 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
692 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
693 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
694 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
696 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
697 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
698 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
699 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
700 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
701 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
/* NOTE(review): "4b" appears twice; presumably these correspond to two
   distinct qualifier enumerators (scalar and vector 4B forms) - confirm
   against the qualifier enumeration.  */
703 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
704 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
705 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
706 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
707 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
708 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
709 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
710 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
711 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
712 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
713 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
715 {0, 0, 0, "z", OQK_OPD_VARIANT},
716 {0, 0, 0, "m", OQK_OPD_VARIANT},
718 /* Qualifiers constraining the value range.
720 Lower bound, higher bound, unused. */
722 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
723 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
724 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
725 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
726 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
727 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
728 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
730 /* Qualifiers for miscellaneous purpose.
732 unused, unused and unused. */
737 {0, 0, 0, "retrieving", 0},
/* TRUE iff QUALIFIER is an operand-variant qualifier.  */
740 static inline bfd_boolean
741 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
743 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
/* TRUE iff QUALIFIER expresses a value-range constraint.  */
747 static inline bfd_boolean
748 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
750 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
/* Return the printable name of QUALIFIER.  */
755 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
757 return aarch64_opnd_qualifiers[qualifier].desc;
760 /* Given an operand qualifier, return the expected data element size
761 of a qualified operand. */
763 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
765 assert (operand_variant_qualifier_p (qualifier) == TRUE);
766 return aarch64_opnd_qualifiers[qualifier].data0;
/* Return the number of elements for a variant qualifier.  */
770 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
772 assert (operand_variant_qualifier_p (qualifier) == TRUE);
773 return aarch64_opnd_qualifiers[qualifier].data1;
/* Return the standard encoding value for a variant qualifier.  */
777 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
779 assert (operand_variant_qualifier_p (qualifier) == TRUE);
780 return aarch64_opnd_qualifiers[qualifier].data2;
/* Lower bound of a value-range qualifier.  */
784 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
786 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
787 return aarch64_opnd_qualifiers[qualifier].data0;
/* Upper bound of a value-range qualifier.  */
791 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
793 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
794 return aarch64_opnd_qualifiers[qualifier].data1;
/* printf-style debug output helper (body elided from this chunk).  */
799 aarch64_verbose (const char *str, ...)
/* Print the qualifier names of one sequence, comma separated.  */
810 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
814 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
815 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
/* Debug helper: dump the operands' current qualifiers next to the
   candidate sequence being matched.  */
820 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
821 const aarch64_opnd_qualifier_t *qualifier)
824 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
826 aarch64_verbose ("dump_match_qualifiers:");
827 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
828 curr[i] = opnd[i].qualifier;
829 dump_qualifier_sequence (curr);
830 aarch64_verbose ("against");
831 dump_qualifier_sequence (qualifier);
833 #endif /* DEBUG_AARCH64 */
835 /* TODO improve this, we can have an extra field at the runtime to
836 store the number of operands rather than calculating it every time. */
/* Count the operands of OPCODE by scanning for the AARCH64_OPND_NIL
   terminator.  NOTE(review): the post-loop adjustment of I is elided
   from this chunk.  */
839 aarch64_num_of_operands (const aarch64_opcode *opcode)
842 const enum aarch64_opnd *opnds = opcode->operands;
843 while (opnds[i++] != AARCH64_OPND_NIL)
846 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
850 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
851 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
853 N.B. on the entry, it is very likely that only some operands in *INST
854 have had their qualifiers been established.
856 If STOP_AT is not -1, the function will only try to match
857 the qualifier sequence for operands before and including the operand
858 of index STOP_AT; and on success *RET will only be filled with the first
859 (STOP_AT+1) qualifiers.
861 A couple examples of the matching algorithm:
869 Apart from serving the main encoding routine, this can also be called
870 during or after the operand decoding. */
873 aarch64_find_best_match (const aarch64_inst *inst,
874 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
875 int stop_at, aarch64_opnd_qualifier_t *ret)
879 const aarch64_opnd_qualifier_t *qualifiers;
881 num_opnds = aarch64_num_of_operands (inst->opcode);
884 DEBUG_TRACE ("SUCCEED: no operand");
/* Clamp STOP_AT into the valid operand index range.  */
888 if (stop_at < 0 || stop_at >= num_opnds)
889 stop_at = num_opnds - 1;
891 /* For each pattern. */
892 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
895 qualifiers = *qualifiers_list;
897 /* Start as positive. */
900 DEBUG_TRACE ("%d", i);
903 dump_match_qualifiers (inst->operands, qualifiers);
906 /* Most opcodes have much fewer patterns in the list.
907 First NIL qualifier indicates the end in the list. */
908 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
910 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
/* Compare the candidate sequence against the operands' established
   qualifiers, up to STOP_AT.  */
916 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
918 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
920 /* Either the operand does not have qualifier, or the qualifier
921 for the operand needs to be deduced from the qualifier
923 In the latter case, any constraint checking related with
924 the obtained qualifier should be done later in
925 operand_general_constraint_met_p. */
928 else if (*qualifiers != inst->operands[j].qualifier)
930 /* Unless the target qualifier can also qualify the operand
931 (which has already had a non-nil qualifier), non-equal
932 qualifiers are generally un-matched. */
933 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
942 continue; /* Equal qualifiers are certainly matched. */
945 /* Qualifiers established. */
952 /* Fill the result in *RET. */
954 qualifiers = *qualifiers_list;
956 DEBUG_TRACE ("complete qualifiers using list %d", i);
959 dump_qualifier_sequence (qualifiers);
/* Copy the matched qualifiers and NIL-pad the remainder of *RET.  */
962 for (j = 0; j <= stop_at; ++j, ++qualifiers)
963 ret[j] = *qualifiers;
964 for (; j < AARCH64_MAX_OPND_NUM; ++j)
965 ret[j] = AARCH64_OPND_QLF_NIL;
967 DEBUG_TRACE ("SUCCESS");
971 DEBUG_TRACE ("FAIL");
975 /* Operand qualifier matching and resolving.
977 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
978 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
980 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
984 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
987 aarch64_opnd_qualifier_seq_t qualifiers;
/* Find the best matching sequence; bail out if none matches.  */
989 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
992 DEBUG_TRACE ("matching FAIL");
/* F_STRICT opcodes additionally require the matched sequence to agree
   with the operands' qualifiers exactly, NILs included.  */
996 if (inst->opcode->flags & F_STRICT)
998 /* Require an exact qualifier match, even for NIL qualifiers. */
999 nops = aarch64_num_of_operands (inst->opcode);
1000 for (i = 0; i < nops; ++i)
1001 if (inst->operands[i].qualifier != qualifiers[i])
1005 /* Update the qualifiers. */
1006 if (update_p == TRUE)
1007 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1009 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1011 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1012 "update %s with %s for operand %d",
1013 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1014 aarch64_get_qualifier_name (qualifiers[i]), i);
1015 inst->operands[i].qualifier = qualifiers[i];
1018 DEBUG_TRACE ("matching SUCCESS");
1022 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1025 IS32 indicates whether value is a 32-bit immediate or not.
1026 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1027 amount will be returned in *SHIFT_AMOUNT. */
1030 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1034 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1038 /* Allow all zeros or all ones in top 32-bits, so that
1039 32-bit constant expressions like ~0x80000000 are
1041 uint64_t ext = value;
1042 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1043 /* Immediate out of range. */
/* Truncate to the low 32 bits for the 32-bit case.  */
1045 value &= (int64_t) 0xffffffff;
1048 /* first, try movz then movn */
/* The constant is movable iff exactly one 16-bit half-word is non-zero;
   that half-word's position gives the shift amount (0, 16, 32 or 48).  */
1050 if ((value & ((int64_t) 0xffff << 0)) == value)
1052 else if ((value & ((int64_t) 0xffff << 16)) == value)
1054 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1056 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1061 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1065 if (shift_amount != NULL)
1066 *shift_amount = amount;
1068 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1073 /* Build the accepted values for immediate logical SIMD instructions.
1075 The standard encodings of the immediate value are:
1076 N imms immr SIMD size R S
1077 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1078 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1079 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1080 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1081 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1082 0 11110s 00000r 2 UInt(r) UInt(s)
1083 where all-ones value of S is reserved.
1085 Let's call E the SIMD size.
1087 The immediate value is: S+1 bits '1' rotated to the right by R.
1089 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1090 (remember S != E - 1). */
1092 #define TOTAL_IMM_NB 5334
/* One (value, encoding) pair; the table below is kept sorted by value so
   it can be searched with bsearch.  */
1097 aarch64_insn encoding;
1098 } simd_imm_encoding;
1100 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
/* qsort/bsearch comparator ordering entries by immediate value.
   NOTE(review): the return statements are elided from this chunk.  */
1103 simd_imm_encoding_cmp(const void *i1, const void *i2)
1105 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1106 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1108 if (imm1->imm < imm2->imm)
1110 if (imm1->imm > imm2->imm)
1115 /* immediate bitfield standard encoding
1116 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1117 1 ssssss rrrrrr 64 rrrrrr ssssss
1118 0 0sssss 0rrrrr 32 rrrrr sssss
1119 0 10ssss 00rrrr 16 rrrr ssss
1120 0 110sss 000rrr 8 rrr sss
1121 0 1110ss 0000rr 4 rr ss
1122 0 11110s 00000r 2 r s */
/* Pack N (is64), R and S into the 13-bit imm13 layout shown above.  */
1124 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1126 return (is64 << 12) | (r << 6) | s;
/* Populate and sort simd_immediates[] with every valid logical-immediate
   value and its standard encoding.  */
1130 build_immediate_table (void)
1132 uint32_t log_e, e, s, r, s_mask;
/* Iterate over element sizes e = 2, 4, ..., 64 (log_e = 1..6).  */
1138 for (log_e = 1; log_e <= 6; log_e++)
1140 /* Get element size. */
1145 mask = 0xffffffffffffffffull;
1151 mask = (1ull << e) - 1;
1153 1 ((1 << 4) - 1) << 2 = 111100
1154 2 ((1 << 3) - 1) << 3 = 111000
1155 3 ((1 << 2) - 1) << 4 = 110000
1156 4 ((1 << 1) - 1) << 5 = 100000
1157 5 ((1 << 0) - 1) << 6 = 000000 */
1158 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
/* For this element size, enumerate every (S, R): S+1 one-bits rotated
   right by R (the all-ones pattern S == e-1 is reserved).  */
1160 for (s = 0; s < e - 1; s++)
1161 for (r = 0; r < e; r++)
1163 /* s+1 consecutive bits to 1 (s < 63) */
1164 imm = (1ull << (s + 1)) - 1;
1165 /* rotate right by r */
1167 imm = (imm >> r) | ((imm << (e - r)) & mask);
1168 /* replicate the constant depending on SIMD size */
1171 case 1: imm = (imm << 2) | imm;
1173 case 2: imm = (imm << 4) | imm;
1175 case 3: imm = (imm << 8) | imm;
1177 case 4: imm = (imm << 16) | imm;
1179 case 5: imm = (imm << 32) | imm;
1184 simd_immediates[nb_imms].imm = imm;
1185 simd_immediates[nb_imms].encoding =
1186 encode_immediate_bitfield(is64, s | s_mask, r);
1190 assert (nb_imms == TOTAL_IMM_NB);
/* Sort by immediate value so lookups can use bsearch.  */
1191 qsort(simd_immediates, nb_imms,
1192 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1195 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1196 be accepted by logical (immediate) instructions
1197 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1199 ESIZE is the number of bytes in the decoded immediate value.
1200 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1201 VALUE will be returned in *ENCODING. */
1204 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1206 simd_imm_encoding imm_enc;
1207 const simd_imm_encoding *imm_encoding;
1208 static bfd_boolean initialized = FALSE;
1212 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
/* Build the lookup table lazily on first use.  */
1217 build_immediate_table ();
1221 /* Allow all zeros or all ones in top bits, so that
1222 constant expressions like ~1 are permitted. */
/* The double shift avoids undefined behaviour when esize == 8
   (shifting a 64-bit value by 64).  */
1223 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1224 if ((value & ~upper) != value && (value | upper) != value)
1227 /* Replicate to a full 64-bit value. */
1229 for (i = esize * 8; i < 64; i *= 2)
1230 value |= (value << i);
/* Binary-search the sorted table for the replicated value.  */
1232 imm_enc.imm = value;
1233 imm_encoding = (const simd_imm_encoding *)
1234 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1235 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1236 if (imm_encoding == NULL)
1238 DEBUG_TRACE ("exit with FALSE");
1241 if (encoding != NULL)
1242 *encoding = imm_encoding->encoding;
1243 DEBUG_TRACE ("exit with TRUE");
1247 /* If 64-bit immediate IMM is in the format of
1248 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1249 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1250 of value "abcdefgh". Otherwise return -1. */
/* NOTE(review): the loop body's accept/reject branches and the final
   return are elided from this excerpt; per the header comment above,
   each byte of IMM must be 0x00 or 0xff, and the packed 8-bit result
   "abcdefgh" (or -1 on failure) is returned by the elided lines.  */
1252 aarch64_shrink_expanded_imm8 (uint64_t imm)
1258 for (i = 0; i < 8; i++)
/* Extract byte i (little-endian bit positions 8*i .. 8*i+7).  */
1260 byte = (imm >> (8 * i)) & 0xff;
/* A byte that is neither 0xff (handled by an elided branch) nor 0x00
   makes IMM unrepresentable.  */
1263 else if (byte != 0x00)
1269 /* Utility inline functions for operand_general_constraint_met_p. */
/* Record error KIND for operand IDX in *MISMATCH_DETAIL with static
   message ERROR.  Base helper used by all the set_*_error wrappers;
   a NULL MISMATCH_DETAIL means the caller does not want error details
   (the early-return line is elided from this excerpt).  */
1272 set_error (aarch64_operand_error *mismatch_detail,
1273 enum aarch64_operand_error_kind kind, int idx,
1276 if (mismatch_detail == NULL)
1278 mismatch_detail->kind = kind;
1279 mismatch_detail->index = idx;
1280 mismatch_detail->error = error;
/* Report a syntax error on operand IDX with static message ERROR.
   No-op when MISMATCH_DETAIL is NULL (early return elided).  */
1284 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1287 if (mismatch_detail == NULL)
1289 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
/* Report that operand IDX is outside [LOWER_BOUND, UPPER_BOUND]; the
   bounds are stashed in data[0]/data[1] so the consumer (e.g. GAS) can
   format them into the final diagnostic.  No-op when MISMATCH_DETAIL
   is NULL (early return elided).  */
1293 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1294 int idx, int lower_bound, int upper_bound,
1297 if (mismatch_detail == NULL)
1299 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1300 mismatch_detail->data[0] = lower_bound;
1301 mismatch_detail->data[1] = upper_bound;
/* Convenience wrapper: out-of-range immediate *value* on operand IDX.  */
1305 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1308 if (mismatch_detail == NULL)
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("immediate value"));
/* Convenience wrapper: out-of-range immediate *offset* (addressing) on
   operand IDX.  */
1315 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1318 if (mismatch_detail == NULL)
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1321 _("immediate offset"));
/* Convenience wrapper: register *number* out of range on operand IDX.  */
1325 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1326 int idx, int lower_bound, int upper_bound)
1328 if (mismatch_detail == NULL)
1330 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1331 _("register number"));
/* Convenience wrapper: vector/register *element index* out of range on
   operand IDX.  */
1335 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1336 int idx, int lower_bound, int upper_bound)
1338 if (mismatch_detail == NULL)
1340 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1341 _("register element index"));
/* Convenience wrapper: *shift amount* out of range on operand IDX.
   NOTE(review): the message-string argument line is elided from this
   excerpt.  */
1345 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1346 int idx, int lower_bound, int upper_bound)
1348 if (mismatch_detail == NULL)
1350 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1354 /* Report that the MUL modifier in operand IDX should be in the range
1355 [LOWER_BOUND, UPPER_BOUND]. */
/* See the header comment above: out-of-range MUL modifier on operand
   IDX.  NOTE(review): the message-string argument line is elided from
   this excerpt.  */
1357 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1358 int idx, int lower_bound, int upper_bound)
1360 if (mismatch_detail == NULL)
1362 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
/* Report that operand IDX violates ALIGNMENT; the required alignment is
   stored in data[0] for the diagnostic formatter (no static message).  */
1367 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1370 if (mismatch_detail == NULL)
1372 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1373 mismatch_detail->data[0] = alignment;
/* Report a register-list length mismatch on operand IDX; the expected
   number of registers goes into data[0] for the diagnostic formatter.  */
1377 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1380 if (mismatch_detail == NULL)
1382 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1383 mismatch_detail->data[0] = expected_num;
/* Report a miscellaneous error on operand IDX with static message
   ERROR; catch-all for constraints with no dedicated error kind.  */
1387 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1390 if (mismatch_detail == NULL)
1392 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1395 /* General constraint checking based on operand code.
1397 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1398 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1400 This function has to be called after the qualifiers for all operands
1403 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1404 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1405 of error message during the disassembling where error message is not
1406 wanted. We avoid the dynamic construction of strings of error messages
1407 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1408 use a combination of error code, static string and some integer data to
1409 represent an error. */
/* NOTE(review): this excerpt elides a large number of original lines
   (the return type, braces, "return 0;" after each error, "break;"
   after each case, and several case labels/assignments).  Comments
   below document only the visible checks; the big switch dispatches on
   the operand class, then on the specific operand TYPE.  */
1412 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1413 enum aarch64_opnd type,
1414 const aarch64_opcode *opcode,
1415 aarch64_operand_error *mismatch_detail)
1417 unsigned num, modifiers, shift;
1419 int64_t imm, min_value, max_value;
1420 uint64_t uvalue, mask;
1421 const aarch64_opnd_info *opnd = opnds + idx;
1422 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
/* Sanity: the operand slot in the opcode table must agree with both the
   decoded operand and the TYPE the caller asked us to check.  */
1424 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1426 switch (aarch64_operands[type].op_class)
1428 case AARCH64_OPND_CLASS_INT_REG:
1429 /* Check pair reg constraints for cas* instructions. */
1430 if (type == AARCH64_OPND_PAIRREG)
1432 assert (idx == 1 || idx == 3);
1433 if (opnds[idx - 1].reg.regno % 2 != 0)
1435 set_syntax_error (mismatch_detail, idx - 1,
1436 _("reg pair must start from even reg"));
1439 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1441 set_syntax_error (mismatch_detail, idx,
1442 _("reg pair must be contiguous"));
1448 /* <Xt> may be optional in some IC and TLBI instructions. */
1449 if (type == AARCH64_OPND_Rt_SYS)
1451 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1452 == AARCH64_OPND_CLASS_SYSTEM));
/* Xt present but the system instruction takes none: reject; and the
   mirror case below: Xt required but missing.  */
1453 if (opnds[1].present
1454 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1456 set_other_error (mismatch_detail, idx, _("extraneous register"));
1459 if (!opnds[1].present
1460 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1462 set_other_error (mismatch_detail, idx, _("missing register"));
/* Qualifiers demanding WSP/SP must actually name the stack pointer.  */
1468 case AARCH64_OPND_QLF_WSP:
1469 case AARCH64_OPND_QLF_SP:
1470 if (!aarch64_stack_pointer_p (opnd))
1472 set_other_error (mismatch_detail, idx,
1473 _("stack pointer register expected"));
1482 case AARCH64_OPND_CLASS_SVE_REG:
/* Indexed Zm: the operand-specific data gives the width (in bits) of
   the register-number field; the remaining field bits hold the element
   index.  */
1485 case AARCH64_OPND_SVE_Zm3_INDEX:
1486 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1487 case AARCH64_OPND_SVE_Zm4_INDEX:
1488 size = get_operand_fields_width (get_operand_from_code (type));
1489 shift = get_operand_specific_data (&aarch64_operands[type]);
1490 mask = (1 << shift) - 1;
1491 if (opnd->reg.regno > mask)
1493 assert (mask == 7 || mask == 15);
1494 set_other_error (mismatch_detail, idx,
1496 ? _("z0-z15 expected")
1497 : _("z0-z7 expected"));
1500 mask = (1 << (size - shift)) - 1;
1501 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1503 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
/* Zn[idx]: index bounded by 64 bits divided by the element size.  */
1508 case AARCH64_OPND_SVE_Zn_INDEX:
1509 size = aarch64_get_qualifier_esize (opnd->qualifier);
1510 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1512 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1518 case AARCH64_OPND_SVE_ZnxN:
1519 case AARCH64_OPND_SVE_ZtxN:
1520 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1522 set_other_error (mismatch_detail, idx,
1523 _("invalid register list"));
/* Predicate registers encoded in a 3-bit field only reach p0-p7.  */
1533 case AARCH64_OPND_CLASS_PRED_REG:
1534 if (opnd->reg.regno >= 8
1535 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1537 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1542 case AARCH64_OPND_CLASS_COND:
1543 if (type == AARCH64_OPND_COND1
1544 && (opnds[idx].cond->value & 0xe) == 0xe)
1546 /* Do not allow AL or NV. */
1547 set_syntax_error (mismatch_detail, idx, NULL);
1551 case AARCH64_OPND_CLASS_ADDRESS:
1552 /* Check writeback. */
/* Writeback legality is decided per instruction class before the
   per-operand-type checks below.  */
1553 switch (opcode->iclass)
1557 case ldstnapair_offs:
1560 if (opnd->addr.writeback == 1)
1562 set_syntax_error (mismatch_detail, idx,
1563 _("unexpected address writeback"));
1568 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1570 set_syntax_error (mismatch_detail, idx,
1571 _("unexpected address writeback"));
1576 case ldstpair_indexed:
1579 if (opnd->addr.writeback == 0)
1581 set_syntax_error (mismatch_detail, idx,
1582 _("address writeback expected"));
1587 assert (opnd->addr.writeback == 0);
1592 case AARCH64_OPND_ADDR_SIMM7:
1593 /* Scaled signed 7 bits immediate offset. */
1594 /* Get the size of the data element that is accessed, which may be
1595 different from that of the source register size,
1596 e.g. in strb/ldrb. */
1597 size = aarch64_get_qualifier_esize (opnd->qualifier);
1598 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1600 set_offset_out_of_range_error (mismatch_detail, idx,
1601 -64 * size, 63 * size);
1604 if (!value_aligned_p (opnd->addr.offset.imm, size))
1606 set_unaligned_error (mismatch_detail, idx, size);
1610 case AARCH64_OPND_ADDR_OFFSET:
1611 case AARCH64_OPND_ADDR_SIMM9:
1612 /* Unscaled signed 9 bits immediate offset. */
1613 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1615 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1620 case AARCH64_OPND_ADDR_SIMM9_2:
1621 /* Unscaled signed 9 bits immediate offset, which has to be negative
1623 size = aarch64_get_qualifier_esize (qualifier);
1624 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1625 && !value_aligned_p (opnd->addr.offset.imm, size))
1626 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1628 set_other_error (mismatch_detail, idx,
1629 _("negative or unaligned offset expected"));
1632 case AARCH64_OPND_ADDR_SIMM10:
1633 /* Scaled signed 10 bits immediate offset. */
1634 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1636 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1639 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1641 set_unaligned_error (mismatch_detail, idx, 8);
1646 case AARCH64_OPND_SIMD_ADDR_POST:
1647 /* AdvSIMD load/store multiple structures, post-index. */
1649 if (opnd->addr.offset.is_reg)
/* Register post-index form: any GPR x0-x30 is acceptable.  */
1651 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1655 set_other_error (mismatch_detail, idx,
1656 _("invalid register offset"));
/* Immediate post-index form: the increment must equal the total number
   of bytes transferred, derived from the preceding register-list
   operand.  */
1662 const aarch64_opnd_info *prev = &opnds[idx-1];
1663 unsigned num_bytes; /* total number of bytes transferred. */
1664 /* The opcode dependent area stores the number of elements in
1665 each structure to be loaded/stored. */
1666 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1667 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1668 /* Special handling of loading single structure to all lanes. */
1669 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1670 * aarch64_get_qualifier_esize (prev->qualifier);
1672 num_bytes = prev->reglist.num_regs
1673 * aarch64_get_qualifier_esize (prev->qualifier)
1674 * aarch64_get_qualifier_nelem (prev->qualifier);
1675 if ((int) num_bytes != opnd->addr.offset.imm)
1677 set_other_error (mismatch_detail, idx,
1678 _("invalid post-increment amount"));
1684 case AARCH64_OPND_ADDR_REGOFF:
1685 /* Get the size of the data element that is accessed, which may be
1686 different from that of the source register size,
1687 e.g. in strb/ldrb. */
1688 size = aarch64_get_qualifier_esize (opnd->qualifier);
1689 /* It is either no shift or shift by the binary logarithm of SIZE. */
1690 if (opnd->shifter.amount != 0
1691 && opnd->shifter.amount != (int)get_logsz (size))
1693 set_other_error (mismatch_detail, idx,
1694 _("invalid shift amount"));
1697 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1699 switch (opnd->shifter.kind)
1701 case AARCH64_MOD_UXTW:
1702 case AARCH64_MOD_LSL:
1703 case AARCH64_MOD_SXTW:
1704 case AARCH64_MOD_SXTX: break;
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid extend/shift operator"));
1712 case AARCH64_OPND_ADDR_UIMM12:
1713 imm = opnd->addr.offset.imm;
1714 /* Get the size of the data element that is accessed, which may be
1715 different from that of the source register size,
1716 e.g. in strb/ldrb. */
1717 size = aarch64_get_qualifier_esize (qualifier);
1718 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1720 set_offset_out_of_range_error (mismatch_detail, idx,
1724 if (!value_aligned_p (opnd->addr.offset.imm, size))
1726 set_unaligned_error (mismatch_detail, idx, size);
1731 case AARCH64_OPND_ADDR_PCREL14:
1732 case AARCH64_OPND_ADDR_PCREL19:
1733 case AARCH64_OPND_ADDR_PCREL21:
1734 case AARCH64_OPND_ADDR_PCREL26:
1735 imm = opnd->imm.value;
1736 if (operand_need_shift_by_two (get_operand_from_code (type)))
1738 /* The offset value in a PC-relative branch instruction is always
1739 4-byte aligned and is encoded without the lowest 2 bits. */
1740 if (!value_aligned_p (imm, 4))
1742 set_unaligned_error (mismatch_detail, idx, 4);
1745 /* Right shift by 2 so that we can carry out the following check
1749 size = get_operand_fields_width (get_operand_from_code (type));
1750 if (!value_fit_signed_field_p (imm, size))
1752 set_other_error (mismatch_detail, idx,
1753 _("immediate out of range"));
/* SVE [<Xn>, #imm, MUL VL] forms: the scaling factor NUM comes from the
   operand-specific data; the offset must be a multiple of NUM and (per
   elided lines) min/max bounds are set before the range check.  */
1758 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1759 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1760 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1761 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1765 assert (!opnd->addr.offset.is_reg);
1766 assert (opnd->addr.preind);
1767 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1770 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1771 || (opnd->shifter.operator_present
1772 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1774 set_other_error (mismatch_detail, idx,
1775 _("invalid addressing mode"));
1778 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1780 set_offset_out_of_range_error (mismatch_detail, idx,
1781 min_value, max_value);
1784 if (!value_aligned_p (opnd->addr.offset.imm, num))
1786 set_unaligned_error (mismatch_detail, idx, num);
1791 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1794 goto sve_imm_offset_vl;
1796 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1799 goto sve_imm_offset_vl;
/* SVE [<Xn>, #imm] unsigned 6-bit forms, scaled by 1/2/4/8 bytes.  */
1801 case AARCH64_OPND_SVE_ADDR_RI_U6:
1802 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1803 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1804 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1808 assert (!opnd->addr.offset.is_reg);
1809 assert (opnd->addr.preind);
1810 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1813 if (opnd->shifter.operator_present
1814 || opnd->shifter.amount_present)
1816 set_other_error (mismatch_detail, idx,
1817 _("invalid addressing mode"));
1820 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1822 set_offset_out_of_range_error (mismatch_detail, idx,
1823 min_value, max_value);
1826 if (!value_aligned_p (opnd->addr.offset.imm, num))
1828 set_unaligned_error (mismatch_detail, idx, num);
1833 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1836 goto sve_imm_offset;
/* SVE register+register forms: only LSL by the exact operand-specific
   shift amount is accepted; some forms additionally forbid XZR as the
   index register (OPD_F_NO_ZR).  */
1838 case AARCH64_OPND_SVE_ADDR_RR:
1839 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1840 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1841 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1842 case AARCH64_OPND_SVE_ADDR_RX:
1843 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1844 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1845 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1846 case AARCH64_OPND_SVE_ADDR_RZ:
1847 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1848 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1849 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1850 modifiers = 1 << AARCH64_MOD_LSL;
1852 assert (opnd->addr.offset.is_reg);
1853 assert (opnd->addr.preind);
1854 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1855 && opnd->addr.offset.regno == 31)
1857 set_other_error (mismatch_detail, idx,
1858 _("index register xzr is not allowed"));
1861 if (((1 << opnd->shifter.kind) & modifiers) == 0
1862 || (opnd->shifter.amount
1863 != get_operand_specific_data (&aarch64_operands[type])))
1865 set_other_error (mismatch_detail, idx,
1866 _("invalid addressing mode"));
/* SVE scatter/gather [<Xn>, <Zm>.S, <ext>] forms: SXTW/UXTW only.  */
1871 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1872 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1873 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1874 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1875 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1876 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1877 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1878 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1879 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1880 goto sve_rr_operand;
1882 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1883 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1884 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1885 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1888 goto sve_imm_offset;
/* SVE [<Zn>, <Zm>{, <mod> #amount}] forms: modifier must be in the
   MODIFIERS bit-set and the shift amount within 0..3.  */
1890 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1891 modifiers = 1 << AARCH64_MOD_LSL;
1893 assert (opnd->addr.offset.is_reg);
1894 assert (opnd->addr.preind);
1895 if (((1 << opnd->shifter.kind) & modifiers) == 0
1896 || opnd->shifter.amount < 0
1897 || opnd->shifter.amount > 3)
1899 set_other_error (mismatch_detail, idx,
1900 _("invalid addressing mode"));
1905 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1906 modifiers = (1 << AARCH64_MOD_SXTW);
1907 goto sve_zz_operand;
1909 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1910 modifiers = 1 << AARCH64_MOD_UXTW;
1911 goto sve_zz_operand;
1918 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1919 if (type == AARCH64_OPND_LEt)
1921 /* Get the upper bound for the element index. */
1922 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1923 if (!value_in_range_p (opnd->reglist.index, 0, num))
1925 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1929 /* The opcode dependent area stores the number of elements in
1930 each structure to be loaded/stored. */
1931 num = get_opcode_dependent_value (opcode);
1934 case AARCH64_OPND_LVt:
1935 assert (num >= 1 && num <= 4);
1936 /* Unless LD1/ST1, the number of registers should be equal to that
1937 of the structure elements. */
1938 if (num != 1 && opnd->reglist.num_regs != num)
1940 set_reg_list_error (mismatch_detail, idx, num);
1944 case AARCH64_OPND_LVt_AL:
1945 case AARCH64_OPND_LEt:
1946 assert (num >= 1 && num <= 4);
1947 /* The number of registers should be equal to that of the structure
1949 if (opnd->reglist.num_regs != num)
1951 set_reg_list_error (mismatch_detail, idx, num);
1960 case AARCH64_OPND_CLASS_IMMEDIATE:
1961 /* Constraint check on immediate operand. */
1962 imm = opnd->imm.value;
1963 /* E.g. imm_0_31 constrains value to be 0..31. */
1964 if (qualifier_value_in_range_constraint_p (qualifier)
1965 && !value_in_range_p (imm, get_lower_bound (qualifier),
1966 get_upper_bound (qualifier)))
1968 set_imm_out_of_range_error (mismatch_detail, idx,
1969 get_lower_bound (qualifier),
1970 get_upper_bound (qualifier));
/* ADD/SUB (immediate): 12-bit unsigned, optionally LSL #12.  */
1976 case AARCH64_OPND_AIMM:
1977 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1979 set_other_error (mismatch_detail, idx,
1980 _("invalid shift operator"));
1983 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1985 set_other_error (mismatch_detail, idx,
1986 _("shift amount must be 0 or 12"));
1989 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1991 set_other_error (mismatch_detail, idx,
1992 _("immediate out of range"));
/* MOVZ/MOVN/MOVK 16-bit half: shift must be LSL, a multiple of 16, and
   within the destination register width.  */
1997 case AARCH64_OPND_HALF:
1998 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1999 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2001 set_other_error (mismatch_detail, idx,
2002 _("invalid shift operator"));
2005 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2006 if (!value_aligned_p (opnd->shifter.amount, 16))
2008 set_other_error (mismatch_detail, idx,
2009 _("shift amount must be a multiple of 16"));
2012 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2014 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2018 if (opnd->imm.value < 0)
2020 set_other_error (mismatch_detail, idx,
2021 _("negative immediate value not allowed"));
2024 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2026 set_other_error (mismatch_detail, idx,
2027 _("immediate out of range"));
/* MOV-immediate alias: validated against the concrete underlying
   instruction form (wide move or logical immediate).  */
2032 case AARCH64_OPND_IMM_MOV:
2034 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2035 imm = opnd->imm.value;
2039 case OP_MOV_IMM_WIDEN:
2042 case OP_MOV_IMM_WIDE:
2043 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2045 set_other_error (mismatch_detail, idx,
2046 _("immediate out of range"));
2050 case OP_MOV_IMM_LOG:
2051 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2053 set_other_error (mismatch_detail, idx,
2054 _("immediate out of range"));
/* Generic unsigned immediates: bound by the encoding field width.  */
2065 case AARCH64_OPND_NZCV:
2066 case AARCH64_OPND_CCMP_IMM:
2067 case AARCH64_OPND_EXCEPTION:
2068 case AARCH64_OPND_UIMM4:
2069 case AARCH64_OPND_UIMM7:
2070 case AARCH64_OPND_UIMM3_OP1:
2071 case AARCH64_OPND_UIMM3_OP2:
2072 case AARCH64_OPND_SVE_UIMM3:
2073 case AARCH64_OPND_SVE_UIMM7:
2074 case AARCH64_OPND_SVE_UIMM8:
2075 case AARCH64_OPND_SVE_UIMM8_53:
2076 size = get_operand_fields_width (get_operand_from_code (type));
2078 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2080 set_imm_out_of_range_error (mismatch_detail, idx, 0,
/* Generic signed immediates: two's-complement range of field width.  */
2086 case AARCH64_OPND_SIMM5:
2087 case AARCH64_OPND_SVE_SIMM5:
2088 case AARCH64_OPND_SVE_SIMM5B:
2089 case AARCH64_OPND_SVE_SIMM6:
2090 case AARCH64_OPND_SVE_SIMM8:
2091 size = get_operand_fields_width (get_operand_from_code (type));
2093 if (!value_fit_signed_field_p (opnd->imm.value, size))
2095 set_imm_out_of_range_error (mismatch_detail, idx,
2097 (1 << (size - 1)) - 1);
2102 case AARCH64_OPND_WIDTH:
2103 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2104 && opnds[0].type == AARCH64_OPND_Rd);
2105 size = get_upper_bound (qualifier);
2106 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2107 /* lsb+width <= reg.size */
2109 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2110 size - opnds[idx-1].imm.value);
2115 case AARCH64_OPND_LIMM:
2116 case AARCH64_OPND_SVE_LIMM:
2118 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2119 uint64_t uimm = opnd->imm.value;
/* BIC aliases invert the immediate before encoding (elided lines).  */
2120 if (opcode->op == OP_BIC)
2122 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2124 set_other_error (mismatch_detail, idx,
2125 _("immediate out of range"));
2131 case AARCH64_OPND_IMM0:
2132 case AARCH64_OPND_FPIMM0:
2133 if (opnd->imm.value != 0)
2135 set_other_error (mismatch_detail, idx,
2136 _("immediate zero expected"));
/* FCMLA/FCADD-style rotation immediates.  */
2141 case AARCH64_OPND_IMM_ROT1:
2142 case AARCH64_OPND_IMM_ROT2:
2143 case AARCH64_OPND_SVE_IMM_ROT2:
2144 if (opnd->imm.value != 0
2145 && opnd->imm.value != 90
2146 && opnd->imm.value != 180
2147 && opnd->imm.value != 270)
2149 set_other_error (mismatch_detail, idx,
2150 _("rotate expected to be 0, 90, 180 or 270"));
2155 case AARCH64_OPND_IMM_ROT3:
2156 case AARCH64_OPND_SVE_IMM_ROT1:
2157 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2159 set_other_error (mismatch_detail, idx,
2160 _("rotate expected to be 90 or 270"));
/* SHLL: the shift amount must equal the source element width in bits.  */
2165 case AARCH64_OPND_SHLL_IMM:
2167 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2168 if (opnd->imm.value != size)
2170 set_other_error (mismatch_detail, idx,
2171 _("invalid shift amount"));
2176 case AARCH64_OPND_IMM_VLSL:
2177 size = aarch64_get_qualifier_esize (qualifier);
2178 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2180 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2186 case AARCH64_OPND_IMM_VLSR:
2187 size = aarch64_get_qualifier_esize (qualifier);
2188 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2190 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
/* AdvSIMD modified immediate (MOVI/MVNI etc.): the qualifier selects
   which shifter form (LSL / MSL / none) is legal.  */
2195 case AARCH64_OPND_SIMD_IMM:
2196 case AARCH64_OPND_SIMD_IMM_SFT:
2197 /* Qualifier check. */
2200 case AARCH64_OPND_QLF_LSL:
2201 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2203 set_other_error (mismatch_detail, idx,
2204 _("invalid shift operator"));
2208 case AARCH64_OPND_QLF_MSL:
2209 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2211 set_other_error (mismatch_detail, idx,
2212 _("invalid shift operator"));
2216 case AARCH64_OPND_QLF_NIL:
2217 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2219 set_other_error (mismatch_detail, idx,
2220 _("shift is not permitted"));
2228 /* Is the immediate valid? */
2230 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2232 /* uimm8 or simm8 */
2233 if (!value_in_range_p (opnd->imm.value, -128, 255))
2235 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2239 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2242 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2243 ffffffffgggggggghhhhhhhh'. */
2244 set_other_error (mismatch_detail, idx,
2245 _("invalid value for immediate"));
2248 /* Is the shift amount valid? */
2249 switch (opnd->shifter.kind)
2251 case AARCH64_MOD_LSL:
2252 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2253 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2255 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2259 if (!value_aligned_p (opnd->shifter.amount, 8))
2261 set_unaligned_error (mismatch_detail, idx, 8);
2265 case AARCH64_MOD_MSL:
2266 /* Only 8 and 16 are valid shift amount. */
2267 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
/* NOTE(review): the check accepts 8 or 16, but the message below says
   "0 or 16" — the message text looks wrong; confirm against current
   binutils before changing the user-visible string.  */
2269 set_other_error (mismatch_detail, idx,
2270 _("shift amount must be 0 or 16"));
2275 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2277 set_other_error (mismatch_detail, idx,
2278 _("invalid shift operator"));
2285 case AARCH64_OPND_FPIMM:
2286 case AARCH64_OPND_SIMD_FPIMM:
2287 case AARCH64_OPND_SVE_FPIMM8:
2288 if (opnd->imm.is_fp == 0)
2290 set_other_error (mismatch_detail, idx,
2291 _("floating-point immediate expected"));
2294 /* The value is expected to be an 8-bit floating-point constant with
2295 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2296 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2298 if (!value_in_range_p (opnd->imm.value, 0, 255))
2300 set_other_error (mismatch_detail, idx,
2301 _("immediate out of range"));
2304 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2306 set_other_error (mismatch_detail, idx,
2307 _("invalid shift operator"));
/* SVE arithmetic immediate: 8-bit value, optional LSL #8 for element
   sizes wider than a byte; min_value is set by elided lines.  */
2312 case AARCH64_OPND_SVE_AIMM:
2315 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2316 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
/* Double shift by size*4 builds an esize*8-bit mask without the UB a
   single shift by 64 would have when size == 8.  */
2317 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2318 uvalue = opnd->imm.value;
2319 shift = opnd->shifter.amount;
2324 set_other_error (mismatch_detail, idx,
2325 _("no shift amount allowed for"
2326 " 8-bit constants"));
2332 if (shift != 0 && shift != 8)
2334 set_other_error (mismatch_detail, idx,
2335 _("shift amount must be 0 or 8"));
2338 if (shift == 0 && (uvalue & 0xff) == 0)
2341 uvalue = (int64_t) uvalue / 256;
2345 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2347 set_other_error (mismatch_detail, idx,
2348 _("immediate too big for element size"));
2351 uvalue = (uvalue - min_value) & mask;
2354 set_other_error (mismatch_detail, idx,
2355 _("invalid arithmetic immediate"));
2360 case AARCH64_OPND_SVE_ASIMM:
/* SVE FP constants restricted to two exact single-precision bit
   patterns (0.5/1.0, 0.5/2.0, 0.0/1.0 respectively).  */
2364 case AARCH64_OPND_SVE_I1_HALF_ONE:
2365 assert (opnd->imm.is_fp);
2366 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2368 set_other_error (mismatch_detail, idx,
2369 _("floating-point value must be 0.5 or 1.0"));
2374 case AARCH64_OPND_SVE_I1_HALF_TWO:
2375 assert (opnd->imm.is_fp);
2376 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2378 set_other_error (mismatch_detail, idx,
2379 _("floating-point value must be 0.5 or 2.0"));
2384 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2385 assert (opnd->imm.is_fp);
2386 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2388 set_other_error (mismatch_detail, idx,
2389 _("floating-point value must be 0.0 or 1.0"));
/* Inverted logical immediate (e.g. BIC forms): validate ~imm.  */
2394 case AARCH64_OPND_SVE_INV_LIMM:
2396 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2397 uint64_t uimm = ~opnd->imm.value;
2398 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2400 set_other_error (mismatch_detail, idx,
2401 _("immediate out of range"));
2407 case AARCH64_OPND_SVE_LIMM_MOV:
2409 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2410 uint64_t uimm = opnd->imm.value;
2411 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2413 set_other_error (mismatch_detail, idx,
2414 _("immediate out of range"));
/* DUPM-backed MOV additionally requires a replicating pattern.  */
2417 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2419 set_other_error (mismatch_detail, idx,
2420 _("invalid replicated MOV immediate"));
2426 case AARCH64_OPND_SVE_PATTERN_SCALED:
2427 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2428 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2430 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
/* SVE shift-left immediates: 0 .. esize*8-1 bits.  */
2435 case AARCH64_OPND_SVE_SHLIMM_PRED:
2436 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2437 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2438 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2440 set_imm_out_of_range_error (mismatch_detail, idx,
/* SVE shift-right immediates: 1 .. esize*8 bits.  */
2446 case AARCH64_OPND_SVE_SHRIMM_PRED:
2447 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2448 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2449 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2451 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2461 case AARCH64_OPND_CLASS_SYSTEM:
2464 case AARCH64_OPND_PSTATEFIELD:
2465 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2468 The immediate must be #0 or #1. */
2469 if ((opnd->pstatefield == 0x03 /* UAO. */
2470 || opnd->pstatefield == 0x04 /* PAN. */
2471 || opnd->pstatefield == 0x1a) /* DIT. */
2472 && opnds[1].imm.value > 1)
2474 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2477 /* MSR SPSel, #uimm4
2478 Uses uimm4 as a control value to select the stack pointer: if
2479 bit 0 is set it selects the current exception level's stack
2480 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2481 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2482 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2484 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2493 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2494 /* Get the upper bound for the element index. */
2495 if (opcode->op == OP_FCMLA_ELEM)
2496 /* FCMLA index range depends on the vector size of other operands
2497 and is halved because complex numbers take two elements. */
2498 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2499 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2502 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2504 /* Index out-of-range. */
2505 if (!value_in_range_p (opnd->reglane.index, 0, num))
2507 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2510 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2511 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2512 number is encoded in "size:M:Rm":
2518 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2519 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2521 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2526 case AARCH64_OPND_CLASS_MODIFIED_REG:
2527 assert (idx == 1 || idx == 2);
2530 case AARCH64_OPND_Rm_EXT:
2531 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2532 && opnd->shifter.kind != AARCH64_MOD_LSL)
2534 set_other_error (mismatch_detail, idx,
2535 _("extend operator expected"));
2538 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2539 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2540 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2542 if (!aarch64_stack_pointer_p (opnds + 0)
2543 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2545 if (!opnd->shifter.operator_present)
2547 set_other_error (mismatch_detail, idx,
2548 _("missing extend operator"));
2551 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2553 set_other_error (mismatch_detail, idx,
2554 _("'LSL' operator not allowed"));
2558 assert (opnd->shifter.operator_present /* Default to LSL. */
2559 || opnd->shifter.kind == AARCH64_MOD_LSL);
2560 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2562 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2565 /* In the 64-bit form, the final register operand is written as Wm
2566 for all but the (possibly omitted) UXTX/LSL and SXTX
2568 N.B. GAS allows X register to be used with any operator as a
2569 programming convenience. */
2570 if (qualifier == AARCH64_OPND_QLF_X
2571 && opnd->shifter.kind != AARCH64_MOD_LSL
2572 && opnd->shifter.kind != AARCH64_MOD_UXTX
2573 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2575 set_other_error (mismatch_detail, idx, _("W register expected"));
2580 case AARCH64_OPND_Rm_SFT:
2581 /* ROR is not available to the shifted register operand in
2582 arithmetic instructions. */
2583 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2585 set_other_error (mismatch_detail, idx,
2586 _("shift operator expected"));
2589 if (opnd->shifter.kind == AARCH64_MOD_ROR
2590 && opcode->iclass != log_shift)
2592 set_other_error (mismatch_detail, idx,
2593 _("'ROR' operator not allowed"));
/* Shift amount bounded by the register width minus one.  */
2596 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2597 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2599 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2616 /* Main entrypoint for the operand constraint checking.
2618 Return 1 if operands of *INST meet the constraint applied by the operand
2619 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2620 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2621 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2622 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2623 error kind when it is notified that an instruction does not pass the check).
2625 Un-determined operand qualifiers may get established during the process. */
2628 aarch64_match_operands_constraint (aarch64_inst *inst,
2629 aarch64_operand_error *mismatch_detail)
/* NOTE(review): this numbered listing elides the return type, the local
   declaration of I, the braces and the return statements between the
   numbered lines.  */
2633 DEBUG_TRACE ("enter");
2635 /* Check for cases where a source register needs to be the same as the
2636 destination register. Do this before matching qualifiers since if
2637 an instruction has both invalid tying and invalid qualifiers,
2638 the error about qualifiers would suggest several alternative
2639 instructions that also have invalid tying. */
2640 i = inst->opcode->tied_operand;
2641 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2643 if (mismatch_detail)
/* Report the violating operand index so the caller can point at it.  */
2645 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2646 mismatch_detail->index = i;
2647 mismatch_detail->error = NULL;
2652 /* Match operands' qualifier.
2653 *INST has already had qualifier establish for some, if not all, of
2654 its operands; we need to find out whether these established
2655 qualifiers match one of the qualifier sequence in
2656 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2657 with the corresponding qualifier in such a sequence.
2658 Only basic operand constraint checking is done here; the more thorough
2659 constraint checking will carried out by operand_general_constraint_met_p,
2660 which has be to called after this in order to get all of the operands'
2661 qualifiers established. */
2662 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2664 DEBUG_TRACE ("FAIL on operand qualifier matching");
2665 if (mismatch_detail)
2667 /* Return an error type to indicate that it is the qualifier
2668 matching failure; we don't care about which operand as there
2669 are enough information in the opcode table to reproduce it. */
2670 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2671 mismatch_detail->index = -1;
2672 mismatch_detail->error = NULL;
2677 /* Match operands' constraint. */
2678 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2680 enum aarch64_opnd type = inst->opcode->operands[i];
/* AARCH64_OPND_NIL presumably terminates the operand list scan
   (the statement following this test is elided).  */
2681 if (type == AARCH64_OPND_NIL)
2683 if (inst->operands[i].skip)
2685 DEBUG_TRACE ("skip the incomplete operand %d", i);
2688 if (operand_general_constraint_met_p (inst->operands, i, type,
2689 inst->opcode, mismatch_detail) == 0)
2691 DEBUG_TRACE ("FAIL on operand %d", i);
2696 DEBUG_TRACE ("PASS");
2701 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2702 Also updates the TYPE of each INST->OPERANDS with the corresponding
2703 value of OPCODE->OPERANDS.
2705 Note that some operand qualifiers may need to be manually cleared by
2706 the caller before it further calls the aarch64_opcode_encode; by
2707 doing this, it helps the qualifier matching facilities work
2710 const aarch64_opcode*
2711 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
/* NOTE(review): the declaration of I and the loop/return statements are
   elided from this numbered listing; OLD is returned to the caller.  */
2714 const aarch64_opcode *old = inst->opcode;
2716 inst->opcode = opcode;
2718 /* Update the operand types. */
2719 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2721 inst->operands[i].type = opcode->operands[i];
/* NOTE(review): the statement after this test is elided; presumably the
   loop stops at the first NIL operand, which ends the operand list.  */
2722 if (opcode->operands[i] == AARCH64_OPND_NIL)
2726 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
/* Scan the AARCH64_OPND_NIL-terminated array OPERANDS for OPERAND.
   NOTE(review): the return statements for the match and not-found cases
   are elided from this numbered listing.  */
2732 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2735 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2736 if (operands[i] == operand)
2738 else if (operands[i] == AARCH64_OPND_NIL)
2743 /* R0...R30, followed by FOR31. */
2744 #define BANK(R, FOR31) \
2745 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2746 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2747 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2748 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2749 /* [0][0] 32-bit integer regs with sp Wn
2750 [0][1] 64-bit integer regs with sp Xn sf=1
2751 [1][0] 32-bit integer regs with #0 Wn
2752 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2753 static const char *int_reg[2][2][32] = {
2754 #define R32(X) "w" #X
2755 #define R64(X) "x" #X
2756 { BANK (R32, "wsp"), BANK (R64, "sp") },
2757 { BANK (R32, "wzr"), BANK (R64, "xzr") }
/* NOTE(review): the closing brace of int_reg and the matching #undef
   lines for R32/R64 are elided from this numbered listing.  */
2762 /* Names of the SVE vector registers, first with .S suffixes,
2763 then with .D suffixes. */
2765 static const char *sve_reg[2][32] = {
2766 #define ZS(X) "z" #X ".s"
2767 #define ZD(X) "z" #X ".d"
2768 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2774 /* Return the integer register name.
2775 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2777 static inline const char *
2778 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2780 const int has_zr = sp_reg_p ? 0 : 1;
2781 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2782 return int_reg[has_zr][is_64][regno];
2785 /* Like get_int_reg_name, but IS_64 is always 1. */
2787 static inline const char *
2788 get_64bit_int_reg_name (int regno, int sp_reg_p)
2790 const int has_zr = sp_reg_p ? 0 : 1;
2791 return int_reg[has_zr][1][regno];
2794 /* Get the name of the integer offset register in OPND, using the shift type
2795 to decide whether it's a word or doubleword. */
2797 static inline const char *
2798 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2800 switch (opnd->shifter.kind)
/* W-extend forms take a 32-bit (Wm) offset register.  */
2802 case AARCH64_MOD_UXTW:
2803 case AARCH64_MOD_SXTW:
2804 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
/* LSL and SXTX take the full 64-bit (Xm) offset register.  */
2806 case AARCH64_MOD_LSL:
2807 case AARCH64_MOD_SXTX:
2808 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
/* NOTE(review): the default case of this switch is elided from this
   numbered listing.  */
2815 /* Get the name of the SVE vector offset register in OPND, using the operand
2816 qualifier to decide whether the suffix should be .S or .D. */
2818 static inline const char *
2819 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2821 assert (qualifier == AARCH64_OPND_QLF_S_S
2822 || qualifier == AARCH64_OPND_QLF_S_D);
2823 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2826 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2846 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2847 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2848 (depending on the type of the instruction). IMM8 will be expanded to a
2849 single-precision floating-point value (SIZE == 4) or a double-precision
2850 floating-point value (SIZE == 8). A half-precision floating-point value
2851 (SIZE == 2) is expanded to a single-precision floating-point value. The
2852 expanded value is returned. */
2855 expand_fp_imm (int size, uint32_t imm8)
/* NOTE(review): the return type, the declaration of IMM, the SIZE test for
   the 8-byte branch and the final return are elided from this listing.  */
2858 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2860 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2861 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2862 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2863 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2864 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
/* The "-32" offsets build the high 32 bits of the double-precision
   pattern; the shift of the result into the top word is elided here.  */
2867 imm = (imm8_7 << (63-32)) /* imm8<7> */
2868 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2869 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2870 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2871 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2874 else if (size == 4 || size == 2)
2876 imm = (imm8_7 << 31) /* imm8<7> */
2877 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2878 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2879 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2883 /* An unsupported size. */
2890 /* Produce the string representation of the register list operand *OPND
2891 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2892 the register name that comes before the register number, such as "v". */
2894 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2897 const int num_regs = opnd->reglist.num_regs;
2898 const int first_reg = opnd->reglist.first_regno;
/* Register numbers in a list wrap modulo 32 (the & 0x1f below).  */
2899 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2900 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2901 char tb[8]; /* Temporary buffer. */
2903 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2904 assert (num_regs >= 1 && num_regs <= 4);
2906 /* Prepare the index if any. */
2907 if (opnd->reglist.has_index)
2908 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2909 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2913 /* The hyphenated form is preferred for disassembly if there are
2914 more than two registers in the list, and the register numbers
2915 are monotonically increasing in increments of one. */
2916 if (num_regs > 2 && last_reg > first_reg)
2917 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2918 prefix, last_reg, qlf_name, tb);
/* NOTE(review): the else branch below emits the explicit comma-separated
   form; its header and the switch on NUM_REGS are elided here.  */
2921 const int reg0 = first_reg;
2922 const int reg1 = (first_reg + 1) & 0x1f;
2923 const int reg2 = (first_reg + 2) & 0x1f;
2924 const int reg3 = (first_reg + 3) & 0x1f;
2929 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2932 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2933 prefix, reg1, qlf_name, tb);
2936 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2937 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2938 prefix, reg2, qlf_name, tb);
2941 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2942 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2943 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2949 /* Print the register+immediate address in OPND to BUF, which has SIZE
2950 characters. BASE is the name of the base register. */
2953 print_immediate_offset_address (char *buf, size_t size,
2954 const aarch64_opnd_info *opnd,
/* Pre-indexed writeback prints "[base, #imm]!"; post-indexed prints
   "[base], #imm".  */
2957 if (opnd->addr.writeback)
2959 if (opnd->addr.preind)
2960 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2962 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
/* MUL VL is the only shift operator that can appear in a reg+imm
   address (asserted below); it scales the offset by the vector length.  */
2966 if (opnd->shifter.operator_present)
2968 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL)
2969 snprintf (buf, size, "[%s, #%d, mul vl]",
2970 base, opnd->addr.offset.imm);
2972 else if (opnd->addr.offset.imm)
2973 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2975 snprintf (buf, size, "[%s]", base);
2979 /* Produce the string representation of the register offset address operand
2980 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2981 the names of the base and offset registers. */
2983 print_register_offset_address (char *buf, size_t size,
2984 const aarch64_opnd_info *opnd,
2985 const char *base, const char *offset)
2987 char tb[16]; /* Temporary buffer. */
2988 bfd_boolean print_extend_p = TRUE;
2989 bfd_boolean print_amount_p = TRUE;
2990 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2992 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2993 || !opnd->shifter.amount_present))
2995 /* Don't print the shift/extend amount when the amount is zero and
2996 when it is not the special case of an 8-bit load/store instruction. */
2997 print_amount_p = FALSE;
2998 /* Likewise, no need to print the shift operator LSL in such a
3000 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3001 print_extend_p = FALSE;
3004 /* Prepare for the extend/shift. */
/* NOTE(review): the conditions selecting between the two snprintf forms
   (with and without an amount) are elided from this numbered listing.  */
3008 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3009 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3010 (opnd->shifter.amount % 100));
3012 snprintf (tb, sizeof (tb), ", %s", shift_name);
3017 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3020 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3021 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3022 PC, PCREL_P and ADDRESS are used to pass in and return information about
3023 the PC-relative address calculation, where the PC value is passed in
3024 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3025 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3026 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3028 The function serves both the disassembler and the assembler diagnostics
3029 issuer, which is the reason why it lives in this file. */
3032 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3033 const aarch64_opcode *opcode,
3034 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3037 unsigned int i, num_conds;
3038 const char *name = NULL;
3039 const aarch64_opnd_info *opnd = opnds + idx;
3040 enum aarch64_modifier_kind kind;
3041 uint64_t addr, enum_value;
/* NOTE(review): this numbered listing elides the enclosing switch on the
   operand type, the per-case break statements and most closing braces;
   each "case AARCH64_OPND_*" group below is one arm of that switch.  */
3049 case AARCH64_OPND_Rd:
3050 case AARCH64_OPND_Rn:
3051 case AARCH64_OPND_Rm:
3052 case AARCH64_OPND_Rt:
3053 case AARCH64_OPND_Rt2:
3054 case AARCH64_OPND_Rs:
3055 case AARCH64_OPND_Ra:
3056 case AARCH64_OPND_Rt_SYS:
3057 case AARCH64_OPND_PAIRREG:
3058 case AARCH64_OPND_SVE_Rm:
3059 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3060 the <ic_op>, therefore we use opnd->present to override the
3061 generic optional-ness information. */
3062 if (opnd->type == AARCH64_OPND_Rt_SYS)
3067 /* Omit the operand, e.g. RET. */
3068 else if (optional_operand_p (opcode, idx)
3070 == get_optional_operand_default_value (opcode)))
3072 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3073 || opnd->qualifier == AARCH64_OPND_QLF_X)
3074 snprintf (buf, size, "%s",
3075 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3078 case AARCH64_OPND_Rd_SP:
3079 case AARCH64_OPND_Rn_SP:
3080 case AARCH64_OPND_SVE_Rn_SP:
3081 case AARCH64_OPND_Rm_SP:
3082 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3083 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3084 || opnd->qualifier == AARCH64_OPND_QLF_X
3085 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3086 snprintf (buf, size, "%s",
3087 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3090 case AARCH64_OPND_Rm_EXT:
3091 kind = opnd->shifter.kind;
3092 assert (idx == 1 || idx == 2);
3093 if ((aarch64_stack_pointer_p (opnds)
3094 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3095 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3096 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3097 && kind == AARCH64_MOD_UXTW)
3098 || (opnd->qualifier == AARCH64_OPND_QLF_X
3099 && kind == AARCH64_MOD_UXTX)))
3101 /* 'LSL' is the preferred form in this case. */
3102 kind = AARCH64_MOD_LSL;
3103 if (opnd->shifter.amount == 0)
3105 /* Shifter omitted. */
3106 snprintf (buf, size, "%s",
3107 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3111 if (opnd->shifter.amount)
3112 snprintf (buf, size, "%s, %s #%" PRIi64,
3113 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3114 aarch64_operand_modifiers[kind].name,
3115 opnd->shifter.amount);
3117 snprintf (buf, size, "%s, %s",
3118 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3119 aarch64_operand_modifiers[kind].name);
3122 case AARCH64_OPND_Rm_SFT:
3123 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3124 || opnd->qualifier == AARCH64_OPND_QLF_X);
3125 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3126 snprintf (buf, size, "%s",
3127 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3129 snprintf (buf, size, "%s, %s #%" PRIi64,
3130 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3131 aarch64_operand_modifiers[opnd->shifter.kind].name,
3132 opnd->shifter.amount);
3135 case AARCH64_OPND_Fd:
3136 case AARCH64_OPND_Fn:
3137 case AARCH64_OPND_Fm:
3138 case AARCH64_OPND_Fa:
3139 case AARCH64_OPND_Ft:
3140 case AARCH64_OPND_Ft2:
3141 case AARCH64_OPND_Sd:
3142 case AARCH64_OPND_Sn:
3143 case AARCH64_OPND_Sm:
3144 case AARCH64_OPND_SVE_VZn:
3145 case AARCH64_OPND_SVE_Vd:
3146 case AARCH64_OPND_SVE_Vm:
3147 case AARCH64_OPND_SVE_Vn:
3148 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3152 case AARCH64_OPND_Va:
3153 case AARCH64_OPND_Vd:
3154 case AARCH64_OPND_Vn:
3155 case AARCH64_OPND_Vm:
3156 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3157 aarch64_get_qualifier_name (opnd->qualifier));
3160 case AARCH64_OPND_Ed:
3161 case AARCH64_OPND_En:
3162 case AARCH64_OPND_Em:
3163 case AARCH64_OPND_SM3_IMM2:
3164 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3165 aarch64_get_qualifier_name (opnd->qualifier),
3166 opnd->reglane.index);
3169 case AARCH64_OPND_VdD1:
3170 case AARCH64_OPND_VnD1:
3171 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3174 case AARCH64_OPND_LVn:
3175 case AARCH64_OPND_LVt:
3176 case AARCH64_OPND_LVt_AL:
3177 case AARCH64_OPND_LEt:
3178 print_register_list (buf, size, opnd, "v");
3181 case AARCH64_OPND_SVE_Pd:
3182 case AARCH64_OPND_SVE_Pg3:
3183 case AARCH64_OPND_SVE_Pg4_5:
3184 case AARCH64_OPND_SVE_Pg4_10:
3185 case AARCH64_OPND_SVE_Pg4_16:
3186 case AARCH64_OPND_SVE_Pm:
3187 case AARCH64_OPND_SVE_Pn:
3188 case AARCH64_OPND_SVE_Pt:
3189 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3190 snprintf (buf, size, "p%d", opnd->reg.regno);
3191 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3192 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3193 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3194 aarch64_get_qualifier_name (opnd->qualifier));
3196 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3197 aarch64_get_qualifier_name (opnd->qualifier));
3200 case AARCH64_OPND_SVE_Za_5:
3201 case AARCH64_OPND_SVE_Za_16:
3202 case AARCH64_OPND_SVE_Zd:
3203 case AARCH64_OPND_SVE_Zm_5:
3204 case AARCH64_OPND_SVE_Zm_16:
3205 case AARCH64_OPND_SVE_Zn:
3206 case AARCH64_OPND_SVE_Zt:
3207 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3208 snprintf (buf, size, "z%d", opnd->reg.regno);
3210 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3211 aarch64_get_qualifier_name (opnd->qualifier));
3214 case AARCH64_OPND_SVE_ZnxN:
3215 case AARCH64_OPND_SVE_ZtxN:
3216 print_register_list (buf, size, opnd, "z");
3219 case AARCH64_OPND_SVE_Zm3_INDEX:
3220 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3221 case AARCH64_OPND_SVE_Zm4_INDEX:
3222 case AARCH64_OPND_SVE_Zn_INDEX:
3223 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3224 aarch64_get_qualifier_name (opnd->qualifier),
3225 opnd->reglane.index);
3228 case AARCH64_OPND_CRn:
3229 case AARCH64_OPND_CRm:
3230 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3233 case AARCH64_OPND_IDX:
3234 case AARCH64_OPND_MASK:
3235 case AARCH64_OPND_IMM:
3236 case AARCH64_OPND_IMM_2:
3237 case AARCH64_OPND_WIDTH:
3238 case AARCH64_OPND_UIMM3_OP1:
3239 case AARCH64_OPND_UIMM3_OP2:
3240 case AARCH64_OPND_BIT_NUM:
3241 case AARCH64_OPND_IMM_VLSL:
3242 case AARCH64_OPND_IMM_VLSR:
3243 case AARCH64_OPND_SHLL_IMM:
3244 case AARCH64_OPND_IMM0:
3245 case AARCH64_OPND_IMMR:
3246 case AARCH64_OPND_IMMS:
3247 case AARCH64_OPND_FBITS:
3248 case AARCH64_OPND_SIMM5:
3249 case AARCH64_OPND_SVE_SHLIMM_PRED:
3250 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3251 case AARCH64_OPND_SVE_SHRIMM_PRED:
3252 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3253 case AARCH64_OPND_SVE_SIMM5:
3254 case AARCH64_OPND_SVE_SIMM5B:
3255 case AARCH64_OPND_SVE_SIMM6:
3256 case AARCH64_OPND_SVE_SIMM8:
3257 case AARCH64_OPND_SVE_UIMM3:
3258 case AARCH64_OPND_SVE_UIMM7:
3259 case AARCH64_OPND_SVE_UIMM8:
3260 case AARCH64_OPND_SVE_UIMM8_53:
3261 case AARCH64_OPND_IMM_ROT1:
3262 case AARCH64_OPND_IMM_ROT2:
3263 case AARCH64_OPND_IMM_ROT3:
3264 case AARCH64_OPND_SVE_IMM_ROT1:
3265 case AARCH64_OPND_SVE_IMM_ROT2:
3266 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3269 case AARCH64_OPND_SVE_I1_HALF_ONE:
3270 case AARCH64_OPND_SVE_I1_HALF_TWO:
3271 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3274 c.i = opnd->imm.value;
3275 snprintf (buf, size, "#%.1f", c.f);
3279 case AARCH64_OPND_SVE_PATTERN:
3280 if (optional_operand_p (opcode, idx)
3281 && opnd->imm.value == get_optional_operand_default_value (opcode))
3283 enum_value = opnd->imm.value;
3284 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3285 if (aarch64_sve_pattern_array[enum_value])
3286 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3288 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3291 case AARCH64_OPND_SVE_PATTERN_SCALED:
3292 if (optional_operand_p (opcode, idx)
3293 && !opnd->shifter.operator_present
3294 && opnd->imm.value == get_optional_operand_default_value (opcode))
3296 enum_value = opnd->imm.value;
3297 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3298 if (aarch64_sve_pattern_array[opnd->imm.value])
3299 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3301 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3302 if (opnd->shifter.operator_present)
3304 size_t len = strlen (buf);
3305 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3306 aarch64_operand_modifiers[opnd->shifter.kind].name,
3307 opnd->shifter.amount);
3311 case AARCH64_OPND_SVE_PRFOP:
3312 enum_value = opnd->imm.value;
3313 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3314 if (aarch64_sve_prfop_array[enum_value])
3315 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3317 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3320 case AARCH64_OPND_IMM_MOV:
3321 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3323 case 4: /* e.g. MOV Wd, #<imm32>. */
3325 int imm32 = opnd->imm.value;
3326 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3329 case 8: /* e.g. MOV Xd, #<imm64>. */
3330 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3331 opnd->imm.value, opnd->imm.value);
3333 default: assert (0);
3337 case AARCH64_OPND_FPIMM0:
3338 snprintf (buf, size, "#0.0");
3341 case AARCH64_OPND_LIMM:
3342 case AARCH64_OPND_AIMM:
3343 case AARCH64_OPND_HALF:
3344 case AARCH64_OPND_SVE_INV_LIMM:
3345 case AARCH64_OPND_SVE_LIMM:
3346 case AARCH64_OPND_SVE_LIMM_MOV:
3347 if (opnd->shifter.amount)
3348 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3349 opnd->shifter.amount);
3351 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3354 case AARCH64_OPND_SIMD_IMM:
3355 case AARCH64_OPND_SIMD_IMM_SFT:
3356 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3357 || opnd->shifter.kind == AARCH64_MOD_NONE)
3358 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3360 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3361 aarch64_operand_modifiers[opnd->shifter.kind].name,
3362 opnd->shifter.amount);
3365 case AARCH64_OPND_SVE_AIMM:
3366 case AARCH64_OPND_SVE_ASIMM:
3367 if (opnd->shifter.amount)
3368 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3369 opnd->shifter.amount);
3371 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3374 case AARCH64_OPND_FPIMM:
3375 case AARCH64_OPND_SIMD_FPIMM:
3376 case AARCH64_OPND_SVE_FPIMM8:
3377 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3379 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3382 c.i = expand_fp_imm (2, opnd->imm.value);
3383 snprintf (buf, size, "#%.18e", c.f);
3386 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3389 c.i = expand_fp_imm (4, opnd->imm.value);
3390 snprintf (buf, size, "#%.18e", c.f);
3393 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3396 c.i = expand_fp_imm (8, opnd->imm.value);
3397 snprintf (buf, size, "#%.18e", c.d);
3400 default: assert (0);
3404 case AARCH64_OPND_CCMP_IMM:
3405 case AARCH64_OPND_NZCV:
3406 case AARCH64_OPND_EXCEPTION:
3407 case AARCH64_OPND_UIMM4:
3408 case AARCH64_OPND_UIMM7:
3409 if (optional_operand_p (opcode, idx) == TRUE
3410 && (opnd->imm.value ==
3411 (int64_t) get_optional_operand_default_value (opcode)))
3412 /* Omit the operand, e.g. DCPS1. */
3414 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3417 case AARCH64_OPND_COND:
3418 case AARCH64_OPND_COND1:
3419 snprintf (buf, size, "%s", opnd->cond->names[0]);
3420 num_conds = ARRAY_SIZE (opnd->cond->names);
3421 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3423 size_t len = strlen (buf);
3425 snprintf (buf + len, size - len, " // %s = %s",
3426 opnd->cond->names[0], opnd->cond->names[i]);
3428 snprintf (buf + len, size - len, ", %s",
3429 opnd->cond->names[i]);
3433 case AARCH64_OPND_ADDR_ADRP:
3434 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3440 /* This is not necessary during the disassembling, as print_address_func
3441 in the disassemble_info will take care of the printing. But some
3442 other callers may be still interested in getting the string in *BUF,
3443 so here we do snprintf regardless. */
3444 snprintf (buf, size, "#0x%" PRIx64, addr);
3447 case AARCH64_OPND_ADDR_PCREL14:
3448 case AARCH64_OPND_ADDR_PCREL19:
3449 case AARCH64_OPND_ADDR_PCREL21:
3450 case AARCH64_OPND_ADDR_PCREL26:
3451 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3456 /* This is not necessary during the disassembling, as print_address_func
3457 in the disassemble_info will take care of the printing. But some
3458 other callers may be still interested in getting the string in *BUF,
3459 so here we do snprintf regardless. */
3460 snprintf (buf, size, "#0x%" PRIx64, addr);
3463 case AARCH64_OPND_ADDR_SIMPLE:
3464 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3465 case AARCH64_OPND_SIMD_ADDR_POST:
3466 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3467 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3469 if (opnd->addr.offset.is_reg)
3470 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3472 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3475 snprintf (buf, size, "[%s]", name);
3478 case AARCH64_OPND_ADDR_REGOFF:
3479 case AARCH64_OPND_SVE_ADDR_RR:
3480 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3481 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3482 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3483 case AARCH64_OPND_SVE_ADDR_RX:
3484 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3485 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3486 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3487 print_register_offset_address
3488 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3489 get_offset_int_reg_name (opnd));
3492 case AARCH64_OPND_SVE_ADDR_RZ:
3493 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3494 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3495 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3496 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3497 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3498 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3499 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3500 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3501 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3502 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3503 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3504 print_register_offset_address
3505 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3506 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3509 case AARCH64_OPND_ADDR_SIMM7:
3510 case AARCH64_OPND_ADDR_SIMM9:
3511 case AARCH64_OPND_ADDR_SIMM9_2:
3512 case AARCH64_OPND_ADDR_SIMM10:
3513 case AARCH64_OPND_ADDR_OFFSET:
3514 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3515 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3516 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3517 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3518 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3519 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3520 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3521 case AARCH64_OPND_SVE_ADDR_RI_U6:
3522 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3523 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3524 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3525 print_immediate_offset_address
3526 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3529 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3530 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3531 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3532 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3533 print_immediate_offset_address
3535 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3538 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3539 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3540 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3541 print_register_offset_address
3543 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3544 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3547 case AARCH64_OPND_ADDR_UIMM12:
3548 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3549 if (opnd->addr.offset.imm)
3550 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3552 snprintf (buf, size, "[%s]", name);
3555 case AARCH64_OPND_SYSREG:
/* Prefer the first named, non-deprecated system register match.  */
3556 for (i = 0; aarch64_sys_regs[i].name; ++i)
3557 if (aarch64_sys_regs[i].value == opnd->sysreg
3558 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3560 if (aarch64_sys_regs[i].name)
3561 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3564 /* Implementation defined system register. */
3565 unsigned int value = opnd->sysreg;
3566 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3567 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3572 case AARCH64_OPND_PSTATEFIELD:
3573 for (i = 0; aarch64_pstatefields[i].name; ++i)
3574 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3576 assert (aarch64_pstatefields[i].name);
3577 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3580 case AARCH64_OPND_SYSREG_AT:
3581 case AARCH64_OPND_SYSREG_DC:
3582 case AARCH64_OPND_SYSREG_IC:
3583 case AARCH64_OPND_SYSREG_TLBI:
3584 snprintf (buf, size, "%s", opnd->sysins_op->name);
3587 case AARCH64_OPND_BARRIER:
3588 snprintf (buf, size, "%s", opnd->barrier->name);
3591 case AARCH64_OPND_BARRIER_ISB:
3592 /* Operand can be omitted, e.g. in DCPS1. */
3593 if (! optional_operand_p (opcode, idx)
3594 || (opnd->barrier->value
3595 != get_optional_operand_default_value (opcode)))
3596 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3599 case AARCH64_OPND_PRFOP:
3600 if (opnd->prfop->name != NULL)
3601 snprintf (buf, size, "%s", opnd->prfop->name);
3603 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3606 case AARCH64_OPND_BARRIER_PSB:
3607 snprintf (buf, size, "%s", opnd->hint_option->name);
/* Pack a system-register encoding op0:op1:CRn:CRm:op2 into a single
   integer value (built at bit 5 then shifted back down).  */
3615 #define CPENC(op0,op1,crn,crm,op2) \
3616 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3617 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3618 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3619 /* for 3.9.10 System Instructions */
3620 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
/* Flag bits for the aarch64_sys_regs table entries below.  */
3642 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3647 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3652 #define F_HASXT 0x4 /* System instruction register <Xt>
3656 /* TODO there are two more issues need to be resolved
3657 1. handle read-only and write-only system registers
3658 2. handle cpu-implementation-defined system registers. */
/* System registers accepted by MRS/MSR: assembler name, the
   op0:op1:CRn:CRm:op2 encoding (built with the CPENC/CPEN_ macros) and F_*
   flags.  A null name terminates the table.  "RO"/"r"/"w" notes mark
   registers the architecture defines as read-only or write-only (not yet
   enforced -- see the TODO above).  */
const aarch64_sys_reg aarch64_sys_regs [] =
  { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
  { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1", CPEN_(0,C0,1), 0 },
  { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0", CPEN_(0,C1,0), 0 },
  { "spsel", CPEN_(0,C2,0), 0 },
  { "daif", CPEN_(3,C2,1), 0 },
  { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
  { "pan", CPEN_(0,C2,3), F_ARCHEXT },
  { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv", CPEN_(3,C2,0), 0 },
  { "fpcr", CPEN_(3,C4,0), 0 },
  { "fpsr", CPEN_(3,C4,1), 0 },
  { "dspsr_el0", CPEN_(3,C5,0), 0 },
  { "dlr_el0", CPEN_(3,C5,1), 0 },
  { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
  { "elr_el2", CPEN_(4,C0,1), 0 },
  { "sp_el1", CPEN_(4,C1,0), 0 },
  { "spsr_irq", CPEN_(4,C3,0), 0 },
  { "spsr_abt", CPEN_(4,C3,1), 0 },
  { "spsr_und", CPEN_(4,C3,2), 0 },
  { "spsr_fiq", CPEN_(4,C3,3), 0 },
  { "spsr_el3", CPEN_(6,C0,0), 0 },
  { "elr_el3", CPEN_(6,C0,1), 0 },
  { "sp_el2", CPEN_(6,C1,0), 0 },
  { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
  { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
  { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
  { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
  { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
  { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
  { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
  { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
  { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
  { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
  { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
  { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
  { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
  { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
  { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
  { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
  { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
  { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
  { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
  { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
  { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
  { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
  { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
  { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
  { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
  { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
  { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
  { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
  { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
  { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
  { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
  { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
  { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
  { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
  { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
  { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
  { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
  { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
  { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
  { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
  { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
  { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
  { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
  { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
  { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
  { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
  { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
  { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
  { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
  { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
  { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
  { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
  { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
  { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
  { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
  { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
  { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
  { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
  { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
  { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
  { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
  { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
  { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
  { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
  { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
  { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
  { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
  { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
  { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
  { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
  { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
  { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
  { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
  { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
  { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
  { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
  { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
  { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
  { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
  { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
  { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
  { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
  { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
  { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
  { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
  { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1", CPENC(3,0,C6,C0,0), 0 },
  { "far_el2", CPENC(3,4,C6,C0,0), 0 },
  { "far_el3", CPENC(3,6,C6,C0,0), 0 },
  { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
  { "par_el1", CPENC(3,0,C7,C4,0), 0 },
  { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
  { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
  { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
  { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
  { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
  { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
  { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
  { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
  { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
  { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
  { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
  { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
  { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
  { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
  { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
  { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
  { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
  { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
  { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
  { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
  { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
  { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
  { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
  { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
  { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
  { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
  { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
  { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
  { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
  { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
  { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
  { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
  { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
  { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
  { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
  { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
  { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
  { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
  { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
  { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
  { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
  { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
  { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
  { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
  { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
  { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
  { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
  { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
  { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
  { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
  { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
  { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
  { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
  { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
  { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
  { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
  { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
  { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
  { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
  { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
  { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
  { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
  { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
  { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
  { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
  { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
  { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
  { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
  { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
  { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
  { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
  { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
  { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
  { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
  { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
  { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
  { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
  { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
  { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
  { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
  { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
  { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
  { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
  { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
  { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
  { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
  { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
  { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
  { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
  { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
  { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
  { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
  { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
  { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
  { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
  { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
  { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
  { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
  { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
  { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
  { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
  { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
  { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
  { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
  { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
  { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
  { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
  { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
  { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
  { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
  { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
  { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
  { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
  { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
  { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
  { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
  { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
  { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
  { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
  { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
  { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
  { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
  { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
  { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
  { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
  { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
  { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
  { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
  { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
/* Report whether system register REG is marked deprecated
   (F_DEPRECATED flag in aarch64_sys_regs).  */
aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
  return (reg->flags & F_DEPRECATED) != 0;
/* Decide whether system register REG is available under the feature set
   FEATURES.  Registers without the F_ARCHEXT flag need no architecture
   extension; for the rest, REG's encoding is matched against the CPENC /
   CPEN_ / CPENS values used in aarch64_sys_regs and the corresponding
   AARCH64_FEATURE_* bit is required.  */
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
  if (!(reg->flags & F_ARCHEXT))

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))

  /* UAO (matches "uao" in aarch64_sys_regs).  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))

  /* SVE registers (id_aa64zfr0_el1, zcr_el*, zidr_el1).  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))

  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
/* The CPENC below is fairly misleading: the fields here are NOT in CPENC
   form, they are in op2:op1 form.  The fields are encoded by
   ins_pstatefield, which simply shifts the value by the width of the fields
   in a loop.  So if you CPENC them, only the first value will be set and the
   rest are masked out to 0.  As an example, with op2 = 3 and op1 = 2, CPENC
   would produce a value of 0b110000000001000000 (0x30040) while what you
   want is 0b11.  */
/* PSTATE fields accepted by MSR (immediate): assembler name, op2:op1-form
   encoding (NOT CPENC form) and F_* flags.  A null name ends the table.  */
const aarch64_sys_reg aarch64_pstatefields [] =
  { "spsel", 0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan", 0x04, F_ARCHEXT },
  { "uao", 0x03, F_ARCHEXT },
  { "dit", 0x1a, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
/* Decide whether PSTATE field REG (an aarch64_pstatefields entry) is
   available under FEATURES.  Fields without F_ARCHEXT need no architecture
   extension; the others are matched by their op2:op1 encoding value.  */
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_reg *reg)
  if (!(reg->flags & F_ARCHEXT))

  /* PAN.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x04
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))

  /* UAO.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x03
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))

  /* DIT.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x1a
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
/* Operands of the IC (instruction-cache maintenance) instruction: name,
   CPENS encoding, flags.  F_HASXT marks the forms that take an <Xt>
   register operand.  A null name ends the table.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
  { "ialluis", CPENS(0,C7,C1,0), 0 },
  { "iallu", CPENS(0,C7,C5,0), 0 },
  { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
  { 0, CPENS(0,0,0,0), 0 }
/* Operands of the DC (data-cache maintenance) instruction; same layout as
   aarch64_sys_regs_ic.  "cvap" (F_ARCHEXT) is an architecture-extension
   form gated elsewhere.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
  { "zva", CPENS (3, C7, C4, 1), F_HASXT },
  { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
  { "isw", CPENS (0, C7, C6, 2), F_HASXT },
  { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
  { "csw", CPENS (0, C7, C10, 2), F_HASXT },
  { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
  { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
  { "civac", CPENS (3, C7, C14, 1), F_HASXT },
  { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
  { 0, CPENS(0,0,0,0), 0 }
/* Operands of the AT (address translation) instruction; same layout as
   aarch64_sys_regs_ic.  Every form takes an <Xt> operand (F_HASXT); the
   "s1e1rp"/"s1e1wp" forms are architecture-extension gated (F_ARCHEXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
  { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
  { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
  { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
  { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
  { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
  { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
  { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
  { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
  { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
  { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
  { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
  { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
  { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
  { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
  { 0, CPENS(0,0,0,0), 0 }
/* TLB maintenance (TLBI) instruction operands.  Each entry gives the
   operand mnemonic, its op1:CRn:CRm:op2 encoding (via CPENS), and flags:
   F_HASXT when the instruction takes an Xt register operand, F_ARCHEXT
   when it is only available with an architecture extension.  The table
   is terminated by an all-zero sentinel entry.  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1", CPENS(0,C8,C7,0), 0 },
    { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2", CPENS(4,C8,C7,0), 0 },
    { "alle2is", CPENS(4,C8,C3,0), 0 },
    { "alle1", CPENS(4,C8,C7,4), 0 },
    { "alle1is", CPENS(4,C8,C3,4), 0 },
    { "alle3", CPENS(6,C8,C7,0), 0 },
    { "alle3is", CPENS(6,C8,C3,0), 0 },
    { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable ("os") variants; extension-gated (F_ARCHEXT) —
       presumably the ARMv8.4 TLBI additions, confirm against the
       feature checks in aarch64_sys_ins_reg_supported_p / gas.  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range-based ("r...") variants; also extension-gated.  All take
       an Xt operand encoding the address range.  */
    { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    /* Sentinel.  */
    { 0, CPENS(0,0,0,0), 0 }
};
4431 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4433 return (sys_ins_reg->flags & F_HASXT) != 0;
4437 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4438 const aarch64_sys_ins_reg *reg)
4440 if (!(reg->flags & F_ARCHEXT))
4443 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4444 if (reg->value == CPENS (3, C7, C12, 1)
4445 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4448 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4449 if ((reg->value == CPENS (0, C7, C9, 0)
4450 || reg->value == CPENS (0, C7, C9, 1))
4451 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* Extract bit BT of INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit field HI..LO of INSN.  The mask is built
   in 64-bit arithmetic so that a full-width 32-bit field
   (HI - LO + 1 == 32) does not left-shift a 32-bit 1 by 32 bits,
   which would be undefined behaviour.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1ULL << (((HI) - (LO)) + 1)) - 1))
4478 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4479 const aarch64_insn insn)
4481 int t = BITS (insn, 4, 0);
4482 int n = BITS (insn, 9, 5);
4483 int t2 = BITS (insn, 14, 10);
4487 /* Write back enabled. */
4488 if ((t == n || t2 == n) && n != 31)
4502 /* Return true if VALUE cannot be moved into an SVE register using DUP
4503 (with any element size, not just ESIZE) and if using DUPM would
4504 therefore be OK. ESIZE is the number of bytes in the immediate. */
4507 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4509 int64_t svalue = uvalue;
4510 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4512 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4514 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4516 svalue = (int32_t) uvalue;
4517 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4519 svalue = (int16_t) uvalue;
4520 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4524 if ((svalue & 0xff) == 0)
4526 return svalue < -128 || svalue >= 128;
/* Include the opcode description table as well as the operand description
   table.  */
/* Token-pasting helper: lets aarch64-tbl.h name the verify_* functions
   defined above (e.g. VERIFIER (ldpsw) -> verify_ldpsw).  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"