1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
#include "sysdep.h"
#include <assert.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

#include "opintl.h"
#include "libiberty.h"

#include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Runtime switch for the DEBUG_TRACE/dump helpers below; only compiled in
   when DEBUG_AARCH64 is defined.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
/* Patterns describing how the operand qualifiers of an AdvSIMD instruction
   relate to each other; used to decide which operand's qualifier drives the
   encoding/decoding of the size:Q fields.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Indexed by enum data_pattern; each element is the index of the operand
   that is significant for the size:Q encoding under that pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
202 const aarch64_field fields[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* N: in logical (immediate) instructions. */
257 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
258 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
259 { 31, 1 }, /* sf: in integer data processing instructions. */
260 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
261 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
262 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
263 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
264 { 31, 1 }, /* b5: in the test bit and branch instructions. */
265 { 19, 5 }, /* b40: in the test bit and branch instructions. */
266 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
267 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
268 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
269 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
270 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
271 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
272 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
273 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
274 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
275 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
276 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
277 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
278 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
279 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
280 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
281 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
282 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
283 { 5, 1 }, /* SVE_i1: single-bit immediate. */
284 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
285 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
286 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
287 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
288 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
289 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
290 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
291 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
292 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
293 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
294 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
295 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
296 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
297 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
298 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
299 { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
302 enum aarch64_operand_class
303 aarch64_get_operand_class (enum aarch64_opnd type)
305 return aarch64_operands[type].op_class;
309 aarch64_get_operand_name (enum aarch64_opnd type)
311 return aarch64_operands[type].name;
314 /* Get operand description string.
315 This is usually for the diagnosis purpose. */
317 aarch64_get_operand_desc (enum aarch64_opnd type)
319 return aarch64_operands[type].desc;
322 /* Table of all conditional affixes. */
/* NOTE(review): this extract is truncated -- only the 0x3 (CC/LO/UL) entry
   of the 16-entry table survives here; the opening brace, the entries for
   encodings 0x0-0x2 and 0x4-0xf, and the closing brace were dropped.  The
   table is indexed by the 4-bit AArch64 condition-code encoding; the exact
   alias set per entry in this revision could not be confirmed from this
   capture -- restore from upstream before building.  */
323 const aarch64_cond aarch64_conds[16] =
328 {{"cc", "lo", "ul"}, 0x3},
344 get_cond_from_value (aarch64_insn value)
347 return &aarch64_conds[(unsigned int) value];
351 get_inverted_cond (const aarch64_cond *cond)
353 return &aarch64_conds[cond->value ^ 0x1];
356 /* Table describing the operand extension/shifting operators; indexed by
357 enum aarch64_modifier_kind.
359 The value column provides the most common values for encoding modifiers,
360 which enables table-driven encoding/decoding for the modifiers. */
361 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
382 enum aarch64_modifier_kind
383 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
385 return desc - aarch64_operand_modifiers;
389 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
391 return aarch64_operand_modifiers[kind].value;
394 enum aarch64_modifier_kind
395 aarch64_get_operand_modifier_from_value (aarch64_insn value,
396 bfd_boolean extend_p)
398 if (extend_p == TRUE)
399 return AARCH64_MOD_UXTB + value;
401 return AARCH64_MOD_LSL - value;
405 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
407 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
411 static inline bfd_boolean
412 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
414 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
418 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
438 /* Table describing the operands supported by the aliases of the HINT
441 The name column is the operand that is accepted for the alias. The value
442 column is the hint number of the alias. The list of operands is terminated
443 by NULL in the name column. */
445 const struct aarch64_name_value_pair aarch64_hint_options[] =
447 { "csync", 0x11 }, /* PSB CSYNC. */
451 /* op -> op: load = 0 instruction = 1 store = 2
453 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
454 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
455 const struct aarch64_name_value_pair aarch64_prfops[32] =
457 { "pldl1keep", B(0, 1, 0) },
458 { "pldl1strm", B(0, 1, 1) },
459 { "pldl2keep", B(0, 2, 0) },
460 { "pldl2strm", B(0, 2, 1) },
461 { "pldl3keep", B(0, 3, 0) },
462 { "pldl3strm", B(0, 3, 1) },
465 { "plil1keep", B(1, 1, 0) },
466 { "plil1strm", B(1, 1, 1) },
467 { "plil2keep", B(1, 2, 0) },
468 { "plil2strm", B(1, 2, 1) },
469 { "plil3keep", B(1, 3, 0) },
470 { "plil3strm", B(1, 3, 1) },
473 { "pstl1keep", B(2, 1, 0) },
474 { "pstl1strm", B(2, 1, 1) },
475 { "pstl2keep", B(2, 2, 0) },
476 { "pstl2strm", B(2, 2, 1) },
477 { "pstl3keep", B(2, 3, 0) },
478 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
535 /* Return 1 if OPERAND is SP or WSP. */
537 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
539 return ((aarch64_get_operand_class (operand->type)
540 == AARCH64_OPND_CLASS_INT_REG)
541 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
542 && operand->reg.regno == 31);
545 /* Return 1 if OPERAND is XZR or WZP. */
547 aarch64_zero_register_p (const aarch64_opnd_info *operand)
549 return ((aarch64_get_operand_class (operand->type)
550 == AARCH64_OPND_CLASS_INT_REG)
551 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
552 && operand->reg.regno == 31);
555 /* Return true if the operand *OPERAND that has the operand code
556 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
557 qualified by the qualifier TARGET. */
560 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
561 aarch64_opnd_qualifier_t target)
563 switch (operand->qualifier)
565 case AARCH64_OPND_QLF_W:
566 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
569 case AARCH64_OPND_QLF_X:
570 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
573 case AARCH64_OPND_QLF_WSP:
574 if (target == AARCH64_OPND_QLF_W
575 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
578 case AARCH64_OPND_QLF_SP:
579 if (target == AARCH64_OPND_QLF_X
580 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
590 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
591 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
593 Return NIL if more than one expected qualifiers are found. */
595 aarch64_opnd_qualifier_t
596 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
598 const aarch64_opnd_qualifier_t known_qlf,
605 When the known qualifier is NIL, we have to assume that there is only
606 one qualifier sequence in the *QSEQ_LIST and return the corresponding
607 qualifier directly. One scenario is that for instruction
608 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
609 which has only one possible valid qualifier sequence
611 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
612 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
614 Because the qualifier NIL has dual roles in the qualifier sequence:
615 it can mean no qualifier for the operand, or the qualifer sequence is
616 not in use (when all qualifiers in the sequence are NILs), we have to
617 handle this special case here. */
618 if (known_qlf == AARCH64_OPND_NIL)
620 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
621 return qseq_list[0][idx];
624 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
626 if (qseq_list[i][known_idx] == known_qlf)
629 /* More than one sequences are found to have KNOWN_QLF at
631 return AARCH64_OPND_NIL;
636 return qseq_list[saved_i][idx];
/* Kinds of operand qualifier, selecting how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
660 /* Indexed by the operand qualifier enumerators. */
/* NOTE(review): this extract is truncated -- the opening brace, a few misc
   entries after "imm_1_64", and the closing brace were dropped from this
   capture.  The row order must stay exactly aligned with enum
   aarch64_opnd_qualifier; restore the missing lines from upstream before
   building.  For OQK_OPD_VARIANT rows the three numbers are element size
   in bytes, number of elements and the standard encoding value; for
   OQK_VALUE_IN_RANGE rows they are lower bound, upper bound, unused.  */
661 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
663 {0, 0, 0, "NIL", OQK_NIL},
665 /* Operand variant qualifiers.
667 element size, number of elements and common value for encoding. */
669 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
670 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
671 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
672 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
674 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
675 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
676 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
677 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
678 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
680 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
681 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
682 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
683 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
684 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
685 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
686 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
687 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
688 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
689 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
/* SVE predication qualifiers: zeroing (z) and merging (m).  */
691 {0, 0, 0, "z", OQK_OPD_VARIANT},
692 {0, 0, 0, "m", OQK_OPD_VARIANT},
694 /* Qualifiers constraining the value range.
696 Lower bound, higher bound, unused. */
698 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
699 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
700 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
701 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
702 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
703 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
705 /* Qualifiers for miscellaneous purpose.
707 unused, unused and unused. */
712 {0, 0, 0, "retrieving", 0},
715 static inline bfd_boolean
716 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
718 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
722 static inline bfd_boolean
723 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
725 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
730 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
732 return aarch64_opnd_qualifiers[qualifier].desc;
735 /* Given an operand qualifier, return the expected data element size
736 of a qualified operand. */
738 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
740 assert (operand_variant_qualifier_p (qualifier) == TRUE);
741 return aarch64_opnd_qualifiers[qualifier].data0;
745 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
747 assert (operand_variant_qualifier_p (qualifier) == TRUE);
748 return aarch64_opnd_qualifiers[qualifier].data1;
752 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
754 assert (operand_variant_qualifier_p (qualifier) == TRUE);
755 return aarch64_opnd_qualifiers[qualifier].data2;
759 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
761 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
762 return aarch64_opnd_qualifiers[qualifier].data0;
766 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
768 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
769 return aarch64_opnd_qualifiers[qualifier].data1;
#ifdef DEBUG_AARCH64
/* printf-style debug tracing helper.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Dump a qualifier sequence of AARCH64_MAX_OPND_NUM entries.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently set on OPND against the candidate
   sequence QUALIFIER, for debugging the matching algorithm.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
810 /* TODO improve this, we can have an extra field at the runtime to
811 store the number of operands rather than calculating it every time. */
814 aarch64_num_of_operands (const aarch64_opcode *opcode)
817 const enum aarch64_opnd *opnds = opcode->operands;
818 while (opnds[i++] != AARCH64_OPND_NIL)
821 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
825 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
826 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
828 N.B. on the entry, it is very likely that only some operands in *INST
829 have had their qualifiers been established.
831 If STOP_AT is not -1, the function will only try to match
832 the qualifier sequence for operands before and including the operand
833 of index STOP_AT; and on success *RET will only be filled with the first
834 (STOP_AT+1) qualifiers.
836 A couple examples of the matching algorithm:
844 Apart from serving the main encoding routine, this can also be called
845 during or after the operand decoding. */
/* NOTE(review): this extract is truncated -- the return type line, braces,
   the match-flag bookkeeping and the final return statements were dropped
   from this capture; only the skeleton below survives.  Restore from
   upstream before building.  */
848 aarch64_find_best_match (const aarch64_inst *inst,
849 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
850 int stop_at, aarch64_opnd_qualifier_t *ret)
854 const aarch64_opnd_qualifier_t *qualifiers;
856 num_opnds = aarch64_num_of_operands (inst->opcode);
/* An opcode with no operands trivially matches.  */
859 DEBUG_TRACE ("SUCCEED: no operand");
/* Clamp STOP_AT into [0, num_opnds-1]; -1 means match all operands.  */
863 if (stop_at < 0 || stop_at >= num_opnds)
864 stop_at = num_opnds - 1;
866 /* For each pattern. */
867 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
870 qualifiers = *qualifiers_list;
872 /* Start as positive. */
875 DEBUG_TRACE ("%d", i);
878 dump_match_qualifiers (inst->operands, qualifiers);
881 /* Most opcodes has much fewer patterns in the list.
882 First NIL qualifier indicates the end in the list. */
883 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
885 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
/* Compare this candidate sequence against the qualifiers already
   established on the instruction's operands.  */
891 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
893 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
895 /* Either the operand does not have qualifier, or the qualifier
896 for the operand needs to be deduced from the qualifier
898 In the latter case, any constraint checking related with
899 the obtained qualifier should be done later in
900 operand_general_constraint_met_p. */
903 else if (*qualifiers != inst->operands[j].qualifier)
905 /* Unless the target qualifier can also qualify the operand
906 (which has already had a non-nil qualifier), non-equal
907 qualifiers are generally un-matched. */
908 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
917 continue; /* Equal qualifiers are certainly matched. */
920 /* Qualifiers established. */
927 /* Fill the result in *RET. */
929 qualifiers = *qualifiers_list;
931 DEBUG_TRACE ("complete qualifiers using list %d", i);
934 dump_qualifier_sequence (qualifiers);
/* Copy the matched sequence up to STOP_AT, padding the remainder of
   *RET with NIL.  */
937 for (j = 0; j <= stop_at; ++j, ++qualifiers)
938 ret[j] = *qualifiers;
939 for (; j < AARCH64_MAX_OPND_NUM; ++j)
940 ret[j] = AARCH64_OPND_QLF_NIL;
942 DEBUG_TRACE ("SUCCESS");
946 DEBUG_TRACE ("FAIL");
950 /* Operand qualifier matching and resolving.
952 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
953 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
955 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
959 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
962 aarch64_opnd_qualifier_seq_t qualifiers;
964 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
967 DEBUG_TRACE ("matching FAIL");
971 if (inst->opcode->flags & F_STRICT)
973 /* Require an exact qualifier match, even for NIL qualifiers. */
974 nops = aarch64_num_of_operands (inst->opcode);
975 for (i = 0; i < nops; ++i)
976 if (inst->operands[i].qualifier != qualifiers[i])
980 /* Update the qualifiers. */
981 if (update_p == TRUE)
982 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
984 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
986 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
987 "update %s with %s for operand %d",
988 aarch64_get_qualifier_name (inst->operands[i].qualifier),
989 aarch64_get_qualifier_name (qualifiers[i]), i);
990 inst->operands[i].qualifier = qualifiers[i];
993 DEBUG_TRACE ("matching SUCCESS");
997 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1000 IS32 indicates whether value is a 32-bit immediate or not.
1001 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1002 amount will be returned in *SHIFT_AMOUNT. */
1005 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1009 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1013 /* Allow all zeros or all ones in top 32-bits, so that
1014 32-bit constant expressions like ~0x80000000 are
1016 uint64_t ext = value;
1017 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1018 /* Immediate out of range. */
1020 value &= (int64_t) 0xffffffff;
1023 /* first, try movz then movn */
1025 if ((value & ((int64_t) 0xffff << 0)) == value)
1027 else if ((value & ((int64_t) 0xffff << 16)) == value)
1029 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1031 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1036 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1040 if (shift_amount != NULL)
1041 *shift_amount = amount;
1043 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1048 /* Build the accepted values for immediate logical SIMD instructions.
1050 The standard encodings of the immediate value are:
1051 N imms immr SIMD size R S
1052 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1053 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1054 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1055 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1056 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1057 0 11110s 00000r 2 UInt(r) UInt(s)
1058 where all-ones value of S is reserved.
1060 Let's call E the SIMD size.
1062 The immediate value is: S+1 bits '1' rotated to the right by R.
1064 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1065 (remember S != E - 1). */
1067 #define TOTAL_IMM_NB 5334
1072 aarch64_insn encoding;
1073 } simd_imm_encoding;
1075 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1078 simd_imm_encoding_cmp(const void *i1, const void *i2)
1080 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1081 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1083 if (imm1->imm < imm2->imm)
1085 if (imm1->imm > imm2->imm)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1105 build_immediate_table (void)
1107 uint32_t log_e, e, s, r, s_mask;
1113 for (log_e = 1; log_e <= 6; log_e++)
1115 /* Get element size. */
1120 mask = 0xffffffffffffffffull;
1126 mask = (1ull << e) - 1;
1128 1 ((1 << 4) - 1) << 2 = 111100
1129 2 ((1 << 3) - 1) << 3 = 111000
1130 3 ((1 << 2) - 1) << 4 = 110000
1131 4 ((1 << 1) - 1) << 5 = 100000
1132 5 ((1 << 0) - 1) << 6 = 000000 */
1133 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1135 for (s = 0; s < e - 1; s++)
1136 for (r = 0; r < e; r++)
1138 /* s+1 consecutive bits to 1 (s < 63) */
1139 imm = (1ull << (s + 1)) - 1;
1140 /* rotate right by r */
1142 imm = (imm >> r) | ((imm << (e - r)) & mask);
1143 /* replicate the constant depending on SIMD size */
1146 case 1: imm = (imm << 2) | imm;
1147 case 2: imm = (imm << 4) | imm;
1148 case 3: imm = (imm << 8) | imm;
1149 case 4: imm = (imm << 16) | imm;
1150 case 5: imm = (imm << 32) | imm;
1154 simd_immediates[nb_imms].imm = imm;
1155 simd_immediates[nb_imms].encoding =
1156 encode_immediate_bitfield(is64, s | s_mask, r);
1160 assert (nb_imms == TOTAL_IMM_NB);
1161 qsort(simd_immediates, nb_imms,
1162 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1165 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1166 be accepted by logical (immediate) instructions
1167 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1169 ESIZE is the number of bytes in the decoded immediate value.
1170 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1171 VALUE will be returned in *ENCODING. */
1174 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1176 simd_imm_encoding imm_enc;
1177 const simd_imm_encoding *imm_encoding;
1178 static bfd_boolean initialized = FALSE;
1182 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1185 if (initialized == FALSE)
1187 build_immediate_table ();
1191 /* Allow all zeros or all ones in top bits, so that
1192 constant expressions like ~1 are permitted. */
1193 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1194 if ((value & ~upper) != value && (value | upper) != value)
1197 /* Replicate to a full 64-bit value. */
1199 for (i = esize * 8; i < 64; i *= 2)
1200 value |= (value << i);
1202 imm_enc.imm = value;
1203 imm_encoding = (const simd_imm_encoding *)
1204 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1205 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1206 if (imm_encoding == NULL)
1208 DEBUG_TRACE ("exit with FALSE");
1211 if (encoding != NULL)
1212 *encoding = imm_encoding->encoding;
1213 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come from
	   an expanded imm8.  */
	return -1;
    }

  return ret;
}
1239 /* Utility inline functions for operand_general_constraint_met_p. */
1242 set_error (aarch64_operand_error *mismatch_detail,
1243 enum aarch64_operand_error_kind kind, int idx,
1246 if (mismatch_detail == NULL)
1248 mismatch_detail->kind = kind;
1249 mismatch_detail->index = idx;
1250 mismatch_detail->error = error;
1254 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1257 if (mismatch_detail == NULL)
1259 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1263 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1264 int idx, int lower_bound, int upper_bound,
1267 if (mismatch_detail == NULL)
1269 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1270 mismatch_detail->data[0] = lower_bound;
1271 mismatch_detail->data[1] = upper_bound;
1275 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1276 int idx, int lower_bound, int upper_bound)
1278 if (mismatch_detail == NULL)
1280 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1281 _("immediate value"));
/* Report that the address offset immediate in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
1285 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1286 int idx, int lower_bound, int upper_bound)
1288 if (mismatch_detail == NULL)
1290 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1291 _("immediate offset"));
/* Report that the register number in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
1295 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1296 int idx, int lower_bound, int upper_bound)
1298 if (mismatch_detail == NULL)
1300 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1301 _("register number"));
/* Report that the vector-element index in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
1305 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1308 if (mismatch_detail == NULL)
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("register element index"));
/* Report that the shift amount in operand IDX should be within
   [LOWER_BOUND, UPPER_BOUND].  */
1315 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1318 if (mismatch_detail == NULL)
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1324 /* Report that the MUL modifier in operand IDX should be in the range
1325 [LOWER_BOUND, UPPER_BOUND]. */
1327 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound)
1330 if (mismatch_detail == NULL)
1332 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
/* Report that operand IDX should be a multiple of ALIGNMENT; the required
   alignment is stored in data[0] for the message consumer.  */
1337 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1340 if (mismatch_detail == NULL)
1342 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1343 mismatch_detail->data[0] = alignment;
/* Report that the register list in operand IDX should contain EXPECTED_NUM
   registers; the expected count is stored in data[0].  */
1347 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1350 if (mismatch_detail == NULL)
1352 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1353 mismatch_detail->data[0] = expected_num;
/* Report a miscellaneous constraint violation on operand IDX, described by
   the static string ERROR.  */
1357 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1360 if (mismatch_detail == NULL)
1362 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1365 /* General constraint checking based on operand code.
1367 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1368 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1370 This function has to be called after the qualifiers for all operands
1373 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1374 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1375 of error message during the disassembling where error message is not
1376 wanted. We avoid the dynamic construction of strings of error messages
1377 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1378 use a combination of error code, static string and some integer data to
1379 represent an error. */
1382 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1383 enum aarch64_opnd type,
1384 const aarch64_opcode *opcode,
1385 aarch64_operand_error *mismatch_detail)
1387 unsigned num, modifiers, shift;
1389 int64_t imm, min_value, max_value;
1390 uint64_t uvalue, mask;
1391 const aarch64_opnd_info *opnd = opnds + idx;
1392 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1394 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
/* Dispatch on the broad operand class; only classes/types that need checks
   beyond qualifier matching appear below.  */
1396 switch (aarch64_operands[type].op_class)
1398 case AARCH64_OPND_CLASS_INT_REG:
1399 /* Check pair reg constraints for cas* instructions. */
1400 if (type == AARCH64_OPND_PAIRREG)
1402 assert (idx == 1 || idx == 3);
1403 if (opnds[idx - 1].reg.regno % 2 != 0)
1405 set_syntax_error (mismatch_detail, idx - 1,
1406 _("reg pair must start from even reg"));
1409 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1411 set_syntax_error (mismatch_detail, idx,
1412 _("reg pair must be contiguous"));
1418 /* <Xt> may be optional in some IC and TLBI instructions. */
1419 if (type == AARCH64_OPND_Rt_SYS)
1421 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1422 == AARCH64_OPND_CLASS_SYSTEM));
1423 if (opnds[1].present
1424 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1426 set_other_error (mismatch_detail, idx, _("extraneous register"));
1429 if (!opnds[1].present
1430 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1432 set_other_error (mismatch_detail, idx, _("missing register"));
/* Register 31: the qualifier decides whether it must be SP/WSP here.  */
1438 case AARCH64_OPND_QLF_WSP:
1439 case AARCH64_OPND_QLF_SP:
1440 if (!aarch64_stack_pointer_p (opnd))
1442 set_other_error (mismatch_detail, idx,
1443 _("stack pointer register expected"));
1452 case AARCH64_OPND_CLASS_SVE_REG:
1455 case AARCH64_OPND_SVE_Zn_INDEX:
/* Element index range depends on element size: 64 bytes' worth of
   elements per the operand's esize.  */
1456 size = aarch64_get_qualifier_esize (opnd->qualifier);
1457 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1459 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1465 case AARCH64_OPND_SVE_ZnxN:
1466 case AARCH64_OPND_SVE_ZtxN:
1467 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1469 set_other_error (mismatch_detail, idx,
1470 _("invalid register list"));
1480 case AARCH64_OPND_CLASS_PRED_REG:
/* Some predicate operands only have a 3-bit field, limiting them to
   P0-P7.  */
1481 if (opnd->reg.regno >= 8
1482 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1484 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1489 case AARCH64_OPND_CLASS_COND:
1490 if (type == AARCH64_OPND_COND1
1491 && (opnds[idx].cond->value & 0xe) == 0xe)
1493 /* Not allow AL or NV. */
1494 set_syntax_error (mismatch_detail, idx, NULL);
1498 case AARCH64_OPND_CLASS_ADDRESS:
1499 /* Check writeback. */
1500 switch (opcode->iclass)
1504 case ldstnapair_offs:
1507 if (opnd->addr.writeback == 1)
1509 set_syntax_error (mismatch_detail, idx,
1510 _("unexpected address writeback"));
1515 case ldstpair_indexed:
1518 if (opnd->addr.writeback == 0)
1520 set_syntax_error (mismatch_detail, idx,
1521 _("address writeback expected"));
1526 assert (opnd->addr.writeback == 0);
1531 case AARCH64_OPND_ADDR_SIMM7:
1532 /* Scaled signed 7 bits immediate offset. */
1533 /* Get the size of the data element that is accessed, which may be
1534 different from that of the source register size,
1535 e.g. in strb/ldrb. */
1536 size = aarch64_get_qualifier_esize (opnd->qualifier);
1537 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1539 set_offset_out_of_range_error (mismatch_detail, idx,
1540 -64 * size, 63 * size)
1543 if (!value_aligned_p (opnd->addr.offset.imm, size))
1545 set_unaligned_error (mismatch_detail, idx, size);
1549 case AARCH64_OPND_ADDR_SIMM9:
1550 /* Unscaled signed 9 bits immediate offset. */
1551 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1553 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1558 case AARCH64_OPND_ADDR_SIMM9_2:
1559 /* Unscaled signed 9 bits immediate offset, which has to be negative
1561 size = aarch64_get_qualifier_esize (qualifier);
/* Accept only offsets a scaled form could not encode: in-range but
   unaligned non-negative values, or any negative value.  */
1562 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1563 && !value_aligned_p (opnd->addr.offset.imm, size))
1564 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1566 set_other_error (mismatch_detail, idx,
1567 _("negative or unaligned offset expected"));
1570 case AARCH64_OPND_SIMD_ADDR_POST:
1571 /* AdvSIMD load/store multiple structures, post-index. */
1573 if (opnd->addr.offset.is_reg)
1575 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1579 set_other_error (mismatch_detail, idx,
1580 _("invalid register offset"));
1586 const aarch64_opnd_info *prev = &opnds[idx-1];
1587 unsigned num_bytes; /* total number of bytes transferred. */
1588 /* The opcode dependent area stores the number of elements in
1589 each structure to be loaded/stored. */
1590 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1591 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1592 /* Special handling of loading single structure to all lane. */
1593 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1594 * aarch64_get_qualifier_esize (prev->qualifier);
1596 num_bytes = prev->reglist.num_regs
1597 * aarch64_get_qualifier_esize (prev->qualifier)
1598 * aarch64_get_qualifier_nelem (prev->qualifier);
/* An immediate post-increment must match the number of bytes moved.  */
1599 if ((int) num_bytes != opnd->addr.offset.imm)
1601 set_other_error (mismatch_detail, idx,
1602 _("invalid post-increment amount"));
1608 case AARCH64_OPND_ADDR_REGOFF:
1609 /* Get the size of the data element that is accessed, which may be
1610 different from that of the source register size,
1611 e.g. in strb/ldrb. */
1612 size = aarch64_get_qualifier_esize (opnd->qualifier);
1613 /* It is either no shift or shift by the binary logarithm of SIZE. */
1614 if (opnd->shifter.amount != 0
1615 && opnd->shifter.amount != (int)get_logsz (size))
1617 set_other_error (mismatch_detail, idx,
1618 _("invalid shift amount"));
1621 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1623 switch (opnd->shifter.kind)
1625 case AARCH64_MOD_UXTW:
1626 case AARCH64_MOD_LSL:
1627 case AARCH64_MOD_SXTW:
1628 case AARCH64_MOD_SXTX: break;
1630 set_other_error (mismatch_detail, idx,
1631 _("invalid extend/shift operator"));
1636 case AARCH64_OPND_ADDR_UIMM12:
1637 imm = opnd->addr.offset.imm;
1638 /* Get the size of the data element that is accessed, which may be
1639 different from that of the source register size,
1640 e.g. in strb/ldrb. */
1641 size = aarch64_get_qualifier_esize (qualifier);
1642 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1644 set_offset_out_of_range_error (mismatch_detail, idx,
1648 if (!value_aligned_p (opnd->addr.offset.imm, size))
1650 set_unaligned_error (mismatch_detail, idx, size);
1655 case AARCH64_OPND_ADDR_PCREL14:
1656 case AARCH64_OPND_ADDR_PCREL19:
1657 case AARCH64_OPND_ADDR_PCREL21:
1658 case AARCH64_OPND_ADDR_PCREL26:
1659 imm = opnd->imm.value;
1660 if (operand_need_shift_by_two (get_operand_from_code (type)))
1662 /* The offset value in a PC-relative branch instruction is alway
1663 4-byte aligned and is encoded without the lowest 2 bits. */
1664 if (!value_aligned_p (imm, 4))
1666 set_unaligned_error (mismatch_detail, idx, 4);
1669 /* Right shift by 2 so that we can carry out the following check
1673 size = get_operand_fields_width (get_operand_from_code (type));
1674 if (!value_fit_signed_field_p (imm, size))
1676 set_other_error (mismatch_detail, idx,
1677 _("immediate out of range"));
1682 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1683 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1684 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1685 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1689 assert (!opnd->addr.offset.is_reg);
1690 assert (opnd->addr.preind);
/* NUM is the vector-length multiple encoded in the operand table
   (operand-specific data + 1).  */
1691 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
/* A non-zero offset requires an explicit MUL VL modifier, and MUL VL
   is the only modifier accepted here.  */
1694 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1695 || (opnd->shifter.operator_present
1696 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1698 set_other_error (mismatch_detail, idx,
1699 _("invalid addressing mode"));
1702 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1704 set_offset_out_of_range_error (mismatch_detail, idx,
1705 min_value, max_value);
1708 if (!value_aligned_p (opnd->addr.offset.imm, num))
1710 set_unaligned_error (mismatch_detail, idx, num);
1715 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1718 goto sve_imm_offset_vl;
1720 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1723 goto sve_imm_offset_vl;
1725 case AARCH64_OPND_SVE_ADDR_RI_U6:
1726 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1727 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1728 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1732 assert (!opnd->addr.offset.is_reg);
1733 assert (opnd->addr.preind);
/* Here the operand-specific data is a log2 scale factor.  */
1734 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1737 if (opnd->shifter.operator_present
1738 || opnd->shifter.amount_present)
1740 set_other_error (mismatch_detail, idx,
1741 _("invalid addressing mode"));
1744 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1746 set_offset_out_of_range_error (mismatch_detail, idx,
1747 min_value, max_value);
1750 if (!value_aligned_p (opnd->addr.offset.imm, num))
1752 set_unaligned_error (mismatch_detail, idx, num);
1757 case AARCH64_OPND_SVE_ADDR_RR:
1758 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1759 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1760 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1761 case AARCH64_OPND_SVE_ADDR_RX:
1762 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1763 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1764 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1765 case AARCH64_OPND_SVE_ADDR_RZ:
1766 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1767 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1768 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
/* MODIFIERS is a bitmask of acceptable shifter kinds.  */
1769 modifiers = 1 << AARCH64_MOD_LSL;
1771 assert (opnd->addr.offset.is_reg);
1772 assert (opnd->addr.preind);
1773 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1774 && opnd->addr.offset.regno == 31)
1776 set_other_error (mismatch_detail, idx,
1777 _("index register xzr is not allowed"));
1780 if (((1 << opnd->shifter.kind) & modifiers) == 0
1781 || (opnd->shifter.amount
1782 != get_operand_specific_data (&aarch64_operands[type])))
1784 set_other_error (mismatch_detail, idx,
1785 _("invalid addressing mode"));
1790 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1791 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1792 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1793 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1794 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1795 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1796 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1797 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1798 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1799 goto sve_rr_operand;
1801 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1802 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1803 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1804 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1807 goto sve_imm_offset;
1809 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1810 modifiers = 1 << AARCH64_MOD_LSL;
1812 assert (opnd->addr.offset.is_reg);
1813 assert (opnd->addr.preind);
1814 if (((1 << opnd->shifter.kind) & modifiers) == 0
1815 || opnd->shifter.amount < 0
1816 || opnd->shifter.amount > 3)
1818 set_other_error (mismatch_detail, idx,
1819 _("invalid addressing mode"));
1824 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1825 modifiers = (1 << AARCH64_MOD_SXTW);
1826 goto sve_zz_operand;
1828 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1829 modifiers = 1 << AARCH64_MOD_UXTW;
1830 goto sve_zz_operand;
1837 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1838 if (type == AARCH64_OPND_LEt)
1840 /* Get the upper bound for the element index. */
1841 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1842 if (!value_in_range_p (opnd->reglist.index, 0, num))
1844 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1848 /* The opcode dependent area stores the number of elements in
1849 each structure to be loaded/stored. */
1850 num = get_opcode_dependent_value (opcode);
1853 case AARCH64_OPND_LVt:
1854 assert (num >= 1 && num <= 4);
1855 /* Unless LD1/ST1, the number of registers should be equal to that
1856 of the structure elements. */
1857 if (num != 1 && opnd->reglist.num_regs != num)
1859 set_reg_list_error (mismatch_detail, idx, num);
1863 case AARCH64_OPND_LVt_AL:
1864 case AARCH64_OPND_LEt:
1865 assert (num >= 1 && num <= 4);
1866 /* The number of registers should be equal to that of the structure
1868 if (opnd->reglist.num_regs != num)
1870 set_reg_list_error (mismatch_detail, idx, num);
1879 case AARCH64_OPND_CLASS_IMMEDIATE:
1880 /* Constraint check on immediate operand. */
1881 imm = opnd->imm.value;
1882 /* E.g. imm_0_31 constrains value to be 0..31. */
1883 if (qualifier_value_in_range_constraint_p (qualifier)
1884 && !value_in_range_p (imm, get_lower_bound (qualifier),
1885 get_upper_bound (qualifier)))
1887 set_imm_out_of_range_error (mismatch_detail, idx,
1888 get_lower_bound (qualifier),
1889 get_upper_bound (qualifier));
1895 case AARCH64_OPND_AIMM:
1896 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1898 set_other_error (mismatch_detail, idx,
1899 _("invalid shift operator"));
1902 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1904 set_other_error (mismatch_detail, idx,
1905 _("shift amount expected to be 0 or 12"));
1908 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1910 set_other_error (mismatch_detail, idx,
1911 _("immediate out of range"));
1916 case AARCH64_OPND_HALF:
1917 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd)
1918 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1920 set_other_error (mismatch_detail, idx,
1921 _("invalid shift operator"));
1924 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1925 if (!value_aligned_p (opnd->shifter.amount, 16))
1927 set_other_error (mismatch_detail, idx,
1928 _("shift amount should be a multiple of 16"));
1931 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1933 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1937 if (opnd->imm.value < 0)
1939 set_other_error (mismatch_detail, idx,
1940 _("negative immediate value not allowed"));
1943 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1945 set_other_error (mismatch_detail, idx,
1946 _("immediate out of range"));
1951 case AARCH64_OPND_IMM_MOV:
1953 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1954 imm = opnd->imm.value;
1958 case OP_MOV_IMM_WIDEN:
1960 /* Fall through... */
1961 case OP_MOV_IMM_WIDE:
1962 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1964 set_other_error (mismatch_detail, idx,
1965 _("immediate out of range"));
1969 case OP_MOV_IMM_LOG:
1970 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1972 set_other_error (mismatch_detail, idx,
1973 _("immediate out of range"));
1984 case AARCH64_OPND_NZCV:
1985 case AARCH64_OPND_CCMP_IMM:
1986 case AARCH64_OPND_EXCEPTION:
1987 case AARCH64_OPND_UIMM4:
1988 case AARCH64_OPND_UIMM7:
1989 case AARCH64_OPND_UIMM3_OP1:
1990 case AARCH64_OPND_UIMM3_OP2:
1991 case AARCH64_OPND_SVE_UIMM3:
1992 case AARCH64_OPND_SVE_UIMM7:
1993 case AARCH64_OPND_SVE_UIMM8:
1994 case AARCH64_OPND_SVE_UIMM8_53:
/* Generic unsigned immediates: bound by the encoded field width.  */
1995 size = get_operand_fields_width (get_operand_from_code (type));
1997 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1999 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2005 case AARCH64_OPND_SIMM5:
2006 case AARCH64_OPND_SVE_SIMM5:
2007 case AARCH64_OPND_SVE_SIMM5B:
2008 case AARCH64_OPND_SVE_SIMM6:
2009 case AARCH64_OPND_SVE_SIMM8:
/* Generic signed immediates: two's-complement field-width bound.  */
2010 size = get_operand_fields_width (get_operand_from_code (type));
2012 if (!value_fit_signed_field_p (opnd->imm.value, size))
2014 set_imm_out_of_range_error (mismatch_detail, idx,
2016 (1 << (size - 1)) - 1);
2021 case AARCH64_OPND_WIDTH:
2022 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2023 && opnds[0].type == AARCH64_OPND_Rd);
2024 size = get_upper_bound (qualifier);
2025 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2026 /* lsb+width <= reg.size */
2028 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2029 size - opnds[idx-1].imm.value);
2034 case AARCH64_OPND_LIMM:
2035 case AARCH64_OPND_SVE_LIMM:
2037 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2038 uint64_t uimm = opnd->imm.value;
2039 if (opcode->op == OP_BIC)
2041 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2043 set_other_error (mismatch_detail, idx,
2044 _("immediate out of range"));
2050 case AARCH64_OPND_IMM0:
2051 case AARCH64_OPND_FPIMM0:
2052 if (opnd->imm.value != 0)
2054 set_other_error (mismatch_detail, idx,
2055 _("immediate zero expected"));
2060 case AARCH64_OPND_SHLL_IMM:
/* SHLL's shift amount must equal the source element size in bits.  */
2062 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2063 if (opnd->imm.value != size)
2065 set_other_error (mismatch_detail, idx,
2066 _("invalid shift amount"));
2071 case AARCH64_OPND_IMM_VLSL:
2072 size = aarch64_get_qualifier_esize (qualifier);
2073 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2075 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2081 case AARCH64_OPND_IMM_VLSR:
2082 size = aarch64_get_qualifier_esize (qualifier);
2083 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2085 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2090 case AARCH64_OPND_SIMD_IMM:
2091 case AARCH64_OPND_SIMD_IMM_SFT:
2092 /* Qualifier check. */
2095 case AARCH64_OPND_QLF_LSL:
2096 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2098 set_other_error (mismatch_detail, idx,
2099 _("invalid shift operator"));
2103 case AARCH64_OPND_QLF_MSL:
2104 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2106 set_other_error (mismatch_detail, idx,
2107 _("invalid shift operator"));
2111 case AARCH64_OPND_QLF_NIL:
2112 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2114 set_other_error (mismatch_detail, idx,
2115 _("shift is not permitted"));
2123 /* Is the immediate valid? */
2125 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2127 /* uimm8 or simm8 */
2128 if (!value_in_range_p (opnd->imm.value, -128, 255))
2130 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2134 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2137 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2138 ffffffffgggggggghhhhhhhh'. */
2139 set_other_error (mismatch_detail, idx,
2140 _("invalid value for immediate"));
2143 /* Is the shift amount valid? */
2144 switch (opnd->shifter.kind)
2146 case AARCH64_MOD_LSL:
2147 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2148 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2150 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2154 if (!value_aligned_p (opnd->shifter.amount, 8))
2156 set_unaligned_error (mismatch_detail, idx, 8);
2160 case AARCH64_MOD_MSL:
2161 /* Only 8 and 16 are valid shift amount. */
2162 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
/* NOTE(review): the comment above says the valid amounts are 8 and
   16, and the guard rejects everything else, yet the diagnostic text
   reads "0 or 16" -- it likely should read "8 or 16".  Left unchanged
   here because a translated message string is program output.  */
2164 set_other_error (mismatch_detail, idx,
2165 _("shift amount expected to be 0 or 16"));
2170 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2172 set_other_error (mismatch_detail, idx,
2173 _("invalid shift operator"));
2180 case AARCH64_OPND_FPIMM:
2181 case AARCH64_OPND_SIMD_FPIMM:
2182 case AARCH64_OPND_SVE_FPIMM8:
2183 if (opnd->imm.is_fp == 0)
2185 set_other_error (mismatch_detail, idx,
2186 _("floating-point immediate expected"));
2189 /* The value is expected to be an 8-bit floating-point constant with
2190 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2191 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2193 if (!value_in_range_p (opnd->imm.value, 0, 255))
2195 set_other_error (mismatch_detail, idx,
2196 _("immediate out of range"));
2199 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2201 set_other_error (mismatch_detail, idx,
2202 _("invalid shift operator"));
2207 case AARCH64_OPND_SVE_AIMM:
2210 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2211 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
/* MASK covers the bits of one element.  The shift is split into two
   half-width shifts so the count never reaches 64 when SIZE is 8
   (a single 64-bit shift by 64 would be undefined behaviour).  */
2212 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2213 uvalue = opnd->imm.value;
2214 shift = opnd->shifter.amount;
2219 set_other_error (mismatch_detail, idx,
2220 _("no shift amount allowed for"
2221 " 8-bit constants"));
2227 if (shift != 0 && shift != 8)
2229 set_other_error (mismatch_detail, idx,
2230 _("shift amount must be 0 or 8"));
2233 if (shift == 0 && (uvalue & 0xff) == 0)
/* Signed division keeps the sign of a negative immediate while
   scaling it down by 256 -- presumably canonicalising an unshifted
   multiple-of-256 value into LSL #8 form; confirm against the full
   source.  */
2236 uvalue = (int64_t) uvalue / 256;
2240 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2242 set_other_error (mismatch_detail, idx,
2243 _("immediate too big for element size"));
2246 uvalue = (uvalue - min_value) & mask;
2249 set_other_error (mismatch_detail, idx,
2250 _("invalid arithmetic immediate"));
2255 case AARCH64_OPND_SVE_ASIMM:
2259 case AARCH64_OPND_SVE_I1_HALF_ONE:
/* Immediate holds a single-precision bit pattern: 0x3f000000 is 0.5,
   0x3f800000 is 1.0.  */
2260 assert (opnd->imm.is_fp);
2261 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2263 set_other_error (mismatch_detail, idx,
2264 _("floating-point value must be 0.5 or 1.0"));
2269 case AARCH64_OPND_SVE_I1_HALF_TWO:
2270 assert (opnd->imm.is_fp);
2271 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2273 set_other_error (mismatch_detail, idx,
2274 _("floating-point value must be 0.5 or 2.0"));
2279 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2280 assert (opnd->imm.is_fp);
2281 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2283 set_other_error (mismatch_detail, idx,
2284 _("floating-point value must be 0.0 or 1.0"));
2289 case AARCH64_OPND_SVE_INV_LIMM:
2291 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
/* The encoding stores the bitwise inverse of the immediate.  */
2292 uint64_t uimm = ~opnd->imm.value;
2293 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2295 set_other_error (mismatch_detail, idx,
2296 _("immediate out of range"));
2302 case AARCH64_OPND_SVE_LIMM_MOV:
2304 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2305 uint64_t uimm = opnd->imm.value;
2306 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2308 set_other_error (mismatch_detail, idx,
2309 _("immediate out of range"));
2312 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2314 set_other_error (mismatch_detail, idx,
2315 _("invalid replicated MOV immediate"));
2321 case AARCH64_OPND_SVE_PATTERN_SCALED:
2322 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2323 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2325 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2330 case AARCH64_OPND_SVE_SHLIMM_PRED:
2331 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
/* Left shifts: 0 .. esize*8 - 1 bits.  */
2332 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2333 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2335 set_imm_out_of_range_error (mismatch_detail, idx,
2341 case AARCH64_OPND_SVE_SHRIMM_PRED:
2342 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
/* Right shifts: 1 .. esize*8 bits.  */
2343 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2344 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2346 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2356 case AARCH64_OPND_CLASS_CP_REG:
2357 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2358 valid range: C0 - C15. */
2359 if (opnd->reg.regno > 15)
2361 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2366 case AARCH64_OPND_CLASS_SYSTEM:
2369 case AARCH64_OPND_PSTATEFIELD:
2370 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2373 The immediate must be #0 or #1. */
2374 if ((opnd->pstatefield == 0x03 /* UAO. */
2375 || opnd->pstatefield == 0x04) /* PAN. */
2376 && opnds[1].imm.value > 1)
2378 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2381 /* MSR SPSel, #uimm4
2382 Uses uimm4 as a control value to select the stack pointer: if
2383 bit 0 is set it selects the current exception level's stack
2384 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2385 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2386 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2388 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2397 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2398 /* Get the upper bound for the element index. */
2399 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2400 /* Index out-of-range. */
2401 if (!value_in_range_p (opnd->reglane.index, 0, num))
2403 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2406 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2407 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2408 number is encoded in "size:M:Rm":
2414 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2415 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2417 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2422 case AARCH64_OPND_CLASS_MODIFIED_REG:
2423 assert (idx == 1 || idx == 2);
2426 case AARCH64_OPND_Rm_EXT:
2427 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2428 && opnd->shifter.kind != AARCH64_MOD_LSL)
2430 set_other_error (mismatch_detail, idx,
2431 _("extend operator expected"));
2434 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2435 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2436 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2438 if (!aarch64_stack_pointer_p (opnds + 0)
2439 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2441 if (!opnd->shifter.operator_present)
2443 set_other_error (mismatch_detail, idx,
2444 _("missing extend operator"));
2447 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2449 set_other_error (mismatch_detail, idx,
2450 _("'LSL' operator not allowed"));
2454 assert (opnd->shifter.operator_present /* Default to LSL. */
2455 || opnd->shifter.kind == AARCH64_MOD_LSL);
2456 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2458 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2461 /* In the 64-bit form, the final register operand is written as Wm
2462 for all but the (possibly omitted) UXTX/LSL and SXTX
2464 N.B. GAS allows X register to be used with any operator as a
2465 programming convenience. */
2466 if (qualifier == AARCH64_OPND_QLF_X
2467 && opnd->shifter.kind != AARCH64_MOD_LSL
2468 && opnd->shifter.kind != AARCH64_MOD_UXTX
2469 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2471 set_other_error (mismatch_detail, idx, _("W register expected"));
2476 case AARCH64_OPND_Rm_SFT:
2477 /* ROR is not available to the shifted register operand in
2478 arithmetic instructions. */
2479 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2481 set_other_error (mismatch_detail, idx,
2482 _("shift operator expected"));
2485 if (opnd->shifter.kind == AARCH64_MOD_ROR
2486 && opcode->iclass != log_shift)
2488 set_other_error (mismatch_detail, idx,
2489 _("'ROR' operator not allowed"));
2492 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2493 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2495 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2512 /* Main entrypoint for the operand constraint checking.
2514 Return 1 if operands of *INST meet the constraint applied by the operand
2515 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2516 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2517 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2518 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2519 error kind when it is notified that an instruction does not pass the check).
2521 Un-determined operand qualifiers may get established during the process. */
2524 aarch64_match_operands_constraint (aarch64_inst *inst,
2525 aarch64_operand_error *mismatch_detail)
2529 DEBUG_TRACE ("enter");
2531 /* Check for cases where a source register needs to be the same as the
2532 destination register. Do this before matching qualifiers since if
2533 an instruction has both invalid tying and invalid qualifiers,
2534 the error about qualifiers would suggest several alternative
2535 instructions that also have invalid tying. */
/* The `i > 0' guard means a tied_operand value of 0 (or below) indicates
   the opcode has no tied-register constraint.  */
2536 i = inst->opcode->tied_operand;
2537 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2539 if (mismatch_detail)
2541 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2542 mismatch_detail->index = i;
2543 mismatch_detail->error = NULL;
2548 /* Match operands' qualifier.
2549 *INST has already had qualifier establish for some, if not all, of
2550 its operands; we need to find out whether these established
2551 qualifiers match one of the qualifier sequence in
2552 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2553 with the corresponding qualifier in such a sequence.
2554 Only basic operand constraint checking is done here; the more thorough
2555 constraint checking will carried out by operand_general_constraint_met_p,
2556 which has be to called after this in order to get all of the operands'
2557 qualifiers established. */
2558 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2560 DEBUG_TRACE ("FAIL on operand qualifier matching");
2561 if (mismatch_detail)
2563 /* Return an error type to indicate that it is the qualifier
2564 matching failure; we don't care about which operand as there
2565 are enough information in the opcode table to reproduce it. */
2566 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2567 mismatch_detail->index = -1;
2568 mismatch_detail->error = NULL;
2573 /* Match operands' constraint. */
/* Walk the NIL-terminated operand list, delegating per-operand checks.  */
2574 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2576 enum aarch64_opnd type = inst->opcode->operands[i];
2577 if (type == AARCH64_OPND_NIL)
2579 if (inst->operands[i].skip)
2581 DEBUG_TRACE ("skip the incomplete operand %d", i);
2584 if (operand_general_constraint_met_p (inst->operands, i, type,
2585 inst->opcode, mismatch_detail) == 0)
2587 DEBUG_TRACE ("FAIL on operand %d", i);
2592 DEBUG_TRACE ("PASS");
2597 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2598 Also updates the TYPE of each INST->OPERANDS with the corresponding
2599 value of OPCODE->OPERANDS.
2601 Note that some operand qualifiers may need to be manually cleared by
2602 the caller before it further calls the aarch64_opcode_encode; by
2603 doing this, it helps the qualifier matching facilities work
2606 const aarch64_opcode*
2607 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2610 const aarch64_opcode *old = inst->opcode;
2612 inst->opcode = opcode;
2614 /* Update the operand types. */
2615 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2617 inst->operands[i].type = opcode->operands[i];
2618 if (opcode->operands[i] == AARCH64_OPND_NIL)
2622 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2628 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2631 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2632 if (operands[i] == operand)
2634 else if (operands[i] == AARCH64_OPND_NIL)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register names, indexed as follows:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
2658 /* Names of the SVE vector registers, first with .S suffixes,
2659 then with .D suffixes. */
2661 static const char *sve_reg[2][32] = {
2662 #define ZS(X) "z" #X ".s"
2663 #define ZD(X) "z" #X ".d"
2664 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2670 /* Return the integer register name.
2671 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2673 static inline const char *
2674 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2676 const int has_zr = sp_reg_p ? 0 : 1;
2677 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2678 return int_reg[has_zr][is_64][regno];
2681 /* Like get_int_reg_name, but IS_64 is always 1. */
2683 static inline const char *
2684 get_64bit_int_reg_name (int regno, int sp_reg_p)
2686 const int has_zr = sp_reg_p ? 0 : 1;
2687 return int_reg[has_zr][1][regno];
2690 /* Get the name of the integer offset register in OPND, using the shift type
2691 to decide whether it's a word or doubleword. */
2693 static inline const char *
2694 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2696 switch (opnd->shifter.kind)
2698 case AARCH64_MOD_UXTW:
2699 case AARCH64_MOD_SXTW:
2700 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2702 case AARCH64_MOD_LSL:
2703 case AARCH64_MOD_SXTX:
2704 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2711 /* Get the name of the SVE vector offset register in OPND, using the operand
2712 qualifier to decide whether the suffix should be .S or .D. */
2714 static inline const char *
2715 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2717 assert (qualifier == AARCH64_OPND_QLF_S_S
2718 || qualifier == AARCH64_OPND_QLF_S_D);
2719 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))	/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32))	/* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      return 0;
    }

  return imm;
}
2786 /* Produce the string representation of the register list operand *OPND
2787 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2788 the register name that comes before the register number, such as "v". */
2790 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2793 const int num_regs = opnd->reglist.num_regs;
2794 const int first_reg = opnd->reglist.first_regno;
2795 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2796 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2797 char tb[8]; /* Temporary buffer. */
2799 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2800 assert (num_regs >= 1 && num_regs <= 4);
2802 /* Prepare the index if any. */
2803 if (opnd->reglist.has_index)
2804 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2808 /* The hyphenated form is preferred for disassembly if there are
2809 more than two registers in the list, and the register numbers
2810 are monotonically increasing in increments of one. */
2811 if (num_regs > 2 && last_reg > first_reg)
2812 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2813 prefix, last_reg, qlf_name, tb);
2816 const int reg0 = first_reg;
2817 const int reg1 = (first_reg + 1) & 0x1f;
2818 const int reg2 = (first_reg + 2) & 0x1f;
2819 const int reg3 = (first_reg + 3) & 0x1f;
2824 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2827 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2828 prefix, reg1, qlf_name, tb);
2831 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2832 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2833 prefix, reg2, qlf_name, tb);
2836 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2837 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2838 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2844 /* Print the register+immediate address in OPND to BUF, which has SIZE
2845 characters. BASE is the name of the base register. */
2848 print_immediate_offset_address (char *buf, size_t size,
2849 const aarch64_opnd_info *opnd,
2852 if (opnd->addr.writeback)
2854 if (opnd->addr.preind)
2855 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2857 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2861 if (opnd->shifter.operator_present)
2863 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2864 snprintf (buf, size, "[%s,#%d,mul vl]",
2865 base, opnd->addr.offset.imm);
2867 else if (opnd->addr.offset.imm)
2868 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2870 snprintf (buf, size, "[%s]", base);
2874 /* Produce the string representation of the register offset address operand
2875 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2876 the names of the base and offset registers. */
2878 print_register_offset_address (char *buf, size_t size,
2879 const aarch64_opnd_info *opnd,
2880 const char *base, const char *offset)
2882 char tb[16]; /* Temporary buffer. */
2883 bfd_boolean print_extend_p = TRUE;
2884 bfd_boolean print_amount_p = TRUE;
2885 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2887 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2888 || !opnd->shifter.amount_present))
2890 /* Not print the shift/extend amount when the amount is zero and
2891 when it is not the special case of 8-bit load/store instruction. */
2892 print_amount_p = FALSE;
2893 /* Likewise, no need to print the shift operator LSL in such a
2895 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2896 print_extend_p = FALSE;
2899 /* Prepare for the extend/shift. */
2903 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2904 opnd->shifter.amount);
2906 snprintf (tb, sizeof (tb), ",%s", shift_name);
2911 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2914 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2915 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2916 PC, PCREL_P and ADDRESS are used to pass in and return information about
2917 the PC-relative address calculation, where the PC value is passed in
2918 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2919 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2920 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2922 The function serves both the disassembler and the assembler diagnostics
2923 issuer, which is the reason why it lives in this file. */
2926 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2927 const aarch64_opcode *opcode,
2928 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2932 const char *name = NULL;
2933 const aarch64_opnd_info *opnd = opnds + idx;
2934 enum aarch64_modifier_kind kind;
2935 uint64_t addr, enum_value;
2943 case AARCH64_OPND_Rd:
2944 case AARCH64_OPND_Rn:
2945 case AARCH64_OPND_Rm:
2946 case AARCH64_OPND_Rt:
2947 case AARCH64_OPND_Rt2:
2948 case AARCH64_OPND_Rs:
2949 case AARCH64_OPND_Ra:
2950 case AARCH64_OPND_Rt_SYS:
2951 case AARCH64_OPND_PAIRREG:
2952 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
 2953	 the <ic_op>, therefore we use opnd->present to override the
2954 generic optional-ness information. */
2955 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2957 /* Omit the operand, e.g. RET. */
2958 if (optional_operand_p (opcode, idx)
2959 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2961 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2962 || opnd->qualifier == AARCH64_OPND_QLF_X);
2963 snprintf (buf, size, "%s",
2964 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2967 case AARCH64_OPND_Rd_SP:
2968 case AARCH64_OPND_Rn_SP:
2969 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2970 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2971 || opnd->qualifier == AARCH64_OPND_QLF_X
2972 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2973 snprintf (buf, size, "%s",
2974 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2977 case AARCH64_OPND_Rm_EXT:
2978 kind = opnd->shifter.kind;
2979 assert (idx == 1 || idx == 2);
2980 if ((aarch64_stack_pointer_p (opnds)
2981 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2982 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2983 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2984 && kind == AARCH64_MOD_UXTW)
2985 || (opnd->qualifier == AARCH64_OPND_QLF_X
2986 && kind == AARCH64_MOD_UXTX)))
2988 /* 'LSL' is the preferred form in this case. */
2989 kind = AARCH64_MOD_LSL;
2990 if (opnd->shifter.amount == 0)
2992 /* Shifter omitted. */
2993 snprintf (buf, size, "%s",
2994 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2998 if (opnd->shifter.amount)
2999 snprintf (buf, size, "%s, %s #%" PRIi64,
3000 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3001 aarch64_operand_modifiers[kind].name,
3002 opnd->shifter.amount);
3004 snprintf (buf, size, "%s, %s",
3005 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3006 aarch64_operand_modifiers[kind].name);
3009 case AARCH64_OPND_Rm_SFT:
3010 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3011 || opnd->qualifier == AARCH64_OPND_QLF_X);
3012 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3013 snprintf (buf, size, "%s",
3014 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3016 snprintf (buf, size, "%s, %s #%" PRIi64,
3017 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3018 aarch64_operand_modifiers[opnd->shifter.kind].name,
3019 opnd->shifter.amount);
3022 case AARCH64_OPND_Fd:
3023 case AARCH64_OPND_Fn:
3024 case AARCH64_OPND_Fm:
3025 case AARCH64_OPND_Fa:
3026 case AARCH64_OPND_Ft:
3027 case AARCH64_OPND_Ft2:
3028 case AARCH64_OPND_Sd:
3029 case AARCH64_OPND_Sn:
3030 case AARCH64_OPND_Sm:
3031 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3035 case AARCH64_OPND_Vd:
3036 case AARCH64_OPND_Vn:
3037 case AARCH64_OPND_Vm:
3038 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3039 aarch64_get_qualifier_name (opnd->qualifier));
3042 case AARCH64_OPND_Ed:
3043 case AARCH64_OPND_En:
3044 case AARCH64_OPND_Em:
3045 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3046 aarch64_get_qualifier_name (opnd->qualifier),
3047 opnd->reglane.index);
3050 case AARCH64_OPND_VdD1:
3051 case AARCH64_OPND_VnD1:
3052 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3055 case AARCH64_OPND_LVn:
3056 case AARCH64_OPND_LVt:
3057 case AARCH64_OPND_LVt_AL:
3058 case AARCH64_OPND_LEt:
3059 print_register_list (buf, size, opnd, "v");
3062 case AARCH64_OPND_SVE_Pd:
3063 case AARCH64_OPND_SVE_Pg3:
3064 case AARCH64_OPND_SVE_Pg4_5:
3065 case AARCH64_OPND_SVE_Pg4_10:
3066 case AARCH64_OPND_SVE_Pg4_16:
3067 case AARCH64_OPND_SVE_Pm:
3068 case AARCH64_OPND_SVE_Pn:
3069 case AARCH64_OPND_SVE_Pt:
3070 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3071 snprintf (buf, size, "p%d", opnd->reg.regno);
3072 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3073 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3074 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3075 aarch64_get_qualifier_name (opnd->qualifier));
3077 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3078 aarch64_get_qualifier_name (opnd->qualifier));
3081 case AARCH64_OPND_SVE_Za_5:
3082 case AARCH64_OPND_SVE_Za_16:
3083 case AARCH64_OPND_SVE_Zd:
3084 case AARCH64_OPND_SVE_Zm_5:
3085 case AARCH64_OPND_SVE_Zm_16:
3086 case AARCH64_OPND_SVE_Zn:
3087 case AARCH64_OPND_SVE_Zt:
3088 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3089 snprintf (buf, size, "z%d", opnd->reg.regno);
3091 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3092 aarch64_get_qualifier_name (opnd->qualifier));
3095 case AARCH64_OPND_SVE_ZnxN:
3096 case AARCH64_OPND_SVE_ZtxN:
3097 print_register_list (buf, size, opnd, "z");
3100 case AARCH64_OPND_SVE_Zn_INDEX:
3101 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3102 aarch64_get_qualifier_name (opnd->qualifier),
3103 opnd->reglane.index);
3106 case AARCH64_OPND_Cn:
3107 case AARCH64_OPND_Cm:
3108 snprintf (buf, size, "C%d", opnd->reg.regno);
3111 case AARCH64_OPND_IDX:
3112 case AARCH64_OPND_IMM:
3113 case AARCH64_OPND_WIDTH:
3114 case AARCH64_OPND_UIMM3_OP1:
3115 case AARCH64_OPND_UIMM3_OP2:
3116 case AARCH64_OPND_BIT_NUM:
3117 case AARCH64_OPND_IMM_VLSL:
3118 case AARCH64_OPND_IMM_VLSR:
3119 case AARCH64_OPND_SHLL_IMM:
3120 case AARCH64_OPND_IMM0:
3121 case AARCH64_OPND_IMMR:
3122 case AARCH64_OPND_IMMS:
3123 case AARCH64_OPND_FBITS:
3124 case AARCH64_OPND_SIMM5:
3125 case AARCH64_OPND_SVE_SHLIMM_PRED:
3126 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3127 case AARCH64_OPND_SVE_SHRIMM_PRED:
3128 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3129 case AARCH64_OPND_SVE_SIMM5:
3130 case AARCH64_OPND_SVE_SIMM5B:
3131 case AARCH64_OPND_SVE_SIMM6:
3132 case AARCH64_OPND_SVE_SIMM8:
3133 case AARCH64_OPND_SVE_UIMM3:
3134 case AARCH64_OPND_SVE_UIMM7:
3135 case AARCH64_OPND_SVE_UIMM8:
3136 case AARCH64_OPND_SVE_UIMM8_53:
3137 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3140 case AARCH64_OPND_SVE_I1_HALF_ONE:
3141 case AARCH64_OPND_SVE_I1_HALF_TWO:
3142 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3145 c.i = opnd->imm.value;
3146 snprintf (buf, size, "#%.1f", c.f);
3150 case AARCH64_OPND_SVE_PATTERN:
3151 if (optional_operand_p (opcode, idx)
3152 && opnd->imm.value == get_optional_operand_default_value (opcode))
3154 enum_value = opnd->imm.value;
3155 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3156 if (aarch64_sve_pattern_array[enum_value])
3157 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3159 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3162 case AARCH64_OPND_SVE_PATTERN_SCALED:
3163 if (optional_operand_p (opcode, idx)
3164 && !opnd->shifter.operator_present
3165 && opnd->imm.value == get_optional_operand_default_value (opcode))
3167 enum_value = opnd->imm.value;
3168 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3169 if (aarch64_sve_pattern_array[opnd->imm.value])
3170 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3172 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3173 if (opnd->shifter.operator_present)
3175 size_t len = strlen (buf);
3176 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3177 aarch64_operand_modifiers[opnd->shifter.kind].name,
3178 opnd->shifter.amount);
3182 case AARCH64_OPND_SVE_PRFOP:
3183 enum_value = opnd->imm.value;
3184 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3185 if (aarch64_sve_prfop_array[enum_value])
3186 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3188 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3191 case AARCH64_OPND_IMM_MOV:
3192 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3194 case 4: /* e.g. MOV Wd, #<imm32>. */
3196 int imm32 = opnd->imm.value;
3197 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3200 case 8: /* e.g. MOV Xd, #<imm64>. */
3201 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3202 opnd->imm.value, opnd->imm.value);
3204 default: assert (0);
3208 case AARCH64_OPND_FPIMM0:
3209 snprintf (buf, size, "#0.0");
3212 case AARCH64_OPND_LIMM:
3213 case AARCH64_OPND_AIMM:
3214 case AARCH64_OPND_HALF:
3215 case AARCH64_OPND_SVE_INV_LIMM:
3216 case AARCH64_OPND_SVE_LIMM:
3217 case AARCH64_OPND_SVE_LIMM_MOV:
3218 if (opnd->shifter.amount)
3219 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3220 opnd->shifter.amount);
3222 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3225 case AARCH64_OPND_SIMD_IMM:
3226 case AARCH64_OPND_SIMD_IMM_SFT:
3227 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3228 || opnd->shifter.kind == AARCH64_MOD_NONE)
3229 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3231 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3232 aarch64_operand_modifiers[opnd->shifter.kind].name,
3233 opnd->shifter.amount);
3236 case AARCH64_OPND_SVE_AIMM:
3237 case AARCH64_OPND_SVE_ASIMM:
3238 if (opnd->shifter.amount)
3239 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3240 opnd->shifter.amount);
3242 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3245 case AARCH64_OPND_FPIMM:
3246 case AARCH64_OPND_SIMD_FPIMM:
3247 case AARCH64_OPND_SVE_FPIMM8:
3248 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3250 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3253 c.i = expand_fp_imm (2, opnd->imm.value);
3254 snprintf (buf, size, "#%.18e", c.f);
3257 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3260 c.i = expand_fp_imm (4, opnd->imm.value);
3261 snprintf (buf, size, "#%.18e", c.f);
3264 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3267 c.i = expand_fp_imm (8, opnd->imm.value);
3268 snprintf (buf, size, "#%.18e", c.d);
3271 default: assert (0);
3275 case AARCH64_OPND_CCMP_IMM:
3276 case AARCH64_OPND_NZCV:
3277 case AARCH64_OPND_EXCEPTION:
3278 case AARCH64_OPND_UIMM4:
3279 case AARCH64_OPND_UIMM7:
3280 if (optional_operand_p (opcode, idx) == TRUE
3281 && (opnd->imm.value ==
3282 (int64_t) get_optional_operand_default_value (opcode)))
3283 /* Omit the operand, e.g. DCPS1. */
3285 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3288 case AARCH64_OPND_COND:
3289 case AARCH64_OPND_COND1:
3290 snprintf (buf, size, "%s", opnd->cond->names[0]);
3293 case AARCH64_OPND_ADDR_ADRP:
3294 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3300 /* This is not necessary during the disassembling, as print_address_func
3301 in the disassemble_info will take care of the printing. But some
3302 other callers may be still interested in getting the string in *STR,
3303 so here we do snprintf regardless. */
3304 snprintf (buf, size, "#0x%" PRIx64, addr);
3307 case AARCH64_OPND_ADDR_PCREL14:
3308 case AARCH64_OPND_ADDR_PCREL19:
3309 case AARCH64_OPND_ADDR_PCREL21:
3310 case AARCH64_OPND_ADDR_PCREL26:
3311 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3316 /* This is not necessary during the disassembling, as print_address_func
3317 in the disassemble_info will take care of the printing. But some
3318 other callers may be still interested in getting the string in *STR,
3319 so here we do snprintf regardless. */
3320 snprintf (buf, size, "#0x%" PRIx64, addr);
3323 case AARCH64_OPND_ADDR_SIMPLE:
3324 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3325 case AARCH64_OPND_SIMD_ADDR_POST:
3326 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3327 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3329 if (opnd->addr.offset.is_reg)
3330 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3332 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3335 snprintf (buf, size, "[%s]", name);
3338 case AARCH64_OPND_ADDR_REGOFF:
3339 case AARCH64_OPND_SVE_ADDR_RR:
3340 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3341 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3342 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3343 case AARCH64_OPND_SVE_ADDR_RX:
3344 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3345 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3346 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3347 print_register_offset_address
3348 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3349 get_offset_int_reg_name (opnd));
3352 case AARCH64_OPND_SVE_ADDR_RZ:
3353 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3354 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3355 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3356 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3357 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3358 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3359 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3360 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3361 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3362 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3363 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3364 print_register_offset_address
3365 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3366 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3369 case AARCH64_OPND_ADDR_SIMM7:
3370 case AARCH64_OPND_ADDR_SIMM9:
3371 case AARCH64_OPND_ADDR_SIMM9_2:
3372 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3373 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3374 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3375 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3376 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3377 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3378 case AARCH64_OPND_SVE_ADDR_RI_U6:
3379 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3380 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3381 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3382 print_immediate_offset_address
3383 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3386 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3387 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3388 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3389 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3390 print_immediate_offset_address
3392 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3395 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3396 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3397 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3398 print_register_offset_address
3400 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3401 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3404 case AARCH64_OPND_ADDR_UIMM12:
3405 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3406 if (opnd->addr.offset.imm)
3407 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3409 snprintf (buf, size, "[%s]", name);
3412 case AARCH64_OPND_SYSREG:
3413 for (i = 0; aarch64_sys_regs[i].name; ++i)
3414 if (aarch64_sys_regs[i].value == opnd->sysreg
3415 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3417 if (aarch64_sys_regs[i].name)
3418 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3421 /* Implementation defined system register. */
3422 unsigned int value = opnd->sysreg;
3423 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3424 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3429 case AARCH64_OPND_PSTATEFIELD:
3430 for (i = 0; aarch64_pstatefields[i].name; ++i)
3431 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3433 assert (aarch64_pstatefields[i].name);
3434 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3437 case AARCH64_OPND_SYSREG_AT:
3438 case AARCH64_OPND_SYSREG_DC:
3439 case AARCH64_OPND_SYSREG_IC:
3440 case AARCH64_OPND_SYSREG_TLBI:
3441 snprintf (buf, size, "%s", opnd->sysins_op->name);
3444 case AARCH64_OPND_BARRIER:
3445 snprintf (buf, size, "%s", opnd->barrier->name);
3448 case AARCH64_OPND_BARRIER_ISB:
3449 /* Operand can be omitted, e.g. in DCPS1. */
3450 if (! optional_operand_p (opcode, idx)
3451 || (opnd->barrier->value
3452 != get_optional_operand_default_value (opcode)))
3453 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3456 case AARCH64_OPND_PRFOP:
3457 if (opnd->prfop->name != NULL)
3458 snprintf (buf, size, "%s", opnd->prfop->name);
3460 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3463 case AARCH64_OPND_BARRIER_PSB:
3464 snprintf (buf, size, "%s", opnd->hint_option->name);
3472 #define CPENC(op0,op1,crn,crm,op2) \
3473 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3474 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3475 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3476 /* for 3.9.10 System Instructions */
3477 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3499 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3504 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3509 #define F_HASXT 0x4 /* System instruction register <Xt>
3513 /* TODO there are two more issues need to be resolved
3514 1. handle read-only and write-only system registers
3515 2. handle cpu-implementation-defined system registers. */
3516 const aarch64_sys_reg aarch64_sys_regs [] =
3518 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3519 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3520 { "elr_el1", CPEN_(0,C0,1), 0 },
3521 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3522 { "sp_el0", CPEN_(0,C1,0), 0 },
3523 { "spsel", CPEN_(0,C2,0), 0 },
3524 { "daif", CPEN_(3,C2,1), 0 },
3525 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3526 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3527 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3528 { "nzcv", CPEN_(3,C2,0), 0 },
3529 { "fpcr", CPEN_(3,C4,0), 0 },
3530 { "fpsr", CPEN_(3,C4,1), 0 },
3531 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3532 { "dlr_el0", CPEN_(3,C5,1), 0 },
3533 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3534 { "elr_el2", CPEN_(4,C0,1), 0 },
3535 { "sp_el1", CPEN_(4,C1,0), 0 },
3536 { "spsr_irq", CPEN_(4,C3,0), 0 },
3537 { "spsr_abt", CPEN_(4,C3,1), 0 },
3538 { "spsr_und", CPEN_(4,C3,2), 0 },
3539 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3540 { "spsr_el3", CPEN_(6,C0,0), 0 },
3541 { "elr_el3", CPEN_(6,C0,1), 0 },
3542 { "sp_el2", CPEN_(6,C1,0), 0 },
3543 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3544 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3545 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3546 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3547 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3548 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3549 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3550 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3551 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3552 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3553 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3554 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3555 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3556 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3557 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3558 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3559 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3560 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3561 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3562 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3563 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3564 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3565 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3566 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3567 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3568 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3569 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3570 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3571 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3572 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3573 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3574 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3575 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3576 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3577 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3578 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3579 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3580 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3581 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3582 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3583 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3584 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3585 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3586 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3587 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3588 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3589 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3590 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3591 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3592 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3593 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3594 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3595 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3596 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3597 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3598 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3599 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3600 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3601 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3602 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3603 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3604 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3605 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3606 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3607 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3608 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3609 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3610 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3611 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3612 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3613 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3614 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3615 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3616 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3617 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3618 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3619 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3620 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3621 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3622 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3623 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3624 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3625 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3626 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3627 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3628 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3629 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3630 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3631 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3632 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3633 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3634 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3635 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3636 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3637 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3638 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3639 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3640 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3641 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3642 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3643 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3644 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3645 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3646 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3647 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3648 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3649 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3650 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3651 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3652 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3653 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3654 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3655 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3656 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3657 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3658 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3659 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3660 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3661 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3662 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3663 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3664 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3665 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3666 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3667 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3668 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3669 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3670 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3671 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3672 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3673 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3674 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3675 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3676 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3677 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3678 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3679 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3680 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3681 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3682 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3683 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3684 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3685 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3686 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3687 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3688 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3689 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3690 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3691 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3692 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3693 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3694 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3695 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3696 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3697 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3698 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3699 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3700 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3701 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3702 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3703 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3704 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3705 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3706 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3707 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3708 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3709 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3710 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3711 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3712 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3713 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3714 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3715 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3716 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3717 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3718 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3719 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3720 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3721 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3722 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3723 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3724 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3725 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3726 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3727 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3728 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3729 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3730 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3731 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3732 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3733 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3734 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3735 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3736 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3737 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3738 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3739 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3740 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3741 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3742 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3743 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3744 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3745 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3746 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3747 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3748 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3749 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3750 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3751 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3752 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3753 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3754 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3755 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3756 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3757 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3758 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3759 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3760 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3761 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3762 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3763 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3764 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3765 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3766 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3767 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3768 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3769 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3770 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3771 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3772 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3773 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3774 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3775 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3776 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3777 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3778 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3779 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3780 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3781 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3782 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3783 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3784 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3785 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3786 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3787 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3788 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3789 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3790 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3791 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3792 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3793 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3794 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3795 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3796 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3797 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3798 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3799 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3800 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3801 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3802 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3803 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3804 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3805 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3806 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3807 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3808 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3809 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3810 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3811 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3812 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3813 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3814 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3815 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3816 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3817 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3818 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3819 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3820 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3821 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3822 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3823 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3824 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3825 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3826 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3827 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3828 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3829 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3830 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3831 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3832 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3833 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3834 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3835 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3836 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3837 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3838 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3839 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3840 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3841 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3842 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3843 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3844 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3845 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3846 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3847 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3848 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3849 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3850 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3851 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3852 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3853 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3854 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3855 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3856 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3857 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3858 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3859 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3860 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3861 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3862 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3863 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3864 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3865 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3866 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3867 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3868 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3869 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3870 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3871 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3872 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3873 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3874 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3875 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3876 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3877 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3878 { 0, CPENC(0,0,0,0,0), 0 },
3882 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3884 return (reg->flags & F_DEPRECATED) != 0;
3888 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3889 const aarch64_sys_reg *reg)
3891 if (!(reg->flags & F_ARCHEXT))
3894 /* PAN. Values are from aarch64_sys_regs. */
3895 if (reg->value == CPEN_(0,C2,3)
3896 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3899 /* Virtualization host extensions: system registers. */
3900 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3901 || reg->value == CPENC (3, 4, C13, C0, 1)
3902 || reg->value == CPENC (3, 4, C14, C3, 0)
3903 || reg->value == CPENC (3, 4, C14, C3, 1)
3904 || reg->value == CPENC (3, 4, C14, C3, 2))
3905 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3908 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3909 if ((reg->value == CPEN_ (5, C0, 0)
3910 || reg->value == CPEN_ (5, C0, 1)
3911 || reg->value == CPENC (3, 5, C1, C0, 0)
3912 || reg->value == CPENC (3, 5, C1, C0, 2)
3913 || reg->value == CPENC (3, 5, C2, C0, 0)
3914 || reg->value == CPENC (3, 5, C2, C0, 1)
3915 || reg->value == CPENC (3, 5, C2, C0, 2)
3916 || reg->value == CPENC (3, 5, C5, C1, 0)
3917 || reg->value == CPENC (3, 5, C5, C1, 1)
3918 || reg->value == CPENC (3, 5, C5, C2, 0)
3919 || reg->value == CPENC (3, 5, C6, C0, 0)
3920 || reg->value == CPENC (3, 5, C10, C2, 0)
3921 || reg->value == CPENC (3, 5, C10, C3, 0)
3922 || reg->value == CPENC (3, 5, C12, C0, 0)
3923 || reg->value == CPENC (3, 5, C13, C0, 1)
3924 || reg->value == CPENC (3, 5, C14, C1, 0))
3925 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3928 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3929 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3930 || reg->value == CPENC (3, 5, C14, C2, 1)
3931 || reg->value == CPENC (3, 5, C14, C2, 2)
3932 || reg->value == CPENC (3, 5, C14, C3, 0)
3933 || reg->value == CPENC (3, 5, C14, C3, 1)
3934 || reg->value == CPENC (3, 5, C14, C3, 2))
3935 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3938 /* ARMv8.2 features. */
3940 /* ID_AA64MMFR2_EL1. */
3941 if (reg->value == CPENC (3, 0, C0, C7, 2)
3942 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3946 if (reg->value == CPEN_ (0, C2, 4)
3947 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3950 /* RAS extension. */
3952 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3953 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3954 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3955 || reg->value == CPENC (3, 0, C5, C3, 1)
3956 || reg->value == CPENC (3, 0, C5, C3, 2)
3957 || reg->value == CPENC (3, 0, C5, C3, 3)
3958 || reg->value == CPENC (3, 0, C5, C4, 0)
3959 || reg->value == CPENC (3, 0, C5, C4, 1)
3960 || reg->value == CPENC (3, 0, C5, C4, 2)
3961 || reg->value == CPENC (3, 0, C5, C4, 3)
3962 || reg->value == CPENC (3, 0, C5, C5, 0)
3963 || reg->value == CPENC (3, 0, C5, C5, 1))
3964 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3967 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3968 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3969 || reg->value == CPENC (3, 0, C12, C1, 1)
3970 || reg->value == CPENC (3, 4, C12, C1, 1))
3971 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3974 /* Statistical Profiling extension. */
3975 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3976 || reg->value == CPENC (3, 0, C9, C10, 1)
3977 || reg->value == CPENC (3, 0, C9, C10, 3)
3978 || reg->value == CPENC (3, 0, C9, C10, 7)
3979 || reg->value == CPENC (3, 0, C9, C9, 0)
3980 || reg->value == CPENC (3, 0, C9, C9, 2)
3981 || reg->value == CPENC (3, 0, C9, C9, 3)
3982 || reg->value == CPENC (3, 0, C9, C9, 4)
3983 || reg->value == CPENC (3, 0, C9, C9, 5)
3984 || reg->value == CPENC (3, 0, C9, C9, 6)
3985 || reg->value == CPENC (3, 0, C9, C9, 7)
3986 || reg->value == CPENC (3, 4, C9, C9, 0)
3987 || reg->value == CPENC (3, 5, C9, C9, 0))
3988 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3994 const aarch64_sys_reg aarch64_pstatefields [] =
3996 { "spsel", 0x05, 0 },
3997 { "daifset", 0x1e, 0 },
3998 { "daifclr", 0x1f, 0 },
3999 { "pan", 0x04, F_ARCHEXT },
4000 { "uao", 0x03, F_ARCHEXT },
4001 { 0, CPENC(0,0,0,0,0), 0 },
4005 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4006 const aarch64_sys_reg *reg)
4008 if (!(reg->flags & F_ARCHEXT))
4011 /* PAN. Values are from aarch64_pstatefields. */
4012 if (reg->value == 0x04
4013 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4016 /* UAO. Values are from aarch64_pstatefields. */
4017 if (reg->value == 0x03
4018 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4024 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4026 { "ialluis", CPENS(0,C7,C1,0), 0 },
4027 { "iallu", CPENS(0,C7,C5,0), 0 },
4028 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4029 { 0, CPENS(0,0,0,0), 0 }
4032 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4034 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4035 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4036 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4037 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4038 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4039 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4040 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4041 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4042 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4043 { 0, CPENS(0,0,0,0), 0 }
4046 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4048 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4049 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4050 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4051 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4052 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4053 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4054 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4055 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4056 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4057 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4058 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4059 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4060 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4061 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4062 { 0, CPENS(0,0,0,0), 0 }
4065 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4067 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4068 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4069 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4070 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4071 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4072 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4073 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4074 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4075 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4076 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4077 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4078 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4079 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4080 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4081 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4082 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4083 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4084 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4085 { "alle2", CPENS(4,C8,C7,0), 0 },
4086 { "alle2is", CPENS(4,C8,C3,0), 0 },
4087 { "alle1", CPENS(4,C8,C7,4), 0 },
4088 { "alle1is", CPENS(4,C8,C3,4), 0 },
4089 { "alle3", CPENS(6,C8,C7,0), 0 },
4090 { "alle3is", CPENS(6,C8,C3,0), 0 },
4091 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4092 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4093 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4094 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4095 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4096 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4097 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4098 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4099 { 0, CPENS(0,0,0,0), 0 }
4103 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4105 return (sys_ins_reg->flags & F_HASXT) != 0;
4109 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4110 const aarch64_sys_ins_reg *reg)
4112 if (!(reg->flags & F_ARCHEXT))
4115 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4116 if (reg->value == CPENS (3, C7, C12, 1)
4117 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4120 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4121 if ((reg->value == CPENS (0, C7, C9, 0)
4122 || reg->value == CPENS (0, C7, C9, 1))
4123 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* Extract bit number BT from the instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the bitfield INSN[HI:LO], inclusive at both ends.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4150 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4151 const aarch64_insn insn)
4153 int t = BITS (insn, 4, 0);
4154 int n = BITS (insn, 9, 5);
4155 int t2 = BITS (insn, 14, 10);
4159 /* Write back enabled. */
4160 if ((t == n || t2 == n) && n != 31)
4174 /* Return true if VALUE cannot be moved into an SVE register using DUP
4175 (with any element size, not just ESIZE) and if using DUPM would
4176 therefore be OK. ESIZE is the number of bytes in the immediate. */
4179 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4181 int64_t svalue = uvalue;
4182 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4184 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4186 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4188 svalue = (int32_t) uvalue;
4189 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4191 svalue = (int16_t) uvalue;
4192 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4196 if ((svalue & 0xff) == 0)
4198 return svalue < -128 || svalue >= 128;
4201 /* Include the opcode description table as well as the operand description
4203 #define VERIFIER(x) verify_##x
4204 #include "aarch64-tbl.h"