1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
127 DP_VECTOR_ACROSS_LANES,
130 static const char significant_operand_index [] =
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
202 const aarch64_field fields[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* N: in logical (immediate) instructions. */
257 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
258 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
259 { 31, 1 }, /* sf: in integer data processing instructions. */
260 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
261 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
262 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
263 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
264 { 31, 1 }, /* b5: in the test bit and branch instructions. */
265 { 19, 5 }, /* b40: in the test bit and branch instructions. */
266 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
267 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
268 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
269 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
270 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
271 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
272 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
273 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
274 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
275 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
276 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
277 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
278 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
279 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
280 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
281 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
282 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
283 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
284 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
285 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
286 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
287 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
288 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
289 { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
292 enum aarch64_operand_class
293 aarch64_get_operand_class (enum aarch64_opnd type)
295 return aarch64_operands[type].op_class;
299 aarch64_get_operand_name (enum aarch64_opnd type)
301 return aarch64_operands[type].name;
304 /* Get operand description string.
305 This is usually for the diagnosis purpose. */
307 aarch64_get_operand_desc (enum aarch64_opnd type)
309 return aarch64_operands[type].desc;
312 /* Table of all conditional affixes. */
313 const aarch64_cond aarch64_conds[16] =
318 {{"cc", "lo", "ul"}, 0x3},
334 get_cond_from_value (aarch64_insn value)
337 return &aarch64_conds[(unsigned int) value];
341 get_inverted_cond (const aarch64_cond *cond)
343 return &aarch64_conds[cond->value ^ 0x1];
346 /* Table describing the operand extension/shifting operators; indexed by
347 enum aarch64_modifier_kind.
349 The value column provides the most common values for encoding modifiers,
350 which enables table-driven encoding/decoding for the modifiers. */
351 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
371 enum aarch64_modifier_kind
372 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
374 return desc - aarch64_operand_modifiers;
378 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
380 return aarch64_operand_modifiers[kind].value;
383 enum aarch64_modifier_kind
384 aarch64_get_operand_modifier_from_value (aarch64_insn value,
385 bfd_boolean extend_p)
387 if (extend_p == TRUE)
388 return AARCH64_MOD_UXTB + value;
390 return AARCH64_MOD_LSL - value;
394 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
396 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
400 static inline bfd_boolean
401 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
403 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
407 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
427 /* Table describing the operands supported by the aliases of the HINT
430 The name column is the operand that is accepted for the alias. The value
431 column is the hint number of the alias. The list of operands is terminated
432 by NULL in the name column. */
434 const struct aarch64_name_value_pair aarch64_hint_options[] =
436 { "csync", 0x11 }, /* PSB CSYNC. */
440 /* op -> op: load = 0 instruction = 1 store = 2
442 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
443 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
444 const struct aarch64_name_value_pair aarch64_prfops[32] =
446 { "pldl1keep", B(0, 1, 0) },
447 { "pldl1strm", B(0, 1, 1) },
448 { "pldl2keep", B(0, 2, 0) },
449 { "pldl2strm", B(0, 2, 1) },
450 { "pldl3keep", B(0, 3, 0) },
451 { "pldl3strm", B(0, 3, 1) },
454 { "plil1keep", B(1, 1, 0) },
455 { "plil1strm", B(1, 1, 1) },
456 { "plil2keep", B(1, 2, 0) },
457 { "plil2strm", B(1, 2, 1) },
458 { "plil3keep", B(1, 3, 0) },
459 { "plil3strm", B(1, 3, 1) },
462 { "pstl1keep", B(2, 1, 0) },
463 { "pstl1strm", B(2, 1, 1) },
464 { "pstl2keep", B(2, 2, 0) },
465 { "pstl2strm", B(2, 2, 1) },
466 { "pstl3keep", B(2, 3, 0) },
467 { "pstl3strm", B(2, 3, 1) },
481 /* Utilities on value constraint. */
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in [LOW, HIGH] (inclusive).  */
static inline int
value_in_range_p (int64_t value, int64_t low, int64_t high)
{
  return (value >= low && value <= high) ? 1 : 0;
}

/* Return 1 iff VALUE is a multiple of ALIGN (ALIGN must be a power of 2).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}

/* A signed value fits in a field.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}

/* An unsigned value fits in a field.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
523 /* Return 1 if OPERAND is SP or WSP. */
525 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
527 return ((aarch64_get_operand_class (operand->type)
528 == AARCH64_OPND_CLASS_INT_REG)
529 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
530 && operand->reg.regno == 31);
533 /* Return 1 if OPERAND is XZR or WZP. */
535 aarch64_zero_register_p (const aarch64_opnd_info *operand)
537 return ((aarch64_get_operand_class (operand->type)
538 == AARCH64_OPND_CLASS_INT_REG)
539 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
540 && operand->reg.regno == 31);
543 /* Return true if the operand *OPERAND that has the operand code
544 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
545 qualified by the qualifier TARGET. */
548 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
549 aarch64_opnd_qualifier_t target)
551 switch (operand->qualifier)
553 case AARCH64_OPND_QLF_W:
554 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
557 case AARCH64_OPND_QLF_X:
558 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
561 case AARCH64_OPND_QLF_WSP:
562 if (target == AARCH64_OPND_QLF_W
563 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
566 case AARCH64_OPND_QLF_SP:
567 if (target == AARCH64_OPND_QLF_X
568 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
578 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
579 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
581 Return NIL if more than one expected qualifiers are found. */
583 aarch64_opnd_qualifier_t
584 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
586 const aarch64_opnd_qualifier_t known_qlf,
593 When the known qualifier is NIL, we have to assume that there is only
594 one qualifier sequence in the *QSEQ_LIST and return the corresponding
595 qualifier directly. One scenario is that for instruction
596 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
597 which has only one possible valid qualifier sequence
599 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
600 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
602 Because the qualifier NIL has dual roles in the qualifier sequence:
603 it can mean no qualifier for the operand, or the qualifer sequence is
604 not in use (when all qualifiers in the sequence are NILs), we have to
605 handle this special case here. */
606 if (known_qlf == AARCH64_OPND_NIL)
608 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
609 return qseq_list[0][idx];
612 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
614 if (qseq_list[i][known_idx] == known_qlf)
617 /* More than one sequences are found to have KNOWN_QLF at
619 return AARCH64_OPND_NIL;
624 return qseq_list[saved_i][idx];
627 enum operand_qualifier_kind
635 /* Operand qualifier description. */
636 struct operand_qualifier_data
638 /* The usage of the three data fields depends on the qualifier kind. */
645 enum operand_qualifier_kind kind;
648 /* Indexed by the operand qualifier enumerators. */
649 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
651 {0, 0, 0, "NIL", OQK_NIL},
653 /* Operand variant qualifiers.
655 element size, number of elements and common value for encoding. */
657 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
658 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
659 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
660 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
662 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
663 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
664 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
665 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
666 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
668 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
669 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
670 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
671 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
672 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
673 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
674 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
675 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
676 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
677 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
679 {0, 0, 0, "z", OQK_OPD_VARIANT},
680 {0, 0, 0, "m", OQK_OPD_VARIANT},
682 /* Qualifiers constraining the value range.
684 Lower bound, higher bound, unused. */
686 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
687 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
688 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
689 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
690 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
691 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
693 /* Qualifiers for miscellaneous purpose.
695 unused, unused and unused. */
700 {0, 0, 0, "retrieving", 0},
703 static inline bfd_boolean
704 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
706 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
710 static inline bfd_boolean
711 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
713 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
718 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
720 return aarch64_opnd_qualifiers[qualifier].desc;
723 /* Given an operand qualifier, return the expected data element size
724 of a qualified operand. */
726 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
728 assert (operand_variant_qualifier_p (qualifier) == TRUE);
729 return aarch64_opnd_qualifiers[qualifier].data0;
733 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
735 assert (operand_variant_qualifier_p (qualifier) == TRUE);
736 return aarch64_opnd_qualifiers[qualifier].data1;
740 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
742 assert (operand_variant_qualifier_p (qualifier) == TRUE);
743 return aarch64_opnd_qualifiers[qualifier].data2;
747 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
749 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
750 return aarch64_opnd_qualifiers[qualifier].data0;
754 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
756 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
757 return aarch64_opnd_qualifiers[qualifier].data1;
762 aarch64_verbose (const char *str, ...)
773 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
777 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
778 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
783 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
784 const aarch64_opnd_qualifier_t *qualifier)
787 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
789 aarch64_verbose ("dump_match_qualifiers:");
790 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
791 curr[i] = opnd[i].qualifier;
792 dump_qualifier_sequence (curr);
793 aarch64_verbose ("against");
794 dump_qualifier_sequence (qualifier);
796 #endif /* DEBUG_AARCH64 */
798 /* TODO improve this, we can have an extra field at the runtime to
799 store the number of operands rather than calculating it every time. */
802 aarch64_num_of_operands (const aarch64_opcode *opcode)
805 const enum aarch64_opnd *opnds = opcode->operands;
806 while (opnds[i++] != AARCH64_OPND_NIL)
809 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
813 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
814 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
816 N.B. on the entry, it is very likely that only some operands in *INST
817 have had their qualifiers been established.
819 If STOP_AT is not -1, the function will only try to match
820 the qualifier sequence for operands before and including the operand
821 of index STOP_AT; and on success *RET will only be filled with the first
822 (STOP_AT+1) qualifiers.
824 A couple examples of the matching algorithm:
832 Apart from serving the main encoding routine, this can also be called
833 during or after the operand decoding. */
836 aarch64_find_best_match (const aarch64_inst *inst,
837 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
838 int stop_at, aarch64_opnd_qualifier_t *ret)
842 const aarch64_opnd_qualifier_t *qualifiers;
844 num_opnds = aarch64_num_of_operands (inst->opcode);
847 DEBUG_TRACE ("SUCCEED: no operand");
851 if (stop_at < 0 || stop_at >= num_opnds)
852 stop_at = num_opnds - 1;
854 /* For each pattern. */
855 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
858 qualifiers = *qualifiers_list;
860 /* Start as positive. */
863 DEBUG_TRACE ("%d", i);
866 dump_match_qualifiers (inst->operands, qualifiers);
869 /* Most opcodes has much fewer patterns in the list.
870 First NIL qualifier indicates the end in the list. */
871 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
873 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
879 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
881 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
883 /* Either the operand does not have qualifier, or the qualifier
884 for the operand needs to be deduced from the qualifier
886 In the latter case, any constraint checking related with
887 the obtained qualifier should be done later in
888 operand_general_constraint_met_p. */
891 else if (*qualifiers != inst->operands[j].qualifier)
893 /* Unless the target qualifier can also qualify the operand
894 (which has already had a non-nil qualifier), non-equal
895 qualifiers are generally un-matched. */
896 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
905 continue; /* Equal qualifiers are certainly matched. */
908 /* Qualifiers established. */
915 /* Fill the result in *RET. */
917 qualifiers = *qualifiers_list;
919 DEBUG_TRACE ("complete qualifiers using list %d", i);
922 dump_qualifier_sequence (qualifiers);
925 for (j = 0; j <= stop_at; ++j, ++qualifiers)
926 ret[j] = *qualifiers;
927 for (; j < AARCH64_MAX_OPND_NUM; ++j)
928 ret[j] = AARCH64_OPND_QLF_NIL;
930 DEBUG_TRACE ("SUCCESS");
934 DEBUG_TRACE ("FAIL");
938 /* Operand qualifier matching and resolving.
940 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
941 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
943 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
947 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
950 aarch64_opnd_qualifier_seq_t qualifiers;
952 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
955 DEBUG_TRACE ("matching FAIL");
959 if (inst->opcode->flags & F_STRICT)
961 /* Require an exact qualifier match, even for NIL qualifiers. */
962 nops = aarch64_num_of_operands (inst->opcode);
963 for (i = 0; i < nops; ++i)
964 if (inst->operands[i].qualifier != qualifiers[i])
968 /* Update the qualifiers. */
969 if (update_p == TRUE)
970 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
972 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
974 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
975 "update %s with %s for operand %d",
976 aarch64_get_qualifier_name (inst->operands[i].qualifier),
977 aarch64_get_qualifier_name (qualifiers[i]), i);
978 inst->operands[i].qualifier = qualifiers[i];
981 DEBUG_TRACE ("matching SUCCESS");
985 /* Return TRUE if VALUE is a wide constant that can be moved into a general
988 IS32 indicates whether value is a 32-bit immediate or not.
989 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
990 amount will be returned in *SHIFT_AMOUNT. */
993 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
997 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1001 /* Allow all zeros or all ones in top 32-bits, so that
1002 32-bit constant expressions like ~0x80000000 are
1004 uint64_t ext = value;
1005 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1006 /* Immediate out of range. */
1008 value &= (int64_t) 0xffffffff;
1011 /* first, try movz then movn */
1013 if ((value & ((int64_t) 0xffff << 0)) == value)
1015 else if ((value & ((int64_t) 0xffff << 16)) == value)
1017 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1019 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1024 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1028 if (shift_amount != NULL)
1029 *shift_amount = amount;
1031 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1036 /* Build the accepted values for immediate logical SIMD instructions.
1038 The standard encodings of the immediate value are:
1039 N imms immr SIMD size R S
1040 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1041 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1042 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1043 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1044 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1045 0 11110s 00000r 2 UInt(r) UInt(s)
1046 where all-ones value of S is reserved.
1048 Let's call E the SIMD size.
1050 The immediate value is: S+1 bits '1' rotated to the right by R.
1052 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1053 (remember S != E - 1). */
1055 #define TOTAL_IMM_NB 5334
1060 aarch64_insn encoding;
1061 } simd_imm_encoding;
1063 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1066 simd_imm_encoding_cmp(const void *i1, const void *i2)
1068 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1069 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1071 if (imm1->imm < imm2->imm)
1073 if (imm1->imm > imm2->imm)
1078 /* immediate bitfield standard encoding
1079 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1080 1 ssssss rrrrrr 64 rrrrrr ssssss
1081 0 0sssss 0rrrrr 32 rrrrr sssss
1082 0 10ssss 00rrrr 16 rrrr ssss
1083 0 110sss 000rrr 8 rrr sss
1084 0 1110ss 0000rr 4 rr ss
1085 0 11110s 00000r 2 r s */
/* Pack the N:immr:imms standard encoding of a logical immediate:
   bit 12 = IS64 (the N bit), bits [11,6] = R (rotation), bits [5,0] = S.  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1093 build_immediate_table (void)
1095 uint32_t log_e, e, s, r, s_mask;
1101 for (log_e = 1; log_e <= 6; log_e++)
1103 /* Get element size. */
1108 mask = 0xffffffffffffffffull;
1114 mask = (1ull << e) - 1;
1116 1 ((1 << 4) - 1) << 2 = 111100
1117 2 ((1 << 3) - 1) << 3 = 111000
1118 3 ((1 << 2) - 1) << 4 = 110000
1119 4 ((1 << 1) - 1) << 5 = 100000
1120 5 ((1 << 0) - 1) << 6 = 000000 */
1121 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1123 for (s = 0; s < e - 1; s++)
1124 for (r = 0; r < e; r++)
1126 /* s+1 consecutive bits to 1 (s < 63) */
1127 imm = (1ull << (s + 1)) - 1;
1128 /* rotate right by r */
1130 imm = (imm >> r) | ((imm << (e - r)) & mask);
1131 /* replicate the constant depending on SIMD size */
1134 case 1: imm = (imm << 2) | imm;
1135 case 2: imm = (imm << 4) | imm;
1136 case 3: imm = (imm << 8) | imm;
1137 case 4: imm = (imm << 16) | imm;
1138 case 5: imm = (imm << 32) | imm;
1142 simd_immediates[nb_imms].imm = imm;
1143 simd_immediates[nb_imms].encoding =
1144 encode_immediate_bitfield(is64, s | s_mask, r);
1148 assert (nb_imms == TOTAL_IMM_NB);
1149 qsort(simd_immediates, nb_imms,
1150 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1153 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1154 be accepted by logical (immediate) instructions
1155 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1157 ESIZE is the number of bytes in the decoded immediate value.
1158 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1159 VALUE will be returned in *ENCODING. */
1162 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1164 simd_imm_encoding imm_enc;
1165 const simd_imm_encoding *imm_encoding;
1166 static bfd_boolean initialized = FALSE;
1170 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1173 if (initialized == FALSE)
1175 build_immediate_table ();
1179 /* Allow all zeros or all ones in top bits, so that
1180 constant expressions like ~1 are permitted. */
1181 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1182 if ((value & ~upper) != value && (value | upper) != value)
1185 /* Replicate to a full 64-bit value. */
1187 for (i = esize * 8; i < 64; i *= 2)
1188 value |= (value << i);
1190 imm_enc.imm = value;
1191 imm_encoding = (const simd_imm_encoding *)
1192 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1193 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1194 if (imm_encoding == NULL)
1196 DEBUG_TRACE ("exit with FALSE");
1199 if (encoding != NULL)
1200 *encoding = imm_encoding->encoding;
1201 DEBUG_TRACE ("exit with TRUE");
1205 /* If 64-bit immediate IMM is in the format of
1206 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1207 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1208 of value "abcdefgh". Otherwise return -1. */
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;
    }
  return ret;
}
1227 /* Utility inline functions for operand_general_constraint_met_p. */
1230 set_error (aarch64_operand_error *mismatch_detail,
1231 enum aarch64_operand_error_kind kind, int idx,
1234 if (mismatch_detail == NULL)
1236 mismatch_detail->kind = kind;
1237 mismatch_detail->index = idx;
1238 mismatch_detail->error = error;
1242 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1245 if (mismatch_detail == NULL)
1247 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1251 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1252 int idx, int lower_bound, int upper_bound,
1255 if (mismatch_detail == NULL)
1257 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1258 mismatch_detail->data[0] = lower_bound;
1259 mismatch_detail->data[1] = upper_bound;
1263 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1264 int idx, int lower_bound, int upper_bound)
1266 if (mismatch_detail == NULL)
1268 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1269 _("immediate value"));
1273 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1274 int idx, int lower_bound, int upper_bound)
1276 if (mismatch_detail == NULL)
1278 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1279 _("immediate offset"));
1283 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1284 int idx, int lower_bound, int upper_bound)
1286 if (mismatch_detail == NULL)
1288 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1289 _("register number"));
/* Out-of-range error specialized for a vector register element index.  */
1293 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1294 int idx, int lower_bound, int upper_bound)
1296 if (mismatch_detail == NULL)
1298 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1299 _("register element index"));
/* Out-of-range error specialized for a shift amount.
   NOTE(review): the message-string argument line is elided in this listing.  */
1303 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1304 int idx, int lower_bound, int upper_bound)
1306 if (mismatch_detail == NULL)
1308 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1312 /* Report that the MUL modifier in operand IDX should be in the range
1313 [LOWER_BOUND, UPPER_BOUND]. */
/* Out-of-range error specialized for an SVE MUL-modifier multiplier.
   NOTE(review): the message-string argument line is elided in this listing.  */
1315 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1316 int idx, int lower_bound, int upper_bound)
1318 if (mismatch_detail == NULL)
1320 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
/* Record an AARCH64_OPDE_UNALIGNED error for operand IDX; the required
   ALIGNMENT is stashed in data[0] for message formatting.  */
1325 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1328 if (mismatch_detail == NULL)
1330 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1331 mismatch_detail->data[0] = alignment;
/* Record an AARCH64_OPDE_REG_LIST error for operand IDX; the expected
   number of list registers is stashed in data[0].  */
1335 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1338 if (mismatch_detail == NULL)
1340 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1341 mismatch_detail->data[0] = expected_num;
/* Record an AARCH64_OPDE_OTHER_ERROR with a static message ERROR.  */
1345 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1348 if (mismatch_detail == NULL)
1350 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1353 /* General constraint checking based on operand code.
1355 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1356 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1358 This function has to be called after the qualifiers for all operands
1361 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1362 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1363 of error message during the disassembling where error message is not
1364 wanted. We avoid the dynamic construction of strings of error messages
1365 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1366 use a combination of error code, static string and some integer data to
1367 represent an error. */
/* Per-operand-class constraint checking for OPNDS[IDX] of opcode OPCODE;
   on failure an error is recorded via the set_*_error helpers above.
   NOTE(review): this listing is heavily decimated -- stale line numbers are
   embedded and many lines (braces, breaks, returns, parts of conditions)
   are missing from view.  Code kept byte-identical below; comments only.  */
1370 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1371 enum aarch64_opnd type,
1372 const aarch64_opcode *opcode,
1373 aarch64_operand_error *mismatch_detail)
1375 unsigned num, modifiers;
1377 int64_t imm, min_value, max_value;
1378 const aarch64_opnd_info *opnd = opnds + idx;
1379 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1381 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
/* Dispatch on the broad operand class; each class then refines by TYPE
   or qualifier.  */
1383 switch (aarch64_operands[type].op_class)
1385 case AARCH64_OPND_CLASS_INT_REG:
1386 /* Check pair reg constraints for cas* instructions. */
1387 if (type == AARCH64_OPND_PAIRREG)
1389 assert (idx == 1 || idx == 3);
1390 if (opnds[idx - 1].reg.regno % 2 != 0)
1392 set_syntax_error (mismatch_detail, idx - 1,
1393 _("reg pair must start from even reg"));
1396 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1398 set_syntax_error (mismatch_detail, idx,
1399 _("reg pair must be contiguous"));
1405 /* <Xt> may be optional in some IC and TLBI instructions. */
1406 if (type == AARCH64_OPND_Rt_SYS)
1408 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1409 == AARCH64_OPND_CLASS_SYSTEM));
1410 if (opnds[1].present
1411 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1413 set_other_error (mismatch_detail, idx, _("extraneous register"));
1416 if (!opnds[1].present
1417 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1419 set_other_error (mismatch_detail, idx, _("missing register"));
1425 case AARCH64_OPND_QLF_WSP:
1426 case AARCH64_OPND_QLF_SP:
1427 if (!aarch64_stack_pointer_p (opnd))
1429 set_other_error (mismatch_detail, idx,
1430 _("stack pointer register expected"));
1439 case AARCH64_OPND_CLASS_SVE_REG:
1442 case AARCH64_OPND_SVE_Zn_INDEX:
1443 size = aarch64_get_qualifier_esize (opnd->qualifier);
1444 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1446 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1452 case AARCH64_OPND_SVE_ZnxN:
1453 case AARCH64_OPND_SVE_ZtxN:
1454 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1456 set_other_error (mismatch_detail, idx,
1457 _("invalid register list"));
1467 case AARCH64_OPND_CLASS_PRED_REG:
1468 if (opnd->reg.regno >= 8
1469 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1471 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1476 case AARCH64_OPND_CLASS_COND:
1477 if (type == AARCH64_OPND_COND1
1478 && (opnds[idx].cond->value & 0xe) == 0xe)
1480 /* Not allow AL or NV. */
1481 set_syntax_error (mismatch_detail, idx, NULL);
1485 case AARCH64_OPND_CLASS_ADDRESS:
1486 /* Check writeback. */
1487 switch (opcode->iclass)
1491 case ldstnapair_offs:
1494 if (opnd->addr.writeback == 1)
1496 set_syntax_error (mismatch_detail, idx,
1497 _("unexpected address writeback"));
1502 case ldstpair_indexed:
1505 if (opnd->addr.writeback == 0)
1507 set_syntax_error (mismatch_detail, idx,
1508 _("address writeback expected"));
1513 assert (opnd->addr.writeback == 0);
1518 case AARCH64_OPND_ADDR_SIMM7:
1519 /* Scaled signed 7 bits immediate offset. */
1520 /* Get the size of the data element that is accessed, which may be
1521 different from that of the source register size,
1522 e.g. in strb/ldrb. */
1523 size = aarch64_get_qualifier_esize (opnd->qualifier);
1524 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1526 set_offset_out_of_range_error (mismatch_detail, idx,
1527 -64 * size, 63 * size);
1530 if (!value_aligned_p (opnd->addr.offset.imm, size))
1532 set_unaligned_error (mismatch_detail, idx, size);
1536 case AARCH64_OPND_ADDR_SIMM9:
1537 /* Unscaled signed 9 bits immediate offset. */
1538 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1540 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1545 case AARCH64_OPND_ADDR_SIMM9_2:
1546 /* Unscaled signed 9 bits immediate offset, which has to be negative
1548 size = aarch64_get_qualifier_esize (qualifier);
1549 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1550 && !value_aligned_p (opnd->addr.offset.imm, size))
1551 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1553 set_other_error (mismatch_detail, idx,
1554 _("negative or unaligned offset expected"));
1557 case AARCH64_OPND_SIMD_ADDR_POST:
1558 /* AdvSIMD load/store multiple structures, post-index. */
1560 if (opnd->addr.offset.is_reg)
1562 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1566 set_other_error (mismatch_detail, idx,
1567 _("invalid register offset"));
1573 const aarch64_opnd_info *prev = &opnds[idx-1];
1574 unsigned num_bytes; /* total number of bytes transferred. */
1575 /* The opcode dependent area stores the number of elements in
1576 each structure to be loaded/stored. */
1577 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1578 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1579 /* Special handling of loading single structure to all lane. */
1580 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1581 * aarch64_get_qualifier_esize (prev->qualifier);
1583 num_bytes = prev->reglist.num_regs
1584 * aarch64_get_qualifier_esize (prev->qualifier)
1585 * aarch64_get_qualifier_nelem (prev->qualifier);
1586 if ((int) num_bytes != opnd->addr.offset.imm)
1588 set_other_error (mismatch_detail, idx,
1589 _("invalid post-increment amount"));
1595 case AARCH64_OPND_ADDR_REGOFF:
1596 /* Get the size of the data element that is accessed, which may be
1597 different from that of the source register size,
1598 e.g. in strb/ldrb. */
1599 size = aarch64_get_qualifier_esize (opnd->qualifier);
1600 /* It is either no shift or shift by the binary logarithm of SIZE. */
1601 if (opnd->shifter.amount != 0
1602 && opnd->shifter.amount != (int)get_logsz (size))
1604 set_other_error (mismatch_detail, idx,
1605 _("invalid shift amount"));
1608 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1610 switch (opnd->shifter.kind)
1612 case AARCH64_MOD_UXTW:
1613 case AARCH64_MOD_LSL:
1614 case AARCH64_MOD_SXTW:
1615 case AARCH64_MOD_SXTX: break;
1617 set_other_error (mismatch_detail, idx,
1618 _("invalid extend/shift operator"));
1623 case AARCH64_OPND_ADDR_UIMM12:
1624 imm = opnd->addr.offset.imm;
1625 /* Get the size of the data element that is accessed, which may be
1626 different from that of the source register size,
1627 e.g. in strb/ldrb. */
1628 size = aarch64_get_qualifier_esize (qualifier);
1629 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1631 set_offset_out_of_range_error (mismatch_detail, idx,
1635 if (!value_aligned_p (opnd->addr.offset.imm, size))
1637 set_unaligned_error (mismatch_detail, idx, size);
1642 case AARCH64_OPND_ADDR_PCREL14:
1643 case AARCH64_OPND_ADDR_PCREL19:
1644 case AARCH64_OPND_ADDR_PCREL21:
1645 case AARCH64_OPND_ADDR_PCREL26:
1646 imm = opnd->imm.value;
1647 if (operand_need_shift_by_two (get_operand_from_code (type)))
1649 /* The offset value in a PC-relative branch instruction is alway
1650 4-byte aligned and is encoded without the lowest 2 bits. */
1651 if (!value_aligned_p (imm, 4))
1653 set_unaligned_error (mismatch_detail, idx, 4);
1656 /* Right shift by 2 so that we can carry out the following check
1660 size = get_operand_fields_width (get_operand_from_code (type));
1661 if (!value_fit_signed_field_p (imm, size))
1663 set_other_error (mismatch_detail, idx,
1664 _("immediate out of range"));
1669 case AARCH64_OPND_SVE_ADDR_RI_U6:
1670 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1671 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1672 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1676 assert (!opnd->addr.offset.is_reg);
1677 assert (opnd->addr.preind);
1678 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1681 if (opnd->shifter.operator_present
1682 || opnd->shifter.amount_present)
1684 set_other_error (mismatch_detail, idx,
1685 _("invalid addressing mode"));
1688 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1690 set_offset_out_of_range_error (mismatch_detail, idx,
1691 min_value, max_value);
1694 if (!value_aligned_p (opnd->addr.offset.imm, num))
1696 set_unaligned_error (mismatch_detail, idx, num);
1701 case AARCH64_OPND_SVE_ADDR_RR:
1702 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1703 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1704 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1705 case AARCH64_OPND_SVE_ADDR_RX:
1706 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1707 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1708 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1709 case AARCH64_OPND_SVE_ADDR_RZ:
1710 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1711 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1712 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1713 modifiers = 1 << AARCH64_MOD_LSL;
1715 assert (opnd->addr.offset.is_reg);
1716 assert (opnd->addr.preind);
1717 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1718 && opnd->addr.offset.regno == 31)
1720 set_other_error (mismatch_detail, idx,
1721 _("index register xzr is not allowed"));
1724 if (((1 << opnd->shifter.kind) & modifiers) == 0
1725 || (opnd->shifter.amount
1726 != get_operand_specific_data (&aarch64_operands[type])))
1728 set_other_error (mismatch_detail, idx,
1729 _("invalid addressing mode"));
1734 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1735 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1736 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1737 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1738 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1739 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1740 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1741 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1742 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1743 goto sve_rr_operand;
1745 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1746 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1747 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1748 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1751 goto sve_imm_offset;
1753 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1754 modifiers = 1 << AARCH64_MOD_LSL;
1756 assert (opnd->addr.offset.is_reg);
1757 assert (opnd->addr.preind);
1758 if (((1 << opnd->shifter.kind) & modifiers) == 0
1759 || opnd->shifter.amount < 0
1760 || opnd->shifter.amount > 3)
1762 set_other_error (mismatch_detail, idx,
1763 _("invalid addressing mode"));
1768 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1769 modifiers = (1 << AARCH64_MOD_SXTW);
1770 goto sve_zz_operand;
1772 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1773 modifiers = 1 << AARCH64_MOD_UXTW;
1774 goto sve_zz_operand;
1781 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1782 if (type == AARCH64_OPND_LEt)
1784 /* Get the upper bound for the element index. */
1785 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1786 if (!value_in_range_p (opnd->reglist.index, 0, num))
1788 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1792 /* The opcode dependent area stores the number of elements in
1793 each structure to be loaded/stored. */
1794 num = get_opcode_dependent_value (opcode);
1797 case AARCH64_OPND_LVt:
1798 assert (num >= 1 && num <= 4);
1799 /* Unless LD1/ST1, the number of registers should be equal to that
1800 of the structure elements. */
1801 if (num != 1 && opnd->reglist.num_regs != num)
1803 set_reg_list_error (mismatch_detail, idx, num);
1807 case AARCH64_OPND_LVt_AL:
1808 case AARCH64_OPND_LEt:
1809 assert (num >= 1 && num <= 4);
1810 /* The number of registers should be equal to that of the structure
1812 if (opnd->reglist.num_regs != num)
1814 set_reg_list_error (mismatch_detail, idx, num);
1823 case AARCH64_OPND_CLASS_IMMEDIATE:
1824 /* Constraint check on immediate operand. */
1825 imm = opnd->imm.value;
1826 /* E.g. imm_0_31 constrains value to be 0..31. */
1827 if (qualifier_value_in_range_constraint_p (qualifier)
1828 && !value_in_range_p (imm, get_lower_bound (qualifier),
1829 get_upper_bound (qualifier)))
1831 set_imm_out_of_range_error (mismatch_detail, idx,
1832 get_lower_bound (qualifier),
1833 get_upper_bound (qualifier));
1839 case AARCH64_OPND_AIMM:
1840 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1842 set_other_error (mismatch_detail, idx,
1843 _("invalid shift operator"));
1846 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1848 set_other_error (mismatch_detail, idx,
1849 _("shift amount expected to be 0 or 12"));
1852 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1854 set_other_error (mismatch_detail, idx,
1855 _("immediate out of range"));
1860 case AARCH64_OPND_HALF:
1861 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1862 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1864 set_other_error (mismatch_detail, idx,
1865 _("invalid shift operator"));
1868 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1869 if (!value_aligned_p (opnd->shifter.amount, 16))
1871 set_other_error (mismatch_detail, idx,
1872 _("shift amount should be a multiple of 16"));
1875 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1877 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1881 if (opnd->imm.value < 0)
1883 set_other_error (mismatch_detail, idx,
1884 _("negative immediate value not allowed"));
1887 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1889 set_other_error (mismatch_detail, idx,
1890 _("immediate out of range"));
1895 case AARCH64_OPND_IMM_MOV:
1897 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1898 imm = opnd->imm.value;
1902 case OP_MOV_IMM_WIDEN:
1904 /* Fall through... */
1905 case OP_MOV_IMM_WIDE:
1906 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1908 set_other_error (mismatch_detail, idx,
1909 _("immediate out of range"));
1913 case OP_MOV_IMM_LOG:
1914 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1916 set_other_error (mismatch_detail, idx,
1917 _("immediate out of range"));
1928 case AARCH64_OPND_NZCV:
1929 case AARCH64_OPND_CCMP_IMM:
1930 case AARCH64_OPND_EXCEPTION:
1931 case AARCH64_OPND_UIMM4:
1932 case AARCH64_OPND_UIMM7:
1933 case AARCH64_OPND_UIMM3_OP1:
1934 case AARCH64_OPND_UIMM3_OP2:
1935 size = get_operand_fields_width (get_operand_from_code (type));
1937 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1939 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1945 case AARCH64_OPND_WIDTH:
1946 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1947 && opnds[0].type == AARCH64_OPND_Rd);
1948 size = get_upper_bound (qualifier);
1949 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1950 /* lsb+width <= reg.size */
1952 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1953 size - opnds[idx-1].imm.value);
1958 case AARCH64_OPND_LIMM:
1960 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1961 uint64_t uimm = opnd->imm.value;
1962 if (opcode->op == OP_BIC)
1964 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
1966 set_other_error (mismatch_detail, idx,
1967 _("immediate out of range"));
1973 case AARCH64_OPND_IMM0:
1974 case AARCH64_OPND_FPIMM0:
1975 if (opnd->imm.value != 0)
1977 set_other_error (mismatch_detail, idx,
1978 _("immediate zero expected"));
1983 case AARCH64_OPND_SHLL_IMM:
1985 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1986 if (opnd->imm.value != size)
1988 set_other_error (mismatch_detail, idx,
1989 _("invalid shift amount"));
1994 case AARCH64_OPND_IMM_VLSL:
1995 size = aarch64_get_qualifier_esize (qualifier);
1996 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1998 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2004 case AARCH64_OPND_IMM_VLSR:
2005 size = aarch64_get_qualifier_esize (qualifier);
2006 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2008 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2013 case AARCH64_OPND_SIMD_IMM:
2014 case AARCH64_OPND_SIMD_IMM_SFT:
2015 /* Qualifier check. */
2018 case AARCH64_OPND_QLF_LSL:
2019 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2021 set_other_error (mismatch_detail, idx,
2022 _("invalid shift operator"));
2026 case AARCH64_OPND_QLF_MSL:
2027 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2029 set_other_error (mismatch_detail, idx,
2030 _("invalid shift operator"));
2034 case AARCH64_OPND_QLF_NIL:
2035 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2037 set_other_error (mismatch_detail, idx,
2038 _("shift is not permitted"));
2046 /* Is the immediate valid? */
2048 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2050 /* uimm8 or simm8 */
2051 if (!value_in_range_p (opnd->imm.value, -128, 255))
2053 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2057 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2060 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2061 ffffffffgggggggghhhhhhhh'. */
2062 set_other_error (mismatch_detail, idx,
2063 _("invalid value for immediate"));
2066 /* Is the shift amount valid? */
2067 switch (opnd->shifter.kind)
2069 case AARCH64_MOD_LSL:
2070 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2071 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2073 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2077 if (!value_aligned_p (opnd->shifter.amount, 8))
2079 set_unaligned_error (mismatch_detail, idx, 8);
2083 case AARCH64_MOD_MSL:
2084 /* Only 8 and 16 are valid shift amount. */
2085 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2087 set_other_error (mismatch_detail, idx,
2088 _("shift amount expected to be 0 or 16"));
2093 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2095 set_other_error (mismatch_detail, idx,
2096 _("invalid shift operator"));
2103 case AARCH64_OPND_FPIMM:
2104 case AARCH64_OPND_SIMD_FPIMM:
2105 if (opnd->imm.is_fp == 0)
2107 set_other_error (mismatch_detail, idx,
2108 _("floating-point immediate expected"));
2111 /* The value is expected to be an 8-bit floating-point constant with
2112 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2113 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2115 if (!value_in_range_p (opnd->imm.value, 0, 255))
2117 set_other_error (mismatch_detail, idx,
2118 _("immediate out of range"));
2121 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2123 set_other_error (mismatch_detail, idx,
2124 _("invalid shift operator"));
2129 case AARCH64_OPND_SVE_PATTERN_SCALED:
2130 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2131 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2133 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2143 case AARCH64_OPND_CLASS_CP_REG:
2144 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2145 valid range: C0 - C15. */
2146 if (opnd->reg.regno > 15)
2148 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2153 case AARCH64_OPND_CLASS_SYSTEM:
2156 case AARCH64_OPND_PSTATEFIELD:
2157 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2160 The immediate must be #0 or #1. */
2161 if ((opnd->pstatefield == 0x03 /* UAO. */
2162 || opnd->pstatefield == 0x04) /* PAN. */
2163 && opnds[1].imm.value > 1)
2165 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2168 /* MSR SPSel, #uimm4
2169 Uses uimm4 as a control value to select the stack pointer: if
2170 bit 0 is set it selects the current exception level's stack
2171 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2172 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2173 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2175 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2184 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2185 /* Get the upper bound for the element index. */
2186 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2187 /* Index out-of-range. */
2188 if (!value_in_range_p (opnd->reglane.index, 0, num))
2190 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2193 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2194 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2195 number is encoded in "size:M:Rm":
2201 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2202 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2204 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2209 case AARCH64_OPND_CLASS_MODIFIED_REG:
2210 assert (idx == 1 || idx == 2);
2213 case AARCH64_OPND_Rm_EXT:
2214 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2215 && opnd->shifter.kind != AARCH64_MOD_LSL)
2217 set_other_error (mismatch_detail, idx,
2218 _("extend operator expected"));
2221 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2222 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2223 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2225 if (!aarch64_stack_pointer_p (opnds + 0)
2226 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2228 if (!opnd->shifter.operator_present)
2230 set_other_error (mismatch_detail, idx,
2231 _("missing extend operator"));
2234 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2236 set_other_error (mismatch_detail, idx,
2237 _("'LSL' operator not allowed"));
2241 assert (opnd->shifter.operator_present /* Default to LSL. */
2242 || opnd->shifter.kind == AARCH64_MOD_LSL);
2243 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2245 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2248 /* In the 64-bit form, the final register operand is written as Wm
2249 for all but the (possibly omitted) UXTX/LSL and SXTX
2251 N.B. GAS allows X register to be used with any operator as a
2252 programming convenience. */
2253 if (qualifier == AARCH64_OPND_QLF_X
2254 && opnd->shifter.kind != AARCH64_MOD_LSL
2255 && opnd->shifter.kind != AARCH64_MOD_UXTX
2256 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2258 set_other_error (mismatch_detail, idx, _("W register expected"));
2263 case AARCH64_OPND_Rm_SFT:
2264 /* ROR is not available to the shifted register operand in
2265 arithmetic instructions. */
2266 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2268 set_other_error (mismatch_detail, idx,
2269 _("shift operator expected"));
2272 if (opnd->shifter.kind == AARCH64_MOD_ROR
2273 && opcode->iclass != log_shift)
2275 set_other_error (mismatch_detail, idx,
2276 _("'ROR' operator not allowed"));
2279 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2280 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2282 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2299 /* Main entrypoint for the operand constraint checking.
2301 Return 1 if operands of *INST meet the constraint applied by the operand
2302 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2303 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2304 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2305 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2306 error kind when it is notified that an instruction does not pass the check).
2308 Un-determined operand qualifiers may get established during the process. */
/* Top-level operand constraint check: tied-operand check, then qualifier
   matching, then per-operand general constraints.
   NOTE(review): lines are elided in this listing (return type, braces,
   returns); code kept byte-identical.  */
2311 aarch64_match_operands_constraint (aarch64_inst *inst,
2312 aarch64_operand_error *mismatch_detail)
2316 DEBUG_TRACE ("enter");
2318 /* Check for cases where a source register needs to be the same as the
2319 destination register. Do this before matching qualifiers since if
2320 an instruction has both invalid tying and invalid qualifiers,
2321 the error about qualifiers would suggest several alternative
2322 instructions that also have invalid tying. */
2323 i = inst->opcode->tied_operand;
2324 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2326 if (mismatch_detail)
2328 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2329 mismatch_detail->index = i;
2330 mismatch_detail->error = NULL;
2335 /* Match operands' qualifier.
2336 *INST has already had qualifier establish for some, if not all, of
2337 its operands; we need to find out whether these established
2338 qualifiers match one of the qualifier sequence in
2339 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2340 with the corresponding qualifier in such a sequence.
2341 Only basic operand constraint checking is done here; the more thorough
2342 constraint checking will carried out by operand_general_constraint_met_p,
2343 which has be to called after this in order to get all of the operands'
2344 qualifiers established. */
2345 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2347 DEBUG_TRACE ("FAIL on operand qualifier matching");
2348 if (mismatch_detail)
2350 /* Return an error type to indicate that it is the qualifier
2351 matching failure; we don't care about which operand as there
2352 are enough information in the opcode table to reproduce it. */
2353 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2354 mismatch_detail->index = -1;
2355 mismatch_detail->error = NULL;
2360 /* Match operands' constraint. */
2361 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2363 enum aarch64_opnd type = inst->opcode->operands[i];
2364 if (type == AARCH64_OPND_NIL)
2366 if (inst->operands[i].skip)
2368 DEBUG_TRACE ("skip the incomplete operand %d", i);
2371 if (operand_general_constraint_met_p (inst->operands, i, type,
2372 inst->opcode, mismatch_detail) == 0)
2374 DEBUG_TRACE ("FAIL on operand %d", i);
2379 DEBUG_TRACE ("PASS");
2384 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2385 Also updates the TYPE of each INST->OPERANDS with the corresponding
2386 value of OPCODE->OPERANDS.
2388 Note that some operand qualifiers may need to be manually cleared by
2389 the caller before it further calls the aarch64_opcode_encode; by
2390 doing this, it helps the qualifier matching facilities work
/* Swap INST's opcode for OPCODE, refreshing each operand's TYPE from the
   new opcode's operand list; returns the previous opcode.  */
const aarch64_opcode*
2394 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2397 const aarch64_opcode *old = inst->opcode;
2399 inst->opcode = opcode;
2401 /* Update the operand types. */
2402 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2404 inst->operands[i].type = opcode->operands[i];
2405 if (opcode->operands[i] == AARCH64_OPND_NIL)
2409 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
/* Linear search for OPERAND in the NIL-terminated OPERANDS array.
   NOTE(review): the return statements are elided in this listing.  */
2415 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2418 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2419 if (operands[i] == operand)
2421 else if (operands[i] == AARCH64_OPND_NIL)
2426 /* R0...R30, followed by FOR31. */
/* Expand to a 32-entry initializer: R(0)..R(30) then FOR31, the
   register-31 special case (sp/zr or a plain name).  */
#define BANK(R, FOR31) \
2428 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2429 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2430 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2431 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2432 /* [0][0] 32-bit integer regs with sp Wn
2433 [0][1] 64-bit integer regs with sp Xn sf=1
2434 [1][0] 32-bit integer regs with #0 Wn
2435 [1][1] 64-bit integer regs with #0 Xn sf=1 */
/* Integer register name table, indexed [has_zr][is_64][regno]
   (see get_int_reg_name below).  */
2436 static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
2439 { BANK (R32, "wsp"), BANK (R64, "sp") },
2440 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2445 /* Names of the SVE vector registers, first with .S suffixes,
2446 then with .D suffixes. */
/* SVE vector register names: row 0 with .s suffixes, row 1 with .d.  */
2448 static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
2451 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2457 /* Return the integer register name.
2458 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
/* Look up REGNO's name in int_reg, choosing W/X by QUALIFIER element size
   and the SP-vs-ZR bank by SP_REG_P.  */
static inline const char *
2461 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2463 const int has_zr = sp_reg_p ? 0 : 1;
2464 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2465 return int_reg[has_zr][is_64][regno];
2468 /* Like get_int_reg_name, but IS_64 is always 1. */
/* As get_int_reg_name, but always the 64-bit (Xn) bank.  */
static inline const char *
2471 get_64bit_int_reg_name (int regno, int sp_reg_p)
2473 const int has_zr = sp_reg_p ? 0 : 1;
2474 return int_reg[has_zr][1][regno];
2477 /* Get the name of the integer offset register in OPND, using the shift type
2478 to decide whether it's a word or doubleword. */
/* Name of OPND's offset register: W form for the 32-bit extends
   (UXTW/SXTW), X form for LSL/SXTX.  */
static inline const char *
2481 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2483 switch (opnd->shifter.kind)
2485 case AARCH64_MOD_UXTW:
2486 case AARCH64_MOD_SXTW:
2487 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2489 case AARCH64_MOD_LSL:
2490 case AARCH64_MOD_SXTX:
2491 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2498 /* Get the name of the SVE vector offset register in OPND, using the operand
2499 qualifier to decide whether the suffix should be .S or .D. */
/* SVE vector register name with .s/.d suffix selected by QUALIFIER.  */
static inline const char *
2502 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2504 assert (qualifier == AARCH64_OPND_QLF_S_S
2505 || qualifier == AARCH64_OPND_QLF_S_D);
2506 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2509 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2529 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2530 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2531 (depending on the type of the instruction). IMM8 will be expanded to a
2532 single-precision floating-point value (SIZE == 4) or a double-precision
2533 floating-point value (SIZE == 8). A half-precision floating-point value
2534 (SIZE == 2) is expanded to a single-precision floating-point value. The
2535 expanded value is returned. */
/* Expand the 8-bit encoded FP constant IMM8 (sign:3-bit exp:4-bit frac)
   into an IEEE single (SIZE 4 or 2) or double (SIZE 8) bit pattern.
   NOTE(review): listing is decimated -- the if/else skeleton, the
   assembly of the final return value, and the unsupported-size abort
   are partially elided; code kept byte-identical.  */
2538 expand_fp_imm (int size, uint32_t imm8)
2541 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2543 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2544 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2545 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2546 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2547 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2550 imm = (imm8_7 << (63-32)) /* imm8<7> */
2551 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6) */
2552 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2553 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2554 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2557 else if (size == 4 || size == 2)
2559 imm = (imm8_7 << 31) /* imm8<7> */
2560 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2561 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2562 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2566 /* An unsupported size. */
2573 /* Produce the string representation of the register list operand *OPND
2574 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2575 the register name that comes before the register number, such as "v". */
2577 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2580 const int num_regs = opnd->reglist.num_regs;
2581 const int first_reg = opnd->reglist.first_regno;
2582 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2583 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2584 char tb[8]; /* Temporary buffer. */
2586 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2587 assert (num_regs >= 1 && num_regs <= 4);
2589 /* Prepare the index if any. */
2590 if (opnd->reglist.has_index)
2591 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2595 /* The hyphenated form is preferred for disassembly if there are
2596 more than two registers in the list, and the register numbers
2597 are monotonically increasing in increments of one. */
2598 if (num_regs > 2 && last_reg > first_reg)
2599 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2600 prefix, last_reg, qlf_name, tb);
2603 const int reg0 = first_reg;
2604 const int reg1 = (first_reg + 1) & 0x1f;
2605 const int reg2 = (first_reg + 2) & 0x1f;
2606 const int reg3 = (first_reg + 3) & 0x1f;
2611 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2614 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2615 prefix, reg1, qlf_name, tb);
2618 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2619 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2620 prefix, reg2, qlf_name, tb);
2623 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2624 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2625 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2631 /* Print the register+immediate address in OPND to BUF, which has SIZE
2632 characters. BASE is the name of the base register. */
2635 print_immediate_offset_address (char *buf, size_t size,
2636 const aarch64_opnd_info *opnd,
2639 if (opnd->addr.writeback)
2641 if (opnd->addr.preind)
2642 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2644 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2648 if (opnd->addr.offset.imm)
2649 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2651 snprintf (buf, size, "[%s]", base);
2655 /* Produce the string representation of the register offset address operand
2656 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2657 the names of the base and offset registers. */
2659 print_register_offset_address (char *buf, size_t size,
2660 const aarch64_opnd_info *opnd,
2661 const char *base, const char *offset)
2663 char tb[16]; /* Temporary buffer. */
2664 bfd_boolean print_extend_p = TRUE;
2665 bfd_boolean print_amount_p = TRUE;
2666 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2668 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2669 || !opnd->shifter.amount_present))
2671 /* Not print the shift/extend amount when the amount is zero and
2672 when it is not the special case of 8-bit load/store instruction. */
2673 print_amount_p = FALSE;
2674 /* Likewise, no need to print the shift operator LSL in such a
2676 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2677 print_extend_p = FALSE;
2680 /* Prepare for the extend/shift. */
2684 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2685 opnd->shifter.amount);
2687 snprintf (tb, sizeof (tb), ",%s", shift_name);
2692 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2695 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2696 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2697 PC, PCREL_P and ADDRESS are used to pass in and return information about
2698 the PC-relative address calculation, where the PC value is passed in
2699 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2700 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2701 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2703 The function serves both the disassembler and the assembler diagnostics
2704 issuer, which is the reason why it lives in this file. */
2707 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2708 const aarch64_opcode *opcode,
2709 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2713 const char *name = NULL;
2714 const aarch64_opnd_info *opnd = opnds + idx;
2715 enum aarch64_modifier_kind kind;
2716 uint64_t addr, enum_value;
2724 case AARCH64_OPND_Rd:
2725 case AARCH64_OPND_Rn:
2726 case AARCH64_OPND_Rm:
2727 case AARCH64_OPND_Rt:
2728 case AARCH64_OPND_Rt2:
2729 case AARCH64_OPND_Rs:
2730 case AARCH64_OPND_Ra:
2731 case AARCH64_OPND_Rt_SYS:
2732 case AARCH64_OPND_PAIRREG:
2733 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2734 the <ic_op>, therefore we use opnd->present to override the
2735 generic optional-ness information. */
2736 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2738 /* Omit the operand, e.g. RET. */
2739 if (optional_operand_p (opcode, idx)
2740 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2742 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2743 || opnd->qualifier == AARCH64_OPND_QLF_X);
2744 snprintf (buf, size, "%s",
2745 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2748 case AARCH64_OPND_Rd_SP:
2749 case AARCH64_OPND_Rn_SP:
2750 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2751 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2752 || opnd->qualifier == AARCH64_OPND_QLF_X
2753 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2754 snprintf (buf, size, "%s",
2755 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2758 case AARCH64_OPND_Rm_EXT:
2759 kind = opnd->shifter.kind;
2760 assert (idx == 1 || idx == 2);
2761 if ((aarch64_stack_pointer_p (opnds)
2762 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2763 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2764 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2765 && kind == AARCH64_MOD_UXTW)
2766 || (opnd->qualifier == AARCH64_OPND_QLF_X
2767 && kind == AARCH64_MOD_UXTX)))
2769 /* 'LSL' is the preferred form in this case. */
2770 kind = AARCH64_MOD_LSL;
2771 if (opnd->shifter.amount == 0)
2773 /* Shifter omitted. */
2774 snprintf (buf, size, "%s",
2775 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2779 if (opnd->shifter.amount)
2780 snprintf (buf, size, "%s, %s #%" PRIi64,
2781 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2782 aarch64_operand_modifiers[kind].name,
2783 opnd->shifter.amount);
2785 snprintf (buf, size, "%s, %s",
2786 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2787 aarch64_operand_modifiers[kind].name);
2790 case AARCH64_OPND_Rm_SFT:
2791 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2792 || opnd->qualifier == AARCH64_OPND_QLF_X)
2793 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2794 snprintf (buf, size, "%s",
2795 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2797 snprintf (buf, size, "%s, %s #%" PRIi64,
2798 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2799 aarch64_operand_modifiers[opnd->shifter.kind].name,
2800 opnd->shifter.amount);
2803 case AARCH64_OPND_Fd:
2804 case AARCH64_OPND_Fn:
2805 case AARCH64_OPND_Fm:
2806 case AARCH64_OPND_Fa:
2807 case AARCH64_OPND_Ft:
2808 case AARCH64_OPND_Ft2:
2809 case AARCH64_OPND_Sd:
2810 case AARCH64_OPND_Sn:
2811 case AARCH64_OPND_Sm:
2812 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2816 case AARCH64_OPND_Vd:
2817 case AARCH64_OPND_Vn:
2818 case AARCH64_OPND_Vm:
2819 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2820 aarch64_get_qualifier_name (opnd->qualifier));
2823 case AARCH64_OPND_Ed:
2824 case AARCH64_OPND_En:
2825 case AARCH64_OPND_Em:
2826 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2827 aarch64_get_qualifier_name (opnd->qualifier),
2828 opnd->reglane.index);
2831 case AARCH64_OPND_VdD1:
2832 case AARCH64_OPND_VnD1:
2833 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2836 case AARCH64_OPND_LVn:
2837 case AARCH64_OPND_LVt:
2838 case AARCH64_OPND_LVt_AL:
2839 case AARCH64_OPND_LEt:
2840 print_register_list (buf, size, opnd, "v");
2843 case AARCH64_OPND_SVE_Pd:
2844 case AARCH64_OPND_SVE_Pg3:
2845 case AARCH64_OPND_SVE_Pg4_5:
2846 case AARCH64_OPND_SVE_Pg4_10:
2847 case AARCH64_OPND_SVE_Pg4_16:
2848 case AARCH64_OPND_SVE_Pm:
2849 case AARCH64_OPND_SVE_Pn:
2850 case AARCH64_OPND_SVE_Pt:
2851 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
2852 snprintf (buf, size, "p%d", opnd->reg.regno);
2853 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
2854 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
2855 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
2856 aarch64_get_qualifier_name (opnd->qualifier));
2858 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
2859 aarch64_get_qualifier_name (opnd->qualifier));
2862 case AARCH64_OPND_SVE_Za_5:
2863 case AARCH64_OPND_SVE_Za_16:
2864 case AARCH64_OPND_SVE_Zd:
2865 case AARCH64_OPND_SVE_Zm_5:
2866 case AARCH64_OPND_SVE_Zm_16:
2867 case AARCH64_OPND_SVE_Zn:
2868 case AARCH64_OPND_SVE_Zt:
2869 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
2870 snprintf (buf, size, "z%d", opnd->reg.regno);
2872 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
2873 aarch64_get_qualifier_name (opnd->qualifier));
2876 case AARCH64_OPND_SVE_ZnxN:
2877 case AARCH64_OPND_SVE_ZtxN:
2878 print_register_list (buf, size, opnd, "z");
2881 case AARCH64_OPND_SVE_Zn_INDEX:
2882 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2883 aarch64_get_qualifier_name (opnd->qualifier),
2884 opnd->reglane.index);
2887 case AARCH64_OPND_Cn:
2888 case AARCH64_OPND_Cm:
2889 snprintf (buf, size, "C%d", opnd->reg.regno);
2892 case AARCH64_OPND_IDX:
2893 case AARCH64_OPND_IMM:
2894 case AARCH64_OPND_WIDTH:
2895 case AARCH64_OPND_UIMM3_OP1:
2896 case AARCH64_OPND_UIMM3_OP2:
2897 case AARCH64_OPND_BIT_NUM:
2898 case AARCH64_OPND_IMM_VLSL:
2899 case AARCH64_OPND_IMM_VLSR:
2900 case AARCH64_OPND_SHLL_IMM:
2901 case AARCH64_OPND_IMM0:
2902 case AARCH64_OPND_IMMR:
2903 case AARCH64_OPND_IMMS:
2904 case AARCH64_OPND_FBITS:
2905 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
/* Enumerated SVE operands: print the symbolic name when the lookup array
   has an entry for the value, otherwise fall back to a raw #immediate.  */
2908 case AARCH64_OPND_SVE_PATTERN:
2909 if (optional_operand_p (opcode, idx)
2910 && opnd->imm.value == get_optional_operand_default_value (opcode))
2912 enum_value = opnd->imm.value;
2913 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
2914 if (aarch64_sve_pattern_array[enum_value])
2915 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
2917 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2920 case AARCH64_OPND_SVE_PATTERN_SCALED:
2921 if (optional_operand_p (opcode, idx)
2922 && !opnd->shifter.operator_present
2923 && opnd->imm.value == get_optional_operand_default_value (opcode))
2925 enum_value = opnd->imm.value;
2926 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
2927 if (aarch64_sve_pattern_array[opnd->imm.value])
2928 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
2930 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2931 if (opnd->shifter.operator_present)
2933 size_t len = strlen (buf);
2934 snprintf (buf + len, size - len, ", %s #%" PRIi64,
2935 aarch64_operand_modifiers[opnd->shifter.kind].name,
2936 opnd->shifter.amount);
2940 case AARCH64_OPND_SVE_PRFOP:
2941 enum_value = opnd->imm.value;
2942 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
2943 if (aarch64_sve_prfop_array[enum_value])
2944 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
2946 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2949 case AARCH64_OPND_IMM_MOV:
2950 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2952 case 4: /* e.g. MOV Wd, #<imm32>. */
2954 int imm32 = opnd->imm.value;
2955 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2958 case 8: /* e.g. MOV Xd, #<imm64>. */
2959 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2960 opnd->imm.value, opnd->imm.value);
2962 default: assert (0);
2966 case AARCH64_OPND_FPIMM0:
2967 snprintf (buf, size, "#0.0");
2970 case AARCH64_OPND_LIMM:
2971 case AARCH64_OPND_AIMM:
2972 case AARCH64_OPND_HALF:
2973 if (opnd->shifter.amount)
2974 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
2975 opnd->shifter.amount);
2977 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2980 case AARCH64_OPND_SIMD_IMM:
2981 case AARCH64_OPND_SIMD_IMM_SFT:
2982 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2983 || opnd->shifter.kind == AARCH64_MOD_NONE)
2984 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2986 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
2987 aarch64_operand_modifiers[opnd->shifter.kind].name,
2988 opnd->shifter.amount);
2991 case AARCH64_OPND_FPIMM:
2992 case AARCH64_OPND_SIMD_FPIMM:
2993 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2995 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2998 c.i = expand_fp_imm (2, opnd->imm.value);
2999 snprintf (buf, size, "#%.18e", c.f);
3002 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3005 c.i = expand_fp_imm (4, opnd->imm.value);
3006 snprintf (buf, size, "#%.18e", c.f);
3009 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3012 c.i = expand_fp_imm (8, opnd->imm.value);
3013 snprintf (buf, size, "#%.18e", c.d);
3016 default: assert (0);
3020 case AARCH64_OPND_CCMP_IMM:
3021 case AARCH64_OPND_NZCV:
3022 case AARCH64_OPND_EXCEPTION:
3023 case AARCH64_OPND_UIMM4:
3024 case AARCH64_OPND_UIMM7:
3025 if (optional_operand_p (opcode, idx) == TRUE
3026 && (opnd->imm.value ==
3027 (int64_t) get_optional_operand_default_value (opcode)))
3028 /* Omit the operand, e.g. DCPS1. */
3030 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3033 case AARCH64_OPND_COND:
3034 case AARCH64_OPND_COND1:
3035 snprintf (buf, size, "%s", opnd->cond->names[0]);
3038 case AARCH64_OPND_ADDR_ADRP:
3039 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3045 /* This is not necessary during the disassembling, as print_address_func
3046 in the disassemble_info will take care of the printing. But some
3047 other callers may be still interested in getting the string in *STR,
3048 so here we do snprintf regardless. */
3049 snprintf (buf, size, "#0x%" PRIx64, addr);
3052 case AARCH64_OPND_ADDR_PCREL14:
3053 case AARCH64_OPND_ADDR_PCREL19:
3054 case AARCH64_OPND_ADDR_PCREL21:
3055 case AARCH64_OPND_ADDR_PCREL26:
3056 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3061 /* This is not necessary during the disassembling, as print_address_func
3062 in the disassemble_info will take care of the printing. But some
3063 other callers may be still interested in getting the string in *STR,
3064 so here we do snprintf regardless. */
3065 snprintf (buf, size, "#0x%" PRIx64, addr);
3068 case AARCH64_OPND_ADDR_SIMPLE:
3069 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3070 case AARCH64_OPND_SIMD_ADDR_POST:
3071 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3072 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3074 if (opnd->addr.offset.is_reg)
3075 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3077 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3080 snprintf (buf, size, "[%s]", name);
3083 case AARCH64_OPND_ADDR_REGOFF:
3084 case AARCH64_OPND_SVE_ADDR_RR:
3085 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3086 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3087 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3088 case AARCH64_OPND_SVE_ADDR_RX:
3089 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3090 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3091 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3092 print_register_offset_address
3093 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3094 get_offset_int_reg_name (opnd));
3097 case AARCH64_OPND_SVE_ADDR_RZ:
3098 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3099 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3100 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3101 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3102 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3103 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3104 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3105 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3106 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3107 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3108 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3109 print_register_offset_address
3110 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3111 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3114 case AARCH64_OPND_ADDR_SIMM7:
3115 case AARCH64_OPND_ADDR_SIMM9:
3116 case AARCH64_OPND_ADDR_SIMM9_2:
3117 case AARCH64_OPND_SVE_ADDR_RI_U6:
3118 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3119 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3120 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3121 print_immediate_offset_address
3122 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3125 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3126 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3127 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3128 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3129 print_immediate_offset_address
3131 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3134 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3135 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3136 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3137 print_register_offset_address
3139 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3140 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3143 case AARCH64_OPND_ADDR_UIMM12:
3144 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3145 if (opnd->addr.offset.imm)
3146 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3148 snprintf (buf, size, "[%s]", name);
/* Named, non-deprecated system registers are printed by name; any other
   encoding falls through to the numeric s*_*_c*_c*_* form below.  */
3151 case AARCH64_OPND_SYSREG:
3152 for (i = 0; aarch64_sys_regs[i].name; ++i)
3153 if (aarch64_sys_regs[i].value == opnd->sysreg
3154 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3156 if (aarch64_sys_regs[i].name)
3157 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3160 /* Implementation defined system register. */
3161 unsigned int value = opnd->sysreg;
3162 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3163 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3168 case AARCH64_OPND_PSTATEFIELD:
3169 for (i = 0; aarch64_pstatefields[i].name; ++i)
3170 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3172 assert (aarch64_pstatefields[i].name);
3173 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3176 case AARCH64_OPND_SYSREG_AT:
3177 case AARCH64_OPND_SYSREG_DC:
3178 case AARCH64_OPND_SYSREG_IC:
3179 case AARCH64_OPND_SYSREG_TLBI:
3180 snprintf (buf, size, "%s", opnd->sysins_op->name);
3183 case AARCH64_OPND_BARRIER:
3184 snprintf (buf, size, "%s", opnd->barrier->name);
3187 case AARCH64_OPND_BARRIER_ISB:
3188 /* Operand can be omitted, e.g. in DCPS1. */
3189 if (! optional_operand_p (opcode, idx)
3190 || (opnd->barrier->value
3191 != get_optional_operand_default_value (opcode)))
3192 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3195 case AARCH64_OPND_PRFOP:
3196 if (opnd->prfop->name != NULL)
3197 snprintf (buf, size, "%s", opnd->prfop->name);
3199 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3202 case AARCH64_OPND_BARRIER_PSB:
3203 snprintf (buf, size, "%s", opnd->hint_option->name);
3211 #define CPENC(op0,op1,crn,crm,op2) \
3212 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3213 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3214 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3215 /* for 3.9.10 System Instructions */
3216 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3238 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3243 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3248 #define F_HASXT 0x4 /* System instruction register <Xt>
3252 /* TODO there are two more issues need to be resolved
3253 1. handle read-only and write-only system registers
3254 2. handle cpu-implementation-defined system registers. */
3255 const aarch64_sys_reg aarch64_sys_regs [] =
3257 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3258 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3259 { "elr_el1", CPEN_(0,C0,1), 0 },
3260 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3261 { "sp_el0", CPEN_(0,C1,0), 0 },
3262 { "spsel", CPEN_(0,C2,0), 0 },
3263 { "daif", CPEN_(3,C2,1), 0 },
3264 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3265 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3266 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3267 { "nzcv", CPEN_(3,C2,0), 0 },
3268 { "fpcr", CPEN_(3,C4,0), 0 },
3269 { "fpsr", CPEN_(3,C4,1), 0 },
3270 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3271 { "dlr_el0", CPEN_(3,C5,1), 0 },
3272 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3273 { "elr_el2", CPEN_(4,C0,1), 0 },
3274 { "sp_el1", CPEN_(4,C1,0), 0 },
3275 { "spsr_irq", CPEN_(4,C3,0), 0 },
3276 { "spsr_abt", CPEN_(4,C3,1), 0 },
3277 { "spsr_und", CPEN_(4,C3,2), 0 },
3278 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3279 { "spsr_el3", CPEN_(6,C0,0), 0 },
3280 { "elr_el3", CPEN_(6,C0,1), 0 },
3281 { "sp_el2", CPEN_(6,C1,0), 0 },
3282 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3283 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3284 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3285 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3286 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3287 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3288 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3289 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3290 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3291 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3292 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3293 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3294 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3295 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3296 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3297 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3298 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3299 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3300 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3301 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3302 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3303 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3304 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3305 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3306 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3307 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3308 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3309 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3310 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3311 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3312 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3313 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3314 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3315 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3316 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3317 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3318 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3319 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3320 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3321 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3322 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3323 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3324 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3325 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3326 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3327 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3328 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3329 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3330 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3331 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3332 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3333 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3334 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3335 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3336 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3337 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3338 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3339 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3340 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3341 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3342 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3343 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3344 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3345 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3346 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3347 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3348 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3349 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3350 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3351 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3352 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3353 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3354 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3355 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3356 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3357 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3358 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3359 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3360 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3361 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3362 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3363 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3364 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3365 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3366 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3367 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3368 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3369 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3370 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3371 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3372 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3373 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3374 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3375 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3376 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3377 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3378 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3379 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3380 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3381 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3382 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3383 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3384 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3385 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3386 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3387 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3388 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3389 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3390 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3391 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3392 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3393 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3394 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3395 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3396 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3397 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3398 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3399 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3400 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3401 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3402 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3403 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3404 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3405 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3406 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3407 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3408 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3409 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3410 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3411 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3412 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3413 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3414 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3415 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3416 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3417 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3418 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3419 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3420 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3421 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3422 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3423 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3424 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3425 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3426 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3427 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3428 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3429 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3430 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3431 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3432 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3433 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3434 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3435 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3436 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3437 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3438 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3439 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3440 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3441 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3442 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3443 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3444 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3445 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3446 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3447 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3448 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3449 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3450 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3451 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3452 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3453 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3454 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3455 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3456 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3457 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3458 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3459 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3460 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3461 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3462 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3463 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3464 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3465 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3466 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3467 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3468 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3469 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3470 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3471 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3472 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3473 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3474 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3475 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3476 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3477 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3478 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3479 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3480 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3481 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3482 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3483 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3484 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3485 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3486 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3487 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3488 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3489 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3490 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3491 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3492 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3493 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3494 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3495 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3496 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3497 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3498 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3499 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3500 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3501 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3502 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3503 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3504 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3505 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3506 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3507 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3508 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3509 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3510 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3511 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3512 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3513 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3514 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3515 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3516 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3517 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3518 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3519 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3520 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3521 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3522 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3523 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3524 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3525 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3526 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3527 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3528 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3529 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3530 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3531 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3532 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3533 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3534 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3535 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3536 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3537 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3538 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3539 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3540 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3541 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3542 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3543 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3544 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3545 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3546 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3547 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3548 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3549 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3550 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3551 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3552 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3553 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3554 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3555 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3556 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3557 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3558 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3559 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3560 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3561 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3562 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3563 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3564 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3565 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3566 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3567 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3568 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3569 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3570 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3571 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3572 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3573 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3574 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3575 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3576 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3577 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3578 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3579 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3580 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3581 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3582 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3583 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3584 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3585 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3586 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3587 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3588 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3589 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3590 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3591 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3592 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3593 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3594 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3595 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3596 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3597 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3598 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3599 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3600 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3601 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3602 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3603 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3604 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3605 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3606 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3607 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3608 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3609 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3610 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3611 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3612 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3613 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3614 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3615 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3616 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3617 { 0, CPENC(0,0,0,0,0), 0 },
3621 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3623 return (reg->flags & F_DEPRECATED) != 0;
3627 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3628 const aarch64_sys_reg *reg)
3630 if (!(reg->flags & F_ARCHEXT))
3633 /* PAN. Values are from aarch64_sys_regs. */
3634 if (reg->value == CPEN_(0,C2,3)
3635 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3638 /* Virtualization host extensions: system registers. */
3639 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3640 || reg->value == CPENC (3, 4, C13, C0, 1)
3641 || reg->value == CPENC (3, 4, C14, C3, 0)
3642 || reg->value == CPENC (3, 4, C14, C3, 1)
3643 || reg->value == CPENC (3, 4, C14, C3, 2))
3644 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3647 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3648 if ((reg->value == CPEN_ (5, C0, 0)
3649 || reg->value == CPEN_ (5, C0, 1)
3650 || reg->value == CPENC (3, 5, C1, C0, 0)
3651 || reg->value == CPENC (3, 5, C1, C0, 2)
3652 || reg->value == CPENC (3, 5, C2, C0, 0)
3653 || reg->value == CPENC (3, 5, C2, C0, 1)
3654 || reg->value == CPENC (3, 5, C2, C0, 2)
3655 || reg->value == CPENC (3, 5, C5, C1, 0)
3656 || reg->value == CPENC (3, 5, C5, C1, 1)
3657 || reg->value == CPENC (3, 5, C5, C2, 0)
3658 || reg->value == CPENC (3, 5, C6, C0, 0)
3659 || reg->value == CPENC (3, 5, C10, C2, 0)
3660 || reg->value == CPENC (3, 5, C10, C3, 0)
3661 || reg->value == CPENC (3, 5, C12, C0, 0)
3662 || reg->value == CPENC (3, 5, C13, C0, 1)
3663 || reg->value == CPENC (3, 5, C14, C1, 0))
3664 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3667 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3668 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3669 || reg->value == CPENC (3, 5, C14, C2, 1)
3670 || reg->value == CPENC (3, 5, C14, C2, 2)
3671 || reg->value == CPENC (3, 5, C14, C3, 0)
3672 || reg->value == CPENC (3, 5, C14, C3, 1)
3673 || reg->value == CPENC (3, 5, C14, C3, 2))
3674 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3677 /* ARMv8.2 features. */
3679 /* ID_AA64MMFR2_EL1. */
3680 if (reg->value == CPENC (3, 0, C0, C7, 2)
3681 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3685 if (reg->value == CPEN_ (0, C2, 4)
3686 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3689 /* RAS extension. */
3691 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3692 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3693 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3694 || reg->value == CPENC (3, 0, C5, C3, 1)
3695 || reg->value == CPENC (3, 0, C5, C3, 2)
3696 || reg->value == CPENC (3, 0, C5, C3, 3)
3697 || reg->value == CPENC (3, 0, C5, C4, 0)
3698 || reg->value == CPENC (3, 0, C5, C4, 1)
3699 || reg->value == CPENC (3, 0, C5, C4, 2)
3700 || reg->value == CPENC (3, 0, C5, C4, 3)
3701 || reg->value == CPENC (3, 0, C5, C5, 0)
3702 || reg->value == CPENC (3, 0, C5, C5, 1))
3703 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3706 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3707 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3708 || reg->value == CPENC (3, 0, C12, C1, 1)
3709 || reg->value == CPENC (3, 4, C12, C1, 1))
3710 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3713 /* Statistical Profiling extension. */
3714 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3715 || reg->value == CPENC (3, 0, C9, C10, 1)
3716 || reg->value == CPENC (3, 0, C9, C10, 3)
3717 || reg->value == CPENC (3, 0, C9, C10, 7)
3718 || reg->value == CPENC (3, 0, C9, C9, 0)
3719 || reg->value == CPENC (3, 0, C9, C9, 2)
3720 || reg->value == CPENC (3, 0, C9, C9, 3)
3721 || reg->value == CPENC (3, 0, C9, C9, 4)
3722 || reg->value == CPENC (3, 0, C9, C9, 5)
3723 || reg->value == CPENC (3, 0, C9, C9, 6)
3724 || reg->value == CPENC (3, 0, C9, C9, 7)
3725 || reg->value == CPENC (3, 4, C9, C9, 0)
3726 || reg->value == CPENC (3, 5, C9, C9, 0))
3727 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3733 const aarch64_sys_reg aarch64_pstatefields [] =
3735 { "spsel", 0x05, 0 },
3736 { "daifset", 0x1e, 0 },
3737 { "daifclr", 0x1f, 0 },
3738 { "pan", 0x04, F_ARCHEXT },
3739 { "uao", 0x03, F_ARCHEXT },
3740 { 0, CPENC(0,0,0,0,0), 0 },
3744 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3745 const aarch64_sys_reg *reg)
3747 if (!(reg->flags & F_ARCHEXT))
3750 /* PAN. Values are from aarch64_pstatefields. */
3751 if (reg->value == 0x04
3752 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3755 /* UAO. Values are from aarch64_pstatefields. */
3756 if (reg->value == 0x03
3757 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3763 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3765 { "ialluis", CPENS(0,C7,C1,0), 0 },
3766 { "iallu", CPENS(0,C7,C5,0), 0 },
3767 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
3768 { 0, CPENS(0,0,0,0), 0 }
3771 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3773 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3774 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3775 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3776 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3777 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3778 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
3779 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
3780 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3781 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
3782 { 0, CPENS(0,0,0,0), 0 }
3785 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3787 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3788 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3789 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3790 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3791 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3792 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3793 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3794 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3795 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3796 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3797 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3798 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
3799 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
3800 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
3801 { 0, CPENS(0,0,0,0), 0 }
3804 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3806 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3807 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3808 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3809 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3810 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3811 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3812 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3813 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3814 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3815 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3816 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3817 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3818 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3819 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3820 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3821 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3822 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3823 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3824 { "alle2", CPENS(4,C8,C7,0), 0 },
3825 { "alle2is", CPENS(4,C8,C3,0), 0 },
3826 { "alle1", CPENS(4,C8,C7,4), 0 },
3827 { "alle1is", CPENS(4,C8,C3,4), 0 },
3828 { "alle3", CPENS(6,C8,C7,0), 0 },
3829 { "alle3is", CPENS(6,C8,C3,0), 0 },
3830 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3831 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3832 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3833 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3834 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3835 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3836 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3837 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3838 { 0, CPENS(0,0,0,0), 0 }
3842 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3844 return (sys_ins_reg->flags & F_HASXT) != 0;
3848 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3849 const aarch64_sys_ins_reg *reg)
3851 if (!(reg->flags & F_ARCHEXT))
3854 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3855 if (reg->value == CPENS (3, C7, C12, 1)
3856 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3859 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3860 if ((reg->value == CPENS (0, C7, C9, 0)
3861 || reg->value == CPENS (0, C7, C9, 1))
3862 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of instruction word INSN.
   NOTE(review): the shift count must stay below the width of int
   (HI - LO + 1 < 32); all uses in this file extract small fields.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3889 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3890 const aarch64_insn insn)
3892 int t = BITS (insn, 4, 0);
3893 int n = BITS (insn, 9, 5);
3894 int t2 = BITS (insn, 14, 10);
3898 /* Write back enabled. */
3899 if ((t == n || t2 == n) && n != 31)
/* Include the opcode description table as well as the operand description
   table.  */
3915 #define VERIFIER(x) verify_##x
3916 #include "aarch64-tbl.h"