/* aarch64-opc.c -- AArch64 opcode support.
   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "sysdep.h"
#include <assert.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

#include "opcode/aarch64.h"

#include "libiberty.h"

#include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Non-zero to enable library diagnostic tracing (DEBUG_AARCH64 builds only);
   set by the client (e.g. the assembler) at run time.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
/* Classification of the qualifier-sequence shapes of AdvSIMD instructions;
   used to pick the operand that carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Operand index (per data pattern) of the operand that determines the
   size:Q encoding.  N.B. the order must match enum data_pattern above.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
202 const aarch64_field fields[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* N: in logical (immediate) instructions. */
257 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
258 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
259 { 31, 1 }, /* sf: in integer data processing instructions. */
260 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
261 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
262 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
263 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
264 { 31, 1 }, /* b5: in the test bit and branch instructions. */
265 { 19, 5 }, /* b40: in the test bit and branch instructions. */
266 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
267 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
268 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
269 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
270 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
271 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
272 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
273 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
274 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
275 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
276 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
277 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
278 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
279 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
280 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
281 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
282 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
283 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
284 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
287 enum aarch64_operand_class
288 aarch64_get_operand_class (enum aarch64_opnd type)
290 return aarch64_operands[type].op_class;
294 aarch64_get_operand_name (enum aarch64_opnd type)
296 return aarch64_operands[type].name;
299 /* Get operand description string.
300 This is usually for the diagnosis purpose. */
302 aarch64_get_operand_desc (enum aarch64_opnd type)
304 return aarch64_operands[type].desc;
307 /* Table of all conditional affixes. */
308 const aarch64_cond aarch64_conds[16] =
313 {{"cc", "lo", "ul"}, 0x3},
329 get_cond_from_value (aarch64_insn value)
332 return &aarch64_conds[(unsigned int) value];
336 get_inverted_cond (const aarch64_cond *cond)
338 return &aarch64_conds[cond->value ^ 0x1];
341 /* Table describing the operand extension/shifting operators; indexed by
342 enum aarch64_modifier_kind.
344 The value column provides the most common values for encoding modifiers,
345 which enables table-driven encoding/decoding for the modifiers. */
346 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
365 enum aarch64_modifier_kind
366 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
368 return desc - aarch64_operand_modifiers;
372 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
374 return aarch64_operand_modifiers[kind].value;
377 enum aarch64_modifier_kind
378 aarch64_get_operand_modifier_from_value (aarch64_insn value,
379 bfd_boolean extend_p)
381 if (extend_p == TRUE)
382 return AARCH64_MOD_UXTB + value;
384 return AARCH64_MOD_LSL - value;
388 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
390 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
394 static inline bfd_boolean
395 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
397 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
401 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
421 /* Table describing the operands supported by the aliases of the HINT
424 The name column is the operand that is accepted for the alias. The value
425 column is the hint number of the alias. The list of operands is terminated
426 by NULL in the name column. */
428 const struct aarch64_name_value_pair aarch64_hint_options[] =
430 { "csync", 0x11 }, /* PSB CSYNC. */
434 /* op -> op: load = 0 instruction = 1 store = 2
436 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
437 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
438 const struct aarch64_name_value_pair aarch64_prfops[32] =
440 { "pldl1keep", B(0, 1, 0) },
441 { "pldl1strm", B(0, 1, 1) },
442 { "pldl2keep", B(0, 2, 0) },
443 { "pldl2strm", B(0, 2, 1) },
444 { "pldl3keep", B(0, 3, 0) },
445 { "pldl3strm", B(0, 3, 1) },
448 { "plil1keep", B(1, 1, 0) },
449 { "plil1strm", B(1, 1, 1) },
450 { "plil2keep", B(1, 2, 0) },
451 { "plil2strm", B(1, 2, 1) },
452 { "plil3keep", B(1, 3, 0) },
453 { "plil3strm", B(1, 3, 1) },
456 { "pstl1keep", B(2, 1, 0) },
457 { "pstl1strm", B(2, 1, 1) },
458 { "pstl2keep", B(2, 2, 0) },
459 { "pstl2strm", B(2, 2, 1) },
460 { "pstl3keep", B(2, 3, 0) },
461 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}

/* Return 1 iff VALUE is a multiple of ALIGN (ALIGN must be a power of 2).  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
/* A signed value fits in a field.
   Return 1 iff VALUE is representable as a WIDTH-bit two's complement
   signed integer, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.
   Return 1 iff VALUE is representable as a WIDTH-bit unsigned integer,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
517 /* Return 1 if OPERAND is SP or WSP. */
519 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
521 return ((aarch64_get_operand_class (operand->type)
522 == AARCH64_OPND_CLASS_INT_REG)
523 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
524 && operand->reg.regno == 31);
527 /* Return 1 if OPERAND is XZR or WZP. */
529 aarch64_zero_register_p (const aarch64_opnd_info *operand)
531 return ((aarch64_get_operand_class (operand->type)
532 == AARCH64_OPND_CLASS_INT_REG)
533 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
534 && operand->reg.regno == 31);
537 /* Return true if the operand *OPERAND that has the operand code
538 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
539 qualified by the qualifier TARGET. */
542 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
543 aarch64_opnd_qualifier_t target)
545 switch (operand->qualifier)
547 case AARCH64_OPND_QLF_W:
548 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
551 case AARCH64_OPND_QLF_X:
552 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
555 case AARCH64_OPND_QLF_WSP:
556 if (target == AARCH64_OPND_QLF_W
557 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
560 case AARCH64_OPND_QLF_SP:
561 if (target == AARCH64_OPND_QLF_X
562 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
572 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
573 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
575 Return NIL if more than one expected qualifiers are found. */
577 aarch64_opnd_qualifier_t
578 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
580 const aarch64_opnd_qualifier_t known_qlf,
587 When the known qualifier is NIL, we have to assume that there is only
588 one qualifier sequence in the *QSEQ_LIST and return the corresponding
589 qualifier directly. One scenario is that for instruction
590 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
591 which has only one possible valid qualifier sequence
593 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
594 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
596 Because the qualifier NIL has dual roles in the qualifier sequence:
597 it can mean no qualifier for the operand, or the qualifer sequence is
598 not in use (when all qualifiers in the sequence are NILs), we have to
599 handle this special case here. */
600 if (known_qlf == AARCH64_OPND_NIL)
602 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
603 return qseq_list[0][idx];
606 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
608 if (qseq_list[i][known_idx] == known_qlf)
611 /* More than one sequences are found to have KNOWN_QLF at
613 return AARCH64_OPND_NIL;
618 return qseq_list[saved_i][idx];
/* Categories of operand qualifier descriptions, selecting how the three
   data fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
642 /* Indexed by the operand qualifier enumerators. */
643 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
645 {0, 0, 0, "NIL", OQK_NIL},
647 /* Operand variant qualifiers.
649 element size, number of elements and common value for encoding. */
651 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
652 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
653 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
654 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
656 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
657 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
658 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
659 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
660 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
662 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
663 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
664 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
665 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
666 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
667 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
668 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
669 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
670 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
671 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
673 {0, 0, 0, "z", OQK_OPD_VARIANT},
674 {0, 0, 0, "m", OQK_OPD_VARIANT},
676 /* Qualifiers constraining the value range.
678 Lower bound, higher bound, unused. */
680 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
681 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
682 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
683 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
684 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
685 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
687 /* Qualifiers for miscellaneous purpose.
689 unused, unused and unused. */
694 {0, 0, 0, "retrieving", 0},
697 static inline bfd_boolean
698 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
700 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
704 static inline bfd_boolean
705 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
707 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
712 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
714 return aarch64_opnd_qualifiers[qualifier].desc;
717 /* Given an operand qualifier, return the expected data element size
718 of a qualified operand. */
720 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
722 assert (operand_variant_qualifier_p (qualifier) == TRUE);
723 return aarch64_opnd_qualifiers[qualifier].data0;
727 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
729 assert (operand_variant_qualifier_p (qualifier) == TRUE);
730 return aarch64_opnd_qualifiers[qualifier].data1;
734 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
736 assert (operand_variant_qualifier_p (qualifier) == TRUE);
737 return aarch64_opnd_qualifiers[qualifier].data2;
741 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
743 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
744 return aarch64_opnd_qualifiers[qualifier].data0;
748 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
750 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
751 return aarch64_opnd_qualifiers[qualifier].data1;
756 aarch64_verbose (const char *str, ...)
767 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
771 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
772 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
777 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
778 const aarch64_opnd_qualifier_t *qualifier)
781 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
783 aarch64_verbose ("dump_match_qualifiers:");
784 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
785 curr[i] = opnd[i].qualifier;
786 dump_qualifier_sequence (curr);
787 aarch64_verbose ("against");
788 dump_qualifier_sequence (qualifier);
790 #endif /* DEBUG_AARCH64 */
792 /* TODO improve this, we can have an extra field at the runtime to
793 store the number of operands rather than calculating it every time. */
796 aarch64_num_of_operands (const aarch64_opcode *opcode)
799 const enum aarch64_opnd *opnds = opcode->operands;
800 while (opnds[i++] != AARCH64_OPND_NIL)
803 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
807 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
808 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
810 N.B. on the entry, it is very likely that only some operands in *INST
811 have had their qualifiers been established.
813 If STOP_AT is not -1, the function will only try to match
814 the qualifier sequence for operands before and including the operand
815 of index STOP_AT; and on success *RET will only be filled with the first
816 (STOP_AT+1) qualifiers.
818 A couple examples of the matching algorithm:
826 Apart from serving the main encoding routine, this can also be called
827 during or after the operand decoding. */
830 aarch64_find_best_match (const aarch64_inst *inst,
831 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
832 int stop_at, aarch64_opnd_qualifier_t *ret)
836 const aarch64_opnd_qualifier_t *qualifiers;
838 num_opnds = aarch64_num_of_operands (inst->opcode);
841 DEBUG_TRACE ("SUCCEED: no operand");
845 if (stop_at < 0 || stop_at >= num_opnds)
846 stop_at = num_opnds - 1;
848 /* For each pattern. */
849 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
852 qualifiers = *qualifiers_list;
854 /* Start as positive. */
857 DEBUG_TRACE ("%d", i);
860 dump_match_qualifiers (inst->operands, qualifiers);
863 /* Most opcodes has much fewer patterns in the list.
864 First NIL qualifier indicates the end in the list. */
865 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
867 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
873 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
875 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
877 /* Either the operand does not have qualifier, or the qualifier
878 for the operand needs to be deduced from the qualifier
880 In the latter case, any constraint checking related with
881 the obtained qualifier should be done later in
882 operand_general_constraint_met_p. */
885 else if (*qualifiers != inst->operands[j].qualifier)
887 /* Unless the target qualifier can also qualify the operand
888 (which has already had a non-nil qualifier), non-equal
889 qualifiers are generally un-matched. */
890 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
899 continue; /* Equal qualifiers are certainly matched. */
902 /* Qualifiers established. */
909 /* Fill the result in *RET. */
911 qualifiers = *qualifiers_list;
913 DEBUG_TRACE ("complete qualifiers using list %d", i);
916 dump_qualifier_sequence (qualifiers);
919 for (j = 0; j <= stop_at; ++j, ++qualifiers)
920 ret[j] = *qualifiers;
921 for (; j < AARCH64_MAX_OPND_NUM; ++j)
922 ret[j] = AARCH64_OPND_QLF_NIL;
924 DEBUG_TRACE ("SUCCESS");
928 DEBUG_TRACE ("FAIL");
932 /* Operand qualifier matching and resolving.
934 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
935 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
937 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
941 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
944 aarch64_opnd_qualifier_seq_t qualifiers;
946 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
949 DEBUG_TRACE ("matching FAIL");
953 if (inst->opcode->flags & F_STRICT)
955 /* Require an exact qualifier match, even for NIL qualifiers. */
956 nops = aarch64_num_of_operands (inst->opcode);
957 for (i = 0; i < nops; ++i)
958 if (inst->operands[i].qualifier != qualifiers[i])
962 /* Update the qualifiers. */
963 if (update_p == TRUE)
964 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
966 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
968 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
969 "update %s with %s for operand %d",
970 aarch64_get_qualifier_name (inst->operands[i].qualifier),
971 aarch64_get_qualifier_name (qualifiers[i]), i);
972 inst->operands[i].qualifier = qualifiers[i];
975 DEBUG_TRACE ("matching SUCCESS");
979 /* Return TRUE if VALUE is a wide constant that can be moved into a general
982 IS32 indicates whether value is a 32-bit immediate or not.
983 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
984 amount will be returned in *SHIFT_AMOUNT. */
987 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
991 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
995 /* Allow all zeros or all ones in top 32-bits, so that
996 32-bit constant expressions like ~0x80000000 are
998 uint64_t ext = value;
999 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1000 /* Immediate out of range. */
1002 value &= (int64_t) 0xffffffff;
1005 /* first, try movz then movn */
1007 if ((value & ((int64_t) 0xffff << 0)) == value)
1009 else if ((value & ((int64_t) 0xffff << 16)) == value)
1011 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1013 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1018 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1022 if (shift_amount != NULL)
1023 *shift_amount = amount;
1025 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1030 /* Build the accepted values for immediate logical SIMD instructions.
1032 The standard encodings of the immediate value are:
1033 N imms immr SIMD size R S
1034 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1035 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1036 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1037 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1038 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1039 0 11110s 00000r 2 UInt(r) UInt(s)
1040 where all-ones value of S is reserved.
1042 Let's call E the SIMD size.
1044 The immediate value is: S+1 bits '1' rotated to the right by R.
1046 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1047 (remember S != E - 1). */
1049 #define TOTAL_IMM_NB 5334
1054 aarch64_insn encoding;
1055 } simd_imm_encoding;
1057 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1060 simd_imm_encoding_cmp(const void *i1, const void *i2)
1062 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1063 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1065 if (imm1->imm < imm2->imm)
1067 if (imm1->imm > imm2->imm)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */

/* Pack IS64 (the N bit), rotate count R and size/length field S into the
   13-bit N:immr:imms encoding.  */
static inline uint64_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1087 build_immediate_table (void)
1089 uint32_t log_e, e, s, r, s_mask;
1095 for (log_e = 1; log_e <= 6; log_e++)
1097 /* Get element size. */
1102 mask = 0xffffffffffffffffull;
1108 mask = (1ull << e) - 1;
1110 1 ((1 << 4) - 1) << 2 = 111100
1111 2 ((1 << 3) - 1) << 3 = 111000
1112 3 ((1 << 2) - 1) << 4 = 110000
1113 4 ((1 << 1) - 1) << 5 = 100000
1114 5 ((1 << 0) - 1) << 6 = 000000 */
1115 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1117 for (s = 0; s < e - 1; s++)
1118 for (r = 0; r < e; r++)
1120 /* s+1 consecutive bits to 1 (s < 63) */
1121 imm = (1ull << (s + 1)) - 1;
1122 /* rotate right by r */
1124 imm = (imm >> r) | ((imm << (e - r)) & mask);
1125 /* replicate the constant depending on SIMD size */
1128 case 1: imm = (imm << 2) | imm;
1129 case 2: imm = (imm << 4) | imm;
1130 case 3: imm = (imm << 8) | imm;
1131 case 4: imm = (imm << 16) | imm;
1132 case 5: imm = (imm << 32) | imm;
1136 simd_immediates[nb_imms].imm = imm;
1137 simd_immediates[nb_imms].encoding =
1138 encode_immediate_bitfield(is64, s | s_mask, r);
1142 assert (nb_imms == TOTAL_IMM_NB);
1143 qsort(simd_immediates, nb_imms,
1144 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1147 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1148 be accepted by logical (immediate) instructions
1149 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1151 ESIZE is the number of bytes in the decoded immediate value.
1152 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1153 VALUE will be returned in *ENCODING. */
1156 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1158 simd_imm_encoding imm_enc;
1159 const simd_imm_encoding *imm_encoding;
1160 static bfd_boolean initialized = FALSE;
1164 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1167 if (initialized == FALSE)
1169 build_immediate_table ();
1173 /* Allow all zeros or all ones in top bits, so that
1174 constant expressions like ~1 are permitted. */
1175 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1176 if ((value & ~upper) != value && (value | upper) != value)
1179 /* Replicate to a full 64-bit value. */
1181 for (i = esize * 8; i < 64; i *= 2)
1182 value |= (value << i);
1184 imm_enc.imm = value;
1185 imm_encoding = (const simd_imm_encoding *)
1186 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1187 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1188 if (imm_encoding == NULL)
1190 DEBUG_TRACE ("exit with FALSE");
1193 if (encoding != NULL)
1194 *encoding = imm_encoding->encoding;
1195 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;	/* A byte that is neither all-ones nor all-zeros.  */
    }
  return ret;
}
1221 /* Utility inline functions for operand_general_constraint_met_p. */
1224 set_error (aarch64_operand_error *mismatch_detail,
1225 enum aarch64_operand_error_kind kind, int idx,
1228 if (mismatch_detail == NULL)
1230 mismatch_detail->kind = kind;
1231 mismatch_detail->index = idx;
1232 mismatch_detail->error = error;
1236 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1239 if (mismatch_detail == NULL)
1241 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1245 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1246 int idx, int lower_bound, int upper_bound,
1249 if (mismatch_detail == NULL)
1251 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1252 mismatch_detail->data[0] = lower_bound;
1253 mismatch_detail->data[1] = upper_bound;
1257 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1258 int idx, int lower_bound, int upper_bound)
1260 if (mismatch_detail == NULL)
1262 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1263 _("immediate value"));
1267 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1268 int idx, int lower_bound, int upper_bound)
1270 if (mismatch_detail == NULL)
1272 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1273 _("immediate offset"));
1277 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1278 int idx, int lower_bound, int upper_bound)
1280 if (mismatch_detail == NULL)
1282 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1283 _("register number"));
1287 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1288 int idx, int lower_bound, int upper_bound)
1290 if (mismatch_detail == NULL)
1292 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1293 _("register element index"));
/* Convenience wrapper: out-of-range error for a shift amount.  */
1297 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1298 int idx, int lower_bound, int upper_bound)
1300 if (mismatch_detail == NULL)
1302 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
/* Flag an alignment error on operand IDX; the required ALIGNMENT is
   stored in data[0] for the client to report.  No-op when
   MISMATCH_DETAIL is NULL.  */
1307 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1310 if (mismatch_detail == NULL)
1312 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1313 mismatch_detail->data[0] = alignment;
/* Flag a register-list error on operand IDX; EXPECTED_NUM (the number of
   registers the list should contain) is stored in data[0].  No-op when
   MISMATCH_DETAIL is NULL.  */
1317 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1320 if (mismatch_detail == NULL)
1322 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1323 mismatch_detail->data[0] = expected_num;
/* Flag a miscellaneous error on operand IDX with static message ERROR.
   No-op when MISMATCH_DETAIL is NULL.  */
1327 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1330 if (mismatch_detail == NULL)
1332 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1335 /* General constraint checking based on operand code.
1337 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1338 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1340 This function has to be called after the qualifiers for all operands
1343 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1344 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1345 of error message during the disassembling where error message is not
1346 wanted. We avoid the dynamic construction of strings of error messages
1347 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1348 use a combination of error code, static string and some integer data to
1349 represent an error. */
/* See the block comment above for the full contract.  Dispatches on the
   operand class recorded in aarch64_operands[] and then refines the
   check per operand TYPE.  */
1352 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1353 enum aarch64_opnd type,
1354 const aarch64_opcode *opcode,
1355 aarch64_operand_error *mismatch_detail)
1360 const aarch64_opnd_info *opnd = opnds + idx;
1361 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
/* The operand type in the opcode table, in the instruction and the one
   requested by the caller must all agree.  */
1363 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1365 switch (aarch64_operands[type].op_class)
1367 case AARCH64_OPND_CLASS_INT_REG:
1368 /* Check pair reg constraints for cas* instructions. */
1369 if (type == AARCH64_OPND_PAIRREG)
1371 assert (idx == 1 || idx == 3);
1372 if (opnds[idx - 1].reg.regno % 2 != 0)
1374 set_syntax_error (mismatch_detail, idx - 1,
1375 _("reg pair must start from even reg"));
1378 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1380 set_syntax_error (mismatch_detail, idx,
1381 _("reg pair must be contiguous"));
1387 /* <Xt> may be optional in some IC and TLBI instructions. */
1388 if (type == AARCH64_OPND_Rt_SYS)
1390 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1391 == AARCH64_OPND_CLASS_SYSTEM));
/* Whether <Xt> is present must agree with what the system instruction
   (operand 0) requires.  */
1392 if (opnds[1].present
1393 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1395 set_other_error (mismatch_detail, idx, _("extraneous register"));
1398 if (!opnds[1].present
1399 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1401 set_other_error (mismatch_detail, idx, _("missing register"));
1407 case AARCH64_OPND_QLF_WSP:
1408 case AARCH64_OPND_QLF_SP:
1409 if (!aarch64_stack_pointer_p (opnd))
1411 set_other_error (mismatch_detail, idx,
1412 _("stack pointer register expected"));
1421 case AARCH64_OPND_CLASS_SVE_REG:
1424 case AARCH64_OPND_SVE_Zn_INDEX:
1425 size = aarch64_get_qualifier_esize (opnd->qualifier);
1426 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1428 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1434 case AARCH64_OPND_SVE_ZnxN:
1435 case AARCH64_OPND_SVE_ZtxN:
1436 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1438 set_other_error (mismatch_detail, idx,
1439 _("invalid register list"));
1449 case AARCH64_OPND_CLASS_PRED_REG:
/* A 3-bit predicate field can only encode P0-P7.  */
1450 if (opnd->reg.regno >= 8
1451 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1453 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1458 case AARCH64_OPND_CLASS_COND:
1459 if (type == AARCH64_OPND_COND1
1460 && (opnds[idx].cond->value & 0xe) == 0xe)
1462 /* Don't allow AL or NV. */
1463 set_syntax_error (mismatch_detail, idx, NULL);
1467 case AARCH64_OPND_CLASS_ADDRESS:
1468 /* Check writeback. */
1469 switch (opcode->iclass)
1473 case ldstnapair_offs:
1476 if (opnd->addr.writeback == 1)
1478 set_syntax_error (mismatch_detail, idx,
1479 _("unexpected address writeback"));
1484 case ldstpair_indexed:
1487 if (opnd->addr.writeback == 0)
1489 set_syntax_error (mismatch_detail, idx,
1490 _("address writeback expected"));
1495 assert (opnd->addr.writeback == 0);
1500 case AARCH64_OPND_ADDR_SIMM7:
1501 /* Scaled signed 7 bits immediate offset. */
1502 /* Get the size of the data element that is accessed, which may be
1503 different from that of the source register size,
1504 e.g. in strb/ldrb. */
1505 size = aarch64_get_qualifier_esize (opnd->qualifier);
1506 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1508 set_offset_out_of_range_error (mismatch_detail, idx,
1509 -64 * size, 63 * size)
1512 if (!value_aligned_p (opnd->addr.offset.imm, size))
1514 set_unaligned_error (mismatch_detail, idx, size);
1518 case AARCH64_OPND_ADDR_SIMM9:
1519 /* Unscaled signed 9 bits immediate offset. */
1520 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1522 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1527 case AARCH64_OPND_ADDR_SIMM9_2:
1528 /* Unscaled signed 9 bits immediate offset, which has to be negative
1530 size = aarch64_get_qualifier_esize (qualifier);
1531 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1532 && !value_aligned_p (opnd->addr.offset.imm, size))
1533 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1535 set_other_error (mismatch_detail, idx,
1536 _("negative or unaligned offset expected"));
1539 case AARCH64_OPND_SIMD_ADDR_POST:
1540 /* AdvSIMD load/store multiple structures, post-index. */
1542 if (opnd->addr.offset.is_reg)
/* Register post-index: any of X0-X30 is acceptable.  */
1544 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1548 set_other_error (mismatch_detail, idx,
1549 _("invalid register offset"));
/* Immediate post-index: the amount must equal the total number of
   bytes transferred, derived from the preceding reglist operand.  */
1555 const aarch64_opnd_info *prev = &opnds[idx-1];
1556 unsigned num_bytes; /* total number of bytes transferred. */
1557 /* The opcode dependent area stores the number of elements in
1558 each structure to be loaded/stored. */
1559 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1560 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1561 /* Special handling of loading single structure to all lanes. */
1562 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1563 * aarch64_get_qualifier_esize (prev->qualifier);
1565 num_bytes = prev->reglist.num_regs
1566 * aarch64_get_qualifier_esize (prev->qualifier)
1567 * aarch64_get_qualifier_nelem (prev->qualifier);
1568 if ((int) num_bytes != opnd->addr.offset.imm)
1570 set_other_error (mismatch_detail, idx,
1571 _("invalid post-increment amount"));
1577 case AARCH64_OPND_ADDR_REGOFF:
1578 /* Get the size of the data element that is accessed, which may be
1579 different from that of the source register size,
1580 e.g. in strb/ldrb. */
1581 size = aarch64_get_qualifier_esize (opnd->qualifier);
1582 /* It is either no shift or shift by the binary logarithm of SIZE. */
1583 if (opnd->shifter.amount != 0
1584 && opnd->shifter.amount != (int)get_logsz (size))
1586 set_other_error (mismatch_detail, idx,
1587 _("invalid shift amount"));
1590 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1592 switch (opnd->shifter.kind)
1594 case AARCH64_MOD_UXTW:
1595 case AARCH64_MOD_LSL:
1596 case AARCH64_MOD_SXTW:
1597 case AARCH64_MOD_SXTX: break;
1599 set_other_error (mismatch_detail, idx,
1600 _("invalid extend/shift operator"));
1605 case AARCH64_OPND_ADDR_UIMM12:
1606 imm = opnd->addr.offset.imm;
1607 /* Get the size of the data element that is accessed, which may be
1608 different from that of the source register size,
1609 e.g. in strb/ldrb. */
1610 size = aarch64_get_qualifier_esize (qualifier);
1611 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1613 set_offset_out_of_range_error (mismatch_detail, idx,
1617 if (!value_aligned_p (opnd->addr.offset.imm, size))
1619 set_unaligned_error (mismatch_detail, idx, size);
1624 case AARCH64_OPND_ADDR_PCREL14:
1625 case AARCH64_OPND_ADDR_PCREL19:
1626 case AARCH64_OPND_ADDR_PCREL21:
1627 case AARCH64_OPND_ADDR_PCREL26:
1628 imm = opnd->imm.value;
1629 if (operand_need_shift_by_two (get_operand_from_code (type)))
1631 /* The offset value in a PC-relative branch instruction is always
1632 4-byte aligned and is encoded without the lowest 2 bits. */
1633 if (!value_aligned_p (imm, 4))
1635 set_unaligned_error (mismatch_detail, idx, 4);
1638 /* Right shift by 2 so that we can carry out the following check
1642 size = get_operand_fields_width (get_operand_from_code (type));
1643 if (!value_fit_signed_field_p (imm, size))
1645 set_other_error (mismatch_detail, idx,
1646 _("immediate out of range"));
1656 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1657 if (type == AARCH64_OPND_LEt)
1659 /* Get the upper bound for the element index. */
1660 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1661 if (!value_in_range_p (opnd->reglist.index, 0, num))
1663 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1667 /* The opcode dependent area stores the number of elements in
1668 each structure to be loaded/stored. */
1669 num = get_opcode_dependent_value (opcode);
1672 case AARCH64_OPND_LVt:
1673 assert (num >= 1 && num <= 4);
1674 /* Unless LD1/ST1, the number of registers should be equal to that
1675 of the structure elements. */
1676 if (num != 1 && opnd->reglist.num_regs != num)
1678 set_reg_list_error (mismatch_detail, idx, num);
1682 case AARCH64_OPND_LVt_AL:
1683 case AARCH64_OPND_LEt:
1684 assert (num >= 1 && num <= 4);
1685 /* The number of registers should be equal to that of the structure
1687 if (opnd->reglist.num_regs != num)
1689 set_reg_list_error (mismatch_detail, idx, num);
1698 case AARCH64_OPND_CLASS_IMMEDIATE:
1699 /* Constraint check on immediate operand. */
1700 imm = opnd->imm.value;
1701 /* E.g. imm_0_31 constrains value to be 0..31. */
1702 if (qualifier_value_in_range_constraint_p (qualifier)
1703 && !value_in_range_p (imm, get_lower_bound (qualifier),
1704 get_upper_bound (qualifier)))
1706 set_imm_out_of_range_error (mismatch_detail, idx,
1707 get_lower_bound (qualifier),
1708 get_upper_bound (qualifier));
1714 case AARCH64_OPND_AIMM:
/* Arithmetic immediate: 12-bit unsigned, optionally LSL #12.  */
1715 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1717 set_other_error (mismatch_detail, idx,
1718 _("invalid shift operator"));
1721 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1723 set_other_error (mismatch_detail, idx,
1724 _("shift amount expected to be 0 or 12"));
1727 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1729 set_other_error (mismatch_detail, idx,
1730 _("immediate out of range"));
1735 case AARCH64_OPND_HALF:
/* MOVZ/MOVN/MOVK style 16-bit immediate with an LSL that must be a
   multiple of 16 within the destination register width.  */
1736 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1737 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid shift operator"));
1743 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1744 if (!value_aligned_p (opnd->shifter.amount, 16))
1746 set_other_error (mismatch_detail, idx,
1747 _("shift amount should be a multiple of 16"));
1750 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1752 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1756 if (opnd->imm.value < 0)
1758 set_other_error (mismatch_detail, idx,
1759 _("negative immediate value not allowed"));
1762 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1764 set_other_error (mismatch_detail, idx,
1765 _("immediate out of range"));
1770 case AARCH64_OPND_IMM_MOV:
1772 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1773 imm = opnd->imm.value;
1777 case OP_MOV_IMM_WIDEN:
1779 /* Fall through... */
1780 case OP_MOV_IMM_WIDE:
1781 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1783 set_other_error (mismatch_detail, idx,
1784 _("immediate out of range"));
1788 case OP_MOV_IMM_LOG:
1789 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1791 set_other_error (mismatch_detail, idx,
1792 _("immediate out of range"));
1803 case AARCH64_OPND_NZCV:
1804 case AARCH64_OPND_CCMP_IMM:
1805 case AARCH64_OPND_EXCEPTION:
1806 case AARCH64_OPND_UIMM4:
1807 case AARCH64_OPND_UIMM7:
1808 case AARCH64_OPND_UIMM3_OP1:
1809 case AARCH64_OPND_UIMM3_OP2:
/* Plain unsigned immediates: the bound comes straight from the
   encoded field width.  */
1810 size = get_operand_fields_width (get_operand_from_code (type));
1812 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1814 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1820 case AARCH64_OPND_WIDTH:
1821 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1822 && opnds[0].type == AARCH64_OPND_Rd);
1823 size = get_upper_bound (qualifier);
1824 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1825 /* lsb+width <= reg.size */
1827 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1828 size - opnds[idx-1].imm.value);
1833 case AARCH64_OPND_LIMM:
1835 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1836 uint64_t uimm = opnd->imm.value;
1837 if (opcode->op == OP_BIC)
1839 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
1841 set_other_error (mismatch_detail, idx,
1842 _("immediate out of range"));
1848 case AARCH64_OPND_IMM0:
1849 case AARCH64_OPND_FPIMM0:
1850 if (opnd->imm.value != 0)
1852 set_other_error (mismatch_detail, idx,
1853 _("immediate zero expected"));
1858 case AARCH64_OPND_SHLL_IMM:
/* SHLL's shift amount must equal the element size in bits of the
   preceding (source) operand.  */
1860 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1861 if (opnd->imm.value != size)
1863 set_other_error (mismatch_detail, idx,
1864 _("invalid shift amount"));
1869 case AARCH64_OPND_IMM_VLSL:
1870 size = aarch64_get_qualifier_esize (qualifier);
1871 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1873 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1879 case AARCH64_OPND_IMM_VLSR:
/* Vector shift right: 1 .. element-size-in-bits.  */
1880 size = aarch64_get_qualifier_esize (qualifier);
1881 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1883 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1888 case AARCH64_OPND_SIMD_IMM:
1889 case AARCH64_OPND_SIMD_IMM_SFT:
1890 /* Qualifier check. */
1893 case AARCH64_OPND_QLF_LSL:
1894 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1896 set_other_error (mismatch_detail, idx,
1897 _("invalid shift operator"));
1901 case AARCH64_OPND_QLF_MSL:
1902 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1904 set_other_error (mismatch_detail, idx,
1905 _("invalid shift operator"));
1909 case AARCH64_OPND_QLF_NIL:
1910 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1912 set_other_error (mismatch_detail, idx,
1913 _("shift is not permitted"));
1921 /* Is the immediate valid? */
1923 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1925 /* uimm8 or simm8 */
1926 if (!value_in_range_p (opnd->imm.value, -128, 255))
1928 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1932 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1935 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1936 ffffffffgggggggghhhhhhhh'. */
1937 set_other_error (mismatch_detail, idx,
1938 _("invalid value for immediate"));
1941 /* Is the shift amount valid? */
1942 switch (opnd->shifter.kind)
1944 case AARCH64_MOD_LSL:
1945 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1946 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1948 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1952 if (!value_aligned_p (opnd->shifter.amount, 8))
1954 set_unaligned_error (mismatch_detail, idx, 8);
1958 case AARCH64_MOD_MSL:
1959 /* Only 8 and 16 are valid shift amounts. */
/* NOTE(review): the check below accepts 8 or 16, but the message
   string says "0 or 16" -- the string looks wrong ("8 or 16");
   cannot change a runtime string in a doc-only pass.  */
1960 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1962 set_other_error (mismatch_detail, idx,
1963 _("shift amount expected to be 0 or 16"));
1968 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1970 set_other_error (mismatch_detail, idx,
1971 _("invalid shift operator"));
1978 case AARCH64_OPND_FPIMM:
1979 case AARCH64_OPND_SIMD_FPIMM:
1980 if (opnd->imm.is_fp == 0)
1982 set_other_error (mismatch_detail, idx,
1983 _("floating-point immediate expected"));
1986 /* The value is expected to be an 8-bit floating-point constant with
1987 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1988 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1990 if (!value_in_range_p (opnd->imm.value, 0, 255))
1992 set_other_error (mismatch_detail, idx,
1993 _("immediate out of range"));
1996 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1998 set_other_error (mismatch_detail, idx,
1999 _("invalid shift operator"));
2009 case AARCH64_OPND_CLASS_CP_REG:
2010 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2011 valid range: C0 - C15. */
2012 if (opnd->reg.regno > 15)
2014 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2019 case AARCH64_OPND_CLASS_SYSTEM:
2022 case AARCH64_OPND_PSTATEFIELD:
2023 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2026 The immediate must be #0 or #1. */
2027 if ((opnd->pstatefield == 0x03 /* UAO. */
2028 || opnd->pstatefield == 0x04) /* PAN. */
2029 && opnds[1].imm.value > 1)
2031 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2034 /* MSR SPSel, #uimm4
2035 Uses uimm4 as a control value to select the stack pointer: if
2036 bit 0 is set it selects the current exception level's stack
2037 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2038 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2039 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2041 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2050 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2051 /* Get the upper bound for the element index. */
2052 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2053 /* Index out-of-range. */
2054 if (!value_in_range_p (opnd->reglane.index, 0, num))
2056 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2059 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2060 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2061 number is encoded in "size:M:Rm":
2067 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2068 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2070 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2075 case AARCH64_OPND_CLASS_MODIFIED_REG:
2076 assert (idx == 1 || idx == 2);
2079 case AARCH64_OPND_Rm_EXT:
2080 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2081 && opnd->shifter.kind != AARCH64_MOD_LSL)
2083 set_other_error (mismatch_detail, idx,
2084 _("extend operator expected"));
2087 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2088 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2089 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2091 if (!aarch64_stack_pointer_p (opnds + 0)
2092 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2094 if (!opnd->shifter.operator_present)
2096 set_other_error (mismatch_detail, idx,
2097 _("missing extend operator"));
2100 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2102 set_other_error (mismatch_detail, idx,
2103 _("'LSL' operator not allowed"));
2107 assert (opnd->shifter.operator_present /* Default to LSL. */
2108 || opnd->shifter.kind == AARCH64_MOD_LSL);
2109 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2111 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2114 /* In the 64-bit form, the final register operand is written as Wm
2115 for all but the (possibly omitted) UXTX/LSL and SXTX
2117 N.B. GAS allows X register to be used with any operator as a
2118 programming convenience. */
2119 if (qualifier == AARCH64_OPND_QLF_X
2120 && opnd->shifter.kind != AARCH64_MOD_LSL
2121 && opnd->shifter.kind != AARCH64_MOD_UXTX
2122 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2124 set_other_error (mismatch_detail, idx, _("W register expected"));
2129 case AARCH64_OPND_Rm_SFT:
2130 /* ROR is not available to the shifted register operand in
2131 arithmetic instructions. */
2132 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2134 set_other_error (mismatch_detail, idx,
2135 _("shift operator expected"));
2138 if (opnd->shifter.kind == AARCH64_MOD_ROR
2139 && opcode->iclass != log_shift)
2141 set_other_error (mismatch_detail, idx,
2142 _("'ROR' operator not allowed"));
/* Shift amount range depends on register width: 0-31 for W, 0-63
   for X.  */
2145 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2146 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2148 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2165 /* Main entrypoint for the operand constraint checking.
2167 Return 1 if operands of *INST meet the constraint applied by the operand
2168 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2169 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2170 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2171 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2172 error kind when it is notified that an instruction does not pass the check).
2174 Un-determined operand qualifiers may get established during the process. */
2177 aarch64_match_operands_constraint (aarch64_inst *inst,
2178 aarch64_operand_error *mismatch_detail)
2182 DEBUG_TRACE ("enter");
2184 /* Check for cases where a source register needs to be the same as the
2185 destination register. Do this before matching qualifiers since if
2186 an instruction has both invalid tying and invalid qualifiers,
2187 the error about qualifiers would suggest several alternative
2188 instructions that also have invalid tying. */
2189 i = inst->opcode->tied_operand;
2190 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2192 if (mismatch_detail)
2194 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2195 mismatch_detail->index = i;
2196 mismatch_detail->error = NULL;
2201 /* Match operands' qualifier.
2202 *INST has already had qualifiers established for some, if not all, of
2203 its operands; we need to find out whether these established
2204 qualifiers match one of the qualifier sequence in
2205 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2206 with the corresponding qualifier in such a sequence.
2207 Only basic operand constraint checking is done here; the more thorough
2208 constraint checking will be carried out by operand_general_constraint_met_p,
2209 which has to be called after this in order to get all of the operands'
2210 qualifiers established. */
2211 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2213 DEBUG_TRACE ("FAIL on operand qualifier matching");
2214 if (mismatch_detail)
2216 /* Return an error type to indicate that it is the qualifier
2217 matching failure; we don't care about which operand as there
2218 is enough information in the opcode table to reproduce it. */
2219 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2220 mismatch_detail->index = -1;
2221 mismatch_detail->error = NULL;
2226 /* Match operands' constraint. */
2227 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2229 enum aarch64_opnd type = inst->opcode->operands[i];
2230 if (type == AARCH64_OPND_NIL)
2232 if (inst->operands[i].skip)
2234 DEBUG_TRACE ("skip the incomplete operand %d", i);
2237 if (operand_general_constraint_met_p (inst->operands, i, type,
2238 inst->opcode, mismatch_detail) == 0)
2240 DEBUG_TRACE ("FAIL on operand %d", i);
2245 DEBUG_TRACE ("PASS");
2250 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2251 Also updates the TYPE of each INST->OPERANDS with the corresponding
2252 value of OPCODE->OPERANDS.
2254 Note that some operand qualifiers may need to be manually cleared by
2255 the caller before it further calls the aarch64_opcode_encode; by
2256 doing this, it helps the qualifier matching facilities work
2259 const aarch64_opcode*
2260 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2263 const aarch64_opcode *old = inst->opcode;
2265 inst->opcode = opcode;
2267 /* Update the operand types. */
/* Stop at the first NIL: operand arrays are NIL-terminated.  */
2268 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2270 inst->operands[i].type = opcode->operands[i];
2271 if (opcode->operands[i] == AARCH64_OPND_NIL)
2275 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
/* Return the index of OPERAND in the NIL-terminated array OPERANDS,
   or a not-found result once NIL is reached.  */
2281 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2284 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2285 if (operands[i] == operand)
2287 else if (operands[i] == AARCH64_OPND_NIL)
2292 /* R0...R30, followed by FOR31. */
2293 #define BANK(R, FOR31) \
2294 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2295 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2296 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2297 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register name table, indexed [has_zr][is_64][regno]:  */
2298 /* [0][0] 32-bit integer regs with sp Wn
2299 [0][1] 64-bit integer regs with sp Xn sf=1
2300 [1][0] 32-bit integer regs with #0 Wn
2301 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2302 static const char *int_reg[2][2][32] = {
2303 #define R32(X) "w" #X
2304 #define R64(X) "x" #X
2305 { BANK (R32, "wsp"), BANK (R64, "sp") },
2306 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2312 /* Return the integer register name.
2313 if SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2315 static inline const char *
2316 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2318 const int has_zr = sp_reg_p ? 0 : 1;
/* 4-byte element size means a W register; anything else is X.  */
2319 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2320 return int_reg[has_zr][is_64][regno];
2323 /* Like get_int_reg_name, but IS_64 is always 1. */
2325 static inline const char *
2326 get_64bit_int_reg_name (int regno, int sp_reg_p)
2328 const int has_zr = sp_reg_p ? 0 : 1;
2329 return int_reg[has_zr][1][regno];
2332 /* Get the name of the integer offset register in OPND, using the shift type
2333 to decide whether it's a word or doubleword. */
2335 static inline const char *
2336 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2338 switch (opnd->shifter.kind)
/* W-extending operators imply a 32-bit offset register.  */
2340 case AARCH64_MOD_UXTW:
2341 case AARCH64_MOD_SXTW:
2342 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
/* LSL and SXTX operate on the full 64-bit offset register.  */
2344 case AARCH64_MOD_LSL:
2345 case AARCH64_MOD_SXTX:
2346 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2353 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2373 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2374 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2375 (depending on the type of the instruction). IMM8 will be expanded to a
2376 single-precision floating-point value (SIZE == 4) or a double-precision
2377 floating-point value (SIZE == 8). A half-precision floating-point value
2378 (SIZE == 2) is expanded to a single-precision floating-point value. The
2379 expanded value is returned. */
2382 expand_fp_imm (int size, uint32_t imm8)
2385 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2387 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2388 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2389 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2390 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2391 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
/* Double precision: build the high 32 bits of the IEEE-754 pattern
   (shift counts are written relative to bit 32).  */
2394 imm = (imm8_7 << (63-32)) /* imm8<7> */
2395 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2396 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2397 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2398 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2401 else if (size == 4 || size == 2)
2403 imm = (imm8_7 << 31) /* imm8<7> */
2404 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2405 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2406 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2410 /* An unsupported size. */
2417 /* Produce the string representation of the register list operand *OPND
2418 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2419 the register name that comes before the register number, such as "v". */
2421 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2424 const int num_regs = opnd->reglist.num_regs;
2425 const int first_reg = opnd->reglist.first_regno;
/* Register numbers wrap modulo 32 (V31 -> V0).  */
2426 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2427 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2428 char tb[8]; /* Temporary buffer. */
2430 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2431 assert (num_regs >= 1 && num_regs <= 4);
2433 /* Prepare the index if any. */
2434 if (opnd->reglist.has_index)
2435 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2439 /* The hyphenated form is preferred for disassembly if there are
2440 more than two registers in the list, and the register numbers
2441 are monotonically increasing in increments of one. */
2442 if (num_regs > 2 && last_reg > first_reg)
2443 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2444 prefix, last_reg, qlf_name, tb);
/* Otherwise fall back to listing each register explicitly; each
   successive register number also wraps modulo 32.  */
2447 const int reg0 = first_reg;
2448 const int reg1 = (first_reg + 1) & 0x1f;
2449 const int reg2 = (first_reg + 2) & 0x1f;
2450 const int reg3 = (first_reg + 3) & 0x1f;
2455 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2458 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2459 prefix, reg1, qlf_name, tb);
2462 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2463 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2464 prefix, reg2, qlf_name, tb);
2467 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2468 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2469 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2475 /* Print the register+immediate address in OPND to BUF, which has SIZE
2476 characters. BASE is the name of the base register. */
2479 print_immediate_offset_address (char *buf, size_t size,
2480 const aarch64_opnd_info *opnd,
2483 if (opnd->addr.writeback)
/* Pre-index: [base,#imm]!; post-index: [base],#imm.  */
2485 if (opnd->addr.preind)
2486 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2488 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
/* No writeback: omit a zero offset entirely.  */
2492 if (opnd->addr.offset.imm)
2493 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2495 snprintf (buf, size, "[%s]", base);
2499 /* Produce the string representation of the register offset address operand
2500 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2501 the names of the base and offset registers. */
2503 print_register_offset_address (char *buf, size_t size,
2504 const aarch64_opnd_info *opnd,
2505 const char *base, const char *offset)
2507 char tb[16]; /* Temporary buffer. */
2508 bfd_boolean print_extend_p = TRUE;
2509 bfd_boolean print_amount_p = TRUE;
2510 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2512 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2513 || !opnd->shifter.amount_present))
2515 /* Don't print the shift/extend amount when the amount is zero and
2516 when it is not the special case of 8-bit load/store instruction. */
2517 print_amount_p = FALSE;
2518 /* Likewise, no need to print the shift operator LSL in such a
2520 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2521 print_extend_p = FALSE;
2524 /* Prepare for the extend/shift. */
2528 snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
2530 snprintf (tb, sizeof (tb), ",%s", shift_name);
/* Assemble the final "[base,offset{,ext/shift}]" form; TB may be empty.  */
2535 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2538 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2539 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2540 PC, PCREL_P and ADDRESS are used to pass in and return information about
2541 the PC-relative address calculation, where the PC value is passed in
2542 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2543 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2544 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2546 The function serves both the disassembler and the assembler diagnostics
2547 issuer, which is the reason why it lives in this file. */
/* NOTE(review): this chunk looks like a line-sampled extraction -- the
   return type line, the switch header, braces and break statements are
   missing from the visible text.  Verify against the complete source
   before relying on control flow shown here.  */
2550 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2551 const aarch64_opcode *opcode,
2552 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2556 const char *name = NULL;
2557 const aarch64_opnd_info *opnd = opnds + idx;
2558 enum aarch64_modifier_kind kind;
/* enum_value is used below as an index into the SVE pattern/prfop name
   arrays; addr holds a computed PC-relative target.  */
2559 uint64_t addr, enum_value;
/* General-purpose registers: printed as w<n>/x<n> via get_int_reg_name.  */
2567 case AARCH64_OPND_Rd:
2568 case AARCH64_OPND_Rn:
2569 case AARCH64_OPND_Rm:
2570 case AARCH64_OPND_Rt:
2571 case AARCH64_OPND_Rt2:
2572 case AARCH64_OPND_Rs:
2573 case AARCH64_OPND_Ra:
2574 case AARCH64_OPND_Rt_SYS:
2575 case AARCH64_OPND_PAIRREG:
2576 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2577 the <ic_op>, therefore we we use opnd->present to override the
2578 generic optional-ness information. */
2579 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2581 /* Omit the operand, e.g. RET. */
2582 if (optional_operand_p (opcode, idx)
2583 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2585 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2586 || opnd->qualifier == AARCH64_OPND_QLF_X)
2587 snprintf (buf, size, "%s",
2588 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
/* Registers where SP (rather than XZR) is a valid encoding of r31.  */
2591 case AARCH64_OPND_Rd_SP:
2592 case AARCH64_OPND_Rn_SP:
2593 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2594 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2595 || opnd->qualifier == AARCH64_OPND_QLF_X
2596 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2597 snprintf (buf, size, "%s",
2598 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2601 case AARCH64_OPND_Rm_EXT:
2602 kind = opnd->shifter.kind;
2603 assert (idx == 1 || idx == 2);
2604 if ((aarch64_stack_pointer_p (opnds)
2605 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2606 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2607 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2608 && kind == AARCH64_MOD_UXTW)
2609 || (opnd->qualifier == AARCH64_OPND_QLF_X
2610 && kind == AARCH64_MOD_UXTX)))
2612 /* 'LSL' is the preferred form in this case. */
2613 kind = AARCH64_MOD_LSL;
2614 if (opnd->shifter.amount == 0)
2616 /* Shifter omitted. */
2617 snprintf (buf, size, "%s",
2618 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2622 if (opnd->shifter.amount)
2623 snprintf (buf, size, "%s, %s #%d",
2624 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2625 aarch64_operand_modifiers[kind].name,
2626 opnd->shifter.amount);
2628 snprintf (buf, size, "%s, %s",
2629 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2630 aarch64_operand_modifiers[kind].name);
2633 case AARCH64_OPND_Rm_SFT:
2634 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2635 || opnd->qualifier == AARCH64_OPND_QLF_X);
/* Omit a redundant "LSL #0" shifter.  */
2636 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2637 snprintf (buf, size, "%s",
2638 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2640 snprintf (buf, size, "%s, %s #%d",
2641 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2642 aarch64_operand_modifiers[opnd->shifter.kind].name,
2643 opnd->shifter.amount);
/* Scalar FP/SIMD registers: qualifier name gives the b/h/s/d/q prefix.  */
2646 case AARCH64_OPND_Fd:
2647 case AARCH64_OPND_Fn:
2648 case AARCH64_OPND_Fm:
2649 case AARCH64_OPND_Fa:
2650 case AARCH64_OPND_Ft:
2651 case AARCH64_OPND_Ft2:
2652 case AARCH64_OPND_Sd:
2653 case AARCH64_OPND_Sn:
2654 case AARCH64_OPND_Sm:
2655 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
/* AdvSIMD vector registers, e.g. "v3.4s".  */
2659 case AARCH64_OPND_Vd:
2660 case AARCH64_OPND_Vn:
2661 case AARCH64_OPND_Vm:
2662 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2663 aarch64_get_qualifier_name (opnd->qualifier));
/* Vector element operands, e.g. "v3.s[1]".  */
2666 case AARCH64_OPND_Ed:
2667 case AARCH64_OPND_En:
2668 case AARCH64_OPND_Em:
2669 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2670 aarch64_get_qualifier_name (opnd->qualifier),
2671 opnd->reglane.index);
2674 case AARCH64_OPND_VdD1:
2675 case AARCH64_OPND_VnD1:
2676 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
/* Vector register lists, e.g. "{v0.16b - v3.16b}".  */
2679 case AARCH64_OPND_LVn:
2680 case AARCH64_OPND_LVt:
2681 case AARCH64_OPND_LVt_AL:
2682 case AARCH64_OPND_LEt:
2683 print_register_list (buf, size, opnd, "v");
/* SVE predicate registers; /z and /m print as a suffix, other
   qualifiers as an element size, e.g. "p1/m" vs "p1.b".  */
2686 case AARCH64_OPND_SVE_Pd:
2687 case AARCH64_OPND_SVE_Pg3:
2688 case AARCH64_OPND_SVE_Pg4_5:
2689 case AARCH64_OPND_SVE_Pg4_10:
2690 case AARCH64_OPND_SVE_Pg4_16:
2691 case AARCH64_OPND_SVE_Pm:
2692 case AARCH64_OPND_SVE_Pn:
2693 case AARCH64_OPND_SVE_Pt:
2694 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
2695 snprintf (buf, size, "p%d", opnd->reg.regno);
2696 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
2697 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
2698 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
2699 aarch64_get_qualifier_name (opnd->qualifier));
2701 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
2702 aarch64_get_qualifier_name (opnd->qualifier));
/* SVE vector registers, e.g. "z5.d".  */
2705 case AARCH64_OPND_SVE_Za_5:
2706 case AARCH64_OPND_SVE_Za_16:
2707 case AARCH64_OPND_SVE_Zd:
2708 case AARCH64_OPND_SVE_Zm_5:
2709 case AARCH64_OPND_SVE_Zm_16:
2710 case AARCH64_OPND_SVE_Zn:
2711 case AARCH64_OPND_SVE_Zt:
2712 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
2713 snprintf (buf, size, "z%d", opnd->reg.regno)
2715 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
2716 aarch64_get_qualifier_name (opnd->qualifier));
2719 case AARCH64_OPND_SVE_ZnxN:
2720 case AARCH64_OPND_SVE_ZtxN:
2721 print_register_list (buf, size, opnd, "z");
2724 case AARCH64_OPND_SVE_Zn_INDEX:
2725 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2726 aarch64_get_qualifier_name (opnd->qualifier),
2727 opnd->reglane.index);
/* System coprocessor-style register names, e.g. "C7" in SYS.  */
2730 case AARCH64_OPND_Cn:
2731 case AARCH64_OPND_Cm:
2732 snprintf (buf, size, "C%d", opnd->reg.regno);
/* Plain immediates: printed as "#<decimal>".  */
2735 case AARCH64_OPND_IDX:
2736 case AARCH64_OPND_IMM:
2737 case AARCH64_OPND_WIDTH:
2738 case AARCH64_OPND_UIMM3_OP1:
2739 case AARCH64_OPND_UIMM3_OP2:
2740 case AARCH64_OPND_BIT_NUM:
2741 case AARCH64_OPND_IMM_VLSL:
2742 case AARCH64_OPND_IMM_VLSR:
2743 case AARCH64_OPND_SHLL_IMM:
2744 case AARCH64_OPND_IMM0:
2745 case AARCH64_OPND_IMMR:
2746 case AARCH64_OPND_IMMS:
2747 case AARCH64_OPND_FBITS:
2748 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
/* SVE pattern: print its enumeration name when one exists, otherwise
   fall back to a raw "#<imm>".  */
2751 case AARCH64_OPND_SVE_PATTERN:
2752 if (optional_operand_p (opcode, idx)
2753 && opnd->imm.value == get_optional_operand_default_value (opcode))
2755 enum_value = opnd->imm.value;
2756 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
2757 if (aarch64_sve_pattern_array[enum_value])
2758 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
2760 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2763 case AARCH64_OPND_SVE_PRFOP:
2764 enum_value = opnd->imm.value;
2765 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
2766 if (aarch64_sve_prfop_array[enum_value])
2767 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
2769 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
/* MOV immediate: width (32/64 bit) is taken from the destination
   register's qualifier (operand 0); a decimal copy is appended in a
   comment for readability.  */
2772 case AARCH64_OPND_IMM_MOV:
2773 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2775 case 4: /* e.g. MOV Wd, #<imm32>. */
2777 int imm32 = opnd->imm.value;
2778 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2781 case 8: /* e.g. MOV Xd, #<imm64>. */
2782 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2783 opnd->imm.value, opnd->imm.value);
2785 default: assert (0);
2789 case AARCH64_OPND_FPIMM0:
2790 snprintf (buf, size, "#0.0");
/* Hex immediates with an optional "lsl #<n>" suffix.  */
2793 case AARCH64_OPND_LIMM:
2794 case AARCH64_OPND_AIMM:
2795 case AARCH64_OPND_HALF:
2796 if (opnd->shifter.amount)
2797 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2798 opnd->shifter.amount);
2800 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2803 case AARCH64_OPND_SIMD_IMM:
2804 case AARCH64_OPND_SIMD_IMM_SFT:
2805 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2806 || opnd->shifter.kind == AARCH64_MOD_NONE)
2807 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2809 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2810 aarch64_operand_modifiers[opnd->shifter.kind].name,
2811 opnd->shifter.amount);
/* FP immediates: expand the 8-bit encoded form to a float/double of
   the element size given by operand 0's qualifier, then print it.  */
2814 case AARCH64_OPND_FPIMM:
2815 case AARCH64_OPND_SIMD_FPIMM:
2816 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2818 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2821 c.i = expand_fp_imm (2, opnd->imm.value);
2822 snprintf (buf, size, "#%.18e", c.f);
2825 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2828 c.i = expand_fp_imm (4, opnd->imm.value);
2829 snprintf (buf, size, "#%.18e", c.f);
2832 case 8: /* e.g. FMOV <Sd>, #<imm>. */
2835 c.i = expand_fp_imm (8, opnd->imm.value);
2836 snprintf (buf, size, "#%.18e", c.d);
2839 default: assert (0);
2843 case AARCH64_OPND_CCMP_IMM:
2844 case AARCH64_OPND_NZCV:
2845 case AARCH64_OPND_EXCEPTION:
2846 case AARCH64_OPND_UIMM4:
2847 case AARCH64_OPND_UIMM7:
2848 if (optional_operand_p (opcode, idx) == TRUE
2849 && (opnd->imm.value ==
2850 (int64_t) get_optional_operand_default_value (opcode)))
2851 /* Omit the operand, e.g. DCPS1. */
2853 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2856 case AARCH64_OPND_COND:
2857 case AARCH64_OPND_COND1:
2858 snprintf (buf, size, "%s", opnd->cond->names[0]);
/* ADRP: the target is the PC's 4K page plus the (page-scaled)
   immediate.  */
2861 case AARCH64_OPND_ADDR_ADRP:
2862 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2868 /* This is not necessary during the disassembling, as print_address_func
2869 in the disassemble_info will take care of the printing. But some
2870 other callers may be still interested in getting the string in *STR,
2871 so here we do snprintf regardless. */
2872 snprintf (buf, size, "#0x%" PRIx64, addr);
2875 case AARCH64_OPND_ADDR_PCREL14:
2876 case AARCH64_OPND_ADDR_PCREL19:
2877 case AARCH64_OPND_ADDR_PCREL21:
2878 case AARCH64_OPND_ADDR_PCREL26:
2879 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2884 /* This is not necessary during the disassembling, as print_address_func
2885 in the disassemble_info will take care of the printing. But some
2886 other callers may be still interested in getting the string in *STR,
2887 so here we do snprintf regardless. */
2888 snprintf (buf, size, "#0x%" PRIx64, addr);
/* Simple [<Xn|SP>] addressing, with post-index for AdvSIMD ld/st
   multiple structures.  */
2891 case AARCH64_OPND_ADDR_SIMPLE:
2892 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2893 case AARCH64_OPND_SIMD_ADDR_POST:
2894 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2895 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2897 if (opnd->addr.offset.is_reg)
2898 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2900 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2903 snprintf (buf, size, "[%s]", name);
2906 case AARCH64_OPND_ADDR_REGOFF:
2907 print_register_offset_address
2908 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2909 get_offset_int_reg_name (opnd));
2912 case AARCH64_OPND_ADDR_SIMM7:
2913 case AARCH64_OPND_ADDR_SIMM9:
2914 case AARCH64_OPND_ADDR_SIMM9_2:
2915 print_immediate_offset_address
2916 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
2919 case AARCH64_OPND_ADDR_UIMM12:
2920 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2921 if (opnd->addr.offset.imm)
2922 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2924 snprintf (buf, size, "[%s]", name);
/* System registers: linear search of the name table, skipping
   deprecated aliases; unnamed (implementation defined) encodings fall
   back to the generic s<op0>_<op1>_c<CRn>_c<CRm>_<op2> form.  */
2927 case AARCH64_OPND_SYSREG:
2928 for (i = 0; aarch64_sys_regs[i].name; ++i)
2929 if (aarch64_sys_regs[i].value == opnd->sysreg
2930 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
2932 if (aarch64_sys_regs[i].name)
2933 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2936 /* Implementation defined system register. */
2937 unsigned int value = opnd->sysreg;
2938 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2939 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2944 case AARCH64_OPND_PSTATEFIELD:
2945 for (i = 0; aarch64_pstatefields[i].name; ++i)
2946 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2948 assert (aarch64_pstatefields[i].name);
2949 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2952 case AARCH64_OPND_SYSREG_AT:
2953 case AARCH64_OPND_SYSREG_DC:
2954 case AARCH64_OPND_SYSREG_IC:
2955 case AARCH64_OPND_SYSREG_TLBI:
2956 snprintf (buf, size, "%s", opnd->sysins_op->name);
2959 case AARCH64_OPND_BARRIER:
2960 snprintf (buf, size, "%s", opnd->barrier->name);
2963 case AARCH64_OPND_BARRIER_ISB:
2964 /* Operand can be omitted, e.g. in DCPS1. */
2965 if (! optional_operand_p (opcode, idx)
2966 || (opnd->barrier->value
2967 != get_optional_operand_default_value (opcode)))
2968 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2971 case AARCH64_OPND_PRFOP:
2972 if (opnd->prfop->name != NULL)
2973 snprintf (buf, size, "%s", opnd->prfop->name)
2975 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2978 case AARCH64_OPND_BARRIER_PSB:
2979 snprintf (buf, size, "%s", opnd->hint_option->name);
/* Pack a system-register encoding (op0,op1,CRn,CRm,op2) into a single
   integer value, matching the layout used in the MRS/MSR instruction
   fields.  */
2987 #define CPENC(op0,op1,crn,crm,op2) \
2988 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2989 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2990 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2991 /* for 3.9.10 System Instructions */
2992 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
/* Flag bits used in the `flags' member of aarch64_sys_reg entries.  */
3014 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3019 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3024 #define F_HASXT 0x4 /* System instruction register <Xt>
3028 /* TODO there are two more issues need to be resolved
3029 1. handle read-only and write-only system registers
3030 2. handle cpu-implementation-defined system registers. */
/* Table of AArch64 system-register names.  Each entry gives the
   assembler name, the packed (op0,op1,CRn,CRm,op2) encoding built with
   CPENC/CPEN_, and flag bits (F_DEPRECATED / F_ARCHEXT).  The table is
   terminated by a null-name sentinel entry.  */
3031 const aarch64_sys_reg aarch64_sys_regs [] =
3033 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3034 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3035 { "elr_el1", CPEN_(0,C0,1), 0 },
3036 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3037 { "sp_el0", CPEN_(0,C1,0), 0 },
3038 { "spsel", CPEN_(0,C2,0), 0 },
3039 { "daif", CPEN_(3,C2,1), 0 },
3040 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3041 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3042 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3043 { "nzcv", CPEN_(3,C2,0), 0 },
3044 { "fpcr", CPEN_(3,C4,0), 0 },
3045 { "fpsr", CPEN_(3,C4,1), 0 },
3046 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3047 { "dlr_el0", CPEN_(3,C5,1), 0 },
3048 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3049 { "elr_el2", CPEN_(4,C0,1), 0 },
3050 { "sp_el1", CPEN_(4,C1,0), 0 },
3051 { "spsr_irq", CPEN_(4,C3,0), 0 },
3052 { "spsr_abt", CPEN_(4,C3,1), 0 },
3053 { "spsr_und", CPEN_(4,C3,2), 0 },
3054 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3055 { "spsr_el3", CPEN_(6,C0,0), 0 },
3056 { "elr_el3", CPEN_(6,C0,1), 0 },
3057 { "sp_el2", CPEN_(6,C1,0), 0 },
3058 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3059 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
/* Identification registers (mostly read-only).  */
3060 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3061 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3062 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3063 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3064 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3065 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3066 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3067 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3068 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3069 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3070 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3071 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3072 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3073 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3074 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3075 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3076 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3077 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3078 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3079 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3080 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3081 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3082 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3083 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3084 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3085 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3086 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3087 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3088 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3089 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3090 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3091 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3092 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3093 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3094 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3095 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3096 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3097 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
/* System control and memory-management registers.  */
3098 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3099 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3100 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3101 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3102 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3103 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3104 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3105 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3106 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3107 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3108 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3109 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3110 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3111 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3112 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3113 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3114 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3115 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3116 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3117 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3118 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3119 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3120 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3121 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3122 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3123 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3124 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3125 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3126 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3127 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3128 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3129 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
/* Fault status / syndrome registers.  */
3130 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3131 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3132 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3133 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3134 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3135 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3136 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3137 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3138 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3139 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3140 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3141 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3142 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3143 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3144 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3145 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3146 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3147 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3148 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3149 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3150 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3151 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3152 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3153 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3154 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3155 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3156 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3157 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3158 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3159 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3160 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3161 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3162 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3163 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3164 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3165 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3166 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3167 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3168 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3169 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3170 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3171 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3172 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3173 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3174 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3175 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3176 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3177 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3178 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3179 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3180 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3181 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3182 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3183 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3184 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3185 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3186 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3187 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
/* Generic timer registers.  */
3188 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3189 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3190 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3191 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3192 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3193 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3194 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3195 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3196 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3197 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3198 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3199 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3200 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3201 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3202 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3203 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3204 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3205 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3206 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3207 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3208 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3209 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3210 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3211 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3212 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3213 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3214 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3215 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3216 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3217 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3218 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3219 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
/* Debug registers (op0 == 2).  */
3220 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3221 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3222 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3223 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3224 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3225 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3226 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3227 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3228 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3229 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3230 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3231 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3232 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3233 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3234 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3235 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3236 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3237 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3238 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3239 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3240 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3241 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3242 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3243 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3244 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3245 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3246 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3247 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3248 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3249 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3250 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3251 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3252 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3253 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3254 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3255 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3256 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3257 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3258 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3259 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3260 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3261 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3262 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3263 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3264 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3265 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3266 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3267 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3268 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3269 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3270 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3271 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3272 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3273 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3274 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3275 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3276 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3277 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3278 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3279 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3280 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3281 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3282 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3283 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3284 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3285 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3286 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3287 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3288 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3289 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3290 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3291 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3292 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3293 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3294 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3295 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3296 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3297 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3298 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3299 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3300 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3301 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
/* Statistical profiling registers (F_ARCHEXT).  */
3302 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3303 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3304 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3305 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3306 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3307 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3308 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3309 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3310 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3311 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3312 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3313 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3314 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
/* Performance monitor registers.  */
3315 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3316 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3317 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3318 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3319 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3320 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3321 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3322 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3323 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3324 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3325 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3326 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3327 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3328 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3329 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3330 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3331 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3332 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3333 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3334 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3335 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3336 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3337 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3338 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3339 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3340 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3341 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3342 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3343 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3344 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3345 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3346 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3347 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3348 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3349 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3350 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3351 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3352 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3353 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3354 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3355 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3356 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3357 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3358 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3359 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3360 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3361 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3362 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3363 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3364 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3365 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3366 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3367 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3368 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3369 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3370 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3371 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3372 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3373 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3374 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3375 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3376 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3377 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3378 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3379 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3380 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3381 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3382 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3383 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3384 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3385 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3386 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3387 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3388 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3389 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3390 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3391 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3392 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
/* Null-name sentinel terminating the table; the lookup loops in this
   file stop on it.  */
3393 { 0, CPENC(0,0,0,0,0), 0 },
/* Return non-zero if the system register entry REG is marked deprecated
   (F_DEPRECATED) in aarch64_sys_regs; such aliases are skipped when
   choosing a name to print for disassembly.  */
3397 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3399 return (reg->flags & F_DEPRECATED) != 0;
3403 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3404 const aarch64_sys_reg *reg)
3406 if (!(reg->flags & F_ARCHEXT))
3409 /* PAN. Values are from aarch64_sys_regs. */
3410 if (reg->value == CPEN_(0,C2,3)
3411 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3414 /* Virtualization host extensions: system registers. */
3415 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3416 || reg->value == CPENC (3, 4, C13, C0, 1)
3417 || reg->value == CPENC (3, 4, C14, C3, 0)
3418 || reg->value == CPENC (3, 4, C14, C3, 1)
3419 || reg->value == CPENC (3, 4, C14, C3, 2))
3420 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3423 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3424 if ((reg->value == CPEN_ (5, C0, 0)
3425 || reg->value == CPEN_ (5, C0, 1)
3426 || reg->value == CPENC (3, 5, C1, C0, 0)
3427 || reg->value == CPENC (3, 5, C1, C0, 2)
3428 || reg->value == CPENC (3, 5, C2, C0, 0)
3429 || reg->value == CPENC (3, 5, C2, C0, 1)
3430 || reg->value == CPENC (3, 5, C2, C0, 2)
3431 || reg->value == CPENC (3, 5, C5, C1, 0)
3432 || reg->value == CPENC (3, 5, C5, C1, 1)
3433 || reg->value == CPENC (3, 5, C5, C2, 0)
3434 || reg->value == CPENC (3, 5, C6, C0, 0)
3435 || reg->value == CPENC (3, 5, C10, C2, 0)
3436 || reg->value == CPENC (3, 5, C10, C3, 0)
3437 || reg->value == CPENC (3, 5, C12, C0, 0)
3438 || reg->value == CPENC (3, 5, C13, C0, 1)
3439 || reg->value == CPENC (3, 5, C14, C1, 0))
3440 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3443 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3444 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3445 || reg->value == CPENC (3, 5, C14, C2, 1)
3446 || reg->value == CPENC (3, 5, C14, C2, 2)
3447 || reg->value == CPENC (3, 5, C14, C3, 0)
3448 || reg->value == CPENC (3, 5, C14, C3, 1)
3449 || reg->value == CPENC (3, 5, C14, C3, 2))
3450 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3453 /* ARMv8.2 features. */
3455 /* ID_AA64MMFR2_EL1. */
3456 if (reg->value == CPENC (3, 0, C0, C7, 2)
3457 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3461 if (reg->value == CPEN_ (0, C2, 4)
3462 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3465 /* RAS extension. */
3467 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3468 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3469 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3470 || reg->value == CPENC (3, 0, C5, C3, 1)
3471 || reg->value == CPENC (3, 0, C5, C3, 2)
3472 || reg->value == CPENC (3, 0, C5, C3, 3)
3473 || reg->value == CPENC (3, 0, C5, C4, 0)
3474 || reg->value == CPENC (3, 0, C5, C4, 1)
3475 || reg->value == CPENC (3, 0, C5, C4, 2)
3476 || reg->value == CPENC (3, 0, C5, C4, 3)
3477 || reg->value == CPENC (3, 0, C5, C5, 0)
3478 || reg->value == CPENC (3, 0, C5, C5, 1))
3479 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3482 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3483 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3484 || reg->value == CPENC (3, 0, C12, C1, 1)
3485 || reg->value == CPENC (3, 4, C12, C1, 1))
3486 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3489 /* Statistical Profiling extension. */
3490 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3491 || reg->value == CPENC (3, 0, C9, C10, 1)
3492 || reg->value == CPENC (3, 0, C9, C10, 3)
3493 || reg->value == CPENC (3, 0, C9, C10, 7)
3494 || reg->value == CPENC (3, 0, C9, C9, 0)
3495 || reg->value == CPENC (3, 0, C9, C9, 2)
3496 || reg->value == CPENC (3, 0, C9, C9, 3)
3497 || reg->value == CPENC (3, 0, C9, C9, 4)
3498 || reg->value == CPENC (3, 0, C9, C9, 5)
3499 || reg->value == CPENC (3, 0, C9, C9, 6)
3500 || reg->value == CPENC (3, 0, C9, C9, 7)
3501 || reg->value == CPENC (3, 4, C9, C9, 0)
3502 || reg->value == CPENC (3, 5, C9, C9, 0))
3503 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3509 const aarch64_sys_reg aarch64_pstatefields [] =
3511 { "spsel", 0x05, 0 },
3512 { "daifset", 0x1e, 0 },
3513 { "daifclr", 0x1f, 0 },
3514 { "pan", 0x04, F_ARCHEXT },
3515 { "uao", 0x03, F_ARCHEXT },
3516 { 0, CPENC(0,0,0,0,0), 0 },
3520 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3521 const aarch64_sys_reg *reg)
3523 if (!(reg->flags & F_ARCHEXT))
3526 /* PAN. Values are from aarch64_pstatefields. */
3527 if (reg->value == 0x04
3528 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3531 /* UAO. Values are from aarch64_pstatefields. */
3532 if (reg->value == 0x03
3533 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3539 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3541 { "ialluis", CPENS(0,C7,C1,0), 0 },
3542 { "iallu", CPENS(0,C7,C5,0), 0 },
3543 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
3544 { 0, CPENS(0,0,0,0), 0 }
3547 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3549 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3550 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3551 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3552 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3553 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3554 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
3555 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
3556 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3557 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
3558 { 0, CPENS(0,0,0,0), 0 }
3561 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3563 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3564 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3565 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3566 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3567 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3568 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3569 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3570 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3571 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3572 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3573 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3574 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
3575 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
3576 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
3577 { 0, CPENS(0,0,0,0), 0 }
3580 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3582 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3583 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3584 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3585 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3586 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3587 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3588 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3589 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3590 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3591 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3592 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3593 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3594 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3595 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3596 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3597 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3598 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3599 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3600 { "alle2", CPENS(4,C8,C7,0), 0 },
3601 { "alle2is", CPENS(4,C8,C3,0), 0 },
3602 { "alle1", CPENS(4,C8,C7,4), 0 },
3603 { "alle1is", CPENS(4,C8,C3,4), 0 },
3604 { "alle3", CPENS(6,C8,C7,0), 0 },
3605 { "alle3is", CPENS(6,C8,C3,0), 0 },
3606 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3607 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3608 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3609 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3610 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3611 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3612 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3613 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3614 { 0, CPENS(0,0,0,0), 0 }
3618 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3620 return (sys_ins_reg->flags & F_HASXT) != 0;
3624 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3625 const aarch64_sys_ins_reg *reg)
3627 if (!(reg->flags & F_ARCHEXT))
3630 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3631 if (reg->value == CPENS (3, C7, C12, 1)
3632 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3635 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3636 if ((reg->value == CPENS (0, C7, C9, 0)
3637 || reg->value == CPENS (0, C7, C9, 1))
3638 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
/* Extract bit BT of instruction word INSN (result is 0 or 1).  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the (HI - LO + 1)-bit field INSN[HI:LO], right-justified.
   The mask is built with an unsigned constant so that a field as wide
   as 31 bits does not left-shift into the sign bit of a signed int,
   which would be undefined behaviour.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
/* Operand verifier for LDPSW: extract the two transfer registers
   Rt (bits [4:0]) and Rt2 (bits [14:10]) and the base register Rn
   (bits [9:5]) from the instruction word INSN and check their
   combination.  NOTE(review): presumably this rejects the register
   overlaps the ARM ARM lists as CONSTRAINED UNPREDICTABLE for LDPSW —
   confirm against the enclosing control flow.  */
3665 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3666 	      const aarch64_insn insn)
3668   int t = BITS (insn, 4, 0);	/* Rt.  */
3669   int n = BITS (insn, 9, 5);	/* Rn, the base register.  */
3670   int t2 = BITS (insn, 14, 10);	/* Rt2.  */
3674   /* Write back enabled.  */
  /* With write-back, the base register must not overlap either transfer
     register unless the base is register 31 (SP).  */
3675   if ((t == n || t2 == n) && n != 31)
3689 /* Include the opcode description table as well as the operand description
/* Map a verifier name X onto the verify_X function defined above; the
   generated table in aarch64-tbl.h references verifiers through this
   macro.  */
3691 #define VERIFIER(x) verify_##x
3692 #include "aarch64-tbl.h"