1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand to use to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
62 DP_VECTOR_ACROSS_LANES,
65 static const char significant_operand_index [] =
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the calculated result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
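/* For example, SADDL Vd.8H, Vn.8B, Vm.8B has the qualifier sequence
   { V_8H, V_8B, V_8B }; get_data_pattern classifies it as DP_VECTOR_LONG,
   so significant_operand_index selects operand 1 (the first 8B source)
   for the size:Q encoding/decoding.  (Illustrative example.)  */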
137 const aarch64_field fields[] =
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
196 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
197 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
198 { 31, 1 }, /* b5: in the test bit and branch instructions. */
199 { 19, 5 }, /* b40: in the test bit and branch instructions. */
200 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
203 enum aarch64_operand_class
204 aarch64_get_operand_class (enum aarch64_opnd type)
206 return aarch64_operands[type].op_class;
210 aarch64_get_operand_name (enum aarch64_opnd type)
212 return aarch64_operands[type].name;
215 /* Get operand description string.
216 This is usually used for diagnostic purposes. */
218 aarch64_get_operand_desc (enum aarch64_opnd type)
220 return aarch64_operands[type].desc;
223 /* Table of all conditional affixes. */
224 const aarch64_cond aarch64_conds[16] =
229 {{"cc", "lo", "ul"}, 0x3},
245 get_cond_from_value (aarch64_insn value)
248 return &aarch64_conds[(unsigned int) value];
252 get_inverted_cond (const aarch64_cond *cond)
254 return &aarch64_conds[cond->value ^ 0x1];
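/* For example, inverting EQ (0x0) gives NE (0x1) and inverting LT (0xb)
   gives GE (0xa); the AArch64 condition codes are arranged in
   complementary pairs that differ only in bit 0.  */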
257 /* Table describing the operand extension/shifting operators; indexed by
258 enum aarch64_modifier_kind.
260 The value column provides the most common values for encoding modifiers,
261 which enables table-driven encoding/decoding for the modifiers. */
262 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
281 enum aarch64_modifier_kind
282 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284 return desc - aarch64_operand_modifiers;
288 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290 return aarch64_operand_modifiers[kind].value;
293 enum aarch64_modifier_kind
294 aarch64_get_operand_modifier_from_value (aarch64_insn value,
295 bfd_boolean extend_p)
297 if (extend_p == TRUE)
298 return AARCH64_MOD_UXTB + value;
300 return AARCH64_MOD_LSL - value;
304 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
310 static inline bfd_boolean
311 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
317 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
337 /* op -> op: load = 0 instruction = 1 store = 2
338 l -> level: 1-3
339 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
340 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
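/* For example, B(0, 1, 0) is 0x00 ("pldl1keep") and B(2, 3, 1) is
   (2 << 3) | ((3 - 1) << 1) | 1 = 0x15 ("pstl3strm").  */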
341 const struct aarch64_name_value_pair aarch64_prfops[32] =
343 { "pldl1keep", B(0, 1, 0) },
344 { "pldl1strm", B(0, 1, 1) },
345 { "pldl2keep", B(0, 2, 0) },
346 { "pldl2strm", B(0, 2, 1) },
347 { "pldl3keep", B(0, 3, 0) },
348 { "pldl3strm", B(0, 3, 1) },
351 { "plil1keep", B(1, 1, 0) },
352 { "plil1strm", B(1, 1, 1) },
353 { "plil2keep", B(1, 2, 0) },
354 { "plil2strm", B(1, 2, 1) },
355 { "plil3keep", B(1, 3, 0) },
356 { "plil3strm", B(1, 3, 1) },
359 { "pstl1keep", B(2, 1, 0) },
360 { "pstl1strm", B(2, 1, 1) },
361 { "pstl2keep", B(2, 2, 0) },
362 { "pstl2strm", B(2, 2, 1) },
363 { "pstl3keep", B(2, 3, 0) },
364 { "pstl3strm", B(2, 3, 1) },
378 /* Utilities on value constraint. */
381 value_in_range_p (int64_t value, int low, int high)
383 return (value >= low && value <= high) ? 1 : 0;
387 value_aligned_p (int64_t value, int align)
389 return ((value & (align - 1)) == 0) ? 1 : 0;
392 /* Return non-zero if the signed VALUE fits in a field of WIDTH bits;
e.g. for WIDTH == 19 the accepted range is -262144 to 262143. */
394 value_fit_signed_field_p (int64_t value, unsigned width)
397 if (width < sizeof (value) * 8)
399 int64_t lim = (int64_t)1 << (width - 1);
400 if (value >= -lim && value < lim)
406 /* Return non-zero if the unsigned VALUE fits in a field of WIDTH bits. */
408 value_fit_unsigned_field_p (int64_t value, unsigned width)
411 if (width < sizeof (value) * 8)
413 int64_t lim = (int64_t)1 << width;
414 if (value >= 0 && value < lim)
420 /* Return 1 if OPERAND is SP or WSP. */
422 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
424 return ((aarch64_get_operand_class (operand->type)
425 == AARCH64_OPND_CLASS_INT_REG)
426 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
427 && operand->reg.regno == 31);
430 /* Return 1 if OPERAND is XZR or WZR. */
432 aarch64_zero_register_p (const aarch64_opnd_info *operand)
434 return ((aarch64_get_operand_class (operand->type)
435 == AARCH64_OPND_CLASS_INT_REG)
436 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
437 && operand->reg.regno == 31);
440 /* Return true if the operand *OPERAND, which has the operand code
441 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
442 qualified by the qualifier TARGET. */
445 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
446 aarch64_opnd_qualifier_t target)
448 switch (operand->qualifier)
450 case AARCH64_OPND_QLF_W:
451 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
454 case AARCH64_OPND_QLF_X:
455 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
458 case AARCH64_OPND_QLF_WSP:
459 if (target == AARCH64_OPND_QLF_W
460 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
463 case AARCH64_OPND_QLF_SP:
464 if (target == AARCH64_OPND_QLF_X
465 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
475 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
476 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
478 Return NIL if more than one expected qualifier is found. */
480 aarch64_opnd_qualifier_t
481 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
483 const aarch64_opnd_qualifier_t known_qlf,
490 When the known qualifier is NIL, we have to assume that there is only
491 one qualifier sequence in the *QSEQ_LIST and return the corresponding
492 qualifier directly. One scenario is that for instruction
493 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
494 which has only one possible valid qualifier sequence
496 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
497 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
499 Because the qualifier NIL has dual roles in the qualifier sequence:
500 it can mean no qualifier for the operand, or the qualifier sequence is
501 not in use (when all qualifiers in the sequence are NILs), we have to
502 handle this special case here. */
503 if (known_qlf == AARCH64_OPND_NIL)
505 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
506 return qseq_list[0][idx];
509 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
511 if (qseq_list[i][known_idx] == known_qlf)
514 /* More than one sequence is found to have KNOWN_QLF at
516 return AARCH64_OPND_NIL;
521 return qseq_list[saved_i][idx];
524 enum operand_qualifier_kind
532 /* Operand qualifier description. */
533 struct operand_qualifier_data
535 /* The usage of the three data fields depends on the qualifier kind. */
542 enum operand_qualifier_kind kind;
545 /* Indexed by the operand qualifier enumerators. */
546 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
548 {0, 0, 0, "NIL", OQK_NIL},
550 /* Operand variant qualifiers.
552 First three fields: element size, number of elements and common value for encoding. */
554 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
555 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
556 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
557 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
559 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
560 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
561 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
562 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
563 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
565 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
566 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
567 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
568 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
569 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
570 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
571 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
572 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
573 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
575 /* Qualifiers constraining the value range.
577 Lower bound, upper bound, unused. */
579 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
580 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
581 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
582 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
583 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
584 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
586 /* Qualifiers for miscellaneous purposes.
588 unused, unused and unused. */
593 {0, 0, 0, "retrieving", 0},
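/* For example, the "4h" entry above has data0 = 2 (element size in bytes),
   data1 = 4 (number of elements) and data2 = 0x2 (encoding value), so the
   accessors below return 2, 4 and 0x2 for AARCH64_OPND_QLF_V_4H.  */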
596 static inline bfd_boolean
597 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
599 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
603 static inline bfd_boolean
604 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
606 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
611 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
613 return aarch64_opnd_qualifiers[qualifier].desc;
616 /* Given an operand qualifier, return the expected data element size
617 of a qualified operand. */
619 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
621 assert (operand_variant_qualifier_p (qualifier) == TRUE);
622 return aarch64_opnd_qualifiers[qualifier].data0;
626 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
628 assert (operand_variant_qualifier_p (qualifier) == TRUE);
629 return aarch64_opnd_qualifiers[qualifier].data1;
633 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
635 assert (operand_variant_qualifier_p (qualifier) == TRUE);
636 return aarch64_opnd_qualifiers[qualifier].data2;
640 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
642 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
643 return aarch64_opnd_qualifiers[qualifier].data0;
647 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
649 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
650 return aarch64_opnd_qualifiers[qualifier].data1;
655 aarch64_verbose (const char *str, ...)
666 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
671 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
676 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
677 const aarch64_opnd_qualifier_t *qualifier)
680 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
682 aarch64_verbose ("dump_match_qualifiers:");
683 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
684 curr[i] = opnd[i].qualifier;
685 dump_qualifier_sequence (curr);
686 aarch64_verbose ("against");
687 dump_qualifier_sequence (qualifier);
689 #endif /* DEBUG_AARCH64 */
691 /* TODO: improve this; we could keep an extra field at run time to
692 store the number of operands rather than calculating it every time. */
695 aarch64_num_of_operands (const aarch64_opcode *opcode)
698 const enum aarch64_opnd *opnds = opcode->operands;
699 while (opnds[i++] != AARCH64_OPND_NIL)
702 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
706 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
707 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
709 N.B. on entry, it is very likely that only some operands in *INST
710 have had their qualifiers established.
712 If STOP_AT is not -1, the function will only try to match
713 the qualifier sequence for operands before and including the operand
714 of index STOP_AT; and on success *RET will only be filled with the first
715 (STOP_AT+1) qualifiers.
717 A couple of examples of the matching algorithm:
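For instance, the established qualifiers (X, W, NIL) match the candidate
sequence (X, W, NIL); an operand whose qualifier is still NIL matches any
candidate qualifier at that position, with the real constraint checking
deferred to operand_general_constraint_met_p.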
725 Apart from serving the main encoding routine, this can also be called
726 during or after the operand decoding. */
729 aarch64_find_best_match (const aarch64_inst *inst,
730 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
731 int stop_at, aarch64_opnd_qualifier_t *ret)
735 const aarch64_opnd_qualifier_t *qualifiers;
737 num_opnds = aarch64_num_of_operands (inst->opcode);
740 DEBUG_TRACE ("SUCCEED: no operand");
744 if (stop_at < 0 || stop_at >= num_opnds)
745 stop_at = num_opnds - 1;
747 /* For each pattern. */
748 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
751 qualifiers = *qualifiers_list;
753 /* Start as positive. */
756 DEBUG_TRACE ("%d", i);
759 dump_match_qualifiers (inst->operands, qualifiers);
762 /* Most opcodes have far fewer patterns in the list.
763 The first NIL qualifier indicates the end of the list. */
764 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
766 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
772 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
774 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
776 /* Either the operand does not have a qualifier, or the qualifier
777 for the operand needs to be deduced from the qualifier
779 In the latter case, any constraint checking related to
780 the obtained qualifier should be done later in
781 operand_general_constraint_met_p. */
784 else if (*qualifiers != inst->operands[j].qualifier)
786 /* Unless the target qualifier can also qualify the operand
787 (which already has a non-nil qualifier), non-equal
788 qualifiers do not match. */
789 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
798 continue; /* Equal qualifiers are certainly matched. */
801 /* Qualifiers established. */
808 /* Fill the result in *RET. */
810 qualifiers = *qualifiers_list;
812 DEBUG_TRACE ("complete qualifiers using list %d", i);
815 dump_qualifier_sequence (qualifiers);
818 for (j = 0; j <= stop_at; ++j, ++qualifiers)
819 ret[j] = *qualifiers;
820 for (; j < AARCH64_MAX_OPND_NUM; ++j)
821 ret[j] = AARCH64_OPND_QLF_NIL;
823 DEBUG_TRACE ("SUCCESS");
827 DEBUG_TRACE ("FAIL");
831 /* Operand qualifier matching and resolving.
833 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
834 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
836 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
840 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
843 aarch64_opnd_qualifier_seq_t qualifiers;
845 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
848 DEBUG_TRACE ("matching FAIL");
852 /* Update the qualifiers. */
853 if (update_p == TRUE)
854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
856 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
858 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
859 "update %s with %s for operand %d",
860 aarch64_get_qualifier_name (inst->operands[i].qualifier),
861 aarch64_get_qualifier_name (qualifiers[i]), i);
862 inst->operands[i].qualifier = qualifiers[i];
865 DEBUG_TRACE ("matching SUCCESS");
869 /* Return TRUE if VALUE is a wide constant that can be moved into a general
872 IS32 indicates whether VALUE is a 32-bit immediate or not.
873 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
874 amount will be returned in *SHIFT_AMOUNT. */
877 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
881 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
885 /* Allow all zeros or all ones in top 32-bits, so that
886 32-bit constant expressions like ~0x80000000 are
888 uint64_t ext = value;
889 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
890 /* Immediate out of range. */
892 value &= (int64_t) 0xffffffff;
895 /* First, try MOVZ, then MOVN. */
897 if ((value & ((int64_t) 0xffff << 0)) == value)
899 else if ((value & ((int64_t) 0xffff << 16)) == value)
901 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
903 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
908 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
912 if (shift_amount != NULL)
913 *shift_amount = amount;
915 DEBUG_TRACE ("exit TRUE with amount %d", amount);
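/* For illustration: aarch64_wide_constant_p (0x12340000, 1, &amount) returns
   TRUE with *shift_amount == 16 (i.e. MOVZ Wd, #0x1234, LSL #16), whereas
   0x12345678 is rejected because no single 16-bit chunk covers it.  */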
920 /* Build the accepted values for immediate logical SIMD instructions.
922 The standard encodings of the immediate value are:
923      N   imms     immr     SIMD size   R              S
924      1   ssssss   rrrrrr       64      UInt(rrrrrr)   UInt(ssssss)
925      0   0sssss   0rrrrr       32      UInt(rrrrr)    UInt(sssss)
926      0   10ssss   00rrrr       16      UInt(rrrr)     UInt(ssss)
927      0   110sss   000rrr        8      UInt(rrr)      UInt(sss)
928      0   1110ss   0000rr        4      UInt(rr)       UInt(ss)
929      0   11110s   00000r        2      UInt(r)        UInt(s)
930 where the all-ones value of S is reserved.
932 Let's call E the SIMD size.
934 The immediate value is: S+1 bits '1' rotated to the right by R.
936 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
937 (remember S != E - 1). */
939 #define TOTAL_IMM_NB 5334
944 aarch64_insn encoding;
947 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
950 simd_imm_encoding_cmp(const void *i1, const void *i2)
952 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
953 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
955 if (imm1->imm < imm2->imm)
957 if (imm1->imm > imm2->imm)
962 /* immediate bitfield standard encoding
963      imm13<12>   imm13<5:0>   imm13<11:6>   SIMD size   R        S
964          1         ssssss       rrrrrr          64      rrrrrr   ssssss
965          0         0sssss       0rrrrr          32      rrrrr    sssss
966          0         10ssss       00rrrr          16      rrrr     ssss
967          0         110sss       000rrr           8      rrr      sss
968          0         1110ss       0000rr           4      rr       ss
969          0         11110s       00000r           2      r        s   */
971 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
973 return (is64 << 12) | (r << 6) | s;
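/* For example, the 64-bit value 0x00ff00ff00ff00ff is eight consecutive
   1-bits (S = 7) in a 16-bit element rotated right by R = 0; its standard
   encoding is N = 0, imms = 0b100111, immr = 0b000000, i.e.
   encode_immediate_bitfield (0, 0x7 | 0x20, 0) == 0x27.  */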
977 build_immediate_table (void)
979 uint32_t log_e, e, s, r, s_mask;
985 for (log_e = 1; log_e <= 6; log_e++)
987 /* Get element size. */
992 mask = 0xffffffffffffffffull;
998 mask = (1ull << e) - 1;
1000 1 ((1 << 4) - 1) << 2 = 111100
1001 2 ((1 << 3) - 1) << 3 = 111000
1002 3 ((1 << 2) - 1) << 4 = 110000
1003 4 ((1 << 1) - 1) << 5 = 100000
1004 5 ((1 << 0) - 1) << 6 = 000000 */
1005 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1007 for (s = 0; s < e - 1; s++)
1008 for (r = 0; r < e; r++)
1010 /* s+1 consecutive bits to 1 (s < 63) */
1011 imm = (1ull << (s + 1)) - 1;
1012 /* rotate right by r */
1014 imm = (imm >> r) | ((imm << (e - r)) & mask);
1015 /* replicate the constant depending on SIMD size */
1018 case 1: imm = (imm << 2) | imm;
1019 case 2: imm = (imm << 4) | imm;
1020 case 3: imm = (imm << 8) | imm;
1021 case 4: imm = (imm << 16) | imm;
1022 case 5: imm = (imm << 32) | imm;
1026 simd_immediates[nb_imms].imm = imm;
1027 simd_immediates[nb_imms].encoding =
1028 encode_immediate_bitfield(is64, s | s_mask, r);
1032 assert (nb_imms == TOTAL_IMM_NB);
1033 qsort(simd_immediates, nb_imms,
1034 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1037 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1038 be accepted by logical (immediate) instructions
1039 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1041 IS32 indicates whether or not VALUE is a 32-bit immediate.
1042 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1043 VALUE will be returned in *ENCODING. */
1046 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1048 simd_imm_encoding imm_enc;
1049 const simd_imm_encoding *imm_encoding;
1050 static bfd_boolean initialized = FALSE;
1052 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1055 if (initialized == FALSE)
1057 build_immediate_table ();
1063 /* Allow all zeros or all ones in top 32-bits, so that
1064 constant expressions like ~1 are permitted. */
1065 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1067 /* Replicate the 32 lower bits to the 32 upper bits. */
1068 value &= 0xffffffff;
1069 value |= value << 32;
1072 imm_enc.imm = value;
1073 imm_encoding = (const simd_imm_encoding *)
1074 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1075 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1076 if (imm_encoding == NULL)
1078 DEBUG_TRACE ("exit with FALSE");
1081 if (encoding != NULL)
1082 *encoding = imm_encoding->encoding;
1083 DEBUG_TRACE ("exit with TRUE");
1087 /* If 64-bit immediate IMM is in the format of
1088 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1089 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1090 of value "abcdefgh". Otherwise return -1. */
1092 aarch64_shrink_expanded_imm8 (uint64_t imm)
1098 for (i = 0; i < 8; i++)
1100 byte = (imm >> (8 * i)) & 0xff;
1103 else if (byte != 0x00)
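/* For illustration: 0xff00ff0000ff00ff has bytes ff 00 ff 00 00 ff 00 ff
   (a..h), so the shrunk value is 0b10100101 == 0xa5; any byte other than
   0x00 or 0xff makes the function return -1.  */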
1109 /* Utility inline functions for operand_general_constraint_met_p. */
1112 set_error (aarch64_operand_error *mismatch_detail,
1113 enum aarch64_operand_error_kind kind, int idx,
1116 if (mismatch_detail == NULL)
1118 mismatch_detail->kind = kind;
1119 mismatch_detail->index = idx;
1120 mismatch_detail->error = error;
1124 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1125 int idx, int lower_bound, int upper_bound,
1128 if (mismatch_detail == NULL)
1130 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1131 mismatch_detail->data[0] = lower_bound;
1132 mismatch_detail->data[1] = upper_bound;
1136 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1137 int idx, int lower_bound, int upper_bound)
1139 if (mismatch_detail == NULL)
1141 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1142 _("immediate value"));
1146 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1147 int idx, int lower_bound, int upper_bound)
1149 if (mismatch_detail == NULL)
1151 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1152 _("immediate offset"));
1156 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1157 int idx, int lower_bound, int upper_bound)
1159 if (mismatch_detail == NULL)
1161 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1162 _("register number"));
1166 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1167 int idx, int lower_bound, int upper_bound)
1169 if (mismatch_detail == NULL)
1171 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1172 _("register element index"));
1176 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1177 int idx, int lower_bound, int upper_bound)
1179 if (mismatch_detail == NULL)
1181 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1186 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1189 if (mismatch_detail == NULL)
1191 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1192 mismatch_detail->data[0] = alignment;
1196 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1199 if (mismatch_detail == NULL)
1201 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1202 mismatch_detail->data[0] = expected_num;
1206 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1209 if (mismatch_detail == NULL)
1211 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1214 /* General constraint checking based on operand code.
1216 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1217 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1219 This function has to be called after the qualifiers for all operands
1222 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1223 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1224 messages during disassembly, where they are not
1225 wanted. We avoid the dynamic construction of strings of error messages
1226 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1227 use a combination of error code, static string and some integer data to
1228 represent an error. */
1231 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1232 enum aarch64_opnd type,
1233 const aarch64_opcode *opcode,
1234 aarch64_operand_error *mismatch_detail)
1239 const aarch64_opnd_info *opnd = opnds + idx;
1240 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1242 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1244 switch (aarch64_operands[type].op_class)
1246 case AARCH64_OPND_CLASS_INT_REG:
1247 /* <Xt> may be optional in some IC and TLBI instructions. */
1248 if (type == AARCH64_OPND_Rt_SYS)
1250 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1251 == AARCH64_OPND_CLASS_SYSTEM));
1252 if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1254 set_other_error (mismatch_detail, idx, _("extraneous register"));
1257 if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1259 set_other_error (mismatch_detail, idx, _("missing register"));
1265 case AARCH64_OPND_QLF_WSP:
1266 case AARCH64_OPND_QLF_SP:
1267 if (!aarch64_stack_pointer_p (opnd))
1269 set_other_error (mismatch_detail, idx,
1270 _("stack pointer register expected"));
1279 case AARCH64_OPND_CLASS_ADDRESS:
1280 /* Check writeback. */
1281 switch (opcode->iclass)
1285 case ldstnapair_offs:
1288 if (opnd->addr.writeback == 1)
1290 set_other_error (mismatch_detail, idx,
1291 _("unexpected address writeback"));
1296 case ldstpair_indexed:
1299 if (opnd->addr.writeback == 0)
1301 set_other_error (mismatch_detail, idx,
1302 _("address writeback expected"));
1307 assert (opnd->addr.writeback == 0);
1312 case AARCH64_OPND_ADDR_SIMM7:
1313 /* Scaled signed 7-bit immediate offset, e.g. a multiple of 8 in the range [-512, 504] for an X-register LDP/STP. */
1314 /* Get the size of the data element that is accessed, which may be
1315 different from that of the source register size,
1316 e.g. in strb/ldrb. */
1317 size = aarch64_get_qualifier_esize (opnd->qualifier);
1318 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1320 set_offset_out_of_range_error (mismatch_detail, idx,
1321 -64 * size, 63 * size);
1324 if (!value_aligned_p (opnd->addr.offset.imm, size))
1326 set_unaligned_error (mismatch_detail, idx, size);
1330 case AARCH64_OPND_ADDR_SIMM9:
1331 /* Unscaled signed 9-bit immediate offset. */
1332 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1334 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1339 case AARCH64_OPND_ADDR_SIMM9_2:
1340 /* Unscaled signed 9-bit immediate offset, which has to be negative
1342 size = aarch64_get_qualifier_esize (qualifier);
1343 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1344 && !value_aligned_p (opnd->addr.offset.imm, size))
1345 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1347 set_other_error (mismatch_detail, idx,
1348 _("negative or unaligned offset expected"));
1351 case AARCH64_OPND_SIMD_ADDR_POST:
1352 /* AdvSIMD load/store multiple structures, post-index. */
1354 if (opnd->addr.offset.is_reg)
1356 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1360 set_other_error (mismatch_detail, idx,
1361 _("invalid register offset"));
1367 const aarch64_opnd_info *prev = &opnds[idx-1];
1368 unsigned num_bytes; /* total number of bytes transferred. */
1369 /* The opcode dependent area stores the number of elements in
1370 each structure to be loaded/stored. */
1371 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1372 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1373 /* Special handling of loading a single structure to all lanes. */
1374 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1375 * aarch64_get_qualifier_esize (prev->qualifier);
1377 num_bytes = prev->reglist.num_regs
1378 * aarch64_get_qualifier_esize (prev->qualifier)
1379 * aarch64_get_qualifier_nelem (prev->qualifier);
1380 if ((int) num_bytes != opnd->addr.offset.imm)
1382 set_other_error (mismatch_detail, idx,
1383 _("invalid post-increment amount"));
1389 case AARCH64_OPND_ADDR_REGOFF:
1390 /* Get the size of the data element that is accessed, which may be
1391 different from that of the source register size,
1392 e.g. in strb/ldrb. */
1393 size = aarch64_get_qualifier_esize (opnd->qualifier);
1394 /* It is either no shift or shift by the binary logarithm of SIZE. */
1395 if (opnd->shifter.amount != 0
1396 && opnd->shifter.amount != (int)get_logsz (size))
1398 set_other_error (mismatch_detail, idx,
1399 _("invalid shift amount"));
1402 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1404 switch (opnd->shifter.kind)
1406 case AARCH64_MOD_UXTW:
1407 case AARCH64_MOD_LSL:
1408 case AARCH64_MOD_SXTW:
1409 case AARCH64_MOD_SXTX: break;
1411 set_other_error (mismatch_detail, idx,
1412 _("invalid extend/shift operator"));
1417 case AARCH64_OPND_ADDR_UIMM12:
1418 imm = opnd->addr.offset.imm;
1419 /* Get the size of the data element that is accessed, which may be
1420 different from that of the source register size,
1421 e.g. in strb/ldrb. */
1422 size = aarch64_get_qualifier_esize (qualifier);
1423 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1425 set_offset_out_of_range_error (mismatch_detail, idx,
1429 if (!value_aligned_p (opnd->addr.offset.imm, size))
1431 set_unaligned_error (mismatch_detail, idx, size);
1436 case AARCH64_OPND_ADDR_PCREL14:
1437 case AARCH64_OPND_ADDR_PCREL19:
1438 case AARCH64_OPND_ADDR_PCREL21:
1439 case AARCH64_OPND_ADDR_PCREL26:
1440 imm = opnd->imm.value;
1441 if (operand_need_shift_by_two (get_operand_from_code (type)))
1443 /* The offset value in a PC-relative branch instruction is always
1444 4-byte aligned and is encoded without the lowest 2 bits. */
1445 if (!value_aligned_p (imm, 4))
1447 set_unaligned_error (mismatch_detail, idx, 4);
1450 /* Right shift by 2 so that we can carry out the following check
1454 size = get_operand_fields_width (get_operand_from_code (type));
1455 if (!value_fit_signed_field_p (imm, size))
1457 set_other_error (mismatch_detail, idx,
1458 _("immediate out of range"));
1468 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1469 /* The opcode dependent area stores the number of elements in
1470 each structure to be loaded/stored. */
1471 num = get_opcode_dependent_value (opcode);
1474 case AARCH64_OPND_LVt:
1475 assert (num >= 1 && num <= 4);
1476 /* Except for LD1/ST1, the number of registers should be equal to that
1477 of the structure elements. */
1478 if (num != 1 && opnd->reglist.num_regs != num)
1480 set_reg_list_error (mismatch_detail, idx, num);
1484 case AARCH64_OPND_LVt_AL:
1485 case AARCH64_OPND_LEt:
1486 assert (num >= 1 && num <= 4);
1487 /* The number of registers should be equal to that of the structure
1489 if (opnd->reglist.num_regs != num)
1491 set_reg_list_error (mismatch_detail, idx, num);
1500 case AARCH64_OPND_CLASS_IMMEDIATE:
1501 /* Constraint check on immediate operand. */
1502 imm = opnd->imm.value;
1503 /* E.g. imm_0_31 constrains value to be 0..31. */
1504 if (qualifier_value_in_range_constraint_p (qualifier)
1505 && !value_in_range_p (imm, get_lower_bound (qualifier),
1506 get_upper_bound (qualifier)))
1508 set_imm_out_of_range_error (mismatch_detail, idx,
1509 get_lower_bound (qualifier),
1510 get_upper_bound (qualifier));
1516 case AARCH64_OPND_AIMM:
1517 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1519 set_other_error (mismatch_detail, idx,
1520 _("invalid shift operator"));
1523 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1525 set_other_error (mismatch_detail, idx,
1526 _("shift amount expected to be 0 or 12"));
1529 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1531 set_other_error (mismatch_detail, idx,
1532 _("immediate out of range"));
1537 case AARCH64_OPND_HALF:
1538 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1539 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1541 set_other_error (mismatch_detail, idx,
1542 _("invalid shift operator"));
1545 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1546 if (!value_aligned_p (opnd->shifter.amount, 16))
1548 set_other_error (mismatch_detail, idx,
1549 _("shift amount should be a multiple of 16"));
1552 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1554 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1558 if (opnd->imm.value < 0)
1560 set_other_error (mismatch_detail, idx,
1561 _("negative immediate value not allowed"));
1564 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1566 set_other_error (mismatch_detail, idx,
1567 _("immediate out of range"));
1572 case AARCH64_OPND_IMM_MOV:
1574 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1575 imm = opnd->imm.value;
1579 case OP_MOV_IMM_WIDEN:
1581 /* Fall through... */
1582 case OP_MOV_IMM_WIDE:
1583 if (!aarch64_wide_constant_p (imm, is32, NULL))
1585 set_other_error (mismatch_detail, idx,
1586 _("immediate out of range"));
1590 case OP_MOV_IMM_LOG:
1591 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1593 set_other_error (mismatch_detail, idx,
1594 _("immediate out of range"));
1605 case AARCH64_OPND_NZCV:
1606 case AARCH64_OPND_CCMP_IMM:
1607 case AARCH64_OPND_EXCEPTION:
1608 case AARCH64_OPND_UIMM4:
1609 case AARCH64_OPND_UIMM7:
1610 case AARCH64_OPND_UIMM3_OP1:
1611 case AARCH64_OPND_UIMM3_OP2:
1612 size = get_operand_fields_width (get_operand_from_code (type));
1614 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1616 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1622 case AARCH64_OPND_WIDTH:
1623 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1624 && opnds[0].type == AARCH64_OPND_Rd);
1625 size = get_upper_bound (qualifier);
1626 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1627 /* lsb+width <= reg.size */
1629 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1630 size - opnds[idx-1].imm.value);
1635 case AARCH64_OPND_LIMM:
1637 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1638 uint64_t uimm = opnd->imm.value;
1639 if (opcode->op == OP_BIC)
1641 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1643 set_other_error (mismatch_detail, idx,
1644 _("immediate out of range"));
1650 case AARCH64_OPND_IMM0:
1651 case AARCH64_OPND_FPIMM0:
1652 if (opnd->imm.value != 0)
1654 set_other_error (mismatch_detail, idx,
1655 _("immediate zero expected"));
1660 case AARCH64_OPND_SHLL_IMM:
1662 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1663 if (opnd->imm.value != size)
1665 set_other_error (mismatch_detail, idx,
1666 _("invalid shift amount"));
1671 case AARCH64_OPND_IMM_VLSL:
1672 size = aarch64_get_qualifier_esize (qualifier);
1673 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1675 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1681 case AARCH64_OPND_IMM_VLSR:
1682 size = aarch64_get_qualifier_esize (qualifier);
1683 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1685 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1690 case AARCH64_OPND_SIMD_IMM:
1691 case AARCH64_OPND_SIMD_IMM_SFT:
1692 /* Qualifier check. */
1695 case AARCH64_OPND_QLF_LSL:
1696 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1698 set_other_error (mismatch_detail, idx,
1699 _("invalid shift operator"));
1703 case AARCH64_OPND_QLF_MSL:
1704 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid shift operator"));
1711 case AARCH64_OPND_QLF_NIL:
1712 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1714 set_other_error (mismatch_detail, idx,
1715 _("shift is not permitted"));
1723 /* Is the immediate valid? */
1725 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1727 /* uimm8 or simm8 */
1728 if (!value_in_range_p (opnd->imm.value, -128, 255))
1730 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1734 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1737 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1738 ffffffffgggggggghhhhhhhh'. */
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid value for immediate"));
1743 /* Is the shift amount valid? */
1744 switch (opnd->shifter.kind)
1746 case AARCH64_MOD_LSL:
1747 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1748 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1750 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1754 if (!value_aligned_p (opnd->shifter.amount, 8))
1756 set_unaligned_error (mismatch_detail, idx, 8);
1760 case AARCH64_MOD_MSL:
1761 /* Only 8 and 16 are valid shift amounts. */
1762 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1764 set_other_error (mismatch_detail, idx,
1765 _("shift amount expected to be 8 or 16"));
1770 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1772 set_other_error (mismatch_detail, idx,
1773 _("invalid shift operator"));
1780 case AARCH64_OPND_FPIMM:
1781 case AARCH64_OPND_SIMD_FPIMM:
1782 if (opnd->imm.is_fp == 0)
1784 set_other_error (mismatch_detail, idx,
1785 _("floating-point immediate expected"));
1788 /* The value is expected to be an 8-bit floating-point constant with
1789 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1790 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1792 if (!value_in_range_p (opnd->imm.value, 0, 255))
1794 set_other_error (mismatch_detail, idx,
1795 _("immediate out of range"));
1798 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1800 set_other_error (mismatch_detail, idx,
1801 _("invalid shift operator"));
1811 case AARCH64_OPND_CLASS_CP_REG:
1812 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1813 Valid range: C0 - C15. */
1814 if (opnd->reg.regno > 15)
1816 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1821 case AARCH64_OPND_CLASS_SYSTEM:
1824 case AARCH64_OPND_PSTATEFIELD:
1825 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1826 /* MSR SPSel, #uimm4
1827 Uses uimm4 as a control value to select the stack pointer: if
1828 bit 0 is set it selects the current exception level's stack
1829 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1830 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1831 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1833 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1842 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1843 /* Get the upper bound for the element index. */
1844 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1845 /* Index out-of-range. */
1846 if (!value_in_range_p (opnd->reglane.index, 0, num))
1848 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1851 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1852 <Vm> is the vector register (V0-V31) or (V0-V15), whose
1853 number is encoded in "size:M:Rm":
1859 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1860 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1862 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1867 case AARCH64_OPND_CLASS_MODIFIED_REG:
1868 assert (idx == 1 || idx == 2);
1871 case AARCH64_OPND_Rm_EXT:
1872 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1873 && opnd->shifter.kind != AARCH64_MOD_LSL)
1875 set_other_error (mismatch_detail, idx,
1876 _("extend operator expected"));
1879 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1880 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1881 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1883 if (!aarch64_stack_pointer_p (opnds + 0)
1884 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1886 if (!opnd->shifter.operator_present)
1888 set_other_error (mismatch_detail, idx,
1889 _("missing extend operator"));
1892 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1894 set_other_error (mismatch_detail, idx,
1895 _("'LSL' operator not allowed"));
1899 assert (opnd->shifter.operator_present /* Default to LSL. */
1900 || opnd->shifter.kind == AARCH64_MOD_LSL);
1901 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1903 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1906 /* In the 64-bit form, the final register operand is written as Wm
1907 for all but the (possibly omitted) UXTX/LSL and SXTX
1909 N.B. GAS allows an X register to be used with any operator as a
1910 programming convenience. */
1911 if (qualifier == AARCH64_OPND_QLF_X
1912 && opnd->shifter.kind != AARCH64_MOD_LSL
1913 && opnd->shifter.kind != AARCH64_MOD_UXTX
1914 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1916 set_other_error (mismatch_detail, idx, _("W register expected"));
1921 case AARCH64_OPND_Rm_SFT:
1922 /* ROR is not available to the shifted register operand in
1923 arithmetic instructions. */
1924 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1926 set_other_error (mismatch_detail, idx,
1927 _("shift operator expected"));
1930 if (opnd->shifter.kind == AARCH64_MOD_ROR
1931 && opcode->iclass != log_shift)
1933 set_other_error (mismatch_detail, idx,
1934 _("'ROR' operator not allowed"));
1937 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1938 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1940 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1957 /* Main entrypoint for the operand constraint checking.
1959 Return 1 if operands of *INST meet the constraint applied by the operand
1960 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
1961 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1962 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1963 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1964 error kind when it is notified that an instruction does not pass the check).
1966 Un-determined operand qualifiers may get established during the process. */
1969 aarch64_match_operands_constraint (aarch64_inst *inst,
1970 aarch64_operand_error *mismatch_detail)
1974 DEBUG_TRACE ("enter");
1976 /* Match operands' qualifier.
1977 *INST has already had qualifiers established for some, if not all, of
1978 its operands; we need to find out whether these established
1979 qualifiers match one of the qualifier sequence in
1980 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
1981 with the corresponding qualifier in such a sequence.
1982 Only basic operand constraint checking is done here; the more thorough
1983 constraint checking will be carried out by operand_general_constraint_met_p,
1984 which has to be called after this in order to get all of the operands'
1985 qualifiers established. */
1986 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
1988 DEBUG_TRACE ("FAIL on operand qualifier matching");
1989 if (mismatch_detail)
1991 /* Return an error type to indicate that it is the qualifier
1992 matching failure; we don't care about which operand as there
1993 is enough information in the opcode table to reproduce it. */
1994 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
1995 mismatch_detail->index = -1;
1996 mismatch_detail->error = NULL;
2001 /* Match operands' constraint. */
2002 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2004 enum aarch64_opnd type = inst->opcode->operands[i];
2005 if (type == AARCH64_OPND_NIL)
2007 if (inst->operands[i].skip)
2009 DEBUG_TRACE ("skip the incomplete operand %d", i);
2012 if (operand_general_constraint_met_p (inst->operands, i, type,
2013 inst->opcode, mismatch_detail) == 0)
2015 DEBUG_TRACE ("FAIL on operand %d", i);
2020 DEBUG_TRACE ("PASS");
2025 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2026 Also updates the TYPE of each INST->OPERANDS with the corresponding
2027 value of OPCODE->OPERANDS.
2029 Note that some operand qualifiers may need to be manually cleared by
2030 the caller before it further calls aarch64_opcode_encode; doing
2031 this helps the qualifier matching facilities work
2034 const aarch64_opcode*
2035 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2038 const aarch64_opcode *old = inst->opcode;
2040 inst->opcode = opcode;
2042 /* Update the operand types. */
2043 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2045 inst->operands[i].type = opcode->operands[i];
2046 if (opcode->operands[i] == AARCH64_OPND_NIL)
2050 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2056 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2059 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2060 if (operands[i] == operand)
2062 else if (operands[i] == AARCH64_OPND_NIL)
2067 /* [0][0] 32-bit integer regs with sp Wn
2068 [0][1] 64-bit integer regs with sp Xn sf=1
2069 [1][0] 32-bit integer regs with #0 Wn
2070 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2071 static const char *int_reg[2][2][32] = {
2074 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2075 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2076 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2077 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2078 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2079 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2080 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2081 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2082 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2083 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2084 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2085 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2086 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2087 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2088 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2089 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2094 /* Return the integer register name.
2095 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2097 static inline const char *
2098 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2100 const int has_zr = sp_reg_p ? 0 : 1;
2101 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2102 return int_reg[has_zr][is_64][regno];
2105 /* Like get_int_reg_name, but IS_64 is always 1. */
2107 static inline const char *
2108 get_64bit_int_reg_name (int regno, int sp_reg_p)
2110 const int has_zr = sp_reg_p ? 0 : 1;
2111 return int_reg[has_zr][1][regno];
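/* For example (assuming R32/R64 expand to "w"/"x"):
   get_int_reg_name (31, AARCH64_OPND_QLF_W, 1) yields "wsp", while
   get_int_reg_name (31, AARCH64_OPND_QLF_X, 0) yields "xzr".  */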
2114 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2128 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2129 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2130 (depending on the type of the instruction). IMM8 will be expanded to a
2131 single-precision floating-point value (IS_DP == 0) or a double-precision
2132 floating-point value (IS_DP == 1). The expanded value is returned. */
2135 expand_fp_imm (int is_dp, uint32_t imm8)
2138 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2140 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2141 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2142 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2143 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2144 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2147 imm = (imm8_7 << (63-32)) /* imm8<7> */
2148 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2149 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2150 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2151 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2156 imm = (imm8_7 << 31) /* imm8<7> */
2157 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2158 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2159 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
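/* For example, imm8 == 0x70 (imm8<7> == 0, imm8<6> == 1, imm8<6:0> == 0x70)
   expands to 0x3f800000 in the single-precision case, i.e. 1.0f.  */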
2165 /* Produce the string representation of the register list operand *OPND
2166 in the buffer pointed by BUF of size SIZE. */
2168 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2170 const int num_regs = opnd->reglist.num_regs;
2171 const int first_reg = opnd->reglist.first_regno;
2172 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2173 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2174 char tb[8]; /* Temporary buffer. */
2176 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2177 assert (num_regs >= 1 && num_regs <= 4);
2179 /* Prepare the index if any. */
2180 if (opnd->reglist.has_index)
2181 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2185 /* The hyphenated form is preferred for disassembly if there are
2186 more than two registers in the list, and the register numbers
2187 are monotonically increasing in increments of one. */
2188 if (num_regs > 2 && last_reg > first_reg)
2189 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2190 last_reg, qlf_name, tb);
2193 const int reg0 = first_reg;
2194 const int reg1 = (first_reg + 1) & 0x1f;
2195 const int reg2 = (first_reg + 2) & 0x1f;
2196 const int reg3 = (first_reg + 3) & 0x1f;
2201 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2204 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2205 reg1, qlf_name, tb);
2208 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2209 reg1, qlf_name, reg2, qlf_name, tb);
2212 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2213 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2214 reg3, qlf_name, tb);
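/* For example, a four-register list starting at v30 wraps around to
   {v30.8b, v31.8b, v0.8b, v1.8b}; last_reg (1) is not greater than
   first_reg (30), so the comma-separated form is used instead of the
   hyphenated one.  */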
2220 /* Produce the string representation of the register offset address operand
2221 *OPND in the buffer pointed by BUF of size SIZE. */
2223 print_register_offset_address (char *buf, size_t size,
2224 const aarch64_opnd_info *opnd)
2226 const size_t tblen = 16;
2227 char tb[tblen]; /* Temporary buffer. */
2228 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2229 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2230 bfd_boolean print_extend_p = TRUE;
2231 bfd_boolean print_amount_p = TRUE;
2232 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2234 switch (opnd->shifter.kind)
2236 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2237 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2238 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2239 case AARCH64_MOD_SXTX: break;
2240 default: assert (0);
2243 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2244 || !opnd->shifter.amount_present))
2246 /* Don't print the shift/extend amount when it is zero and this is
2247 not the special case of an 8-bit load/store instruction. */
2248 print_amount_p = FALSE;
2249 /* Likewise, no need to print the shift operator LSL in such a
2250 situation. */
2251 if (lsl_p)
2252 print_extend_p = FALSE;
2255 /* Prepare for the extend/shift. */
2259 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2261 snprintf (tb, tblen, ",%s", shift_name);
2266 snprintf (buf, size, "[%s,%c%d%s]",
2267 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2268 wm_p ? 'w' : 'x', opnd->addr.offset.regno, tb);
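/* Example outputs (illustrative register numbers and modifiers):

     base x2, offset w3, UXTW, amount 2      -> "[x2,w3,uxtw #2]"
     base x2, offset x3, LSL,  amount absent -> "[x2,x3]"
     base sp, offset x1, SXTX, amount absent -> "[sp,x1,sxtx]"

   The modifier names come from aarch64_operand_modifiers[].  */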
2271 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2272 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2273 PC, PCREL_P and ADDRESS are used to pass in and return information about
2274 the PC-relative address calculation, where the PC value is passed in
2275 PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
2276 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2277 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2279 The function serves both the disassembler and the assembler's diagnostics
2280 issuer, which is why it lives in this file. */
2283 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2284 const aarch64_opcode *opcode,
2285 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2289 const char *name = NULL;
2290 const aarch64_opnd_info *opnd = opnds + idx;
2291 enum aarch64_modifier_kind kind;
2300 case AARCH64_OPND_Rd:
2301 case AARCH64_OPND_Rn:
2302 case AARCH64_OPND_Rm:
2303 case AARCH64_OPND_Rt:
2304 case AARCH64_OPND_Rt2:
2305 case AARCH64_OPND_Rs:
2306 case AARCH64_OPND_Ra:
2307 case AARCH64_OPND_Rt_SYS:
2308 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2309 the <ic_op>, therefore we use opnd->present to override the
2310 generic optional-ness information. */
2311 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2313 /* Omit the operand, e.g. RET. */
2314 if (optional_operand_p (opcode, idx)
2315 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2317 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2318 || opnd->qualifier == AARCH64_OPND_QLF_X);
2319 snprintf (buf, size, "%s",
2320 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2323 case AARCH64_OPND_Rd_SP:
2324 case AARCH64_OPND_Rn_SP:
2325 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2326 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2327 || opnd->qualifier == AARCH64_OPND_QLF_X
2328 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2329 snprintf (buf, size, "%s",
2330 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2333 case AARCH64_OPND_Rm_EXT:
2334 kind = opnd->shifter.kind;
2335 assert (idx == 1 || idx == 2);
2336 if ((aarch64_stack_pointer_p (opnds)
2337 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2338 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2339 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2340 && kind == AARCH64_MOD_UXTW)
2341 || (opnd->qualifier == AARCH64_OPND_QLF_X
2342 && kind == AARCH64_MOD_UXTX)))
2344 /* 'LSL' is the preferred form in this case. */
2345 kind = AARCH64_MOD_LSL;
2346 if (opnd->shifter.amount == 0)
2348 /* Shifter omitted. */
2349 snprintf (buf, size, "%s",
2350 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2354 if (opnd->shifter.amount)
2355 snprintf (buf, size, "%s, %s #%d",
2356 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2357 aarch64_operand_modifiers[kind].name,
2358 opnd->shifter.amount);
2360 snprintf (buf, size, "%s, %s",
2361 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2362 aarch64_operand_modifiers[kind].name);
2365 case AARCH64_OPND_Rm_SFT:
2366 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2367 || opnd->qualifier == AARCH64_OPND_QLF_X);
2368 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2369 snprintf (buf, size, "%s",
2370 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2372 snprintf (buf, size, "%s, %s #%d",
2373 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2374 aarch64_operand_modifiers[opnd->shifter.kind].name,
2375 opnd->shifter.amount);
2378 case AARCH64_OPND_Fd:
2379 case AARCH64_OPND_Fn:
2380 case AARCH64_OPND_Fm:
2381 case AARCH64_OPND_Fa:
2382 case AARCH64_OPND_Ft:
2383 case AARCH64_OPND_Ft2:
2384 case AARCH64_OPND_Sd:
2385 case AARCH64_OPND_Sn:
2386 case AARCH64_OPND_Sm:
2387 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2391 case AARCH64_OPND_Vd:
2392 case AARCH64_OPND_Vn:
2393 case AARCH64_OPND_Vm:
2394 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2395 aarch64_get_qualifier_name (opnd->qualifier));
2398 case AARCH64_OPND_Ed:
2399 case AARCH64_OPND_En:
2400 case AARCH64_OPND_Em:
2401 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2402 aarch64_get_qualifier_name (opnd->qualifier),
2403 opnd->reglane.index);
2406 case AARCH64_OPND_VdD1:
2407 case AARCH64_OPND_VnD1:
2408 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2411 case AARCH64_OPND_LVn:
2412 case AARCH64_OPND_LVt:
2413 case AARCH64_OPND_LVt_AL:
2414 case AARCH64_OPND_LEt:
2415 print_register_list (buf, size, opnd);
2418 case AARCH64_OPND_Cn:
2419 case AARCH64_OPND_Cm:
2420 snprintf (buf, size, "C%d", opnd->reg.regno);
2423 case AARCH64_OPND_IDX:
2424 case AARCH64_OPND_IMM:
2425 case AARCH64_OPND_WIDTH:
2426 case AARCH64_OPND_UIMM3_OP1:
2427 case AARCH64_OPND_UIMM3_OP2:
2428 case AARCH64_OPND_BIT_NUM:
2429 case AARCH64_OPND_IMM_VLSL:
2430 case AARCH64_OPND_IMM_VLSR:
2431 case AARCH64_OPND_SHLL_IMM:
2432 case AARCH64_OPND_IMM0:
2433 case AARCH64_OPND_IMMR:
2434 case AARCH64_OPND_IMMS:
2435 case AARCH64_OPND_FBITS:
2436 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2439 case AARCH64_OPND_IMM_MOV:
2440 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2442 case 4: /* e.g. MOV Wd, #<imm32>. */
2444 int imm32 = opnd->imm.value;
2445 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2448 case 8: /* e.g. MOV Xd, #<imm64>. */
2449 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2450 opnd->imm.value, opnd->imm.value);
2452 default: assert (0);
2456 case AARCH64_OPND_FPIMM0:
2457 snprintf (buf, size, "#0.0");
2460 case AARCH64_OPND_LIMM:
2461 case AARCH64_OPND_AIMM:
2462 case AARCH64_OPND_HALF:
2463 if (opnd->shifter.amount)
2464 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2465 opnd->shifter.amount);
2467 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2470 case AARCH64_OPND_SIMD_IMM:
2471 case AARCH64_OPND_SIMD_IMM_SFT:
2472 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2473 || opnd->shifter.kind == AARCH64_MOD_NONE)
2474 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2476 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2477 aarch64_operand_modifiers[opnd->shifter.kind].name,
2478 opnd->shifter.amount);
2481 case AARCH64_OPND_FPIMM:
2482 case AARCH64_OPND_SIMD_FPIMM:
2483 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2485 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2488 c.i = expand_fp_imm (0, opnd->imm.value);
2489 snprintf (buf, size, "#%.18e", c.f);
2492 case 8: /* e.g. FMOV <Sd>, #<imm>. */
2495 c.i = expand_fp_imm (1, opnd->imm.value);
2496 snprintf (buf, size, "#%.18e", c.d);
2499 default: assert (0);
2503 case AARCH64_OPND_CCMP_IMM:
2504 case AARCH64_OPND_NZCV:
2505 case AARCH64_OPND_EXCEPTION:
2506 case AARCH64_OPND_UIMM4:
2507 case AARCH64_OPND_UIMM7:
2508 if (optional_operand_p (opcode, idx) == TRUE
2509 && (opnd->imm.value ==
2510 (int64_t) get_optional_operand_default_value (opcode)))
2511 /* Omit the operand, e.g. DCPS1. */
2513 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2516 case AARCH64_OPND_COND:
2517 snprintf (buf, size, "%s", opnd->cond->names[0]);
2520 case AARCH64_OPND_ADDR_ADRP:
2521 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2522 + opnd->imm.value;
2527 /* This is not necessary during disassembly, as print_address_func
2528 in the disassemble_info will take care of the printing. But some
2529 other callers may still be interested in getting the string in *BUF,
2530 so here we do snprintf regardless. */
2531 snprintf (buf, size, "#0x%" PRIx64, addr);
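      /* Illustrative arithmetic, assuming AARCH64_PCREL_OFFSET is 0 for A64:
	 a PC of 0x400204 and an immediate of 0x1000 give
	 ((0x400204 + 0) & ~(uint64_t) 0xfff) + 0x1000 = 0x401000.  */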
2534 case AARCH64_OPND_ADDR_PCREL14:
2535 case AARCH64_OPND_ADDR_PCREL19:
2536 case AARCH64_OPND_ADDR_PCREL21:
2537 case AARCH64_OPND_ADDR_PCREL26:
2538 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2543 /* This is not necessary during disassembly, as print_address_func
2544 in the disassemble_info will take care of the printing. But some
2545 other callers may still be interested in getting the string in *BUF,
2546 so here we do snprintf regardless. */
2547 snprintf (buf, size, "#0x%" PRIx64, addr);
2550 case AARCH64_OPND_ADDR_SIMPLE:
2551 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2552 case AARCH64_OPND_SIMD_ADDR_POST:
2553 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2554 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2556 if (opnd->addr.offset.is_reg)
2557 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2559 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2562 snprintf (buf, size, "[%s]", name);
2565 case AARCH64_OPND_ADDR_REGOFF:
2566 print_register_offset_address (buf, size, opnd);
2569 case AARCH64_OPND_ADDR_SIMM7:
2570 case AARCH64_OPND_ADDR_SIMM9:
2571 case AARCH64_OPND_ADDR_SIMM9_2:
2572 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2573 if (opnd->addr.writeback)
2575 if (opnd->addr.preind)
2576 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2578 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2582 if (opnd->addr.offset.imm)
2583 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2585 snprintf (buf, size, "[%s]", name);
2589 case AARCH64_OPND_ADDR_UIMM12:
2590 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2591 if (opnd->addr.offset.imm)
2592 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2594 snprintf (buf, size, "[%s]", name);
2597 case AARCH64_OPND_SYSREG:
2598 for (i = 0; aarch64_sys_regs[i].name; ++i)
2599 if (aarch64_sys_regs[i].value == opnd->sysreg)
2601 if (aarch64_sys_regs[i].name)
2602 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2605 /* Implementation defined system register. */
2606 unsigned int value = opnd->sysreg;
2607 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2608 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2609 value & 0x7);
2613 case AARCH64_OPND_PSTATEFIELD:
2614 for (i = 0; aarch64_pstatefields[i].name; ++i)
2615 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2617 assert (aarch64_pstatefields[i].name);
2618 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2621 case AARCH64_OPND_SYSREG_AT:
2622 case AARCH64_OPND_SYSREG_DC:
2623 case AARCH64_OPND_SYSREG_IC:
2624 case AARCH64_OPND_SYSREG_TLBI:
2625 snprintf (buf, size, "%s", opnd->sysins_op->template);
2628 case AARCH64_OPND_BARRIER:
2629 snprintf (buf, size, "%s", opnd->barrier->name);
2632 case AARCH64_OPND_BARRIER_ISB:
2633 /* Operand can be omitted, e.g. in ISB, where it defaults to SY. */
2634 if (! optional_operand_p (opcode, idx)
2635 || (opnd->barrier->value
2636 != get_optional_operand_default_value (opcode)))
2637 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2640 case AARCH64_OPND_PRFOP:
2641 if (opnd->prfop->name != NULL)
2642 snprintf (buf, size, "%s", opnd->prfop->name);
2644 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2652 #define CPENC(op0,op1,crn,crm,op2) \
2653 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2654 /* For 3.9.3 Instructions for Accessing Special Purpose Registers.  */
2655 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2656 /* For 3.9.10 System Instructions.  */
2657 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
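/* Worked example (for illustration): NZCV has op0=3, op1=3, CRn=4, CRm=2,
   op2=0, so with the Cn macros expanding to their register numbers

     CPEN_ (3, C2, 0) == CPENC (3, 3, C4, C2, 0)
       == ((3 << 19) | (3 << 16) | (4 << 12) | (2 << 8) | (0 << 5)) >> 5
       == 0xda10

   i.e. the op0:op1:CRn:CRm:op2 value that the AARCH64_OPND_SYSREG printing
   code above compares against opnd->sysreg.  */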
2676 /* TODO: there are two more issues that need to be resolved:
2677 1. handle read-only and write-only system registers
2678 2. handle cpu-implementation-defined system registers. */
2679 const struct aarch64_name_value_pair aarch64_sys_regs [] =
2681 { "spsr_el1", CPEN_(0,C0,0) }, /* = spsr_svc */
2682 { "elr_el1", CPEN_(0,C0,1) },
2683 { "sp_el0", CPEN_(0,C1,0) },
2684 { "spsel", CPEN_(0,C2,0) },
2685 { "daif", CPEN_(3,C2,1) },
2686 { "currentel", CPEN_(0,C2,2) }, /* RO */
2687 { "nzcv", CPEN_(3,C2,0) },
2688 { "fpcr", CPEN_(3,C4,0) },
2689 { "fpsr", CPEN_(3,C4,1) },
2690 { "dspsr_el0", CPEN_(3,C5,0) },
2691 { "dlr_el0", CPEN_(3,C5,1) },
2692 { "spsr_el2", CPEN_(4,C0,0) }, /* = spsr_hyp */
2693 { "elr_el2", CPEN_(4,C0,1) },
2694 { "sp_el1", CPEN_(4,C1,0) },
2695 { "spsr_irq", CPEN_(4,C3,0) },
2696 { "spsr_abt", CPEN_(4,C3,1) },
2697 { "spsr_und", CPEN_(4,C3,2) },
2698 { "spsr_fiq", CPEN_(4,C3,3) },
2699 { "spsr_el3", CPEN_(6,C0,0) },
2700 { "elr_el3", CPEN_(6,C0,1) },
2701 { "sp_el2", CPEN_(6,C1,0) },
2702 { "spsr_svc", CPEN_(0,C0,0) }, /* = spsr_el1 */
2703 { "spsr_hyp", CPEN_(4,C0,0) }, /* = spsr_el2 */
2704 { "midr_el1", CPENC(3,0,C0,C0,0) }, /* RO */
2705 { "ctr_el0", CPENC(3,3,C0,C0,1) }, /* RO */
2706 { "mpidr_el1", CPENC(3,0,C0,C0,5) }, /* RO */
2707 { "revidr_el1", CPENC(3,0,C0,C0,6) }, /* RO */
2708 { "aidr_el1", CPENC(3,1,C0,C0,7) }, /* RO */
2709 { "dczid_el0", CPENC(3,3,C0,C0,7) }, /* RO */
2710 { "id_dfr0_el1", CPENC(3,0,C0,C1,2) }, /* RO */
2711 { "id_pfr0_el1", CPENC(3,0,C0,C1,0) }, /* RO */
2712 { "id_pfr1_el1", CPENC(3,0,C0,C1,1) }, /* RO */
2713 { "id_afr0_el1", CPENC(3,0,C0,C1,3) }, /* RO */
2714 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4) }, /* RO */
2715 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5) }, /* RO */
2716 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6) }, /* RO */
2717 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7) }, /* RO */
2718 { "id_isar0_el1", CPENC(3,0,C0,C2,0) }, /* RO */
2719 { "id_isar1_el1", CPENC(3,0,C0,C2,1) }, /* RO */
2720 { "id_isar2_el1", CPENC(3,0,C0,C2,2) }, /* RO */
2721 { "id_isar3_el1", CPENC(3,0,C0,C2,3) }, /* RO */
2722 { "id_isar4_el1", CPENC(3,0,C0,C2,4) }, /* RO */
2723 { "id_isar5_el1", CPENC(3,0,C0,C2,5) }, /* RO */
2724 { "mvfr0_el1", CPENC(3,0,C0,C3,0) }, /* RO */
2725 { "mvfr1_el1", CPENC(3,0,C0,C3,1) }, /* RO */
2726 { "mvfr2_el1", CPENC(3,0,C0,C3,2) }, /* RO */
2727 { "ccsidr_el1", CPENC(3,1,C0,C0,0) }, /* RO */
2728 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0) }, /* RO */
2729 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1) }, /* RO */
2730 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0) }, /* RO */
2731 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1) }, /* RO */
2732 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0) }, /* RO */
2733 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1) }, /* RO */
2734 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0) }, /* RO */
2735 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1) }, /* RO */
2736 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4) }, /* RO */
2737 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5) }, /* RO */
2738 { "clidr_el1", CPENC(3,1,C0,C0,1) }, /* RO */
2739 { "csselr_el1", CPENC(3,2,C0,C0,0) }, /* RO */
2740 { "vpidr_el2", CPENC(3,4,C0,C0,0) },
2741 { "vmpidr_el2", CPENC(3,4,C0,C0,5) },
2742 { "sctlr_el1", CPENC(3,0,C1,C0,0) },
2743 { "sctlr_el2", CPENC(3,4,C1,C0,0) },
2744 { "sctlr_el3", CPENC(3,6,C1,C0,0) },
2745 { "actlr_el1", CPENC(3,0,C1,C0,1) },
2746 { "actlr_el2", CPENC(3,4,C1,C0,1) },
2747 { "actlr_el3", CPENC(3,6,C1,C0,1) },
2748 { "cpacr_el1", CPENC(3,0,C1,C0,2) },
2749 { "cptr_el2", CPENC(3,4,C1,C1,2) },
2750 { "cptr_el3", CPENC(3,6,C1,C1,2) },
2751 { "scr_el3", CPENC(3,6,C1,C1,0) },
2752 { "hcr_el2", CPENC(3,4,C1,C1,0) },
2753 { "mdcr_el2", CPENC(3,4,C1,C1,1) },
2754 { "mdcr_el3", CPENC(3,6,C1,C3,1) },
2755 { "hstr_el2", CPENC(3,4,C1,C1,3) },
2756 { "hacr_el2", CPENC(3,4,C1,C1,7) },
2757 { "ttbr0_el1", CPENC(3,0,C2,C0,0) },
2758 { "ttbr1_el1", CPENC(3,0,C2,C0,1) },
2759 { "ttbr0_el2", CPENC(3,4,C2,C0,0) },
2760 { "ttbr0_el3", CPENC(3,6,C2,C0,0) },
2761 { "vttbr_el2", CPENC(3,4,C2,C1,0) },
2762 { "tcr_el1", CPENC(3,0,C2,C0,2) },
2763 { "tcr_el2", CPENC(3,4,C2,C0,2) },
2764 { "tcr_el3", CPENC(3,6,C2,C0,2) },
2765 { "vtcr_el2", CPENC(3,4,C2,C1,2) },
2766 { "afsr0_el1", CPENC(3,0,C5,C1,0) },
2767 { "afsr1_el1", CPENC(3,0,C5,C1,1) },
2768 { "afsr0_el2", CPENC(3,4,C5,C1,0) },
2769 { "afsr1_el2", CPENC(3,4,C5,C1,1) },
2770 { "afsr0_el3", CPENC(3,6,C5,C1,0) },
2771 { "afsr1_el3", CPENC(3,6,C5,C1,1) },
2772 { "esr_el1", CPENC(3,0,C5,C2,0) },
2773 { "esr_el2", CPENC(3,4,C5,C2,0) },
2774 { "esr_el3", CPENC(3,6,C5,C2,0) },
2775 { "fpexc32_el2", CPENC(3,4,C5,C3,0) },
2776 { "far_el1", CPENC(3,0,C6,C0,0) },
2777 { "far_el2", CPENC(3,4,C6,C0,0) },
2778 { "far_el3", CPENC(3,6,C6,C0,0) },
2779 { "hpfar_el2", CPENC(3,4,C6,C0,4) },
2780 { "par_el1", CPENC(3,0,C7,C4,0) },
2781 { "mair_el1", CPENC(3,0,C10,C2,0) },
2782 { "mair_el2", CPENC(3,4,C10,C2,0) },
2783 { "mair_el3", CPENC(3,6,C10,C2,0) },
2784 { "amair_el1", CPENC(3,0,C10,C3,0) },
2785 { "amair_el2", CPENC(3,4,C10,C3,0) },
2786 { "amair_el3", CPENC(3,6,C10,C3,0) },
2787 { "vbar_el1", CPENC(3,0,C12,C0,0) },
2788 { "vbar_el2", CPENC(3,4,C12,C0,0) },
2789 { "vbar_el3", CPENC(3,6,C12,C0,0) },
2790 { "rvbar_el1", CPENC(3,0,C12,C0,1) }, /* RO */
2791 { "rvbar_el2", CPENC(3,4,C12,C0,1) }, /* RO */
2792 { "rvbar_el3", CPENC(3,6,C12,C0,1) }, /* RO */
2793 { "rmr_el1", CPENC(3,0,C12,C0,2) },
2794 { "rmr_el2", CPENC(3,4,C12,C0,2) },
2795 { "rmr_el3", CPENC(3,6,C12,C0,2) },
2796 { "isr_el1", CPENC(3,0,C12,C1,0) }, /* RO */
2797 { "contextidr_el1", CPENC(3,0,C13,C0,1) },
2798 { "tpidr_el0", CPENC(3,3,C13,C0,2) },
2799 { "tpidrro_el0", CPENC(3,3,C13,C0,3) }, /* RO */
2800 { "tpidr_el1", CPENC(3,0,C13,C0,4) },
2801 { "tpidr_el2", CPENC(3,4,C13,C0,2) },
2802 { "tpidr_el3", CPENC(3,6,C13,C0,2) },
2803 { "teecr32_el1", CPENC(2,2,C0, C0,0) }, /* See section 3.9.7.1 */
2804 { "cntfrq_el0", CPENC(3,3,C14,C0,0) }, /* RO */
2805 { "cntpct_el0", CPENC(3,3,C14,C0,1) }, /* RO */
2806 { "cntvct_el0", CPENC(3,3,C14,C0,2) }, /* RO */
2807 { "cntvoff_el2", CPENC(3,4,C14,C0,3) },
2808 { "cntkctl_el1", CPENC(3,0,C14,C1,0) },
2809 { "cnthctl_el2", CPENC(3,4,C14,C1,0) },
2810 { "cntp_tval_el0", CPENC(3,3,C14,C2,0) },
2811 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1) },
2812 { "cntp_cval_el0", CPENC(3,3,C14,C2,2) },
2813 { "cntv_tval_el0", CPENC(3,3,C14,C3,0) },
2814 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1) },
2815 { "cntv_cval_el0", CPENC(3,3,C14,C3,2) },
2816 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0) },
2817 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1) },
2818 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2) },
2819 { "cntps_tval_el1", CPENC(3,7,C14,C2,0) },
2820 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1) },
2821 { "cntps_cval_el1", CPENC(3,7,C14,C2,2) },
2822 { "dacr32_el2", CPENC(3,4,C3,C0,0) },
2823 { "ifsr32_el2", CPENC(3,4,C5,C0,1) },
2824 { "teehbr32_el1", CPENC(2,2,C1,C0,0) },
2825 { "sder32_el3", CPENC(3,6,C1,C1,1) },
2826 { "mdscr_el1", CPENC(2,0,C0, C2, 2) },
2827 { "mdccsr_el0", CPENC(2,3,C0, C1, 0) }, /* r */
2828 { "mdccint_el1", CPENC(2,0,C0, C2, 0) },
2829 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0) },
2830 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0) }, /* r */
2831 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0) }, /* w */
2832 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2) }, /* r */
2833 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2) }, /* w */
2834 { "oseccr_el1", CPENC(2,0,C0, C6, 2) },
2835 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0) },
2836 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4) },
2837 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4) },
2838 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4) },
2839 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4) },
2840 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4) },
2841 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4) },
2842 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4) },
2843 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4) },
2844 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4) },
2845 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4) },
2846 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4) },
2847 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4) },
2848 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4) },
2849 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4) },
2850 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4) },
2851 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4) },
2852 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5) },
2853 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5) },
2854 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5) },
2855 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5) },
2856 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5) },
2857 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5) },
2858 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5) },
2859 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5) },
2860 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5) },
2861 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5) },
2862 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5) },
2863 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5) },
2864 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5) },
2865 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5) },
2866 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5) },
2867 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5) },
2868 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6) },
2869 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6) },
2870 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6) },
2871 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6) },
2872 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6) },
2873 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6) },
2874 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6) },
2875 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6) },
2876 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6) },
2877 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6) },
2878 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6) },
2879 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6) },
2880 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6) },
2881 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6) },
2882 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6) },
2883 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6) },
2884 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7) },
2885 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7) },
2886 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7) },
2887 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7) },
2888 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7) },
2889 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7) },
2890 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7) },
2891 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7) },
2892 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7) },
2893 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7) },
2894 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7) },
2895 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7) },
2896 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7) },
2897 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7) },
2898 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7) },
2899 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7) },
2900 { "mdrar_el1", CPENC(2,0,C1, C0, 0) }, /* r */
2901 { "oslar_el1", CPENC(2,0,C1, C0, 4) }, /* w */
2902 { "oslsr_el1", CPENC(2,0,C1, C1, 4) }, /* r */
2903 { "osdlr_el1", CPENC(2,0,C1, C3, 4) },
2904 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4) },
2905 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6) },
2906 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6) },
2907 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6) }, /* r */
2909 { "pmcr_el0", CPENC(3,3,C9,C12, 0) },
2910 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1) },
2911 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2) },
2912 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3) },
2913 { "pmswinc_el0", CPENC(3,3,C9,C12, 4) }, /* w */
2914 { "pmselr_el0", CPENC(3,3,C9,C12, 5) },
2915 { "pmceid0_el0", CPENC(3,3,C9,C12, 6) }, /* r */
2916 { "pmceid1_el0", CPENC(3,3,C9,C12, 7) }, /* r */
2917 { "pmccntr_el0", CPENC(3,3,C9,C13, 0) },
2918 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1) },
2919 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2) },
2920 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0) },
2921 { "pmintenset_el1", CPENC(3,0,C9,C14, 1) },
2922 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2) },
2923 { "pmovsset_el0", CPENC(3,3,C9,C14, 3) },
2924 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0) },
2925 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1) },
2926 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2) },
2927 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3) },
2928 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4) },
2929 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5) },
2930 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6) },
2931 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7) },
2932 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0) },
2933 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1) },
2934 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2) },
2935 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3) },
2936 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4) },
2937 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5) },
2938 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6) },
2939 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7) },
2940 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0) },
2941 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1) },
2942 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2) },
2943 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3) },
2944 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4) },
2945 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5) },
2946 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6) },
2947 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7) },
2948 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0) },
2949 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1) },
2950 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2) },
2951 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3) },
2952 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4) },
2953 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5) },
2954 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6) },
2955 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0) },
2956 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1) },
2957 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2) },
2958 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3) },
2959 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4) },
2960 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5) },
2961 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6) },
2962 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7) },
2963 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0) },
2964 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1) },
2965 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2) },
2966 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3) },
2967 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4) },
2968 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5) },
2969 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6) },
2970 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7) },
2971 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0) },
2972 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1) },
2973 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2) },
2974 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3) },
2975 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4) },
2976 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5) },
2977 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6) },
2978 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7) },
2979 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0) },
2980 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1) },
2981 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2) },
2982 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3) },
2983 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4) },
2984 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5) },
2985 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6) },
2986 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7) },
2987 { 0, CPENC(0,0,0,0,0) },
2990 const struct aarch64_name_value_pair aarch64_pstatefields [] =
2993 { "daifset", 0x1e },
2994 { "daifclr", 0x1f },
2995 { 0, CPENC(0,0,0,0,0) },
2998 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3000 { "ialluis", CPENS(0,C7,C1,0), 0 },
3001 { "iallu", CPENS(0,C7,C5,0), 0 },
3002 { "ivau", CPENS(3,C7,C5,1), 1 },
3003 { 0, CPENS(0,0,0,0), 0 }
3006 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3008 { "zva", CPENS(3,C7,C4,1), 1 },
3009 { "ivac", CPENS(0,C7,C6,1), 1 },
3010 { "isw", CPENS(0,C7,C6,2), 1 },
3011 { "cvac", CPENS(3,C7,C10,1), 1 },
3012 { "csw", CPENS(0,C7,C10,2), 1 },
3013 { "cvau", CPENS(3,C7,C11,1), 1 },
3014 { "civac", CPENS(3,C7,C14,1), 1 },
3015 { "cisw", CPENS(0,C7,C14,2), 1 },
3016 { 0, CPENS(0,0,0,0), 0 }
3019 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3021 { "s1e1r", CPENS(0,C7,C8,0), 1 },
3022 { "s1e1w", CPENS(0,C7,C8,1), 1 },
3023 { "s1e0r", CPENS(0,C7,C8,2), 1 },
3024 { "s1e0w", CPENS(0,C7,C8,3), 1 },
3025 { "s12e1r", CPENS(4,C7,C8,4), 1 },
3026 { "s12e1w", CPENS(4,C7,C8,5), 1 },
3027 { "s12e0r", CPENS(4,C7,C8,6), 1 },
3028 { "s12e0w", CPENS(4,C7,C8,7), 1 },
3029 { "s1e2r", CPENS(4,C7,C8,0), 1 },
3030 { "s1e2w", CPENS(4,C7,C8,1), 1 },
3031 { "s1e3r", CPENS(6,C7,C8,0), 1 },
3032 { "s1e3w", CPENS(6,C7,C8,1), 1 },
3033 { 0, CPENS(0,0,0,0), 0 }
3036 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3038 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3039 { "vae1", CPENS(0,C8,C7,1), 1 },
3040 { "aside1", CPENS(0,C8,C7,2), 1 },
3041 { "vaae1", CPENS(0,C8,C7,3), 1 },
3042 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3043 { "vae1is", CPENS(0,C8,C3,1), 1 },
3044 { "aside1is", CPENS(0,C8,C3,2), 1 },
3045 { "vaae1is", CPENS(0,C8,C3,3), 1 },
3046 { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3047 { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3048 { "ipas2e1", CPENS(4,C8,C4,1), 1 },
3049 { "ipas2le1", CPENS(4,C8,C4,5), 1 },
3050 { "vae2", CPENS(4,C8,C7,1), 1 },
3051 { "vae2is", CPENS(4,C8,C3,1), 1 },
3052 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3053 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3054 { "vae3", CPENS(6,C8,C7,1), 1 },
3055 { "vae3is", CPENS(6,C8,C3,1), 1 },
3056 { "alle2", CPENS(4,C8,C7,0), 0 },
3057 { "alle2is", CPENS(4,C8,C3,0), 0 },
3058 { "alle1", CPENS(4,C8,C7,4), 0 },
3059 { "alle1is", CPENS(4,C8,C3,4), 0 },
3060 { "alle3", CPENS(6,C8,C7,0), 0 },
3061 { "alle3is", CPENS(6,C8,C3,0), 0 },
3062 { "vale1is", CPENS(0,C8,C3,5), 1 },
3063 { "vale2is", CPENS(4,C8,C3,5), 1 },
3064 { "vale3is", CPENS(6,C8,C3,5), 1 },
3065 { "vaale1is", CPENS(0,C8,C3,7), 1 },
3066 { "vale1", CPENS(0,C8,C7,5), 1 },
3067 { "vale2", CPENS(4,C8,C7,5), 1 },
3068 { "vale3", CPENS(6,C8,C7,5), 1 },
3069 { "vaale1", CPENS(0,C8,C7,7), 1 },
3070 { 0, CPENS(0,0,0,0), 0 }
3090 /* Include the opcode description table as well as the operand description
3091 table. */
3092 #include "aarch64-tbl.h"