1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand is used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
62 DP_VECTOR_ACROSS_LANES,
65 static const char significant_operand_index [] =
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time an operand needs to be selected. We could
126 either cache the calculated result or statically generate the data;
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
137 const aarch64_field fields[] =
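/* Each entry below gives the { lsb, width } of the corresponding
   instruction field.  */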
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
196 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
197 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
198 { 31, 1 }, /* b5: in the test bit and branch instructions. */
199 { 19, 5 }, /* b40: in the test bit and branch instructions. */
200 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
203 enum aarch64_operand_class
204 aarch64_get_operand_class (enum aarch64_opnd type)
206 return aarch64_operands[type].op_class;
210 aarch64_get_operand_name (enum aarch64_opnd type)
212 return aarch64_operands[type].name;
215 /* Get operand description string.
216 This is usually used for diagnostic purposes. */
218 aarch64_get_operand_desc (enum aarch64_opnd type)
220 return aarch64_operands[type].desc;
223 /* Table of all conditional affixes. */
224 const aarch64_cond aarch64_conds[16] =
229 {{"cc", "lo", "ul"}, 0x3},
245 get_cond_from_value (aarch64_insn value)
248 return &aarch64_conds[(unsigned int) value];
252 get_inverted_cond (const aarch64_cond *cond)
254 return &aarch64_conds[cond->value ^ 0x1];
257 /* Table describing the operand extension/shifting operators; indexed by
258 enum aarch64_modifier_kind.
260 The value column provides the most common values for encoding modifiers,
261 which enables table-driven encoding/decoding for the modifiers. */
262 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
281 enum aarch64_modifier_kind
282 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284 return desc - aarch64_operand_modifiers;
288 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290 return aarch64_operand_modifiers[kind].value;
293 enum aarch64_modifier_kind
294 aarch64_get_operand_modifier_from_value (aarch64_insn value,
295 bfd_boolean extend_p)
297 if (extend_p == TRUE)
298 return AARCH64_MOD_UXTB + value;
300 return AARCH64_MOD_LSL - value;
304 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
310 static inline bfd_boolean
311 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
317 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
337 /* op -> op: load = 0 instruction = 1 store = 2
339 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
340 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
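/* For example, B(0, 1, 0) is 0b00000 (pldl1keep) and B(2, 3, 1) is
   (2 << 3) | (2 << 1) | 1 = 0b10101 (pstl3strm).  */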
341 const struct aarch64_name_value_pair aarch64_prfops[32] =
343 { "pldl1keep", B(0, 1, 0) },
344 { "pldl1strm", B(0, 1, 1) },
345 { "pldl2keep", B(0, 2, 0) },
346 { "pldl2strm", B(0, 2, 1) },
347 { "pldl3keep", B(0, 3, 0) },
348 { "pldl3strm", B(0, 3, 1) },
351 { "plil1keep", B(1, 1, 0) },
352 { "plil1strm", B(1, 1, 1) },
353 { "plil2keep", B(1, 2, 0) },
354 { "plil2strm", B(1, 2, 1) },
355 { "plil3keep", B(1, 3, 0) },
356 { "plil3strm", B(1, 3, 1) },
359 { "pstl1keep", B(2, 1, 0) },
360 { "pstl1strm", B(2, 1, 1) },
361 { "pstl2keep", B(2, 2, 0) },
362 { "pstl2strm", B(2, 2, 1) },
363 { "pstl3keep", B(2, 3, 0) },
364 { "pstl3strm", B(2, 3, 1) },
378 /* Utility functions for checking value constraints. */
381 value_in_range_p (int64_t value, int low, int high)
383 return (value >= low && value <= high) ? 1 : 0;
387 value_aligned_p (int64_t value, int align)
389 return ((value & (align - 1)) == 0) ? 1 : 0;
392 /* A signed value fits in a field. */
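/* For example, a signed 9-bit field holds values in [-256, 255].  */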
394 value_fit_signed_field_p (int64_t value, unsigned width)
397 if (width < sizeof (value) * 8)
399 int64_t lim = (int64_t)1 << (width - 1);
400 if (value >= -lim && value < lim)
406 /* An unsigned value fits in a field. */
408 value_fit_unsigned_field_p (int64_t value, unsigned width)
411 if (width < sizeof (value) * 8)
413 int64_t lim = (int64_t)1 << width;
414 if (value >= 0 && value < lim)
420 /* Return 1 if OPERAND is SP or WSP. */
422 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
424 return ((aarch64_get_operand_class (operand->type)
425 == AARCH64_OPND_CLASS_INT_REG)
426 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
427 && operand->reg.regno == 31);
430 /* Return 1 if OPERAND is XZR or WZR. */
432 aarch64_zero_register_p (const aarch64_opnd_info *operand)
434 return ((aarch64_get_operand_class (operand->type)
435 == AARCH64_OPND_CLASS_INT_REG)
436 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
437 && operand->reg.regno == 31);
440 /* Return true if the operand *OPERAND, which has the operand code
441 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
442 qualified by the qualifier TARGET. */
445 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
446 aarch64_opnd_qualifier_t target)
448 switch (operand->qualifier)
450 case AARCH64_OPND_QLF_W:
451 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
454 case AARCH64_OPND_QLF_X:
455 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
458 case AARCH64_OPND_QLF_WSP:
459 if (target == AARCH64_OPND_QLF_W
460 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
463 case AARCH64_OPND_QLF_SP:
464 if (target == AARCH64_OPND_QLF_X
465 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
475 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
476 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
478 Return NIL if more than one expected qualifier is found. */
480 aarch64_opnd_qualifier_t
481 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
483 const aarch64_opnd_qualifier_t known_qlf,
490 When the known qualifier is NIL, we have to assume that there is only
491 one qualifier sequence in the *QSEQ_LIST and return the corresponding
492 qualifier directly. One scenario is that for instruction
493 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
494 which has only one possible valid qualifier sequence
496 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
497 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
499 Because the qualifier NIL has dual roles in the qualifier sequence:
500 it can mean no qualifier for the operand, or that the qualifier sequence is
501 not in use (when all qualifiers in the sequence are NILs), we have to
502 handle this special case here. */
503 if (known_qlf == AARCH64_OPND_NIL)
505 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
506 return qseq_list[0][idx];
509 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
511 if (qseq_list[i][known_idx] == known_qlf)
514 /* More than one sequence is found to have KNOWN_QLF at
516 return AARCH64_OPND_NIL;
521 return qseq_list[saved_i][idx];
524 enum operand_qualifier_kind
532 /* Operand qualifier description. */
533 struct operand_qualifier_data
535 /* The usage of the three data fields depends on the qualifier kind. */
542 enum operand_qualifier_kind kind;
545 /* Indexed by the operand qualifier enumerators. */
546 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
548 {0, 0, 0, "NIL", OQK_NIL},
550 /* Operand variant qualifiers.
552 element size, number of elements and common value for encoding. */
554 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
555 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
556 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
557 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
559 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
560 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
561 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
562 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
563 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
565 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
566 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
567 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
568 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
569 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
570 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
571 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
572 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
573 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
575 /* Qualifiers constraining the value range.
577 Lower bound, upper bound, unused. */
579 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
580 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
581 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
582 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
583 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
584 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
586 /* Qualifiers for miscellaneous purposes.
588 unused, unused and unused. */
593 {0, 0, 0, "retrieving", 0},
596 static inline bfd_boolean
597 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
599 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
603 static inline bfd_boolean
604 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
606 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
611 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
613 return aarch64_opnd_qualifiers[qualifier].desc;
616 /* Given an operand qualifier, return the expected data element size
617 of a qualified operand. */
619 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
621 assert (operand_variant_qualifier_p (qualifier) == TRUE);
622 return aarch64_opnd_qualifiers[qualifier].data0;
626 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
628 assert (operand_variant_qualifier_p (qualifier) == TRUE);
629 return aarch64_opnd_qualifiers[qualifier].data1;
633 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
635 assert (operand_variant_qualifier_p (qualifier) == TRUE);
636 return aarch64_opnd_qualifiers[qualifier].data2;
640 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
642 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
643 return aarch64_opnd_qualifiers[qualifier].data0;
647 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
649 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
650 return aarch64_opnd_qualifiers[qualifier].data1;
655 aarch64_verbose (const char *str, ...)
666 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
671 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
676 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
677 const aarch64_opnd_qualifier_t *qualifier)
680 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
682 aarch64_verbose ("dump_match_qualifiers:");
683 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
684 curr[i] = opnd[i].qualifier;
685 dump_qualifier_sequence (curr);
686 aarch64_verbose ("against");
687 dump_qualifier_sequence (qualifier);
689 #endif /* DEBUG_AARCH64 */
691 /* TODO: improve this; we could have an extra field at runtime to
692 store the number of operands rather than calculating it every time. */
695 aarch64_num_of_operands (const aarch64_opcode *opcode)
698 const enum aarch64_opnd *opnds = opcode->operands;
699 while (opnds[i++] != AARCH64_OPND_NIL)
702 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
706 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
707 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
709 N.B. on entry, it is very likely that only some operands in *INST
710 have had their qualifiers established.
712 If STOP_AT is not -1, the function will only try to match
713 the qualifier sequence for operands before and including the operand
714 of index STOP_AT; and on success *RET will only be filled with the first
715 (STOP_AT+1) qualifiers.
717 A couple of examples of the matching algorithm:
725 Apart from serving the main encoding routine, this can also be called
726 during or after the operand decoding. */
729 aarch64_find_best_match (const aarch64_inst *inst,
730 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
731 int stop_at, aarch64_opnd_qualifier_t *ret)
735 const aarch64_opnd_qualifier_t *qualifiers;
737 num_opnds = aarch64_num_of_operands (inst->opcode);
740 DEBUG_TRACE ("SUCCEED: no operand");
744 if (stop_at < 0 || stop_at >= num_opnds)
745 stop_at = num_opnds - 1;
747 /* For each pattern. */
748 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
751 qualifiers = *qualifiers_list;
753 /* Start as positive. */
756 DEBUG_TRACE ("%d", i);
759 dump_match_qualifiers (inst->operands, qualifiers);
762 /* Most opcodes have far fewer patterns in the list.
763 The first NIL qualifier indicates the end of the list. */
764 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
766 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
772 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
774 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
776 /* Either the operand does not have a qualifier, or the qualifier
777 for the operand needs to be deduced from the qualifier
779 In the latter case, any constraint checking related to
780 the obtained qualifier should be done later in
781 operand_general_constraint_met_p. */
784 else if (*qualifiers != inst->operands[j].qualifier)
786 /* Unless the target qualifier can also qualify the operand
787 (which already has a non-nil qualifier), non-equal
788 qualifiers generally do not match. */
789 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
798 continue; /* Equal qualifiers are certainly matched. */
801 /* Qualifiers established. */
808 /* Fill the result in *RET. */
810 qualifiers = *qualifiers_list;
812 DEBUG_TRACE ("complete qualifiers using list %d", i);
815 dump_qualifier_sequence (qualifiers);
818 for (j = 0; j <= stop_at; ++j, ++qualifiers)
819 ret[j] = *qualifiers;
820 for (; j < AARCH64_MAX_OPND_NUM; ++j)
821 ret[j] = AARCH64_OPND_QLF_NIL;
823 DEBUG_TRACE ("SUCCESS");
827 DEBUG_TRACE ("FAIL");
831 /* Operand qualifier matching and resolving.
833 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
834 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
836 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
840 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
843 aarch64_opnd_qualifier_seq_t qualifiers;
845 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
848 DEBUG_TRACE ("matching FAIL");
852 /* Update the qualifiers. */
853 if (update_p == TRUE)
854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
856 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
858 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
859 "update %s with %s for operand %d",
860 aarch64_get_qualifier_name (inst->operands[i].qualifier),
861 aarch64_get_qualifier_name (qualifiers[i]), i);
862 inst->operands[i].qualifier = qualifiers[i];
865 DEBUG_TRACE ("matching SUCCESS");
869 /* Return TRUE if VALUE is a wide constant that can be moved into a general
872 IS32 indicates whether VALUE is a 32-bit immediate or not.
873 If SHIFT_AMOUNT is not NULL, then when TRUE is returned, the logical left shift
874 amount will be returned in *SHIFT_AMOUNT. */
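/* For example, 0x12340000 (with IS32) can be materialized by
   MOVZ Wd, #0x1234, LSL #16, so TRUE is returned with
   *SHIFT_AMOUNT == 16, whereas 0x12345678 is not a wide constant.  */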
877 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
881 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
885 /* Allow all zeros or all ones in top 32-bits, so that
886 32-bit constant expressions like ~0x80000000 are
888 uint64_t ext = value;
889 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
890 /* Immediate out of range. */
892 value &= (int64_t) 0xffffffff;
895 /* First, try MOVZ, then MOVN. */
897 if ((value & ((int64_t) 0xffff << 0)) == value)
899 else if ((value & ((int64_t) 0xffff << 16)) == value)
901 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
903 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
908 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
912 if (shift_amount != NULL)
913 *shift_amount = amount;
915 DEBUG_TRACE ("exit TRUE with amount %d", amount);
920 /* Build the accepted values for immediate logical SIMD instructions.
922 The standard encodings of the immediate value are:
923 N imms immr SIMD size R S
924 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
925 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
926 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
927 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
928 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
929 0 11110s 00000r 2 UInt(r) UInt(s)
930 where the all-ones value of S is reserved.
932 Let's call E the SIMD size.
934 The immediate value is S+1 consecutive '1' bits, rotated to the right by R.
936 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
937 (remember S != E - 1). */
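/* That is, for each element size E there are E*(E-1) encodings (E-1 choices
   of S times E rotations), giving
   64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
   = 4032 + 992 + 240 + 56 + 12 + 2 = 5334.  */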
939 #define TOTAL_IMM_NB 5334
944 aarch64_insn encoding;
947 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
950 simd_imm_encoding_cmp(const void *i1, const void *i2)
952 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
953 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
955 if (imm1->imm < imm2->imm)
957 if (imm1->imm > imm2->imm)
962 /* immediate bitfield standard encoding
963 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
964 1 ssssss rrrrrr 64 rrrrrr ssssss
965 0 0sssss 0rrrrr 32 rrrrr sssss
966 0 10ssss 00rrrr 16 rrrr ssss
967 0 110sss 000rrr 8 rrr sss
968 0 1110ss 0000rr 4 rr ss
969 0 11110s 00000r 2 r s */
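/* For example, encode_immediate_bitfield (1, 0x05, 0x03) yields
   (1 << 12) | (3 << 6) | 5 = 0x10c5, i.e. N=1, immr=000011, imms=000101.  */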
971 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
973 return (is64 << 12) | (r << 6) | s;
977 build_immediate_table (void)
979 uint32_t log_e, e, s, r, s_mask;
985 for (log_e = 1; log_e <= 6; log_e++)
987 /* Get element size. */
992 mask = 0xffffffffffffffffull;
998 mask = (1ull << e) - 1;
1000 1 ((1 << 4) - 1) << 2 = 111100
1001 2 ((1 << 3) - 1) << 3 = 111000
1002 3 ((1 << 2) - 1) << 4 = 110000
1003 4 ((1 << 1) - 1) << 5 = 100000
1004 5 ((1 << 0) - 1) << 6 = 000000 */
1005 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1007 for (s = 0; s < e - 1; s++)
1008 for (r = 0; r < e; r++)
1010 /* s+1 consecutive bits to 1 (s < 63) */
1011 imm = (1ull << (s + 1)) - 1;
1012 /* rotate right by r */
1014 imm = (imm >> r) | ((imm << (e - r)) & mask);
1015 /* replicate the constant depending on SIMD size */
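/* N.B. the cases below intentionally fall through, doubling the
   replication width at each step.  */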
1018 case 1: imm = (imm << 2) | imm;
1019 case 2: imm = (imm << 4) | imm;
1020 case 3: imm = (imm << 8) | imm;
1021 case 4: imm = (imm << 16) | imm;
1022 case 5: imm = (imm << 32) | imm;
1026 simd_immediates[nb_imms].imm = imm;
1027 simd_immediates[nb_imms].encoding =
1028 encode_immediate_bitfield(is64, s | s_mask, r);
1032 assert (nb_imms == TOTAL_IMM_NB);
1033 qsort(simd_immediates, nb_imms,
1034 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
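/* For illustration: with log_e = 2 (E = 4), s = 1, r = 1, the pattern 0011 is
   rotated to 1001 and replicated to 0x9999999999999999, which is stored with
   encoding (0 << 12) | (1 << 6) | (1 | 0b111000) = 0x79.  */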
1037 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1038 be accepted by logical (immediate) instructions
1039 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1041 IS32 indicates whether or not VALUE is a 32-bit immediate.
1042 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1043 VALUE will be returned in *ENCODING. */
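/* For example, 0x00ff00ff00ff00ff and 0x0000000000000ff0 are valid bitmask
   immediates, whereas 0x0 (no '1' bits), ~0x0 (all '1' bits) and 0x12345678
   are not.  */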
1046 aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1048 simd_imm_encoding imm_enc;
1049 const simd_imm_encoding *imm_encoding;
1050 static bfd_boolean initialized = FALSE;
1052 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1055 if (initialized == FALSE)
1057 build_immediate_table ();
1063 /* Allow all zeros or all ones in top 32-bits, so that
1064 constant expressions like ~1 are permitted. */
1065 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1068 /* Replicate the 32 lower bits to the 32 upper bits. */
1069 value &= 0xffffffff;
1070 value |= value << 32;
1073 imm_enc.imm = value;
1074 imm_encoding = (const simd_imm_encoding *)
1075 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1076 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1077 if (imm_encoding == NULL)
1079 DEBUG_TRACE ("exit with FALSE");
1082 if (encoding != NULL)
1083 *encoding = imm_encoding->encoding;
1084 DEBUG_TRACE ("exit with TRUE");
1088 /* If 64-bit immediate IMM is in the format of
1089 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1090 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1091 of value "abcdefgh". Otherwise return -1. */
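/* For example, 0xff00ff0000ff00ff yields 0xa5 (binary 10100101), while
   0xff00ff0000ff00fe yields -1.  */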
1093 aarch64_shrink_expanded_imm8 (uint64_t imm)
1099 for (i = 0; i < 8; i++)
1101 byte = (imm >> (8 * i)) & 0xff;
1104 else if (byte != 0x00)
1110 /* Utility inline functions for operand_general_constraint_met_p. */
1113 set_error (aarch64_operand_error *mismatch_detail,
1114 enum aarch64_operand_error_kind kind, int idx,
1117 if (mismatch_detail == NULL)
1119 mismatch_detail->kind = kind;
1120 mismatch_detail->index = idx;
1121 mismatch_detail->error = error;
1125 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1128 if (mismatch_detail == NULL)
1130 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1134 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1135 int idx, int lower_bound, int upper_bound,
1138 if (mismatch_detail == NULL)
1140 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1141 mismatch_detail->data[0] = lower_bound;
1142 mismatch_detail->data[1] = upper_bound;
1146 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1147 int idx, int lower_bound, int upper_bound)
1149 if (mismatch_detail == NULL)
1151 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1152 _("immediate value"));
1156 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1157 int idx, int lower_bound, int upper_bound)
1159 if (mismatch_detail == NULL)
1161 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1162 _("immediate offset"));
1166 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1167 int idx, int lower_bound, int upper_bound)
1169 if (mismatch_detail == NULL)
1171 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1172 _("register number"));
1176 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1177 int idx, int lower_bound, int upper_bound)
1179 if (mismatch_detail == NULL)
1181 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1182 _("register element index"));
1186 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1187 int idx, int lower_bound, int upper_bound)
1189 if (mismatch_detail == NULL)
1191 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1196 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1199 if (mismatch_detail == NULL)
1201 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1202 mismatch_detail->data[0] = alignment;
1206 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1209 if (mismatch_detail == NULL)
1211 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1212 mismatch_detail->data[0] = expected_num;
1216 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1219 if (mismatch_detail == NULL)
1221 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1224 /* General constraint checking based on operand code.
1226 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1227 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1229 This function has to be called after the qualifiers for all operands
1232 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1233 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating error
1234 messages during disassembly, where they are not wanted. We avoid the
1235 dynamic construction of error message strings
1236 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1237 use a combination of error code, static string and some integer data to
1238 represent an error. */
1241 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1242 enum aarch64_opnd type,
1243 const aarch64_opcode *opcode,
1244 aarch64_operand_error *mismatch_detail)
1249 const aarch64_opnd_info *opnd = opnds + idx;
1250 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1252 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1254 switch (aarch64_operands[type].op_class)
1256 case AARCH64_OPND_CLASS_INT_REG:
1257 /* <Xt> may be optional in some IC and TLBI instructions. */
1258 if (type == AARCH64_OPND_Rt_SYS)
1260 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1261 == AARCH64_OPND_CLASS_SYSTEM));
1262 if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1264 set_other_error (mismatch_detail, idx, _("extraneous register"));
1267 if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1269 set_other_error (mismatch_detail, idx, _("missing register"));
1275 case AARCH64_OPND_QLF_WSP:
1276 case AARCH64_OPND_QLF_SP:
1277 if (!aarch64_stack_pointer_p (opnd))
1279 set_other_error (mismatch_detail, idx,
1280 _("stack pointer register expected"));
1289 case AARCH64_OPND_CLASS_COND:
1290 if (type == AARCH64_OPND_COND1
1291 && (opnds[idx].cond->value & 0xe) == 0xe)
1293 /* Don't allow AL or NV. */
1294 set_syntax_error (mismatch_detail, idx, NULL);
1298 case AARCH64_OPND_CLASS_ADDRESS:
1299 /* Check writeback. */
1300 switch (opcode->iclass)
1304 case ldstnapair_offs:
1307 if (opnd->addr.writeback == 1)
1309 set_syntax_error (mismatch_detail, idx,
1310 _("unexpected address writeback"));
1315 case ldstpair_indexed:
1318 if (opnd->addr.writeback == 0)
1320 set_syntax_error (mismatch_detail, idx,
1321 _("address writeback expected"));
1326 assert (opnd->addr.writeback == 0);
1331 case AARCH64_OPND_ADDR_SIMM7:
1332 /* Scaled signed 7-bit immediate offset. */
1333 /* Get the size of the data element that is accessed, which may be
1334 different from the source register size,
1335 e.g. in strb/ldrb. */
1336 size = aarch64_get_qualifier_esize (opnd->qualifier);
1337 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1339 set_offset_out_of_range_error (mismatch_detail, idx,
1340 -64 * size, 63 * size);
1343 if (!value_aligned_p (opnd->addr.offset.imm, size))
1345 set_unaligned_error (mismatch_detail, idx, size);
1349 case AARCH64_OPND_ADDR_SIMM9:
1350 /* Unscaled signed 9-bit immediate offset. */
1351 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1353 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1358 case AARCH64_OPND_ADDR_SIMM9_2:
1359 /* Unscaled signed 9-bit immediate offset, which has to be negative
1361 size = aarch64_get_qualifier_esize (qualifier);
1362 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1363 && !value_aligned_p (opnd->addr.offset.imm, size))
1364 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1366 set_other_error (mismatch_detail, idx,
1367 _("negative or unaligned offset expected"));
1370 case AARCH64_OPND_SIMD_ADDR_POST:
1371 /* AdvSIMD load/store multiple structures, post-index. */
1373 if (opnd->addr.offset.is_reg)
1375 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1379 set_other_error (mismatch_detail, idx,
1380 _("invalid register offset"));
1386 const aarch64_opnd_info *prev = &opnds[idx-1];
1387 unsigned num_bytes; /* total number of bytes transferred. */
1388 /* The opcode dependent area stores the number of elements in
1389 each structure to be loaded/stored. */
1390 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1391 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1392 /* Special handling of loading a single structure to all lanes. */
1393 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1394 * aarch64_get_qualifier_esize (prev->qualifier);
1396 num_bytes = prev->reglist.num_regs
1397 * aarch64_get_qualifier_esize (prev->qualifier)
1398 * aarch64_get_qualifier_nelem (prev->qualifier);
1399 if ((int) num_bytes != opnd->addr.offset.imm)
1401 set_other_error (mismatch_detail, idx,
1402 _("invalid post-increment amount"));
1408 case AARCH64_OPND_ADDR_REGOFF:
1409 /* Get the size of the data element that is accessed, which may be
1410 different from the source register size,
1411 e.g. in strb/ldrb. */
1412 size = aarch64_get_qualifier_esize (opnd->qualifier);
1413 /* There is either no shift, or a shift by the binary logarithm of SIZE. */
1414 if (opnd->shifter.amount != 0
1415 && opnd->shifter.amount != (int)get_logsz (size))
1417 set_other_error (mismatch_detail, idx,
1418 _("invalid shift amount"));
1421 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1423 switch (opnd->shifter.kind)
1425 case AARCH64_MOD_UXTW:
1426 case AARCH64_MOD_LSL:
1427 case AARCH64_MOD_SXTW:
1428 case AARCH64_MOD_SXTX: break;
1430 set_other_error (mismatch_detail, idx,
1431 _("invalid extend/shift operator"));
1436 case AARCH64_OPND_ADDR_UIMM12:
1437 imm = opnd->addr.offset.imm;
1438 /* Get the size of the data element that is accessed, which may be
1439 different from the source register size,
1440 e.g. in strb/ldrb. */
1441 size = aarch64_get_qualifier_esize (qualifier);
1442 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1444 set_offset_out_of_range_error (mismatch_detail, idx,
1448 if (!value_aligned_p (opnd->addr.offset.imm, size))
1450 set_unaligned_error (mismatch_detail, idx, size);
1455 case AARCH64_OPND_ADDR_PCREL14:
1456 case AARCH64_OPND_ADDR_PCREL19:
1457 case AARCH64_OPND_ADDR_PCREL21:
1458 case AARCH64_OPND_ADDR_PCREL26:
1459 imm = opnd->imm.value;
1460 if (operand_need_shift_by_two (get_operand_from_code (type)))
1462 /* The offset value in a PC-relative branch instruction is always
1463 4-byte aligned and is encoded without the lowest 2 bits. */
1464 if (!value_aligned_p (imm, 4))
1466 set_unaligned_error (mismatch_detail, idx, 4);
1469 /* Right shift by 2 so that we can carry out the following check
1473 size = get_operand_fields_width (get_operand_from_code (type));
1474 if (!value_fit_signed_field_p (imm, size))
1476 set_other_error (mismatch_detail, idx,
1477 _("immediate out of range"));
1487 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1488 /* The opcode dependent area stores the number of elements in
1489 each structure to be loaded/stored. */
1490 num = get_opcode_dependent_value (opcode);
1493 case AARCH64_OPND_LVt:
1494 assert (num >= 1 && num <= 4);
1495 /* Unless it is LD1/ST1, the number of registers should equal the number
1496 of structure elements. */
1497 if (num != 1 && opnd->reglist.num_regs != num)
1499 set_reg_list_error (mismatch_detail, idx, num);
1503 case AARCH64_OPND_LVt_AL:
1504 case AARCH64_OPND_LEt:
1505 assert (num >= 1 && num <= 4);
1506 /* The number of registers should be equal to that of the structure
1508 if (opnd->reglist.num_regs != num)
1510 set_reg_list_error (mismatch_detail, idx, num);
1519 case AARCH64_OPND_CLASS_IMMEDIATE:
1520 /* Constraint check on immediate operand. */
1521 imm = opnd->imm.value;
1522 /* E.g. imm_0_31 constrains value to be 0..31. */
1523 if (qualifier_value_in_range_constraint_p (qualifier)
1524 && !value_in_range_p (imm, get_lower_bound (qualifier),
1525 get_upper_bound (qualifier)))
1527 set_imm_out_of_range_error (mismatch_detail, idx,
1528 get_lower_bound (qualifier),
1529 get_upper_bound (qualifier));
1535 case AARCH64_OPND_AIMM:
1536 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1538 set_other_error (mismatch_detail, idx,
1539 _("invalid shift operator"));
1542 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1544 set_other_error (mismatch_detail, idx,
1545 _("shift amount expected to be 0 or 12"));
1548 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1550 set_other_error (mismatch_detail, idx,
1551 _("immediate out of range"));
1556 case AARCH64_OPND_HALF:
1557 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1558 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1560 set_other_error (mismatch_detail, idx,
1561 _("invalid shift operator"));
1564 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1565 if (!value_aligned_p (opnd->shifter.amount, 16))
1567 set_other_error (mismatch_detail, idx,
1568 _("shift amount should be a multiple of 16"));
1571 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1573 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1577 if (opnd->imm.value < 0)
1579 set_other_error (mismatch_detail, idx,
1580 _("negative immediate value not allowed"));
1583 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1585 set_other_error (mismatch_detail, idx,
1586 _("immediate out of range"));
1591 case AARCH64_OPND_IMM_MOV:
1593 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1594 imm = opnd->imm.value;
1598 case OP_MOV_IMM_WIDEN:
1600 /* Fall through... */
1601 case OP_MOV_IMM_WIDE:
1602 if (!aarch64_wide_constant_p (imm, is32, NULL))
1604 set_other_error (mismatch_detail, idx,
1605 _("immediate out of range"));
1609 case OP_MOV_IMM_LOG:
1610 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1612 set_other_error (mismatch_detail, idx,
1613 _("immediate out of range"));
1624 case AARCH64_OPND_NZCV:
1625 case AARCH64_OPND_CCMP_IMM:
1626 case AARCH64_OPND_EXCEPTION:
1627 case AARCH64_OPND_UIMM4:
1628 case AARCH64_OPND_UIMM7:
1629 case AARCH64_OPND_UIMM3_OP1:
1630 case AARCH64_OPND_UIMM3_OP2:
1631 size = get_operand_fields_width (get_operand_from_code (type));
1633 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1635 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1641 case AARCH64_OPND_WIDTH:
1642 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1643 && opnds[0].type == AARCH64_OPND_Rd);
1644 size = get_upper_bound (qualifier);
1645 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1646 /* lsb+width <= reg.size */
1648 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1649 size - opnds[idx-1].imm.value);
1654 case AARCH64_OPND_LIMM:
1656 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1657 uint64_t uimm = opnd->imm.value;
1658 if (opcode->op == OP_BIC)
1660 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1662 set_other_error (mismatch_detail, idx,
1663 _("immediate out of range"));
1669 case AARCH64_OPND_IMM0:
1670 case AARCH64_OPND_FPIMM0:
1671 if (opnd->imm.value != 0)
1673 set_other_error (mismatch_detail, idx,
1674 _("immediate zero expected"));
1679 case AARCH64_OPND_SHLL_IMM:
1681 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1682 if (opnd->imm.value != size)
1684 set_other_error (mismatch_detail, idx,
1685 _("invalid shift amount"));
1690 case AARCH64_OPND_IMM_VLSL:
1691 size = aarch64_get_qualifier_esize (qualifier);
1692 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1694 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1700 case AARCH64_OPND_IMM_VLSR:
1701 size = aarch64_get_qualifier_esize (qualifier);
1702 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1704 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1709 case AARCH64_OPND_SIMD_IMM:
1710 case AARCH64_OPND_SIMD_IMM_SFT:
1711 /* Qualifier check. */
1714 case AARCH64_OPND_QLF_LSL:
1715 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1717 set_other_error (mismatch_detail, idx,
1718 _("invalid shift operator"));
1722 case AARCH64_OPND_QLF_MSL:
1723 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1725 set_other_error (mismatch_detail, idx,
1726 _("invalid shift operator"));
1730 case AARCH64_OPND_QLF_NIL:
1731 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1733 set_other_error (mismatch_detail, idx,
1734 _("shift is not permitted"));
1742 /* Is the immediate valid? */
1744 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1746 /* uimm8 or simm8 */
1747 if (!value_in_range_p (opnd->imm.value, -128, 255))
1749 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1753 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1756 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1757 ffffffffgggggggghhhhhhhh'. */
1758 set_other_error (mismatch_detail, idx,
1759 _("invalid value for immediate"));
1762 /* Is the shift amount valid? */
1763 switch (opnd->shifter.kind)
1765 case AARCH64_MOD_LSL:
1766 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1767 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1769 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1773 if (!value_aligned_p (opnd->shifter.amount, 8))
1775 set_unaligned_error (mismatch_detail, idx, 8);
1779 case AARCH64_MOD_MSL:
1780 /* Only 8 and 16 are valid shift amounts. */
1781 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1783 set_other_error (mismatch_detail, idx,
1784 _("shift amount expected to be 0 or 16"));
1789 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1791 set_other_error (mismatch_detail, idx,
1792 _("invalid shift operator"));
1799 case AARCH64_OPND_FPIMM:
1800 case AARCH64_OPND_SIMD_FPIMM:
1801 if (opnd->imm.is_fp == 0)
1803 set_other_error (mismatch_detail, idx,
1804 _("floating-point immediate expected"));
1807 /* The value is expected to be an 8-bit floating-point constant with
1808 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1809 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1811 if (!value_in_range_p (opnd->imm.value, 0, 255))
1813 set_other_error (mismatch_detail, idx,
1814 _("immediate out of range"));
1817 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1819 set_other_error (mismatch_detail, idx,
1820 _("invalid shift operator"));
1830 case AARCH64_OPND_CLASS_CP_REG:
1831 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1832 valid range: C0 - C15. */
1833 if (opnd->reg.regno > 15)
1835 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1840 case AARCH64_OPND_CLASS_SYSTEM:
1843 case AARCH64_OPND_PSTATEFIELD:
1844 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1845 /* MSR SPSel, #uimm4
1846 Uses uimm4 as a control value to select the stack pointer: if
1847 bit 0 is set it selects the current exception level's stack
1848 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1849 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1850 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1852 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1861 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1862 /* Get the upper bound for the element index. */
1863 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1864 /* Index out-of-range. */
1865 if (!value_in_range_p (opnd->reglane.index, 0, num))
1867 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1870 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1871 <Vm> is the vector register (V0-V31) or (V0-V15), whose
1872 number is encoded in "size:M:Rm":
1878 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1879 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1881 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1886 case AARCH64_OPND_CLASS_MODIFIED_REG:
1887 assert (idx == 1 || idx == 2);
1890 case AARCH64_OPND_Rm_EXT:
1891 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1892 && opnd->shifter.kind != AARCH64_MOD_LSL)
1894 set_other_error (mismatch_detail, idx,
1895 _("extend operator expected"));
1898 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1899 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1900 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1902 if (!aarch64_stack_pointer_p (opnds + 0)
1903 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1905 if (!opnd->shifter.operator_present)
1907 set_other_error (mismatch_detail, idx,
1908 _("missing extend operator"));
1911 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1913 set_other_error (mismatch_detail, idx,
1914 _("'LSL' operator not allowed"));
1918 assert (opnd->shifter.operator_present /* Default to LSL. */
1919 || opnd->shifter.kind == AARCH64_MOD_LSL);
1920 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1922 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1925 /* In the 64-bit form, the final register operand is written as Wm
1926 for all but the (possibly omitted) UXTX/LSL and SXTX
1928 N.B. GAS allows an X register to be used with any operator as a
1929 programming convenience. */
1930 if (qualifier == AARCH64_OPND_QLF_X
1931 && opnd->shifter.kind != AARCH64_MOD_LSL
1932 && opnd->shifter.kind != AARCH64_MOD_UXTX
1933 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1935 set_other_error (mismatch_detail, idx, _("W register expected"));
1940 case AARCH64_OPND_Rm_SFT:
1941 /* ROR is not available to the shifted register operand in
1942 arithmetic instructions. */
1943 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1945 set_other_error (mismatch_detail, idx,
1946 _("shift operator expected"));
1949 if (opnd->shifter.kind == AARCH64_MOD_ROR
1950 && opcode->iclass != log_shift)
1952 set_other_error (mismatch_detail, idx,
1953 _("'ROR' operator not allowed"));
1956 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1957 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1959 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1976 /* Main entrypoint for the operand constraint checking.
1978 Return 1 if operands of *INST meet the constraint applied by the operand
1979 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
1980 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1981 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1982 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1983 error kind when it is notified that an instruction does not pass the check).
1985 Undetermined operand qualifiers may get established during the process. */
1988 aarch64_match_operands_constraint (aarch64_inst *inst,
1989 aarch64_operand_error *mismatch_detail)
1993 DEBUG_TRACE ("enter");
1995 /* Match operands' qualifiers.
1996 *INST has already had qualifiers established for some, if not all, of
1997 its operands; we need to find out whether these established
1998 qualifiers match one of the qualifier sequences in
1999 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2000 the corresponding qualifier in that sequence.
2001 Only basic operand constraint checking is done here; the more thorough
2002 constraint checking will be carried out by operand_general_constraint_met_p,
2003 which has to be called after this in order to get all of the operands'
2004 qualifiers established. */
2005 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2007 DEBUG_TRACE ("FAIL on operand qualifier matching");
2008 if (mismatch_detail)
2010 /* Return an error type to indicate that it is a qualifier
2011 matching failure; we don't care about which operand, as there
2012 is enough information in the opcode table to reproduce it. */
2013 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2014 mismatch_detail->index = -1;
2015 mismatch_detail->error = NULL;
2020 /* Match operands' constraint. */
2021 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2023 enum aarch64_opnd type = inst->opcode->operands[i];
2024 if (type == AARCH64_OPND_NIL)
2026 if (inst->operands[i].skip)
2028 DEBUG_TRACE ("skip the incomplete operand %d", i);
2031 if (operand_general_constraint_met_p (inst->operands, i, type,
2032 inst->opcode, mismatch_detail) == 0)
2034 DEBUG_TRACE ("FAIL on operand %d", i);
2039 DEBUG_TRACE ("PASS");
2044 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2045 Also updates the TYPE of each INST->OPERANDS with the corresponding
2046 value of OPCODE->OPERANDS.
2048 Note that some operand qualifiers may need to be manually cleared by
2049 the caller before it further calls aarch64_opcode_encode; doing
2050 so helps the qualifier matching facilities work
2053 const aarch64_opcode*
2054 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2057 const aarch64_opcode *old = inst->opcode;
2059 inst->opcode = opcode;
2061 /* Update the operand types. */
2062 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2064 inst->operands[i].type = opcode->operands[i];
2065 if (opcode->operands[i] == AARCH64_OPND_NIL)
2069 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2075 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2078 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2079 if (operands[i] == operand)
2081 else if (operands[i] == AARCH64_OPND_NIL)
2086 /* [0][0] 32-bit integer regs with sp Wn
2087 [0][1] 64-bit integer regs with sp Xn sf=1
2088 [1][0] 32-bit integer regs with #0 Wn
2089 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2090 static const char *int_reg[2][2][32] = {
2093 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2094 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2095 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2096 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2097 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2098 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2099 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2100 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2101 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2102 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2103 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2104 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2105 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2106 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2107 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2108 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2113 /* Return the integer register name.
2114 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2116 static inline const char *
2117 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2119 const int has_zr = sp_reg_p ? 0 : 1;
2120 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2121 return int_reg[has_zr][is_64][regno];
2124 /* Like get_int_reg_name, but IS_64 is always 1. */
2126 static inline const char *
2127 get_64bit_int_reg_name (int regno, int sp_reg_p)
2129 const int has_zr = sp_reg_p ? 0 : 1;
2130 return int_reg[has_zr][1][regno];
2133 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2147 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2148 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2149 (depending on the type of the instruction). IMM8 will be expanded to a
2150 single-precision floating-point value (IS_DP == 0) or a double-precision
2151 floating-point value (IS_DP == 1). The expanded value is returned. */
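/* For example, with IS_DP == 0, imm8 = 0x70 expands to 0x3f800000, the
   single-precision encoding of 1.0.  */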
2154 expand_fp_imm (int is_dp, uint32_t imm8)
2157 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2159 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2160 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2161 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2162 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2163 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2166 imm = (imm8_7 << (63-32)) /* imm8<7> */
2167 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2168 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2169 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2170 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2175 imm = (imm8_7 << 31) /* imm8<7> */
2176 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2177 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2178 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2184 /* Produce the string representation of the register list operand *OPND
2185 in the buffer pointed to by BUF of size SIZE. */
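/* For example, a list of four registers starting at v4 with qualifier 4S is
   printed as "{v4.4s-v7.4s}", while a two-register list wrapping around at
   v31 is printed as "{v31.4s, v0.4s}".  */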
2187 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2189 const int num_regs = opnd->reglist.num_regs;
2190 const int first_reg = opnd->reglist.first_regno;
2191 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2192 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2193 char tb[8]; /* Temporary buffer. */
2195 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2196 assert (num_regs >= 1 && num_regs <= 4);
2198 /* Prepare the index if any. */
2199 if (opnd->reglist.has_index)
2200 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2204 /* The hyphenated form is preferred for disassembly if there are
2205 more than two registers in the list, and the register numbers
2206 are monotonically increasing in increments of one. */
2207 if (num_regs > 2 && last_reg > first_reg)
2208 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2209 last_reg, qlf_name, tb);
2212 const int reg0 = first_reg;
2213 const int reg1 = (first_reg + 1) & 0x1f;
2214 const int reg2 = (first_reg + 2) & 0x1f;
2215 const int reg3 = (first_reg + 3) & 0x1f;
2220 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2223 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2224 reg1, qlf_name, tb);
2227 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2228 reg1, qlf_name, reg2, qlf_name, tb);
2231 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2232 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2233 reg3, qlf_name, tb);
2239 /* Produce the string representation of the register offset address operand
2240 *OPND in the buffer pointed to by BUF of size SIZE. */
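/* For example, a UXTW-extended W1 offset with amount 2 from base X0 is
   printed as "[x0,w1,uxtw #2]"; with an LSL operator and no shift amount
   the output is simply "[x0,x1]".  */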
2242 print_register_offset_address (char *buf, size_t size,
2243 const aarch64_opnd_info *opnd)
2245 const size_t tblen = 16;
2246 char tb[tblen]; /* Temporary buffer. */
2247 bfd_boolean lsl_p = FALSE; /* Is the shift operator LSL? */
2248 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2249 bfd_boolean print_extend_p = TRUE;
2250 bfd_boolean print_amount_p = TRUE;
2251 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2253 switch (opnd->shifter.kind)
2255 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2256 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2257 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2258 case AARCH64_MOD_SXTX: break;
2259 default: assert (0);
2262 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2263 || !opnd->shifter.amount_present))
2265 /* Don't print the shift/extend amount when the amount is zero and
2266 this is not the special case of the 8-bit load/store instruction. */
2267 print_amount_p = FALSE;
2268 /* Likewise, no need to print the shift operator LSL in such a
2269 situation. */
2270 if (lsl_p)
2271 print_extend_p = FALSE;
2274 /* Prepare for the extend/shift. */
2278 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2280 snprintf (tb, tblen, ",%s", shift_name);
2285 snprintf (buf, size, "[%s,%s%s]",
2286 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2287 get_int_reg_name (opnd->addr.offset.regno,
2288 wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
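/* Illustrative outputs of the above: a UXTW-extended W offset with amount
   2 prints as "[x1,w2,uxtw #2]"; an LSL shift whose amount is zero (and
   which is not the 8-bit load/store special case) is dropped altogether,
   giving "[x3,x4]"; an SXTX extension with a zero amount keeps the extend
   name but omits the amount, giving "[x5,x6,sxtx]".  */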
2293 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2294 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2295 PC, PCREL_P and ADDRESS are used to pass in and return information about
2296 the PC-relative address calculation, where the PC value is passed in
2297 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
2298 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2299 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2301 The function serves both the disassembler and the assembler diagnostics
2302 issuer, which is the reason why it lives in this file. */
2305 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2306 const aarch64_opcode *opcode,
2307 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2311 const char *name = NULL;
2312 const aarch64_opnd_info *opnd = opnds + idx;
2313 enum aarch64_modifier_kind kind;
2322 case AARCH64_OPND_Rd:
2323 case AARCH64_OPND_Rn:
2324 case AARCH64_OPND_Rm:
2325 case AARCH64_OPND_Rt:
2326 case AARCH64_OPND_Rt2:
2327 case AARCH64_OPND_Rs:
2328 case AARCH64_OPND_Ra:
2329 case AARCH64_OPND_Rt_SYS:
2330 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2331 the <ic_op>, therefore we use opnd->present to override the
2332 generic optional-ness information. */
2333 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2335 /* Omit the operand, e.g. RET. */
2336 if (optional_operand_p (opcode, idx)
2337 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2339 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2340 || opnd->qualifier == AARCH64_OPND_QLF_X);
2341 snprintf (buf, size, "%s",
2342 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2345 case AARCH64_OPND_Rd_SP:
2346 case AARCH64_OPND_Rn_SP:
2347 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2348 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2349 || opnd->qualifier == AARCH64_OPND_QLF_X
2350 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2351 snprintf (buf, size, "%s",
2352 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2355 case AARCH64_OPND_Rm_EXT:
2356 kind = opnd->shifter.kind;
2357 assert (idx == 1 || idx == 2);
2358 if ((aarch64_stack_pointer_p (opnds)
2359 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2360 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2361 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2362 && kind == AARCH64_MOD_UXTW)
2363 || (opnd->qualifier == AARCH64_OPND_QLF_X
2364 && kind == AARCH64_MOD_UXTX)))
2366 /* 'LSL' is the preferred form in this case. */
2367 kind = AARCH64_MOD_LSL;
2368 if (opnd->shifter.amount == 0)
2370 /* Shifter omitted. */
2371 snprintf (buf, size, "%s",
2372 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2376 if (opnd->shifter.amount)
2377 snprintf (buf, size, "%s, %s #%d",
2378 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2379 aarch64_operand_modifiers[kind].name,
2380 opnd->shifter.amount);
2382 snprintf (buf, size, "%s, %s",
2383 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2384 aarch64_operand_modifiers[kind].name);
2387 case AARCH64_OPND_Rm_SFT:
2388 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2389 || opnd->qualifier == AARCH64_OPND_QLF_X);
2390 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2391 snprintf (buf, size, "%s",
2392 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2394 snprintf (buf, size, "%s, %s #%d",
2395 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2396 aarch64_operand_modifiers[opnd->shifter.kind].name,
2397 opnd->shifter.amount);
2400 case AARCH64_OPND_Fd:
2401 case AARCH64_OPND_Fn:
2402 case AARCH64_OPND_Fm:
2403 case AARCH64_OPND_Fa:
2404 case AARCH64_OPND_Ft:
2405 case AARCH64_OPND_Ft2:
2406 case AARCH64_OPND_Sd:
2407 case AARCH64_OPND_Sn:
2408 case AARCH64_OPND_Sm:
2409 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2413 case AARCH64_OPND_Vd:
2414 case AARCH64_OPND_Vn:
2415 case AARCH64_OPND_Vm:
2416 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2417 aarch64_get_qualifier_name (opnd->qualifier));
2420 case AARCH64_OPND_Ed:
2421 case AARCH64_OPND_En:
2422 case AARCH64_OPND_Em:
2423 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2424 aarch64_get_qualifier_name (opnd->qualifier),
2425 opnd->reglane.index);
2428 case AARCH64_OPND_VdD1:
2429 case AARCH64_OPND_VnD1:
2430 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2433 case AARCH64_OPND_LVn:
2434 case AARCH64_OPND_LVt:
2435 case AARCH64_OPND_LVt_AL:
2436 case AARCH64_OPND_LEt:
2437 print_register_list (buf, size, opnd);
2440 case AARCH64_OPND_Cn:
2441 case AARCH64_OPND_Cm:
2442 snprintf (buf, size, "C%d", opnd->reg.regno);
2445 case AARCH64_OPND_IDX:
2446 case AARCH64_OPND_IMM:
2447 case AARCH64_OPND_WIDTH:
2448 case AARCH64_OPND_UIMM3_OP1:
2449 case AARCH64_OPND_UIMM3_OP2:
2450 case AARCH64_OPND_BIT_NUM:
2451 case AARCH64_OPND_IMM_VLSL:
2452 case AARCH64_OPND_IMM_VLSR:
2453 case AARCH64_OPND_SHLL_IMM:
2454 case AARCH64_OPND_IMM0:
2455 case AARCH64_OPND_IMMR:
2456 case AARCH64_OPND_IMMS:
2457 case AARCH64_OPND_FBITS:
2458 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2461 case AARCH64_OPND_IMM_MOV:
2462 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2464 case 4: /* e.g. MOV Wd, #<imm32>. */
2466 int imm32 = opnd->imm.value;
2467 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2470 case 8: /* e.g. MOV Xd, #<imm64>. */
2471 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2472 opnd->imm.value, opnd->imm.value);
2474 default: assert (0);
2478 case AARCH64_OPND_FPIMM0:
2479 snprintf (buf, size, "#0.0");
2482 case AARCH64_OPND_LIMM:
2483 case AARCH64_OPND_AIMM:
2484 case AARCH64_OPND_HALF:
2485 if (opnd->shifter.amount)
2486 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2487 opnd->shifter.amount);
2489 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2492 case AARCH64_OPND_SIMD_IMM:
2493 case AARCH64_OPND_SIMD_IMM_SFT:
2494 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2495 || opnd->shifter.kind == AARCH64_MOD_NONE)
2496 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2498 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2499 aarch64_operand_modifiers[opnd->shifter.kind].name,
2500 opnd->shifter.amount);
2503 case AARCH64_OPND_FPIMM:
2504 case AARCH64_OPND_SIMD_FPIMM:
2505 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2507 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2510 c.i = expand_fp_imm (0, opnd->imm.value);
2511 snprintf (buf, size, "#%.18e", c.f);
2514 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2517 c.i = expand_fp_imm (1, opnd->imm.value);
2518 snprintf (buf, size, "#%.18e", c.d);
2521 default: assert (0);
2525 case AARCH64_OPND_CCMP_IMM:
2526 case AARCH64_OPND_NZCV:
2527 case AARCH64_OPND_EXCEPTION:
2528 case AARCH64_OPND_UIMM4:
2529 case AARCH64_OPND_UIMM7:
2530 if (optional_operand_p (opcode, idx) == TRUE
2531 && (opnd->imm.value ==
2532 (int64_t) get_optional_operand_default_value (opcode)))
2533 /* Omit the operand, e.g. DCPS1. */
2535 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2538 case AARCH64_OPND_COND:
2539 case AARCH64_OPND_COND1:
2540 snprintf (buf, size, "%s", opnd->cond->names[0]);
2543 case AARCH64_OPND_ADDR_ADRP:
2544 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2550 /* This is not necessary during disassembly, as print_address_func
2551 in the disassemble_info will take care of the printing. But some
2552 other callers may still be interested in getting the string in *BUF,
2553 so here we do the snprintf regardless. */
2554 snprintf (buf, size, "#0x%" PRIx64, addr);
2557 case AARCH64_OPND_ADDR_PCREL14:
2558 case AARCH64_OPND_ADDR_PCREL19:
2559 case AARCH64_OPND_ADDR_PCREL21:
2560 case AARCH64_OPND_ADDR_PCREL26:
2561 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2566 /* This is not necessary during disassembly, as print_address_func
2567 in the disassemble_info will take care of the printing. But some
2568 other callers may still be interested in getting the string in *BUF,
2569 so here we do the snprintf regardless. */
2570 snprintf (buf, size, "#0x%" PRIx64, addr);
2573 case AARCH64_OPND_ADDR_SIMPLE:
2574 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2575 case AARCH64_OPND_SIMD_ADDR_POST:
2576 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2577 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2579 if (opnd->addr.offset.is_reg)
2580 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2582 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2585 snprintf (buf, size, "[%s]", name);
2588 case AARCH64_OPND_ADDR_REGOFF:
2589 print_register_offset_address (buf, size, opnd);
2592 case AARCH64_OPND_ADDR_SIMM7:
2593 case AARCH64_OPND_ADDR_SIMM9:
2594 case AARCH64_OPND_ADDR_SIMM9_2:
2595 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2596 if (opnd->addr.writeback)
2598 if (opnd->addr.preind)
2599 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2601 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2605 if (opnd->addr.offset.imm)
2606 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2608 snprintf (buf, size, "[%s]", name);
2612 case AARCH64_OPND_ADDR_UIMM12:
2613 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2614 if (opnd->addr.offset.imm)
2615 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2617 snprintf (buf, size, "[%s]", name);
2620 case AARCH64_OPND_SYSREG:
2621 for (i = 0; aarch64_sys_regs[i].name; ++i)
2622 if (aarch64_sys_regs[i].value == opnd->sysreg
2623 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
2625 if (aarch64_sys_regs[i].name)
2626 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2629 /* Implementation defined system register. */
2630 unsigned int value = opnd->sysreg;
2631 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2632 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2637 case AARCH64_OPND_PSTATEFIELD:
2638 for (i = 0; aarch64_pstatefields[i].name; ++i)
2639 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2641 assert (aarch64_pstatefields[i].name);
2642 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2645 case AARCH64_OPND_SYSREG_AT:
2646 case AARCH64_OPND_SYSREG_DC:
2647 case AARCH64_OPND_SYSREG_IC:
2648 case AARCH64_OPND_SYSREG_TLBI:
2649 snprintf (buf, size, "%s", opnd->sysins_op->template);
2652 case AARCH64_OPND_BARRIER:
2653 snprintf (buf, size, "%s", opnd->barrier->name);
2656 case AARCH64_OPND_BARRIER_ISB:
2657 /* Operand can be omitted, e.g. in DCPS1. */
2658 if (! optional_operand_p (opcode, idx)
2659 || (opnd->barrier->value
2660 != get_optional_operand_default_value (opcode)))
2661 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2664 case AARCH64_OPND_PRFOP:
2665 if (opnd->prfop->name != NULL)
2666 snprintf (buf, size, "%s", opnd->prfop->name);
2668 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
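#ifdef DEBUG_AARCH64
/* A minimal usage sketch, illustrative only: print every operand of an
   already-decoded instruction INSN located at address PC.  INSN is assumed
   to have been filled in by the decoder; PCREL_P and ADDRESS are passed as
   NULL because only the textual form is wanted here.  The helper name is
   hypothetical and printf is assumed to be available via the existing
   includes.  */
static void
debug_print_all_operands (const aarch64_inst *insn, bfd_vma pc)
{
  int i;
  char str[128];

  for (i = 0; i < AARCH64_MAX_OPND_NUM
	 && insn->operands[i].type != AARCH64_OPND_NIL; ++i)
    {
      aarch64_print_operand (str, sizeof (str), pc, insn->opcode,
			     insn->operands, i, NULL, NULL);
      printf ("  operand %d: %s\n", i, str);
    }
}
#endif /* DEBUG_AARCH64 */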
2676 #define CPENC(op0,op1,crn,crm,op2) \
2677 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2678 /* For section 3.9.3, Instructions for Accessing Special Purpose Registers. */
2679 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2680 /* For section 3.9.10, System Instructions. */
2681 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
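/* Worked example: the NZCV special-purpose register is S3_3_C4_C2_0, i.e.
   op0=3, op1=3, CRn=4, CRm=2, op2=0.  It is entered below as CPEN_(3,C2,0),
   which expands to CPENC(3,3,4,C2,0) = (3<<14)|(3<<11)|(4<<7)|(2<<3)|0
   = 0xda10; aarch64_print_operand unpacks exactly these op0/op1/CRn/CRm/op2
   fields again when it has to print an unnamed (implementation defined)
   system register.  */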
2703 #define F_DEPRECATED 0x1 /* Deprecated system register. */
2705 /* TODO: there are two more issues that need to be resolved:
2706 1. handle read-only and write-only system registers
2707 2. handle cpu-implementation-defined system registers. */
2708 const aarch64_sys_reg aarch64_sys_regs [] =
2710 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
2711 { "elr_el1", CPEN_(0,C0,1), 0 },
2712 { "sp_el0", CPEN_(0,C1,0), 0 },
2713 { "spsel", CPEN_(0,C2,0), 0 },
2714 { "daif", CPEN_(3,C2,1), 0 },
2715 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
2716 { "nzcv", CPEN_(3,C2,0), 0 },
2717 { "fpcr", CPEN_(3,C4,0), 0 },
2718 { "fpsr", CPEN_(3,C4,1), 0 },
2719 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2720 { "dlr_el0", CPEN_(3,C5,1), 0 },
2721 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2722 { "elr_el2", CPEN_(4,C0,1), 0 },
2723 { "sp_el1", CPEN_(4,C1,0), 0 },
2724 { "spsr_irq", CPEN_(4,C3,0), 0 },
2725 { "spsr_abt", CPEN_(4,C3,1), 0 },
2726 { "spsr_und", CPEN_(4,C3,2), 0 },
2727 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2728 { "spsr_el3", CPEN_(6,C0,0), 0 },
2729 { "elr_el3", CPEN_(6,C0,1), 0 },
2730 { "sp_el2", CPEN_(6,C1,0), 0 },
2731 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2732 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2733 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2734 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2735 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2736 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2737 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2738 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2739 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2740 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2741 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2742 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2743 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2744 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2745 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2746 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
2747 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2748 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2749 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2750 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2751 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2752 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2753 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2754 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2755 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2756 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2757 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2758 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2759 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
2760 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
2761 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
2762 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
2763 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
2764 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
2765 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
2766 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
2767 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
2768 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
2769 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
2770 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
2771 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
2772 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
2773 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
2774 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
2775 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
2776 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
2777 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
2778 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
2779 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
2780 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
2781 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
2782 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
2783 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
2784 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
2785 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
2786 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
2787 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
2788 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
2789 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
2790 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
2791 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
2792 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
2793 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
2794 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
2795 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
2796 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
2797 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
2798 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
2799 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
2800 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
2801 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
2802 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
2803 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
2804 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
2805 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
2806 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
2807 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
2808 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
2809 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
2810 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
2811 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
2812 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
2813 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
2814 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
2815 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
2816 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
2817 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
2818 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
2819 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
2820 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
2821 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
2822 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
2823 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
2824 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
2825 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
2826 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
2827 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
2828 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
2829 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
2830 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
2831 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
2832 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
2833 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
2834 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
2835 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
2836 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
2837 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
2838 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
2839 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
2840 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
2841 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
2842 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
2843 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
2844 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
2845 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
2846 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
2847 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
2848 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
2849 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
2850 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
2851 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
2852 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
2853 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
2854 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
2855 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
2856 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
2857 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
2858 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
2859 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
2860 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
2861 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
2862 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
2863 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
2864 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
2865 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
2866 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
2867 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
2868 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
2869 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
2870 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
2871 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
2872 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
2873 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
2874 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
2875 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
2876 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
2877 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
2878 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
2879 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
2880 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
2881 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
2882 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
2883 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
2884 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
2885 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
2886 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
2887 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
2888 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
2889 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
2890 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
2891 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
2892 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
2893 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
2894 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
2895 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
2896 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
2897 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
2898 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
2899 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
2900 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
2901 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
2902 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
2903 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
2904 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
2905 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
2906 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
2907 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
2908 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
2909 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
2910 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
2911 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
2912 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
2913 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
2914 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
2915 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
2916 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
2917 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
2918 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
2919 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
2920 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
2921 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
2922 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
2923 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
2924 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
2925 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
2926 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
2927 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
2928 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
2929 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
2930 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
2931 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
2932 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
2933 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
2934 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
2935 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
2936 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
2938 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
2939 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
2940 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
2941 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
2942 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
2943 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
2944 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
2945 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
2946 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
2947 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
2948 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
2949 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
2950 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
2951 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
2952 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
2953 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
2954 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
2955 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
2956 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
2957 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
2958 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
2959 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
2960 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
2961 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
2962 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
2963 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
2964 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
2965 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
2966 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
2967 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
2968 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
2969 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
2970 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
2971 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
2972 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
2973 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
2974 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
2975 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
2976 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
2977 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
2978 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
2979 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
2980 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
2981 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
2982 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
2983 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
2984 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
2985 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
2986 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
2987 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
2988 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
2989 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
2990 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
2991 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
2992 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
2993 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
2994 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
2995 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
2996 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
2997 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
2998 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
2999 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3000 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3001 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3002 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3003 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3004 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3005 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3006 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3007 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3008 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3009 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3010 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3011 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3012 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3013 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3014 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3015 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3016 { 0, CPENC(0,0,0,0,0), 0 },
3020 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3022 return (reg->flags & F_DEPRECATED) != 0;
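/* For example, "spsr_el1" and the legacy alias "spsr_svc" above share the
   encoding CPEN_(0,C0,0), but the alias carries F_DEPRECATED, so the lookup
   loop in aarch64_print_operand skips it and the disassembler always prints
   "spsr_el1".  */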
3025 const aarch64_sys_reg aarch64_pstatefields [] =
3027 { "spsel", 0x05, 0 },
3028 { "daifset", 0x1e, 0 },
3029 { "daifclr", 0x1f, 0 },
3030 { 0, CPENC(0,0,0,0,0), 0 },
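/* The pstatefield values above are the op1:op2 encoding used by the
   immediate form of MSR; e.g. "daifset" is op1=3, op2=6, giving
   (3 << 3) | 6 = 0x1e, which matches the table entry.  */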
3033 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3035 { "ialluis", CPENS(0,C7,C1,0), 0 },
3036 { "iallu", CPENS(0,C7,C5,0), 0 },
3037 { "ivau", CPENS(3,C7,C5,1), 1 },
3038 { 0, CPENS(0,0,0,0), 0 }
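/* In these system-instruction tables the final 0/1 field flags whether the
   operation takes an <Xt> register operand: e.g. "ivau" (IC IVAU, <Xt>) has
   it set while "ialluis" does not, which is how the per-<ic_op> optional-ness
   of <Xt> mentioned in aarch64_print_operand above is decided.  */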
3041 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3043 { "zva", CPENS(3,C7,C4,1), 1 },
3044 { "ivac", CPENS(0,C7,C6,1), 1 },
3045 { "isw", CPENS(0,C7,C6,2), 1 },
3046 { "cvac", CPENS(3,C7,C10,1), 1 },
3047 { "csw", CPENS(0,C7,C10,2), 1 },
3048 { "cvau", CPENS(3,C7,C11,1), 1 },
3049 { "civac", CPENS(3,C7,C14,1), 1 },
3050 { "cisw", CPENS(0,C7,C14,2), 1 },
3051 { 0, CPENS(0,0,0,0), 0 }
3054 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3056 { "s1e1r", CPENS(0,C7,C8,0), 1 },
3057 { "s1e1w", CPENS(0,C7,C8,1), 1 },
3058 { "s1e0r", CPENS(0,C7,C8,2), 1 },
3059 { "s1e0w", CPENS(0,C7,C8,3), 1 },
3060 { "s12e1r", CPENS(4,C7,C8,4), 1 },
3061 { "s12e1w", CPENS(4,C7,C8,5), 1 },
3062 { "s12e0r", CPENS(4,C7,C8,6), 1 },
3063 { "s12e0w", CPENS(4,C7,C8,7), 1 },
3064 { "s1e2r", CPENS(4,C7,C8,0), 1 },
3065 { "s1e2w", CPENS(4,C7,C8,1), 1 },
3066 { "s1e3r", CPENS(6,C7,C8,0), 1 },
3067 { "s1e3w", CPENS(6,C7,C8,1), 1 },
3068 { 0, CPENS(0,0,0,0), 0 }
3071 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3073 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3074 { "vae1", CPENS(0,C8,C7,1), 1 },
3075 { "aside1", CPENS(0,C8,C7,2), 1 },
3076 { "vaae1", CPENS(0,C8,C7,3), 1 },
3077 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3078 { "vae1is", CPENS(0,C8,C3,1), 1 },
3079 { "aside1is", CPENS(0,C8,C3,2), 1 },
3080 { "vaae1is", CPENS(0,C8,C3,3), 1 },
3081 { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3082 { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3083 { "ipas2e1", CPENS(4,C8,C4,1), 1 },
3084 { "ipas2le1", CPENS(4,C8,C4,5), 1 },
3085 { "vae2", CPENS(4,C8,C7,1), 1 },
3086 { "vae2is", CPENS(4,C8,C3,1), 1 },
3087 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3088 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3089 { "vae3", CPENS(6,C8,C7,1), 1 },
3090 { "vae3is", CPENS(6,C8,C3,1), 1 },
3091 { "alle2", CPENS(4,C8,C7,0), 0 },
3092 { "alle2is", CPENS(4,C8,C3,0), 0 },
3093 { "alle1", CPENS(4,C8,C7,4), 0 },
3094 { "alle1is", CPENS(4,C8,C3,4), 0 },
3095 { "alle3", CPENS(6,C8,C7,0), 0 },
3096 { "alle3is", CPENS(6,C8,C3,0), 0 },
3097 { "vale1is", CPENS(0,C8,C3,5), 1 },
3098 { "vale2is", CPENS(4,C8,C3,5), 1 },
3099 { "vale3is", CPENS(6,C8,C3,5), 1 },
3100 { "vaale1is", CPENS(0,C8,C3,7), 1 },
3101 { "vale1", CPENS(0,C8,C7,5), 1 },
3102 { "vale2", CPENS(4,C8,C7,5), 1 },
3103 { "vale3", CPENS(6,C8,C7,5), 1 },
3104 { "vaale1", CPENS(0,C8,C7,7), 1 },
3105 { 0, CPENS(0,0,0,0), 0 }
3125 /* Include the opcode description table as well as the operand description
3126 table. */
3127 #include "aarch64-tbl.h"