1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
31 /* Cached mapping symbol state. */
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_stop_offset = 0;
41 static bfd_vma last_mapping_addr = 0;
44 static int no_aliases = 0; /* If set disassemble as most general inst. */
45 static int no_notes = 1;	/* If set, do not print disassembler notes in the
46 output as comments. */
48 /* Currently active instruction sequence. */
49 static aarch64_instr_sequence insn_sequence;
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
66 if (CONST_STRNEQ (option, "aliases"))
72 if (CONST_STRNEQ (option, "no-notes"))
78 if (CONST_STRNEQ (option, "notes"))
85 if (CONST_STRNEQ (option, "debug_dump"))
90 #endif /* DEBUG_AARCH64 */
93 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
97 parse_aarch64_dis_options (const char *options)
99 const char *option_end;
104 while (*options != '\0')
106 /* Skip empty options. */
113 /* We know that *options is neither NUL nor a comma. */
114 option_end = options + 1;
115 while (*option_end != ',' && *option_end != '\0')
118 parse_aarch64_dis_option (options, option_end - options);
120 /* Go on to the next one. If option_end points to a comma, it
121 will be skipped above. */
122 options = option_end;
126 /* Functions doing the instruction disassembling. */
128 /* The unnamed arguments consist of the number of fields and information about
129 these fields where the VALUE will be extracted from CODE and returned.
130 MASK can be zero or the base mask of the opcode.
132 N.B. the fields are required to be in such an order that the most significant
133 field for VALUE comes first, e.g. the <index> in
134 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
135 is encoded in H:L:M; in such cases the fields H:L:M should be passed in
136 the order of H, L, M. */
139 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
142 const aarch64_field *field;
143 enum aarch64_field_kind kind;
147 num = va_arg (va, uint32_t);
149 aarch64_insn value = 0x0;
152 kind = va_arg (va, enum aarch64_field_kind);
153 field = &fields[kind];
154 value <<= field->width;
155 value |= extract_field (kind, code, mask);
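/* For example, for the H:L:M index described above, the call
   extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M)
   with H=1, L=0, M=1 builds ((1 << 1 | 0) << 1) | 1, i.e. the 3-bit
   value 0b101.  */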
160 /* Extract the value of all fields in SELF->fields from instruction CODE.
161 The least significant bit comes from the final field. */
164 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
168 enum aarch64_field_kind kind;
171 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
173 kind = self->fields[i];
174 value <<= fields[kind].width;
175 value |= extract_field (kind, code, 0);
180 /* Sign-extend bit I of VALUE. */
181 static inline int32_t
182 sign_extend (aarch64_insn value, unsigned i)
184 uint32_t ret = value;
187 if ((value >> i) & 0x1)
189 uint32_t val = (uint32_t)(-1) << i;
192 return (int32_t) ret;
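/* For example, sign_extend (0x1ff, 8) treats bit 8 as the sign bit of a
   9-bit immediate and returns -1, while sign_extend (0x0ff, 8) returns 255.  */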
195 /* N.B. the following inline helper functions create a dependency on the
196 order of operand qualifier enumerators. */
198 /* Given VALUE, return qualifier for a general purpose register. */
199 static inline enum aarch64_opnd_qualifier
200 get_greg_qualifier_from_value (aarch64_insn value)
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
204 && aarch64_get_qualifier_standard_value (qualifier) == value);
208 /* Given VALUE, return qualifier for a vector register. This does not support
209 decoding instructions that accept the 2H vector type. */
211 static inline enum aarch64_opnd_qualifier
212 get_vreg_qualifier_from_value (aarch64_insn value)
214 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
216 /* Instructions using vector type 2H should not call this function. Skip over
218 if (qualifier >= AARCH64_OPND_QLF_V_2H)
222 && aarch64_get_qualifier_standard_value (qualifier) == value);
226 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
227 static inline enum aarch64_opnd_qualifier
228 get_sreg_qualifier_from_value (aarch64_insn value)
230 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
233 && aarch64_get_qualifier_standard_value (qualifier) == value);
237 /* The instruction in *INST is probably half way through being decoded, and
238 our caller wants to know the expected qualifier for operand I. Return
239 such a qualifier if we can establish it; otherwise return
240 AARCH64_OPND_QLF_NIL. */
242 static aarch64_opnd_qualifier_t
243 get_expected_qualifier (const aarch64_inst *inst, int i)
245 aarch64_opnd_qualifier_seq_t qualifiers;
246 /* Should not be called if the qualifier is known. */
247 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
248 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
250 return qualifiers[i];
252 return AARCH64_OPND_QLF_NIL;
255 /* Operand extractors. */
258 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
259 const aarch64_insn code,
260 const aarch64_inst *inst ATTRIBUTE_UNUSED,
261 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
263 info->reg.regno = extract_field (self->fields[0], code, 0);
268 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
269 const aarch64_insn code ATTRIBUTE_UNUSED,
270 const aarch64_inst *inst ATTRIBUTE_UNUSED,
271 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
273 assert (info->idx == 1
275 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
279 /* e.g. IC <ic_op>{, <Xt>}. */
281 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
282 const aarch64_insn code,
283 const aarch64_inst *inst ATTRIBUTE_UNUSED,
284 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
286 info->reg.regno = extract_field (self->fields[0], code, 0);
287 assert (info->idx == 1
288 && (aarch64_get_operand_class (inst->operands[0].type)
289 == AARCH64_OPND_CLASS_SYSTEM));
290 /* This will make the constraint checking happy and more importantly will
291 help the disassembler determine whether this operand is optional or
293 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
298 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
300 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
301 const aarch64_insn code,
302 const aarch64_inst *inst ATTRIBUTE_UNUSED,
303 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
306 info->reglane.regno = extract_field (self->fields[0], code,
309 /* Index and/or type. */
310 if (inst->opcode->iclass == asisdone
311 || inst->opcode->iclass == asimdins)
313 if (info->type == AARCH64_OPND_En
314 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
317 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
318 assert (info->idx == 1); /* Vn */
319 aarch64_insn value = extract_field (FLD_imm4, code, 0);
320 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
321 info->qualifier = get_expected_qualifier (inst, info->idx);
322 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
323 info->reglane.index = value >> shift;
327 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
335 aarch64_insn value = extract_field (FLD_imm5, code, 0);
336 while (++pos <= 3 && (value & 0x1) == 0)
340 info->qualifier = get_sreg_qualifier_from_value (pos);
341 info->reglane.index = (unsigned) (value >> 1);
344 else if (inst->opcode->iclass == dotproduct)
346 /* Need information in other operand(s) to help decoding. */
347 info->qualifier = get_expected_qualifier (inst, info->idx);
348 switch (info->qualifier)
350 case AARCH64_OPND_QLF_S_4B:
352 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
353 info->reglane.regno &= 0x1f;
359 else if (inst->opcode->iclass == cryptosm3)
361 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
362 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
366 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
367 or SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
369 /* Need information in other operand(s) to help decoding. */
370 info->qualifier = get_expected_qualifier (inst, info->idx);
371 switch (info->qualifier)
373 case AARCH64_OPND_QLF_S_H:
374 if (info->type == AARCH64_OPND_Em16)
377 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
379 info->reglane.regno &= 0xf;
384 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
387 case AARCH64_OPND_QLF_S_S:
389 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
391 case AARCH64_OPND_QLF_S_D:
393 info->reglane.index = extract_field (FLD_H, code, 0);
399 if (inst->opcode->op == OP_FCMLA_ELEM
400 && info->qualifier != AARCH64_OPND_QLF_S_H)
402 /* Complex operand takes two elements. */
403 if (info->reglane.index & 1)
405 info->reglane.index /= 2;
413 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
414 const aarch64_insn code,
415 const aarch64_inst *inst ATTRIBUTE_UNUSED,
416 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
419 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
421 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
425 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
427 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
428 aarch64_opnd_info *info, const aarch64_insn code,
429 const aarch64_inst *inst,
430 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
433 /* Number of elements in each structure to be loaded/stored. */
434 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
438 unsigned is_reserved;
440 unsigned num_elements;
456 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
458 value = extract_field (FLD_opcode, code, 0);
459 /* PR 21595: Check for a bogus value. */
460 if (value >= ARRAY_SIZE (data))
462 if (expected_num != data[value].num_elements || data[value].is_reserved)
464 info->reglist.num_regs = data[value].num_regs;
469 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
470 lanes instructions. */
472 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
473 aarch64_opnd_info *info, const aarch64_insn code,
474 const aarch64_inst *inst,
475 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
480 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
482 value = extract_field (FLD_S, code, 0);
484 /* Number of registers is equal to the number of elements in
485 each structure to be loaded/stored. */
486 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
487 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
489 /* Except when it is LD1R. */
490 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
491 info->reglist.num_regs = 2;
496 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
497 load/store single element instructions. */
499 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
500 aarch64_opnd_info *info, const aarch64_insn code,
501 const aarch64_inst *inst ATTRIBUTE_UNUSED,
502 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
504 aarch64_field field = {0, 0};
505 aarch64_insn QSsize; /* fields Q:S:size. */
506 aarch64_insn opcodeh2; /* opcode<2:1> */
509 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
511 /* Decode the index, opcode<2:1> and size. */
512 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
513 opcodeh2 = extract_field_2 (&field, code, 0);
514 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
518 info->qualifier = AARCH64_OPND_QLF_S_B;
519 /* Index encoded in "Q:S:size". */
520 info->reglist.index = QSsize;
526 info->qualifier = AARCH64_OPND_QLF_S_H;
527 /* Index encoded in "Q:S:size<1>". */
528 info->reglist.index = QSsize >> 1;
531 if ((QSsize >> 1) & 0x1)
534 if ((QSsize & 0x1) == 0)
536 info->qualifier = AARCH64_OPND_QLF_S_S;
537 /* Index encoded in "Q:S". */
538 info->reglist.index = QSsize >> 2;
542 if (extract_field (FLD_S, code, 0))
545 info->qualifier = AARCH64_OPND_QLF_S_D;
546 /* Index encoded in "Q". */
547 info->reglist.index = QSsize >> 3;
554 info->reglist.has_index = 1;
555 info->reglist.num_regs = 0;
556 /* Number of registers is equal to the number of elements in
557 each structure to be loaded/stored. */
558 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
559 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
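/* Worked example: for LD1 {<Vt>.S}[3], [<Xn|SP>] (opcode<2:1> is 2),
   "Q:S:size" is 0b1100; size<0> being 0 selects the S variant and the
   index is Q:S, i.e. 3.  */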
564 /* Decode fields immh:immb and/or Q for e.g.
565 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
566 or SSHR <V><d>, <V><n>, #<shift>. */
569 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
570 aarch64_opnd_info *info, const aarch64_insn code,
571 const aarch64_inst *inst,
572 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
575 aarch64_insn Q, imm, immh;
576 enum aarch64_insn_class iclass = inst->opcode->iclass;
578 immh = extract_field (FLD_immh, code, 0);
581 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
583 /* Get highest set bit in immh. */
584 while (--pos >= 0 && (immh & 0x8) == 0)
587 assert ((iclass == asimdshf || iclass == asisdshf)
588 && (info->type == AARCH64_OPND_IMM_VLSR
589 || info->type == AARCH64_OPND_IMM_VLSL));
591 if (iclass == asimdshf)
593 Q = extract_field (FLD_Q, code, 0);
595 0000 x SEE AdvSIMD modified immediate
605 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
608 info->qualifier = get_sreg_qualifier_from_value (pos);
610 if (info->type == AARCH64_OPND_IMM_VLSR)
612 0000 SEE AdvSIMD modified immediate
613 0001 (16-UInt(immh:immb))
614 001x (32-UInt(immh:immb))
615 01xx (64-UInt(immh:immb))
616 1xxx (128-UInt(immh:immb)) */
617 info->imm.value = (16 << pos) - imm;
621 0000 SEE AdvSIMD modified immediate
622 0001 (UInt(immh:immb)-8)
623 001x (UInt(immh:immb)-16)
624 01xx (UInt(immh:immb)-32)
625 1xxx (UInt(immh:immb)-64) */
626 info->imm.value = imm - (8 << pos);
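/* Worked example: SSHR <Vd>.8H, <Vn>.8H, #3 has immh:immb = 0b0001101
   (pos ends up as 0), so the right-shift decodes as 16 - 13 = 3, while
   SHL <Vd>.8H, <Vn>.8H, #3 has immh:immb = 0b0001011, giving 11 - 8 = 3.  */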
631 /* Decode the shift immediate (the element size) for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>. */
633 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
634 aarch64_opnd_info *info, const aarch64_insn code,
635 const aarch64_inst *inst ATTRIBUTE_UNUSED,
636 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
640 val = extract_field (FLD_size, code, 0);
643 case 0: imm = 8; break;
644 case 1: imm = 16; break;
645 case 2: imm = 32; break;
646 default: return FALSE;
648 info->imm.value = imm;
652 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
653 value in the field(s) will be extracted as unsigned immediate value. */
655 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
656 const aarch64_insn code,
657 const aarch64_inst *inst ATTRIBUTE_UNUSED,
658 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
662 imm = extract_all_fields (self, code);
664 if (operand_need_sign_extension (self))
665 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
667 if (operand_need_shift_by_two (self))
669 else if (operand_need_shift_by_four (self))
672 if (info->type == AARCH64_OPND_ADDR_ADRP)
675 info->imm.value = imm;
679 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
681 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
682 const aarch64_insn code,
683 const aarch64_inst *inst ATTRIBUTE_UNUSED,
684 aarch64_operand_error *errors)
686 aarch64_ext_imm (self, info, code, inst, errors);
687 info->shifter.kind = AARCH64_MOD_LSL;
688 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
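/* For example, MOVZ <Xd>, #0x1234, LSL #32 has hw = 2, so the shifter
   amount decodes to 2 << 4 = 32.  */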
692 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
693 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
695 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
696 aarch64_opnd_info *info,
697 const aarch64_insn code,
698 const aarch64_inst *inst ATTRIBUTE_UNUSED,
699 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
702 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
703 aarch64_field field = {0, 0};
705 assert (info->idx == 1);
707 if (info->type == AARCH64_OPND_SIMD_FPIMM)
710 /* a:b:c:d:e:f:g:h */
711 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
712 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
714 /* Either MOVI <Dd>, #<imm>
715 or MOVI <Vd>.2D, #<imm>.
716 <imm> is a 64-bit immediate
717 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
718 encoded in "a:b:c:d:e:f:g:h". */
720 unsigned abcdefgh = imm;
721 for (imm = 0ull, i = 0; i < 8; i++)
722 if (((abcdefgh >> i) & 0x1) != 0)
723 imm |= 0xffull << (8 * i);
725 info->imm.value = imm;
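/* For example, abcdefgh = 0xa5 expands to 0xff00ff0000ff00ff: each set
   bit i in "a:b:c:d:e:f:g:h" selects byte i of the 64-bit immediate to
   be 0xff.  */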
728 info->qualifier = get_expected_qualifier (inst, info->idx);
729 switch (info->qualifier)
731 case AARCH64_OPND_QLF_NIL:
733 info->shifter.kind = AARCH64_MOD_NONE;
735 case AARCH64_OPND_QLF_LSL:
737 info->shifter.kind = AARCH64_MOD_LSL;
738 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
740 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
741 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
742 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
743 default: assert (0); return FALSE;
745 /* 00: 0; 01: 8; 10:16; 11:24. */
746 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
748 case AARCH64_OPND_QLF_MSL:
750 info->shifter.kind = AARCH64_MOD_MSL;
751 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
752 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
762 /* Decode an 8-bit floating-point immediate. */
764 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
765 const aarch64_insn code,
766 const aarch64_inst *inst ATTRIBUTE_UNUSED,
767 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
769 info->imm.value = extract_all_fields (self, code);
774 /* Decode a 1-bit rotate immediate (#90 or #270). */
776 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
777 const aarch64_insn code,
778 const aarch64_inst *inst ATTRIBUTE_UNUSED,
779 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
781 uint64_t rot = extract_field (self->fields[0], code, 0);
783 info->imm.value = rot * 180 + 90;
787 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
789 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
790 const aarch64_insn code,
791 const aarch64_inst *inst ATTRIBUTE_UNUSED,
792 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
794 uint64_t rot = extract_field (self->fields[0], code, 0);
796 info->imm.value = rot * 90;
800 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
802 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
803 aarch64_opnd_info *info, const aarch64_insn code,
804 const aarch64_inst *inst ATTRIBUTE_UNUSED,
805 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
807 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
811 /* Decode arithmetic immediate for e.g.
812 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
814 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
815 aarch64_opnd_info *info, const aarch64_insn code,
816 const aarch64_inst *inst ATTRIBUTE_UNUSED,
817 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
821 info->shifter.kind = AARCH64_MOD_LSL;
823 value = extract_field (FLD_shift, code, 0);
826 info->shifter.amount = value ? 12 : 0;
827 /* imm12 (unsigned) */
828 info->imm.value = extract_field (FLD_imm12, code, 0);
833 /* Return true if VALUE is a valid logical immediate encoding, storing the
834 decoded value in *RESULT if so. ESIZE is the number of bytes in the
835 decoded immediate. */
837 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
843 /* value is N:immr:imms. */
845 R = (value >> 6) & 0x3f;
846 N = (value >> 12) & 0x1;
848 /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
849 (in other words, right rotated by R), then replicated. */
853 mask = 0xffffffffffffffffull;
859 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
860 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
861 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
862 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
863 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
864 default: return FALSE;
866 mask = (1ull << simd_size) - 1;
867 /* Top bits are IGNORED. */
871 if (simd_size > esize * 8)
874 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
875 if (S == simd_size - 1)
877 /* S+1 consecutive bits to 1. */
878 /* NOTE: S can't be 63 due to detection above. */
879 imm = (1ull << (S + 1)) - 1;
880 /* Rotate to the left by simd_size - R. */
882 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
883 /* Replicate the value according to SIMD size. */
886 case 2: imm = (imm << 2) | imm;
888 case 4: imm = (imm << 4) | imm;
890 case 8: imm = (imm << 8) | imm;
892 case 16: imm = (imm << 16) | imm;
894 case 32: imm = (imm << 32) | imm;
897 default: assert (0); return 0;
900 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
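/* Worked example: N = 0, immr = 0b000000, imms = 0b111100 selects the
   2-bit element pattern (simd_size = 2, S = 0, R = 0), so imm = 0b01,
   which replicates to 0x5555555555555555; with ESIZE = 4 the final mask
   truncates this to 0x55555555.  The mask is built with two shifts so
   that ESIZE = 8 does not require an undefined 64-bit shift.  */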
905 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
907 aarch64_ext_limm (const aarch64_operand *self,
908 aarch64_opnd_info *info, const aarch64_insn code,
909 const aarch64_inst *inst,
910 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
915 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
917 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
918 return decode_limm (esize, value, &info->imm.value);
921 /* Decode a logical immediate for the BIC alias of AND (etc.). */
923 aarch64_ext_inv_limm (const aarch64_operand *self,
924 aarch64_opnd_info *info, const aarch64_insn code,
925 const aarch64_inst *inst,
926 aarch64_operand_error *errors)
928 if (!aarch64_ext_limm (self, info, code, inst, errors))
930 info->imm.value = ~info->imm.value;
934 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
935 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
937 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
938 aarch64_opnd_info *info,
939 const aarch64_insn code, const aarch64_inst *inst,
940 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
945 info->reg.regno = extract_field (FLD_Rt, code, 0);
948 value = extract_field (FLD_ldst_size, code, 0);
949 if (inst->opcode->iclass == ldstpair_indexed
950 || inst->opcode->iclass == ldstnapair_offs
951 || inst->opcode->iclass == ldstpair_off
952 || inst->opcode->iclass == loadlit)
954 enum aarch64_opnd_qualifier qualifier;
957 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
958 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
959 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
960 default: return FALSE;
962 info->qualifier = qualifier;
967 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
970 info->qualifier = get_sreg_qualifier_from_value (value);
976 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
978 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
979 aarch64_opnd_info *info,
981 const aarch64_inst *inst ATTRIBUTE_UNUSED,
982 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
985 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
989 /* Decode the address operand for e.g.
990 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
992 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
993 aarch64_opnd_info *info,
994 aarch64_insn code, const aarch64_inst *inst,
995 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
997 info->qualifier = get_expected_qualifier (inst, info->idx);
1000 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1003 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1004 info->addr.offset.imm = sign_extend (imm, 8);
1005 if (extract_field (self->fields[2], code, 0) == 1) {
1006 info->addr.writeback = 1;
1007 info->addr.preind = 1;
1012 /* Decode the address operand for e.g.
1013 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1015 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1016 aarch64_opnd_info *info,
1017 aarch64_insn code, const aarch64_inst *inst,
1018 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1020 aarch64_insn S, value;
1023 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1025 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1027 value = extract_field (FLD_option, code, 0);
1028 info->shifter.kind =
1029 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1030 /* Fix-up the shifter kind; although the table-driven approach is
1031 efficient, it is slightly inflexible, thus needing this fix-up. */
1032 if (info->shifter.kind == AARCH64_MOD_UXTX)
1033 info->shifter.kind = AARCH64_MOD_LSL;
1035 S = extract_field (FLD_S, code, 0);
1038 info->shifter.amount = 0;
1039 info->shifter.amount_present = 0;
1044 /* Need information in other operand(s) to help achieve the decoding
1046 info->qualifier = get_expected_qualifier (inst, info->idx);
1047 /* Get the size of the data element that is accessed, which may be
1048 different from that of the source register size, e.g. in strb/ldrb. */
1049 size = aarch64_get_qualifier_esize (info->qualifier);
1050 info->shifter.amount = get_logsz (size);
1051 info->shifter.amount_present = 1;
1057 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1059 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1060 aarch64_insn code, const aarch64_inst *inst,
1061 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1064 info->qualifier = get_expected_qualifier (inst, info->idx);
1067 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1068 /* simm (imm9 or imm7) */
1069 imm = extract_field (self->fields[0], code, 0);
1070 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1071 if (self->fields[0] == FLD_imm7
1072 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1073 /* scaled immediate in ld/st pair instructions. */
1074 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
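/* For example, LDP <Xt1>, <Xt2>, [<Xn|SP>, #-16] encodes imm7 = -2; the
   sign-extended value is scaled by the 8-byte element size to give -16.  */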
1076 if (inst->opcode->iclass == ldst_unscaled
1077 || inst->opcode->iclass == ldstnapair_offs
1078 || inst->opcode->iclass == ldstpair_off
1079 || inst->opcode->iclass == ldst_unpriv)
1080 info->addr.writeback = 0;
1083 /* pre/post- index */
1084 info->addr.writeback = 1;
1085 if (extract_field (self->fields[1], code, 0) == 1)
1086 info->addr.preind = 1;
1088 info->addr.postind = 1;
1094 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1096 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1098 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1099 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1102 info->qualifier = get_expected_qualifier (inst, info->idx);
1103 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1105 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1107 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1111 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1113 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1115 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1116 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1120 info->qualifier = get_expected_qualifier (inst, info->idx);
1122 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1124 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1125 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1126 if (extract_field (self->fields[3], code, 0) == 1) {
1127 info->addr.writeback = 1;
1128 info->addr.preind = 1;
1133 /* Decode the address operand for e.g.
1134 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1136 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1137 aarch64_opnd_info *info,
1138 aarch64_insn code, const aarch64_inst *inst,
1139 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1141 /* The opcode dependent area stores the number of elements in
1142 each structure to be loaded/stored. */
1143 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1146 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1147 /* Rm | #<amount> */
1148 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1149 if (info->addr.offset.regno == 31)
1151 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1152 /* Special handling of loading a single structure to all lanes. */
1153 info->addr.offset.imm = (is_ld1r ? 1
1154 : inst->operands[0].reglist.num_regs)
1155 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1157 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1158 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1159 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1162 info->addr.offset.is_reg = 1;
1163 info->addr.writeback = 1;
1168 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1170 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1171 aarch64_opnd_info *info,
1172 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1173 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1177 value = extract_field (FLD_cond, code, 0);
1178 info->cond = get_cond_from_value (value);
1182 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1184 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1185 aarch64_opnd_info *info,
1187 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1190 /* op0:op1:CRn:CRm:op2 */
1191 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1193 info->sysreg.flags = 0;
1195 /* If this is a system instruction, check which restrictions apply to the
1196 register value; they will be enforced during decoding. */
1197 if (inst->opcode->iclass == ic_system)
1199 /* Check to see if it's read-only, else check if it's write-only;
1200 if it's both or unspecified, we don't care. */
1201 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1202 info->sysreg.flags = F_REG_READ;
1203 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1205 info->sysreg.flags = F_REG_WRITE;
1211 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1213 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1214 aarch64_opnd_info *info, aarch64_insn code,
1215 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1216 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1220 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1221 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1222 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1224 /* Reserved value in <pstatefield>. */
1228 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1230 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1231 aarch64_opnd_info *info,
1233 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1234 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1238 const aarch64_sys_ins_reg *sysins_ops;
1239 /* op0:op1:CRn:CRm:op2 */
1240 value = extract_fields (code, 0, 5,
1241 FLD_op0, FLD_op1, FLD_CRn,
1246 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1247 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1248 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1249 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1250 case AARCH64_OPND_SYSREG_SR:
1251 sysins_ops = aarch64_sys_regs_sr;
1252 /* Let's remove op2 for rctx. Refer to comments in the definition of
1253 aarch64_sys_regs_sr[]. */
1254 value = value & ~(0x7);
1256 default: assert (0); return FALSE;
1259 for (i = 0; sysins_ops[i].name != NULL; ++i)
1260 if (sysins_ops[i].value == value)
1262 info->sysins_op = sysins_ops + i;
1263 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1264 info->sysins_op->name,
1265 (unsigned)info->sysins_op->value,
1266 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1273 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1276 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1277 aarch64_opnd_info *info,
1279 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1280 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1283 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1287 /* Decode the prefetch operation option operand for e.g.
1288 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1291 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1292 aarch64_opnd_info *info,
1293 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1294 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1297 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1301 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1302 to the matching name/value pair in aarch64_hint_options. */
1305 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1306 aarch64_opnd_info *info,
1308 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1309 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1312 unsigned hint_number;
1315 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1317 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1319 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1321 info->hint_option = &(aarch64_hint_options[i]);
1329 /* Decode the extended register operand for e.g.
1330 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1332 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1333 aarch64_opnd_info *info,
1335 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1336 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1341 info->reg.regno = extract_field (FLD_Rm, code, 0);
1343 value = extract_field (FLD_option, code, 0);
1344 info->shifter.kind =
1345 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1347 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1349 /* This makes the constraint checking happy. */
1350 info->shifter.operator_present = 1;
1352 /* Assume inst->operands[0].qualifier has been resolved. */
1353 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1354 info->qualifier = AARCH64_OPND_QLF_W;
1355 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1356 && (info->shifter.kind == AARCH64_MOD_UXTX
1357 || info->shifter.kind == AARCH64_MOD_SXTX))
1358 info->qualifier = AARCH64_OPND_QLF_X;
1363 /* Decode the shifted register operand for e.g.
1364 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1366 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1367 aarch64_opnd_info *info,
1369 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1370 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1375 info->reg.regno = extract_field (FLD_Rm, code, 0);
1377 value = extract_field (FLD_shift, code, 0);
1378 info->shifter.kind =
1379 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1380 if (info->shifter.kind == AARCH64_MOD_ROR
1381 && inst->opcode->iclass != log_shift)
1382 /* ROR is not available for the shifted register operand in arithmetic
1386 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1388 /* This makes the constraint checking happy. */
1389 info->shifter.operator_present = 1;
1394 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1395 where <offset> is given by the OFFSET parameter and where <factor> is
1396 1 plus SELF's operand-dependent value. fields[0] specifies the field
1397 that holds <base>. */
1399 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1400 aarch64_opnd_info *info, aarch64_insn code,
1403 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1404 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1405 info->addr.offset.is_reg = FALSE;
1406 info->addr.writeback = FALSE;
1407 info->addr.preind = TRUE;
1409 info->shifter.kind = AARCH64_MOD_MUL_VL;
1410 info->shifter.amount = 1;
1411 info->shifter.operator_present = (info->addr.offset.imm != 0);
1412 info->shifter.amount_present = FALSE;
1416 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1417 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1418 SELF's operand-dependent value. fields[0] specifies the field that
1419 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1421 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1422 aarch64_opnd_info *info, aarch64_insn code,
1423 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1424 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1428 offset = extract_field (FLD_SVE_imm4, code, 0);
1429 offset = ((offset + 8) & 15) - 8;
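/* The wrap above maps the raw 4-bit field onto [-8, 7]; e.g. a raw value
   of 0xf becomes ((15 + 8) & 15) - 8 = -1.  */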
1430 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1433 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1434 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1435 SELF's operand-dependent value. fields[0] specifies the field that
1436 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1438 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1439 aarch64_opnd_info *info, aarch64_insn code,
1440 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1441 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1445 offset = extract_field (FLD_SVE_imm6, code, 0);
1446 offset = (((offset + 32) & 63) - 32);
1447 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1450 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1451 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1452 SELF's operand-dependent value. fields[0] specifies the field that
1453 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1454 and imm3 fields, with imm3 being the less-significant part. */
1456 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1457 aarch64_opnd_info *info,
1459 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1460 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1464 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1465 offset = (((offset + 256) & 511) - 256);
1466 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1469 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1470 is given by the OFFSET parameter and where <shift> is SELF's operand-
1471 dependent value. fields[0] specifies the base register field <base>. */
1473 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1474 aarch64_opnd_info *info, aarch64_insn code,
1477 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1478 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1479 info->addr.offset.is_reg = FALSE;
1480 info->addr.writeback = FALSE;
1481 info->addr.preind = TRUE;
1482 info->shifter.operator_present = FALSE;
1483 info->shifter.amount_present = FALSE;
1487 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1488 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1489 value. fields[0] specifies the base register field. */
1491 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1492 aarch64_opnd_info *info, aarch64_insn code,
1493 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1494 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1496 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1497 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1500 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1501 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1502 value. fields[0] specifies the base register field. */
1504 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1505 aarch64_opnd_info *info, aarch64_insn code,
1506 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1507 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1509 int offset = extract_field (FLD_SVE_imm6, code, 0);
1510 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1513 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1514 is SELF's operand-dependent value. fields[0] specifies the base
1515 register field and fields[1] specifies the offset register field. */
1517 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1518 aarch64_opnd_info *info, aarch64_insn code,
1519 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1520 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1524 index_regno = extract_field (self->fields[1], code, 0);
1525 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1528 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1529 info->addr.offset.regno = index_regno;
1530 info->addr.offset.is_reg = TRUE;
1531 info->addr.writeback = FALSE;
1532 info->addr.preind = TRUE;
1533 info->shifter.kind = AARCH64_MOD_LSL;
1534 info->shifter.amount = get_operand_specific_data (self);
1535 info->shifter.operator_present = (info->shifter.amount != 0);
1536 info->shifter.amount_present = (info->shifter.amount != 0);
1540 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1541 <shift> is SELF's operand-dependent value. fields[0] specifies the
1542 base register field, fields[1] specifies the offset register field and
1543 fields[2] is a single-bit field that selects SXTW over UXTW. */
1545 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1546 aarch64_opnd_info *info, aarch64_insn code,
1547 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1548 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1550 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1551 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1552 info->addr.offset.is_reg = TRUE;
1553 info->addr.writeback = FALSE;
1554 info->addr.preind = TRUE;
1555 if (extract_field (self->fields[2], code, 0))
1556 info->shifter.kind = AARCH64_MOD_SXTW;
1558 info->shifter.kind = AARCH64_MOD_UXTW;
1559 info->shifter.amount = get_operand_specific_data (self);
1560 info->shifter.operator_present = TRUE;
1561 info->shifter.amount_present = (info->shifter.amount != 0);
1565 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1566 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1567 fields[0] specifies the base register field. */
1569 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1570 aarch64_opnd_info *info, aarch64_insn code,
1571 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1572 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1574 int offset = extract_field (FLD_imm5, code, 0);
1575 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1578 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1579 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1580 number. fields[0] specifies the base register field and fields[1]
1581 specifies the offset register field. */
1583 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1584 aarch64_insn code, enum aarch64_modifier_kind kind)
1586 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1587 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1588 info->addr.offset.is_reg = TRUE;
1589 info->addr.writeback = FALSE;
1590 info->addr.preind = TRUE;
1591 info->shifter.kind = kind;
1592 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1593 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1594 || info->shifter.amount != 0);
1595 info->shifter.amount_present = (info->shifter.amount != 0);
1599 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1600 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1601 field and fields[1] specifies the offset register field. */
1603 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1604 aarch64_opnd_info *info, aarch64_insn code,
1605 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1606 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1608 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1611 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1612 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1613 field and fields[1] specifies the offset register field. */
1615 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1616 aarch64_opnd_info *info, aarch64_insn code,
1617 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1618 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1620 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1623 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1624 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1625 field and fields[1] specifies the offset register field. */
1627 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1628 aarch64_opnd_info *info, aarch64_insn code,
1629 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1630 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1632 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1635 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1636 has the raw field value and that the low 8 bits decode to VALUE. */
1638 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1640 info->shifter.kind = AARCH64_MOD_LSL;
1641 info->shifter.amount = 0;
1642 if (info->imm.value & 0x100)
1645 /* Decode 0x100 as #0, LSL #8. */
1646 info->shifter.amount = 8;
1650 info->shifter.operator_present = (info->shifter.amount != 0);
1651 info->shifter.amount_present = (info->shifter.amount != 0);
1652 info->imm.value = value;
1656 /* Decode an SVE ADD/SUB immediate. */
1658 aarch64_ext_sve_aimm (const aarch64_operand *self,
1659 aarch64_opnd_info *info, const aarch64_insn code,
1660 const aarch64_inst *inst,
1661 aarch64_operand_error *errors)
1663 return (aarch64_ext_imm (self, info, code, inst, errors)
1664 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1667 /* Decode an SVE CPY/DUP immediate. */
1669 aarch64_ext_sve_asimm (const aarch64_operand *self,
1670 aarch64_opnd_info *info, const aarch64_insn code,
1671 const aarch64_inst *inst,
1672 aarch64_operand_error *errors)
1674 return (aarch64_ext_imm (self, info, code, inst, errors)
1675 && decode_sve_aimm (info, (int8_t) info->imm.value));
1678 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1679 The fields array specifies which field to use. */
1681 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1682 aarch64_opnd_info *info, aarch64_insn code,
1683 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1684 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1686 if (extract_field (self->fields[0], code, 0))
1687 info->imm.value = 0x3f800000;
1689 info->imm.value = 0x3f000000;
1690 info->imm.is_fp = TRUE;
1694 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1695 The fields array specifies which field to use. */
1697 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1698 aarch64_opnd_info *info, aarch64_insn code,
1699 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1700 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1702 if (extract_field (self->fields[0], code, 0))
1703 info->imm.value = 0x40000000;
1705 info->imm.value = 0x3f000000;
1706 info->imm.is_fp = TRUE;
1710 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1711 The fields array specifies which field to use. */
1713 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1714 aarch64_opnd_info *info, aarch64_insn code,
1715 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1716 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1718 if (extract_field (self->fields[0], code, 0))
1719 info->imm.value = 0x3f800000;
1721 info->imm.value = 0x0;
1722 info->imm.is_fp = TRUE;
1726 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1727 array specifies which field to use for Zn. MM is encoded in the
1728 concatenation of imm5 and SVE_tszh, with imm5 being the less
1729 significant part. */
1731 aarch64_ext_sve_index (const aarch64_operand *self,
1732 aarch64_opnd_info *info, aarch64_insn code,
1733 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1734 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1738 info->reglane.regno = extract_field (self->fields[0], code, 0);
1739 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1740 if ((val & 31) == 0)
1742 while ((val & 1) == 0)
1744 info->reglane.index = val / 2;
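/* Illustrative example (assuming the usual SVE index encoding): for
   DUP <Zd>.H, <Zn>.H[2] the value of SVE_tszh:imm5 is 0b0001010; the
   lowest set bit marks the H element size, and stripping it leaves
   0b101, so the index is 0b101 / 2 = 2.  */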
1748 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1750 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1751 aarch64_opnd_info *info, const aarch64_insn code,
1752 const aarch64_inst *inst,
1753 aarch64_operand_error *errors)
1755 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1756 return (aarch64_ext_limm (self, info, code, inst, errors)
1757 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1760 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1761 and where MM occupies the most-significant part. The operand-dependent
1762 value specifies the number of bits in Zn. */
1764 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1765 aarch64_opnd_info *info, aarch64_insn code,
1766 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1767 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1769 unsigned int reg_bits = get_operand_specific_data (self);
1770 unsigned int val = extract_all_fields (self, code);
1771 info->reglane.regno = val & ((1 << reg_bits) - 1);
1772 info->reglane.index = val >> reg_bits;
1776 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1777 to use for Zn. The opcode-dependent value specifies the number
1778 of registers in the list. */
1780 aarch64_ext_sve_reglist (const aarch64_operand *self,
1781 aarch64_opnd_info *info, aarch64_insn code,
1782 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1783 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1785 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1786 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1790 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1791 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1794 aarch64_ext_sve_scale (const aarch64_operand *self,
1795 aarch64_opnd_info *info, aarch64_insn code,
1796 const aarch64_inst *inst, aarch64_operand_error *errors)
1800 if (!aarch64_ext_imm (self, info, code, inst, errors))
1802 val = extract_field (FLD_SVE_imm4, code, 0);
1803 info->shifter.kind = AARCH64_MOD_MUL;
1804 info->shifter.amount = val + 1;
1805 info->shifter.operator_present = (val != 0);
1806 info->shifter.amount_present = (val != 0);
1810 /* Return the top set bit in VALUE, which is expected to be relatively
1813 get_top_bit (uint64_t value)
1815 while ((value & -value) != value)
1816 value -= value & -value;
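/* get_top_bit isolates the highest set bit, e.g. get_top_bit (0b1011)
   is 0b1000.  In the SVE shift decoders below the top bit of the encoded
   value marks the element size, so e.g. a raw value of 0b1011 (8-bit
   elements) decodes to a left shift of 11 - 8 = 3 and to a right shift
   of 2 * 8 - 11 = 5.  */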
1820 /* Decode an SVE shift-left immediate. */
1822 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1823 aarch64_opnd_info *info, const aarch64_insn code,
1824 const aarch64_inst *inst, aarch64_operand_error *errors)
1826 if (!aarch64_ext_imm (self, info, code, inst, errors)
1827 || info->imm.value == 0)
1830 info->imm.value -= get_top_bit (info->imm.value);
1834 /* Decode an SVE shift-right immediate. */
1836 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1837 aarch64_opnd_info *info, const aarch64_insn code,
1838 const aarch64_inst *inst, aarch64_operand_error *errors)
1840 if (!aarch64_ext_imm (self, info, code, inst, errors)
1841 || info->imm.value == 0)
1844 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1848 /* Bitfields that are commonly used to encode certain operands' information
1849 may be partially used as part of the base opcode in some instructions.
1850 For example, bit 1 of the field 'size' in
1851 FCVTXN <Vb><d>, <Va><n>
1852 is actually part of the base opcode, while only size<0> is available
1853 for encoding the register type. Another example is the AdvSIMD
1854 instruction ORR (register), in which the field 'size' is also used for
1855 the base opcode, leaving only the field 'Q' available to encode the
1856 vector register arrangement specifier '8B' or '16B'.
1858 This function tries to deduce the qualifier from the value of partially
1859 constrained field(s). Given the VALUE of such a field or fields, the
1860 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1861 operand encoding), the function returns the matching qualifier or
1862 AARCH64_OPND_QLF_NIL if nothing matches.
1864 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1865 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1866 may end with AARCH64_OPND_QLF_NIL. */
1868 static enum aarch64_opnd_qualifier
1869 get_qualifier_from_partial_encoding (aarch64_insn value,
1870 const enum aarch64_opnd_qualifier* \
1875 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1876 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1878 aarch64_insn standard_value;
1879 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1881 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1882 if ((standard_value & mask) == (value & mask))
1883 return candidates[i];
1885 return AARCH64_OPND_QLF_NIL;
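/* For example, for AdvSIMD ORR (register) only the Q bit is free (MASK
   has just that bit set); with CANDIDATES { V_8B, V_16B }, a VALUE whose
   Q bit is 1 matches AARCH64_OPND_QLF_V_16B, whose standard value also
   has Q set.  */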
1888 /* Given a list of qualifier sequences, return all possible valid qualifiers
1889 for operand IDX in QUALIFIERS.
1890 Assume QUALIFIERS is an array whose length is large enough. */
1893 get_operand_possible_qualifiers (int idx,
1894 const aarch64_opnd_qualifier_seq_t *list,
1895 enum aarch64_opnd_qualifier *qualifiers)
1898 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1899 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1903 /* Decode the size and Q fields for e.g. SHADD.
1904 We tag one operand with the qualifier according to the code;
1905 whether the qualifier is valid for this opcode or not is the
1906 duty of the semantic checking. */
1909 decode_sizeq (aarch64_inst *inst)
1912 enum aarch64_opnd_qualifier qualifier;
1914 aarch64_insn value, mask;
1915 enum aarch64_field_kind fld_sz;
1916 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1918 if (inst->opcode->iclass == asisdlse
1919 || inst->opcode->iclass == asisdlsep
1920 || inst->opcode->iclass == asisdlso
1921 || inst->opcode->iclass == asisdlsop)
1922 fld_sz = FLD_vldst_size;
1927 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1928 /* Work out which bits of the fields Q and size are actually
1929 available for operand encoding. Opcodes like FMAXNM and FMLA have
1930 size[1] unavailable. */
1931 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1933 /* The index of the operand we are going to tag with a qualifier, and the
1934 qualifier itself, are deduced from the value of the size and Q fields and the
1935 possible valid qualifier lists. */
1936 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1937 DEBUG_TRACE ("key idx: %d", idx);
1939 /* For most related instructions, size:Q is fully available for operand
1943 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1947 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1949 #ifdef DEBUG_AARCH64
1953 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1954 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1955 DEBUG_TRACE ("qualifier %d: %s", i,
1956 aarch64_get_qualifier_name(candidates[i]));
1957 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1959 #endif /* DEBUG_AARCH64 */
1961 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1963 if (qualifier == AARCH64_OPND_QLF_NIL)
1966 inst->operands[idx].qualifier = qualifier;
1970 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1971 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1974 decode_asimd_fcvt (aarch64_inst *inst)
1976 aarch64_field field = {0, 0};
1978 enum aarch64_opnd_qualifier qualifier;
1980 gen_sub_field (FLD_size, 0, 1, &field);
1981 value = extract_field_2 (&field, inst->value, 0);
1982 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1983 : AARCH64_OPND_QLF_V_2D;
1984 switch (inst->opcode->op)
1988 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1989 inst->operands[1].qualifier = qualifier;
1993 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1994 inst->operands[0].qualifier = qualifier;
2004 /* Decode size[0], i.e. bit 22, for
2005 e.g. FCVTXN <Vb><d>, <Va><n>. */
2008 decode_asisd_fcvtxn (aarch64_inst *inst)
2010 aarch64_field field = {0, 0};
2011 gen_sub_field (FLD_size, 0, 1, &field);
2012 if (!extract_field_2 (&field, inst->value, 0))
2014 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2018 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2020 decode_fcvt (aarch64_inst *inst)
2022 enum aarch64_opnd_qualifier qualifier;
2024 const aarch64_field field = {15, 2};
2027 value = extract_field_2 (&field, inst->value, 0);
2030 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2031 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2032 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2035 inst->operands[0].qualifier = qualifier;
2040 /* Do miscellaneous decodings that are not common enough to be driven by flags.  */
2044 do_misc_decoding (aarch64_inst *inst)
2047 switch (inst->opcode->op)
2050 return decode_fcvt (inst);
2056 return decode_asimd_fcvt (inst);
2059 return decode_asisd_fcvtxn (inst);
2063 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2064 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2065 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2068 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2069 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2072 /* Index must be zero. */
2073 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2074 return value > 0 && value <= 16 && value == (value & -value);
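/* Illustrative example (not in the original source): tszh:imm5 == 0b0000100
   (value 4) is a power of two, i.e. only the size-selecting bit is set, so
   the element index encoded above that bit is zero and the check passes;
   0b0001100 (value 12) has index bits set and is rejected here.  */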
2077 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2078 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2081 /* Index must be nonzero. */
2082 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2083 return value > 0 && value != (value & -value);
2086 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2087 == extract_field (FLD_SVE_Pm, inst->value, 0));
2089 case OP_MOVZS_P_P_P:
2091 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2092 == extract_field (FLD_SVE_Pm, inst->value, 0));
2094 case OP_NOTS_P_P_P_Z:
2095 case OP_NOT_P_P_P_Z:
2096 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2097 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2104 /* Opcodes that have fields shared by multiple operands are usually flagged
2105 with one of the F_* flags.  In this function, we detect such flags, decode
2106 the related field(s) and store the information in one of the related
2107 operands.  The 'one' operand is not an arbitrary operand, but one of the
2108 operands that can accommodate all the information that has been decoded.  */
2111 do_special_decoding (aarch64_inst *inst)
2115 /* Condition for truly conditionally-executed instructions, e.g. b.cond.  */
2116 if (inst->opcode->flags & F_COND)
2118 value = extract_field (FLD_cond2, inst->value, 0);
2119 inst->cond = get_cond_from_value (value);
2122 if (inst->opcode->flags & F_SF)
2124 idx = select_operand_for_sf_field_coding (inst->opcode);
2125 value = extract_field (FLD_sf, inst->value, 0);
2126 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2127 if ((inst->opcode->flags & F_N)
2128 && extract_field (FLD_N, inst->value, 0) != value)
2132 if (inst->opcode->flags & F_LSE_SZ)
2134 idx = select_operand_for_sf_field_coding (inst->opcode);
2135 value = extract_field (FLD_lse_sz, inst->value, 0);
2136 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2138 /* size:Q fields. */
2139 if (inst->opcode->flags & F_SIZEQ)
2140 return decode_sizeq (inst);
2142 if (inst->opcode->flags & F_FPTYPE)
2144 idx = select_operand_for_fptype_field_coding (inst->opcode);
2145 value = extract_field (FLD_type, inst->value, 0);
2148 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2149 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2150 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2155 if (inst->opcode->flags & F_SSIZE)
2157 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2158 of the base opcode.  */
2160 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2161 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2162 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2163 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2164 /* For most such instructions, the 'size' field is fully available for
2165 operand encoding.  */
2167 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2170 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2172 inst->operands[idx].qualifier
2173 = get_qualifier_from_partial_encoding (value, candidates, mask);
2177 if (inst->opcode->flags & F_T)
2179 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2182 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2183 == AARCH64_OPND_CLASS_SIMD_REG);
2194 val = extract_field (FLD_imm5, inst->value, 0);
2195 while ((val & 0x1) == 0 && ++num <= 3)
2199 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2200 inst->operands[0].qualifier =
2201 get_vreg_qualifier_from_value ((num << 1) | Q);
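/* Worked example (illustrative, not from the original source): for
   DUP <Vd>.<T>, <R><n> with imm5 == 0b00100 there are two trailing zero
   bits, so num == 2; with Q == 1 the value (num << 1) | Q == 5 selects
   the 4S arrangement.  */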
2204 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2206 /* Use Rt to encode in the case of e.g.
2207 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2208 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2211 /* Otherwise use the result operand, which has to be an integer register.  */
2213 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2214 == AARCH64_OPND_CLASS_INT_REG);
2217 assert (idx == 0 || idx == 1);
2218 value = extract_field (FLD_Q, inst->value, 0);
2219 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2222 if (inst->opcode->flags & F_LDS_SIZE)
2224 aarch64_field field = {0, 0};
2225 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2226 == AARCH64_OPND_CLASS_INT_REG);
2227 gen_sub_field (FLD_opc, 0, 1, &field);
2228 value = extract_field_2 (&field, inst->value, 0);
2229 inst->operands[0].qualifier
2230 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2233 /* Miscellaneous decoding; done as the last step. */
2234 if (inst->opcode->flags & F_MISC)
2235 return do_misc_decoding (inst);
2240 /* Converters that convert a real opcode instruction to its alias form.  */
2242 /* ROR <Wd>, <Ws>, #<shift>
2244 is equivalent to EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
2246 convert_extr_to_ror (aarch64_inst *inst)
2248 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2250 copy_operand_info (inst, 2, 3);
2251 inst->operands[3].type = AARCH64_OPND_NIL;
2257 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2259 is equivalent to USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
2261 convert_shll_to_xtl (aarch64_inst *inst)
2263 if (inst->operands[2].imm.value == 0)
2265 inst->operands[2].type = AARCH64_OPND_NIL;
2272 /* UBFM <Xd>, <Xn>, #<shift>, #63.
2274 is disassembled as LSR <Xd>, <Xn>, #<shift>.  */
2276 convert_bfm_to_sr (aarch64_inst *inst)
2280 imms = inst->operands[3].imm.value;
2281 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2284 inst->operands[3].type = AARCH64_OPND_NIL;
2291 /* Convert MOV to ORR. */
2293 convert_orr_to_mov (aarch64_inst *inst)
2295 /* MOV <Vd>.<T>, <Vn>.<T>
2297 is equivalent to ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
2298 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2300 inst->operands[2].type = AARCH64_OPND_NIL;
2306 /* When <imms> >= <immr>, the instruction written:
2307 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2309 is the preferred disassembly of SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
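/* For instance (illustrative, not part of the original source):
   SBFM X0, X1, #8, #11 has <imms> (11) >= <immr> (8), so it is shown as
   SBFX X0, X1, #8, #4, with <lsb> = immr = 8 and <width> = imms + 1 - lsb = 4.  */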
2312 convert_bfm_to_bfx (aarch64_inst *inst)
2316 immr = inst->operands[2].imm.value;
2317 imms = inst->operands[3].imm.value;
2321 inst->operands[2].imm.value = lsb;
2322 inst->operands[3].imm.value = imms + 1 - lsb;
2323 /* The two opcodes have different qualifiers for
2324 the immediate operands; reset to help the checking. */
2325 reset_operand_qualifier (inst, 2);
2326 reset_operand_qualifier (inst, 3);
2333 /* When <imms> < <immr>, the instruction written:
2334 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2336 is the preferred disassembly of SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
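/* For instance (illustrative, not part of the original source):
   SBFM X0, X1, #56, #3 has <imms> (3) < <immr> (56), so it is shown as
   SBFIZ X0, X1, #8, #4: lsb = (64 - 56) & 0x3f = 8 and width = 3 + 1 = 4.  */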
2339 convert_bfm_to_bfi (aarch64_inst *inst)
2341 int64_t immr, imms, val;
2343 immr = inst->operands[2].imm.value;
2344 imms = inst->operands[3].imm.value;
2345 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2348 inst->operands[2].imm.value = (val - immr) & (val - 1);
2349 inst->operands[3].imm.value = imms + 1;
2350 /* The two opcodes have different qualifiers for
2351 the immediate operands; reset to help the checking. */
2352 reset_operand_qualifier (inst, 2);
2353 reset_operand_qualifier (inst, 3);
2360 /* The instruction written:
2361 BFC <Xd>, #<lsb>, #<width>
2363 is the preferred disassembly of BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
2366 convert_bfm_to_bfc (aarch64_inst *inst)
2368 int64_t immr, imms, val;
2370 /* Should have been assured by the base opcode value. */
2371 assert (inst->operands[1].reg.regno == 0x1f);
2373 immr = inst->operands[2].imm.value;
2374 imms = inst->operands[3].imm.value;
2375 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2378 /* Drop XZR from the second operand. */
2379 copy_operand_info (inst, 1, 2);
2380 copy_operand_info (inst, 2, 3);
2381 inst->operands[3].type = AARCH64_OPND_NIL;
2383 /* Recalculate the immediates. */
2384 inst->operands[1].imm.value = (val - immr) & (val - 1);
2385 inst->operands[2].imm.value = imms + 1;
2387 /* The two opcodes have different qualifiers for the operands; reset to
2388 help the checking. */
2389 reset_operand_qualifier (inst, 1);
2390 reset_operand_qualifier (inst, 2);
2391 reset_operand_qualifier (inst, 3);
2399 /* The instruction written:
2400 LSL <Xd>, <Xn>, #<shift>
2402 is the preferred disassembly of UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
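/* For instance (illustrative, not part of the original source):
   UBFM X0, X1, #61, #60 satisfies immr == imms + 1, so it is shown as
   LSL X0, X1, #3, the shift being val - imms = 63 - 60 = 3.  */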
2405 convert_ubfm_to_lsl (aarch64_inst *inst)
2407 int64_t immr = inst->operands[2].imm.value;
2408 int64_t imms = inst->operands[3].imm.value;
2410 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2412 if ((immr == 0 && imms == val) || immr == imms + 1)
2414 inst->operands[3].type = AARCH64_OPND_NIL;
2415 inst->operands[2].imm.value = val - imms;
2422 /* CINC <Wd>, <Wn>, <cond>
2424 is equivalent to CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2425 where <cond> is not AL or NV. */
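/* For instance (illustrative, not part of the original source):
   CSINC W0, W1, W1, EQ is shown as CINC W0, W1, NE, since the source
   registers match and EQ (0b0000) is neither AL nor NV.  */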
2428 convert_from_csel (aarch64_inst *inst)
2430 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2431 && (inst->operands[3].cond->value & 0xe) != 0xe)
2433 copy_operand_info (inst, 2, 3);
2434 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2435 inst->operands[3].type = AARCH64_OPND_NIL;
2441 /* CSET <Wd>, <cond>
2443 is equivalent to CSINC <Wd>, WZR, WZR, invert(<cond>)
2444 where <cond> is not AL or NV. */
2447 convert_csinc_to_cset (aarch64_inst *inst)
2449 if (inst->operands[1].reg.regno == 0x1f
2450 && inst->operands[2].reg.regno == 0x1f
2451 && (inst->operands[3].cond->value & 0xe) != 0xe)
2453 copy_operand_info (inst, 1, 3);
2454 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2455 inst->operands[3].type = AARCH64_OPND_NIL;
2456 inst->operands[2].type = AARCH64_OPND_NIL;
2464 /* MOV <Wd>, #<imm> is equivalent to MOVZ <Wd>, #<imm16>, LSL #<shift>.
2466 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2467 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2468 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2469 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2470 machine-instruction mnemonic must be used. */
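/* For instance (illustrative, not part of the original source):
   MOVZ X0, #0x1234, LSL #16 is shown as MOV X0, #0x12340000, whereas
   MOVZ X0, #0x0, LSL #16 keeps the machine mnemonic because a zero
   immediate with a nonzero shift is excluded by the rules above.  */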
2473 convert_movewide_to_mov (aarch64_inst *inst)
2475 uint64_t value = inst->operands[1].imm.value;
2476 /* MOVZ/MOVN #0 with a shift amount other than LSL #0 must keep the machine mnemonic.  */
2477 if (value == 0 && inst->operands[1].shifter.amount != 0)
2479 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2480 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2481 value <<= inst->operands[1].shifter.amount;
2482 /* As this is an alias converter, note that INST->OPCODE
2483 is the opcode of the real instruction, not of the alias.  */
2484 if (inst->opcode->op == OP_MOVN)
2486 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2488 /* A MOVN has an immediate that could be encoded by MOVZ. */
2489 if (aarch64_wide_constant_p (value, is32, NULL))
2492 inst->operands[1].imm.value = value;
2493 inst->operands[1].shifter.amount = 0;
2499 /* MOV <Wd>, #<imm> is equivalent to ORR <Wd>, WZR, #<imm>.
2501 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2502 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2503 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2504 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2505 machine-instruction mnemonic must be used. */
2508 convert_movebitmask_to_mov (aarch64_inst *inst)
2513 /* Should have been assured by the base opcode value. */
2514 assert (inst->operands[1].reg.regno == 0x1f);
2515 copy_operand_info (inst, 1, 2);
2516 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2517 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2518 value = inst->operands[1].imm.value;
2519 /* ORR has an immediate that could be generated by a MOVZ or MOVN instruction.  */
2521 if (inst->operands[0].reg.regno != 0x1f
2522 && (aarch64_wide_constant_p (value, is32, NULL)
2523 || aarch64_wide_constant_p (~value, is32, NULL)))
2526 inst->operands[2].type = AARCH64_OPND_NIL;
2530 /* Some alias opcodes are disassembled by being converted from their real form.
2531 N.B. INST->OPCODE is the real opcode rather than the alias. */
2534 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2540 return convert_bfm_to_sr (inst);
2542 return convert_ubfm_to_lsl (inst);
2546 return convert_from_csel (inst);
2549 return convert_csinc_to_cset (inst);
2553 return convert_bfm_to_bfx (inst);
2557 return convert_bfm_to_bfi (inst);
2559 return convert_bfm_to_bfc (inst);
2561 return convert_orr_to_mov (inst);
2562 case OP_MOV_IMM_WIDE:
2563 case OP_MOV_IMM_WIDEN:
2564 return convert_movewide_to_mov (inst);
2565 case OP_MOV_IMM_LOG:
2566 return convert_movebitmask_to_mov (inst);
2568 return convert_extr_to_ror (inst);
2573 return convert_shll_to_xtl (inst);
2580 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2581 aarch64_inst *, int, aarch64_operand_error *errors);
2583 /* Given the instruction information in *INST, check if the instruction has
2584 any alias form that can be used to represent *INST. If the answer is yes,
2585 update *INST to be in the form of the determined alias. */
2587 /* In the opcode description table, the following flags are used in opcode
2588 entries to help establish the relations between the real and alias opcodes:
2590 F_ALIAS: opcode is an alias
2591 F_HAS_ALIAS: opcode has alias(es)
2594 F_P1, F_P2, F_P3: Disassembly preference priority 1-3 (the larger the
2595 higher).  If nothing is specified, the priority is 0 by default,
2596 i.e. the lowest priority.
2598 Although the relation between the machine and the alias instructions is not
2599 explicitly described, it can be easily determined from the base opcode
2600 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2601 description entries:
2603 The mask of an alias opcode must be equal to or a super-set (i.e. more
2604 constrained) of that of the aliased opcode; so is the base opcode value.
2606 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2607 && (opcode->mask & real->mask) == real->mask
2608 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2609 then OPCODE is an alias of, and only of, the REAL instruction
2611 The alias relationship is forced flat-structured to keep related algorithm
2612 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2614 During the disassembling, the decoding decision tree (in
2615 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2616 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2617 not specified), the disassembler will check whether any alias
2618 instruction exists for this real instruction.  If there is, the disassembler
2619 will try to disassemble the 32-bit binary again using the alias's rule, or
2620 try to convert the IR to the form of the alias. In the case of the multiple
2621 aliases, the aliases are tried one by one from the highest priority
2622 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2623 first one that succeeds is adopted.
2625 You may ask why there is a need for the conversion of IR from one form to
2626 another in handling certain aliases. This is because on one hand it avoids
2627 adding more operand code to handle unusual encoding/decoding; on the other
2628 hand, during the disassembling, the conversion is an effective approach to
2629 check the condition of an alias (as an alias may be adopted only if certain
2630 conditions are met).
2632 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2633 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2634 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
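/* For example (illustrative, not part of the original source): EXTR is a
   real opcode carrying F_HAS_ALIAS, while ROR is its F_ALIAS | F_CONV
   entry; ROR can only be chosen when the two source registers are equal,
   which is exactly what convert_extr_to_ror checks before the alias is
   adopted.  */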
2637 determine_disassembling_preference (struct aarch64_inst *inst,
2638 aarch64_operand_error *errors)
2640 const aarch64_opcode *opcode;
2641 const aarch64_opcode *alias;
2643 opcode = inst->opcode;
2645 /* This opcode does not have an alias, so use itself. */
2646 if (!opcode_has_alias (opcode))
2649 alias = aarch64_find_alias_opcode (opcode);
2652 #ifdef DEBUG_AARCH64
2655 const aarch64_opcode *tmp = alias;
2656 printf ("#### LIST ordered: ");
2659 printf ("%s, ", tmp->name);
2660 tmp = aarch64_find_next_alias_opcode (tmp);
2664 #endif /* DEBUG_AARCH64 */
2666 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2668 DEBUG_TRACE ("try %s", alias->name);
2669 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2671 /* An alias can be a pseudo opcode which will never be used in the
2672 disassembly, e.g. BIC logical immediate is such a pseudo opcode in AArch64.  */
2674 if (pseudo_opcode_p (alias))
2676 DEBUG_TRACE ("skip pseudo %s", alias->name);
2680 if ((inst->value & alias->mask) != alias->opcode)
2682 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2685 /* No need to do any complicated transformation on the operands if the alias
2686 opcode does not have any operands.  */
2687 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2689 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2690 aarch64_replace_opcode (inst, alias);
2693 if (alias->flags & F_CONV)
2696 memcpy (&copy, inst, sizeof (aarch64_inst));
2697 /* ALIAS is the preference as long as the instruction can be
2698 successfully converted to the form of ALIAS. */
2699 if (convert_to_alias (&copy, alias) == 1)
2701 aarch64_replace_opcode (&copy, alias);
2702 assert (aarch64_match_operands_constraint (&copy, NULL));
2703 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2704 memcpy (inst, &copy, sizeof (aarch64_inst));
2710 /* Directly decode the alias opcode. */
2712 memset (&temp, '\0', sizeof (aarch64_inst));
2713 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2715 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2716 memcpy (inst, &temp, sizeof (aarch64_inst));
2723 /* Some instructions (including all SVE ones) use the instruction class
2724 to describe how a qualifiers_list index is represented in the instruction
2725 encoding. If INST is such an instruction, decode the appropriate fields
2726 and fill in the operand qualifiers accordingly. Return true if no
2727 problems are found. */
2730 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2735 switch (inst->opcode->iclass)
2738 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2742 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2745 while ((i & 1) == 0)
2753 /* Pick the smallest applicable element size. */
2754 if ((inst->value & 0x20600) == 0x600)
2756 else if ((inst->value & 0x20400) == 0x400)
2758 else if ((inst->value & 0x20000) == 0)
2765 /* sve_misc instructions have only a single variant. */
2769 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2773 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2776 case sve_shift_pred:
2777 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2788 case sve_shift_unpred:
2789 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2793 variant = extract_field (FLD_size, inst->value, 0);
2799 variant = extract_field (FLD_size, inst->value, 0);
2803 i = extract_field (FLD_size, inst->value, 0);
2811 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2815 variant = extract_field (FLD_SVE_sz2, inst->value, 0);
2819 i = extract_field (FLD_SVE_size, inst->value, 0);
2826 i = extract_field (FLD_size, inst->value, 0);
2835 case sve_shift_tsz_bhsd:
2836 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2846 case sve_size_tsz_bhs:
2847 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2857 case sve_shift_tsz_hsd:
2858 i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2869 /* No mapping between instruction class and qualifiers. */
2873 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2874 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2877 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2878 fails, which means that CODE is not an instruction of OPCODE; otherwise return 1.
2881 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2882 determined and used to disassemble CODE; this is done just before this function returns.  */
2886 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2887 aarch64_inst *inst, int noaliases_p,
2888 aarch64_operand_error *errors)
2892 DEBUG_TRACE ("enter with %s", opcode->name);
2894 assert (opcode && inst);
2897 memset (inst, '\0', sizeof (aarch64_inst));
2899 /* Check the base opcode. */
2900 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2902 DEBUG_TRACE ("base opcode match FAIL");
2906 inst->opcode = opcode;
2909 /* Assign operand codes and indexes. */
2910 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2912 if (opcode->operands[i] == AARCH64_OPND_NIL)
2914 inst->operands[i].type = opcode->operands[i];
2915 inst->operands[i].idx = i;
2918 /* Call the opcode decoder indicated by flags. */
2919 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2921 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2925 /* Possibly use the instruction class to determine the correct qualifiers.  */
2927 if (!aarch64_decode_variant_using_iclass (inst))
2929 DEBUG_TRACE ("iclass-based decoder FAIL");
2933 /* Call operand decoders. */
2934 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2936 const aarch64_operand *opnd;
2937 enum aarch64_opnd type;
2939 type = opcode->operands[i];
2940 if (type == AARCH64_OPND_NIL)
2942 opnd = &aarch64_operands[type];
2943 if (operand_has_extractor (opnd)
2944 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2947 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2952 /* If the opcode has a verifier, then check it now. */
2953 if (opcode->verifier
2954 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2956 DEBUG_TRACE ("operand verifier FAIL");
2960 /* Match the qualifiers. */
2961 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2963 /* Arriving here, CODE has been determined to be a valid instruction
2964 of OPCODE, and *INST has been filled with the information of this
2965 OPCODE instruction.  Before returning, check whether the instruction
2966 has any alias and should be disassembled in the form of that alias
2967 instead.  If so, *INST will be updated.  */
2969 determine_disassembling_preference (inst, errors);
2970 DEBUG_TRACE ("SUCCESS");
2975 DEBUG_TRACE ("constraint matching FAIL");
2982 /* This does some user-friendly fix-up to *INST.  It currently focuses on
2983 the adjustment of qualifiers, to make the printed instruction easier to
2984 recognize and understand.  */
2987 user_friendly_fixup (aarch64_inst *inst)
2989 switch (inst->opcode->iclass)
2992 /* TBNZ Xn|Wn, #uimm6, label
2993 Test and Branch Not Zero: conditionally jumps to label if bit number
2994 uimm6 in register Xn is not zero. The bit number implies the width of
2995 the register, which may be written and should be disassembled as Wn if
2996 uimm is less than 32.  Limited to a branch offset range of +/- 32KiB.  */
2998 if (inst->operands[1].imm.value < 32)
2999 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
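/* For instance (illustrative, not part of the original source):
   a TBNZ testing bit 3 is printed as TBNZ W0, #3, <label> rather than
   with an X register, because a bit number below 32 fits in a W view.  */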
3005 /* Decode INSN and fill in *INST with the instruction information.  An alias
3006 opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK on success, ERR_UND otherwise.  */
3010 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3011 bfd_boolean noaliases_p,
3012 aarch64_operand_error *errors)
3014 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3016 #ifdef DEBUG_AARCH64
3019 const aarch64_opcode *tmp = opcode;
3021 DEBUG_TRACE ("opcode lookup:");
3024 aarch64_verbose (" %s", tmp->name);
3025 tmp = aarch64_find_next_opcode (tmp);
3028 #endif /* DEBUG_AARCH64 */
3030 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3031 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3032 opcode field and value.  The difference is that one of them has an extra
3033 field as part of the base opcode, while the same field is used for operand
3034 encoding in the other opcode(s) ('immh' in the case of this example).  */
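/* For example (illustrative, not part of the original source): an AdvSIMD
   shift-by-immediate such as SSHR requires a nonzero immh field, while the
   overlapping modified-immediate encodings such as MOVI use immh == 0, so
   only one of the looked-up candidates can decode successfully.  */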
3035 while (opcode != NULL)
3037 /* But only one opcode can be decoded successfully, as the
3038 decoding routine checks the constraints carefully.  */
3039 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3041 opcode = aarch64_find_next_opcode (opcode);
3047 /* Print operands. */
3050 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3051 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3052 bfd_boolean *has_notes)
3055 int i, pcrel_p, num_printed;
3056 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3059 /* We rely primarily on the opcode's operand info, but we also look into
3060 inst->operands to support the disassembling of the optional operand.
3062 The two operand codes should be the same in all cases, except when
3063 the operand is optional.  */
3064 if (opcode->operands[i] == AARCH64_OPND_NIL
3065 || opnds[i].type == AARCH64_OPND_NIL)
3068 /* Generate the operand string in STR. */
3069 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3070 &info->target, &notes);
3072 /* Print the delimiter (taking account of omitted operand(s)). */
3074 (*info->fprintf_func) (info->stream, "%s",
3075 num_printed++ == 0 ? "\t" : ", ");
3077 /* Print the operand. */
3079 (*info->print_address_func) (info->target, info);
3081 (*info->fprintf_func) (info->stream, "%s", str);
3084 if (notes && !no_notes)
3087 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3091 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3094 remove_dot_suffix (char *name, const aarch64_inst *inst)
3099 ptr = strchr (inst->opcode->name, '.');
3100 assert (ptr && inst->cond);
3101 len = ptr - inst->opcode->name;
3103 strncpy (name, inst->opcode->name, len);
3107 /* Print the instruction mnemonic name. */
3110 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3112 if (inst->opcode->flags & F_COND)
3114 /* For instructions that are truly conditionally executed, e.g. b.cond,
3115 prepare the full mnemonic name with the corresponding condition suffix.  */
3119 remove_dot_suffix (name, inst);
3120 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3123 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3126 /* Decide whether we need to print a comment after the operands of
3127 instruction INST. */
3130 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3132 if (inst->opcode->flags & F_COND)
3135 unsigned int i, num_conds;
3137 remove_dot_suffix (name, inst);
3138 num_conds = ARRAY_SIZE (inst->cond->names);
3139 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3140 (*info->fprintf_func) (info->stream, "%s %s.%s",
3141 i == 1 ? " //" : ",",
3142 name, inst->cond->names[i]);
3146 /* Build notes from verifiers into a string for printing. */
3149 print_verifier_notes (aarch64_operand_error *detail,
3150 struct disassemble_info *info)
3155 /* The output of the verifier cannot be a fatal error, otherwise the assembly
3156 would not have succeeded. We can safely ignore these. */
3157 assert (detail->non_fatal);
3158 assert (detail->error);
3160 /* If there are multiple verifier messages, concat them up to 1k. */
3161 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3162 if (detail->index >= 0)
3163 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3166 /* Print the instruction according to *INST. */
3169 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3170 const aarch64_insn code,
3171 struct disassemble_info *info,
3172 aarch64_operand_error *mismatch_details)
3174 bfd_boolean has_notes = FALSE;
3176 print_mnemonic_name (inst, info);
3177 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3178 print_comment (inst, info);
3180 /* We've already printed a note, not enough space to print more so exit.
3181 Usually notes shouldn't overlap so it shouldn't happen that we have a note
3182 from a register and instruction at the same time. */
3186 /* Always run the constraint verifiers; this is needed because constraints need to
3187 maintain a global state regardless of whether the instruction has the flag set.  */
3189 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3190 mismatch_details, &insn_sequence);
3198 print_verifier_notes (mismatch_details, info);
3205 /* Entry-point of the instruction disassembler and printer. */
3208 print_insn_aarch64_word (bfd_vma pc,
3210 struct disassemble_info *info,
3211 aarch64_operand_error *errors)
3213 static const char *err_msg[ERR_NR_ENTRIES+1] =
3216 [ERR_UND] = "undefined",
3217 [ERR_UNP] = "unpredictable",
3224 info->insn_info_valid = 1;
3225 info->branch_delay_insns = 0;
3226 info->data_size = 0;
3230 if (info->flags & INSN_HAS_RELOC)
3231 /* If the instruction has a reloc associated with it, then
3232 the offset field in the instruction will actually be the
3233 addend for the reloc. (If we are using REL type relocs).
3234 In such cases, we can ignore the pc when computing
3235 addresses, since the addend is not currently pc-relative. */
3238 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3240 if (((word >> 21) & 0x3ff) == 1)
3242 /* RESERVED for ALES. */
3243 assert (ret != ERR_OK);
3252 /* Handle undefined instructions. */
3253 info->insn_type = dis_noninsn;
3254 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3255 word, err_msg[ret]);
3258 user_friendly_fixup (&inst);
3259 print_aarch64_insn (pc, &inst, word, info, errors);
3266 /* Disallow mapping symbols ($x, $d etc) from
3267 being displayed in symbol relative addresses. */
3270 aarch64_symbol_is_valid (asymbol * sym,
3271 struct disassemble_info * info ATTRIBUTE_UNUSED)
3278 name = bfd_asymbol_name (sym);
3282 || (name[1] != 'x' && name[1] != 'd')
3283 || (name[2] != '\0' && name[2] != '.'));
3286 /* Print data bytes on INFO->STREAM. */
3289 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3291 struct disassemble_info *info,
3292 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3294 switch (info->bytes_per_chunk)
3297 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3300 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3303 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3310 /* Try to infer the code or data type from a symbol.
3311 Returns nonzero if *MAP_TYPE was set. */
3314 get_sym_code_type (struct disassemble_info *info, int n,
3315 enum map_type *map_type)
3317 elf_symbol_type *es;
3321 /* If the symbol is in a different section, ignore it. */
3322 if (info->section != NULL && info->section != info->symtab[n]->section)
3325 es = *(elf_symbol_type **)(info->symtab + n);
3326 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3328 /* If the symbol has function type then use that. */
3329 if (type == STT_FUNC)
3331 *map_type = MAP_INSN;
3335 /* Check for mapping symbols. */
3336 name = bfd_asymbol_name(info->symtab[n]);
3338 && (name[1] == 'x' || name[1] == 'd')
3339 && (name[2] == '\0' || name[2] == '.'))
3341 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3348 /* Entry-point of the AArch64 disassembler. */
3351 print_insn_aarch64 (bfd_vma pc,
3352 struct disassemble_info *info)
3354 bfd_byte buffer[INSNLEN];
3356 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3357 aarch64_operand_error *);
3358 bfd_boolean found = FALSE;
3359 unsigned int size = 4;
3361 aarch64_operand_error errors;
3363 if (info->disassembler_options)
3365 set_default_aarch64_dis_options (info);
3367 parse_aarch64_dis_options (info->disassembler_options);
3369 /* To avoid repeated parsing of these options, we remove them here. */
3370 info->disassembler_options = NULL;
3373 /* AArch64 instructions are always little-endian.  */
3374 info->endian_code = BFD_ENDIAN_LITTLE;
3376 /* Default to DATA. A text section is required by the ABI to contain an
3377 INSN mapping symbol at the start. A data section has no such
3378 requirement, hence if no mapping symbol is found the section must
3379 contain only data. This however isn't very useful if the user has
3380 fully stripped the binaries. If this is the case use the section
3381 attributes to determine the default. If we have no section default to
3382 INSN as well, as we may be disassembling some raw bytes on a baremetal
3383 HEX file or similar. */
3384 enum map_type type = MAP_DATA;
3385 if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3388 /* First check the full symtab for a mapping symbol, even if there
3389 are no usable non-mapping symbols for this address. */
3390 if (info->symtab_size != 0
3391 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3394 bfd_vma addr, section_vma = 0;
3395 bfd_boolean can_use_search_opt_p;
3398 if (pc <= last_mapping_addr)
3399 last_mapping_sym = -1;
3401 /* Start scanning at the start of the function, or wherever
3402 we finished last time. */
3403 n = info->symtab_pos + 1;
3405 /* If the last stop offset is different from the current one it means we
3406 are disassembling a different glob of bytes. As such the optimization
3407 would not be safe and we should start over. */
3408 can_use_search_opt_p = last_mapping_sym >= 0
3409 && info->stop_offset == last_stop_offset;
3411 if (n >= last_mapping_sym && can_use_search_opt_p)
3412 n = last_mapping_sym;
3414 /* Look down while we haven't passed the location being disassembled.
3415 The reason for this is that there's no defined order between a symbol
3416 and a mapping symbol that may be at the same address.  We may have to
3417 look at least one position ahead. */
3418 for (; n < info->symtab_size; n++)
3420 addr = bfd_asymbol_value (info->symtab[n]);
3423 if (get_sym_code_type (info, n, &type))
3432 n = info->symtab_pos;
3433 if (n >= last_mapping_sym && can_use_search_opt_p)
3434 n = last_mapping_sym;
3436 /* No mapping symbol found at this address. Look backwards
3437 for a preceding one, but don't go past the section start,
3438 otherwise a data section with no mapping symbol can pick up
3439 a text mapping symbol of a preceding section.  The documentation
3440 says section can be NULL, in which case we will search all the way back.  */
3443 section_vma = info->section->vma;
3447 addr = bfd_asymbol_value (info->symtab[n]);
3448 if (addr < section_vma)
3451 if (get_sym_code_type (info, n, &type))
3460 last_mapping_sym = last_sym;
3462 last_stop_offset = info->stop_offset;
3464 /* Look a little bit ahead to see if we should print out
3465 less than four bytes of data. If there's a symbol,
3466 mapping or otherwise, after two bytes then don't print more.  */
3468 if (last_type == MAP_DATA)
3470 size = 4 - (pc & 3);
3471 for (n = last_sym + 1; n < info->symtab_size; n++)
3473 addr = bfd_asymbol_value (info->symtab[n]);
3476 if (addr - pc < size)
3481 /* If the next symbol is after three bytes, we need to
3482 print only part of the data, so that we can use either .byte or .short.  */
3485 size = (pc & 1) ? 1 : 2;
3491 /* PR 10263: Disassemble data if requested to do so by the user. */
3492 if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3494 /* size was set above. */
3495 info->bytes_per_chunk = size;
3496 info->display_endian = info->endian;
3497 printer = print_insn_data;
3501 info->bytes_per_chunk = size = INSNLEN;
3502 info->display_endian = info->endian_code;
3503 printer = print_insn_aarch64_word;
3506 status = (*info->read_memory_func) (pc, buffer, size, info);
3509 (*info->memory_error_func) (status, pc, info);
3513 data = bfd_get_bits (buffer, size * 8,
3514 info->display_endian == BFD_ENDIAN_BIG);
3516 (*printer) (pc, data, info, &errors);
3522 print_aarch64_disassembler_options (FILE *stream)
3524 fprintf (stream, _("\n\
3525 The following AARCH64 specific disassembler options are supported for use\n\
3526 with the -M switch (multiple options should be separated by commas):\n"));
3528 fprintf (stream, _("\n\
3529 no-aliases Don't print instruction aliases.\n"));
3531 fprintf (stream, _("\n\
3532 aliases Do print instruction aliases.\n"));
3534 fprintf (stream, _("\n\
3535 no-notes Don't print instruction notes.\n"));
3537 fprintf (stream, _("\n\
3538 notes Do print instruction notes.\n"));
3540 #ifdef DEBUG_AARCH64
3541 fprintf (stream, _("\n\
3542 debug_dump Temp switch for debug trace.\n"));
3543 #endif /* DEBUG_AARCH64 */
3545 fprintf (stream, _("\n"));