1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994-2013 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
141 /* Variables that we set while parsing command-line options. Once all
142 options have been read we re-process these values to set the real
144 static const arm_feature_set *legacy_cpu = NULL;
145 static const arm_feature_set *legacy_fpu = NULL;
147 static const arm_feature_set *mcpu_cpu_opt = NULL;
148 static const arm_feature_set *mcpu_fpu_opt = NULL;
149 static const arm_feature_set *march_cpu_opt = NULL;
150 static const arm_feature_set *march_fpu_opt = NULL;
151 static const arm_feature_set *mfpu_opt = NULL;
152 static const arm_feature_set *object_arch = NULL;
154 /* Constants for known architecture features. */
155 static const arm_feature_set fpu_default = FPU_DEFAULT;
156 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
157 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
158 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
159 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
160 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
161 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
162 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
163 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
170 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
172 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
173 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
174 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
175 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
176 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
177 static const arm_feature_set arm_ext_v4t_5 =
178 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
180 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
181 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
182 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
183 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
184 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
185 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
186 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
187 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
188 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
189 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
190 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
191 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
192 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
193 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
194 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
195 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
196 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
197 static const arm_feature_set arm_ext_m =
198 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
199 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
200 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
201 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
202 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
203 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
205 static const arm_feature_set arm_arch_any = ARM_ANY;
206 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
208 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
209 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
211 static const arm_feature_set arm_cext_iwmmxt2 =
212 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
213 static const arm_feature_set arm_cext_iwmmxt =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
215 static const arm_feature_set arm_cext_xscale =
216 ARM_FEATURE (0, ARM_CEXT_XSCALE);
217 static const arm_feature_set arm_cext_maverick =
218 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
219 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
220 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
221 static const arm_feature_set fpu_vfp_ext_v1xd =
222 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
223 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
224 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
225 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
226 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
227 static const arm_feature_set fpu_vfp_ext_d32 =
228 ARM_FEATURE (0, FPU_VFP_EXT_D32);
229 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
230 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
231 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
232 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
233 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
234 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
235 static const arm_feature_set fpu_vfp_ext_armv8 =
236 ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
237 static const arm_feature_set fpu_neon_ext_armv8 =
238 ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
239 static const arm_feature_set fpu_crypto_ext_armv8 =
240 ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
241 static const arm_feature_set crc_ext_armv8 =
242 ARM_FEATURE (0, CRC_EXT_ARMV8);
244 static int mfloat_abi_opt = -1;
245 /* Record user cpu selection for object attributes. */
246 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
247 /* Must be long enough to hold any of the names in arm_cpus. */
248 static char selected_cpu_name[16];
250 /* Return if no cpu was selected on command-line. */
252 no_cpu_selected (void)
254 return selected_cpu.core == arm_arch_none.core
255 && selected_cpu.coproc == arm_arch_none.coproc;
260 static int meabi_flags = EABI_DEFAULT;
262 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
265 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
270 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
275 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
276 symbolS * GOT_symbol;
279 /* 0: assemble for ARM,
280 1: assemble for Thumb,
281 2: assemble for Thumb even though target CPU does not support thumb
283 static int thumb_mode = 0;
284 /* A value distinct from the possible values for thumb_mode that we
285 can use to record whether thumb_mode has been copied into the
286 tc_frag_data field of a frag. */
287 #define MODE_RECORDED (1 << 4)
289 /* Specifies the intrinsic IT insn behavior mode. */
290 enum implicit_it_mode
292 IMPLICIT_IT_MODE_NEVER = 0x00,
293 IMPLICIT_IT_MODE_ARM = 0x01,
294 IMPLICIT_IT_MODE_THUMB = 0x02,
295 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
297 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
299 /* If unified_syntax is true, we are processing the new unified
300 ARM/Thumb syntax. Important differences from the old ARM mode:
302 - Immediate operands do not require a # prefix.
303 - Conditional affixes always appear at the end of the
304 instruction. (For backward compatibility, those instructions
305 that formerly had them in the middle, continue to accept them
307 - The IT instruction may appear, and if it does is validated
308 against subsequent conditional affixes. It does not generate
311 Important differences from the old Thumb mode:
313 - Immediate operands do not require a # prefix.
314 - Most of the V6T2 instructions are only available in unified mode.
315 - The .N and .W suffixes are recognized and honored (it is an error
316 if they cannot be honored).
317 - All instructions set the flags if and only if they have an 's' affix.
318 - Conditional affixes may be used. They are validated against
319 preceding IT instructions. Unlike ARM mode, you cannot use a
320 conditional affix except in the scope of an IT instruction. */
322 static bfd_boolean unified_syntax = FALSE;
324 /* An immediate operand can start with #, and ld*, st*, pld operands
325 can contain [ and ]. We need to tell APP not to elide whitespace
326 before a [, which can appear as the first operand for pld.
327 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
328 const char arm_symbol_chars[] = "#[]{}";
343 enum neon_el_type type;
347 #define NEON_MAX_TYPE_ELS 4
351 struct neon_type_el el[NEON_MAX_TYPE_ELS];
355 enum it_instruction_type
360 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
361 if inside, should be the last one. */
362 NEUTRAL_IT_INSN, /* This could be either inside or outside,
363 i.e. BKPT and NOP. */
364 IT_INSN /* The IT insn has been parsed. */
367 /* The maximum number of operands we need. */
368 #define ARM_IT_MAX_OPERANDS 6
373 unsigned long instruction;
377 /* "uncond_value" is set to the value in place of the conditional field in
378 unconditional versions of the instruction, or -1 if nothing is
381 struct neon_type vectype;
382 /* This does not indicate an actual NEON instruction, only that
383 the mnemonic accepts neon-style type suffixes. */
385 /* Set to the opcode if the instruction needs relaxation.
386 Zero if the instruction is not relaxed. */
390 bfd_reloc_code_real_type type;
395 enum it_instruction_type it_insn_type;
401 struct neon_type_el vectype;
402 unsigned present : 1; /* Operand present. */
403 unsigned isreg : 1; /* Operand was a register. */
404 unsigned immisreg : 1; /* .imm field is a second register. */
405 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
406 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
407 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
408 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
409 instructions. This allows us to disambiguate ARM <-> vector insns. */
410 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
411 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
412 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
413 unsigned issingle : 1; /* Operand is VFP single-precision register. */
414 unsigned hasreloc : 1; /* Operand has relocation suffix. */
415 unsigned writeback : 1; /* Operand has trailing ! */
416 unsigned preind : 1; /* Preindexed address. */
417 unsigned postind : 1; /* Postindexed address. */
418 unsigned negative : 1; /* Index register was negated. */
419 unsigned shifted : 1; /* Shift applied to operation. */
420 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
421 } operands[ARM_IT_MAX_OPERANDS];
424 static struct arm_it inst;
426 #define NUM_FLOAT_VALS 8
428 const char * fp_const[] =
430 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
433 /* Number of littlenums required to hold an extended precision number. */
434 #define MAX_LITTLENUMS 6
436 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
446 #define CP_T_X 0x00008000
447 #define CP_T_Y 0x00400000
449 #define CONDS_BIT 0x00100000
450 #define LOAD_BIT 0x00100000
452 #define DOUBLE_LOAD_FLAG 0x00000001
456 const char * template_name;
460 #define COND_ALWAYS 0xE
464 const char * template_name;
468 struct asm_barrier_opt
470 const char * template_name;
472 const arm_feature_set arch;
475 /* The bit that distinguishes CPSR and SPSR. */
476 #define SPSR_BIT (1 << 22)
478 /* The individual PSR flag bits. */
479 #define PSR_c (1 << 16)
480 #define PSR_x (1 << 17)
481 #define PSR_s (1 << 18)
482 #define PSR_f (1 << 19)
487 bfd_reloc_code_real_type reloc;
492 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
493 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
498 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
501 /* Bits for DEFINED field in neon_typed_alias. */
502 #define NTA_HASTYPE 1
503 #define NTA_HASINDEX 2
505 struct neon_typed_alias
507 unsigned char defined;
509 struct neon_type_el eltype;
512 /* ARM register categories. This includes coprocessor numbers and various
513 architecture extensions' registers. */
540 /* Structure for a hash table entry for a register.
541 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
542 information which states whether a vector type or index is specified (for a
543 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
549 unsigned char builtin;
550 struct neon_typed_alias * neon;
553 /* Diagnostics used when we don't get a register of the expected type. */
554 const char * const reg_expected_msgs[] =
556 N_("ARM register expected"),
557 N_("bad or missing co-processor number"),
558 N_("co-processor register expected"),
559 N_("FPA register expected"),
560 N_("VFP single precision register expected"),
561 N_("VFP/Neon double precision register expected"),
562 N_("Neon quad precision register expected"),
563 N_("VFP single or double precision register expected"),
564 N_("Neon double or quad precision register expected"),
565 N_("VFP single, double or Neon quad precision register expected"),
566 N_("VFP system register expected"),
567 N_("Maverick MVF register expected"),
568 N_("Maverick MVD register expected"),
569 N_("Maverick MVFX register expected"),
570 N_("Maverick MVDX register expected"),
571 N_("Maverick MVAX register expected"),
572 N_("Maverick DSPSC register expected"),
573 N_("iWMMXt data register expected"),
574 N_("iWMMXt control register expected"),
575 N_("iWMMXt scalar register expected"),
576 N_("XScale accumulator register expected"),
579 /* Some well known registers that we refer to directly elsewhere. */
585 /* ARM instructions take 4bytes in the object file, Thumb instructions
591 /* Basic string to match. */
592 const char * template_name;
594 /* Parameters to instruction. */
595 unsigned int operands[8];
597 /* Conditional tag - see opcode_lookup. */
598 unsigned int tag : 4;
600 /* Basic instruction code. */
601 unsigned int avalue : 28;
603 /* Thumb-format instruction code. */
606 /* Which architecture variant provides this instruction. */
607 const arm_feature_set * avariant;
608 const arm_feature_set * tvariant;
610 /* Function to call to encode instruction in ARM format. */
611 void (* aencode) (void);
613 /* Function to call to encode instruction in Thumb format. */
614 void (* tencode) (void);
617 /* Defines for various bits that we will want to toggle. */
618 #define INST_IMMEDIATE 0x02000000
619 #define OFFSET_REG 0x02000000
620 #define HWOFFSET_IMM 0x00400000
621 #define SHIFT_BY_REG 0x00000010
622 #define PRE_INDEX 0x01000000
623 #define INDEX_UP 0x00800000
624 #define WRITE_BACK 0x00200000
625 #define LDM_TYPE_2_OR_3 0x00400000
626 #define CPSI_MMOD 0x00020000
628 #define LITERAL_MASK 0xf000f000
629 #define OPCODE_MASK 0xfe1fffff
630 #define V4_STR_BIT 0x00000020
632 #define T2_SUBS_PC_LR 0xf3de8f00
634 #define DATA_OP_SHIFT 21
636 #define T2_OPCODE_MASK 0xfe1fffff
637 #define T2_DATA_OP_SHIFT 21
639 #define A_COND_MASK 0xf0000000
640 #define A_PUSH_POP_OP_MASK 0x0fff0000
642 /* Opcodes for pushing/poping registers to/from the stack. */
643 #define A1_OPCODE_PUSH 0x092d0000
644 #define A2_OPCODE_PUSH 0x052d0004
645 #define A2_OPCODE_POP 0x049d0004
647 /* Codes to distinguish the arithmetic instructions. */
658 #define OPCODE_CMP 10
659 #define OPCODE_CMN 11
660 #define OPCODE_ORR 12
661 #define OPCODE_MOV 13
662 #define OPCODE_BIC 14
663 #define OPCODE_MVN 15
665 #define T2_OPCODE_AND 0
666 #define T2_OPCODE_BIC 1
667 #define T2_OPCODE_ORR 2
668 #define T2_OPCODE_ORN 3
669 #define T2_OPCODE_EOR 4
670 #define T2_OPCODE_ADD 8
671 #define T2_OPCODE_ADC 10
672 #define T2_OPCODE_SBC 11
673 #define T2_OPCODE_SUB 13
674 #define T2_OPCODE_RSB 14
676 #define T_OPCODE_MUL 0x4340
677 #define T_OPCODE_TST 0x4200
678 #define T_OPCODE_CMN 0x42c0
679 #define T_OPCODE_NEG 0x4240
680 #define T_OPCODE_MVN 0x43c0
682 #define T_OPCODE_ADD_R3 0x1800
683 #define T_OPCODE_SUB_R3 0x1a00
684 #define T_OPCODE_ADD_HI 0x4400
685 #define T_OPCODE_ADD_ST 0xb000
686 #define T_OPCODE_SUB_ST 0xb080
687 #define T_OPCODE_ADD_SP 0xa800
688 #define T_OPCODE_ADD_PC 0xa000
689 #define T_OPCODE_ADD_I8 0x3000
690 #define T_OPCODE_SUB_I8 0x3800
691 #define T_OPCODE_ADD_I3 0x1c00
692 #define T_OPCODE_SUB_I3 0x1e00
694 #define T_OPCODE_ASR_R 0x4100
695 #define T_OPCODE_LSL_R 0x4080
696 #define T_OPCODE_LSR_R 0x40c0
697 #define T_OPCODE_ROR_R 0x41c0
698 #define T_OPCODE_ASR_I 0x1000
699 #define T_OPCODE_LSL_I 0x0000
700 #define T_OPCODE_LSR_I 0x0800
702 #define T_OPCODE_MOV_I8 0x2000
703 #define T_OPCODE_CMP_I8 0x2800
704 #define T_OPCODE_CMP_LR 0x4280
705 #define T_OPCODE_MOV_HR 0x4600
706 #define T_OPCODE_CMP_HR 0x4500
708 #define T_OPCODE_LDR_PC 0x4800
709 #define T_OPCODE_LDR_SP 0x9800
710 #define T_OPCODE_STR_SP 0x9000
711 #define T_OPCODE_LDR_IW 0x6800
712 #define T_OPCODE_STR_IW 0x6000
713 #define T_OPCODE_LDR_IH 0x8800
714 #define T_OPCODE_STR_IH 0x8000
715 #define T_OPCODE_LDR_IB 0x7800
716 #define T_OPCODE_STR_IB 0x7000
717 #define T_OPCODE_LDR_RW 0x5800
718 #define T_OPCODE_STR_RW 0x5000
719 #define T_OPCODE_LDR_RH 0x5a00
720 #define T_OPCODE_STR_RH 0x5200
721 #define T_OPCODE_LDR_RB 0x5c00
722 #define T_OPCODE_STR_RB 0x5400
724 #define T_OPCODE_PUSH 0xb400
725 #define T_OPCODE_POP 0xbc00
727 #define T_OPCODE_BRANCH 0xe000
729 #define THUMB_SIZE 2 /* Size of thumb instruction. */
730 #define THUMB_PP_PC_LR 0x0100
731 #define THUMB_LOAD_BIT 0x0800
732 #define THUMB2_LOAD_BIT 0x00100000
734 #define BAD_ARGS _("bad arguments to instruction")
735 #define BAD_SP _("r13 not allowed here")
736 #define BAD_PC _("r15 not allowed here")
737 #define BAD_COND _("instruction cannot be conditional")
738 #define BAD_OVERLAP _("registers may not be the same")
739 #define BAD_HIREG _("lo register required")
740 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
741 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
742 #define BAD_BRANCH _("branch must be last instruction in IT block")
743 #define BAD_NOT_IT _("instruction not allowed in IT block")
744 #define BAD_FPU _("selected FPU does not support instruction")
745 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
746 #define BAD_IT_COND _("incorrect condition in IT block")
747 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
748 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
749 #define BAD_PC_ADDRESSING \
750 _("cannot use register index with PC-relative addressing")
751 #define BAD_PC_WRITEBACK \
752 _("cannot use writeback with PC-relative addressing")
753 #define BAD_RANGE _("branch out of range")
754 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
756 static struct hash_control * arm_ops_hsh;
757 static struct hash_control * arm_cond_hsh;
758 static struct hash_control * arm_shift_hsh;
759 static struct hash_control * arm_psr_hsh;
760 static struct hash_control * arm_v7m_psr_hsh;
761 static struct hash_control * arm_reg_hsh;
762 static struct hash_control * arm_reloc_hsh;
763 static struct hash_control * arm_barrier_opt_hsh;
765 /* Stuff needed to resolve the label ambiguity
774 symbolS * last_label_seen;
775 static int label_is_thumb_function_name = FALSE;
777 /* Literal pool structure. Held on a per-section
778 and per-sub-section basis. */
780 #define MAX_LITERAL_POOL_SIZE 1024
781 typedef struct literal_pool
783 expressionS literals [MAX_LITERAL_POOL_SIZE];
784 unsigned int next_free_entry;
790 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
792 struct literal_pool * next;
795 /* Pointer to a linked list of literal pools. */
796 literal_pool * list_of_pools = NULL;
799 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
801 static struct current_it now_it;
805 now_it_compatible (int cond)
807 return (cond & ~1) == (now_it.cc & ~1);
811 conditional_insn (void)
813 return inst.cond != COND_ALWAYS;
816 static int in_it_block (void);
818 static int handle_it_state (void);
820 static void force_automatic_it_block_close (void);
822 static void it_fsm_post_encode (void);
824 #define set_it_insn_type(type) \
827 inst.it_insn_type = type; \
828 if (handle_it_state () == FAIL) \
833 #define set_it_insn_type_nonvoid(type, failret) \
836 inst.it_insn_type = type; \
837 if (handle_it_state () == FAIL) \
842 #define set_it_insn_type_last() \
845 if (inst.cond == COND_ALWAYS) \
846 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
848 set_it_insn_type (INSIDE_IT_LAST_INSN); \
854 /* This array holds the chars that always start a comment. If the
855 pre-processor is disabled, these aren't very useful. */
856 const char comment_chars[] = "@";
858 /* This array holds the chars that only start a comment at the beginning of
859 a line. If the line seems to have the form '# 123 filename'
860 .line and .file directives will appear in the pre-processed output. */
861 /* Note that input_file.c hand checks for '#' at the beginning of the
862 first line of the input file. This is because the compiler outputs
863 #NO_APP at the beginning of its output. */
864 /* Also note that comments like this one will always work. */
865 const char line_comment_chars[] = "#";
867 const char line_separator_chars[] = ";";
869 /* Chars that can be used to separate mant
870 from exp in floating point numbers. */
871 const char EXP_CHARS[] = "eE";
873 /* Chars that mean this number is a floating point constant. */
877 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
879 /* Prefix characters that indicate the start of an immediate
881 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
883 /* Separator character handling. */
885 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
888 skip_past_char (char ** str, char c)
890 /* PR gas/14987: Allow for whitespace before the expected character. */
891 skip_whitespace (*str);
902 #define skip_past_comma(str) skip_past_char (str, ',')
904 /* Arithmetic expressions (possibly involving symbols). */
906 /* Return TRUE if anything in the expression is a bignum. */
909 walk_no_bignums (symbolS * sp)
911 if (symbol_get_value_expression (sp)->X_op == O_big)
914 if (symbol_get_value_expression (sp)->X_add_symbol)
916 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
917 || (symbol_get_value_expression (sp)->X_op_symbol
918 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
924 static int in_my_get_expression = 0;
926 /* Third argument to my_get_expression. */
927 #define GE_NO_PREFIX 0
928 #define GE_IMM_PREFIX 1
929 #define GE_OPT_PREFIX 2
930 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
931 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
932 #define GE_OPT_PREFIX_BIG 3
935 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
940 /* In unified syntax, all prefixes are optional. */
942 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
947 case GE_NO_PREFIX: break;
949 if (!is_immediate_prefix (**str))
951 inst.error = _("immediate expression requires a # prefix");
957 case GE_OPT_PREFIX_BIG:
958 if (is_immediate_prefix (**str))
964 memset (ep, 0, sizeof (expressionS));
966 save_in = input_line_pointer;
967 input_line_pointer = *str;
968 in_my_get_expression = 1;
969 seg = expression (ep);
970 in_my_get_expression = 0;
972 if (ep->X_op == O_illegal || ep->X_op == O_absent)
974 /* We found a bad or missing expression in md_operand(). */
975 *str = input_line_pointer;
976 input_line_pointer = save_in;
977 if (inst.error == NULL)
978 inst.error = (ep->X_op == O_absent
979 ? _("missing expression") :_("bad expression"));
984 if (seg != absolute_section
985 && seg != text_section
986 && seg != data_section
987 && seg != bss_section
988 && seg != undefined_section)
990 inst.error = _("bad segment");
991 *str = input_line_pointer;
992 input_line_pointer = save_in;
999 /* Get rid of any bignums now, so that we don't generate an error for which
1000 we can't establish a line number later on. Big numbers are never valid
1001 in instructions, which is where this routine is always called. */
1002 if (prefix_mode != GE_OPT_PREFIX_BIG
1003 && (ep->X_op == O_big
1004 || (ep->X_add_symbol
1005 && (walk_no_bignums (ep->X_add_symbol)
1007 && walk_no_bignums (ep->X_op_symbol))))))
1009 inst.error = _("invalid constant");
1010 *str = input_line_pointer;
1011 input_line_pointer = save_in;
1015 *str = input_line_pointer;
1016 input_line_pointer = save_in;
1020 /* Turn a string in input_line_pointer into a floating point constant
1021 of type TYPE, and store the appropriate bytes in *LITP. The number
1022 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1023 returned, or NULL on OK.
1025 Note that fp constants aren't represent in the normal way on the ARM.
1026 In big endian mode, things are as expected. However, in little endian
1027 mode fp constants are big-endian word-wise, and little-endian byte-wise
1028 within the words. For example, (double) 1.1 in big endian mode is
1029 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1030 the byte sequence 99 99 f1 3f 9a 99 99 99.
1032 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1035 md_atof (int type, char * litP, int * sizeP)
1038 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1070 return _("Unrecognized or unsupported floating point constant");
1073 t = atof_ieee (input_line_pointer, type, words);
1075 input_line_pointer = t;
1076 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1078 if (target_big_endian)
1080 for (i = 0; i < prec; i++)
1082 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1083 litP += sizeof (LITTLENUM_TYPE);
1088 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1089 for (i = prec - 1; i >= 0; i--)
1091 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1092 litP += sizeof (LITTLENUM_TYPE);
1095 /* For a 4 byte float the order of elements in `words' is 1 0.
1096 For an 8 byte float the order is 1 0 3 2. */
1097 for (i = 0; i < prec; i += 2)
1099 md_number_to_chars (litP, (valueT) words[i + 1],
1100 sizeof (LITTLENUM_TYPE));
1101 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1102 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1103 litP += 2 * sizeof (LITTLENUM_TYPE);
1110 /* We handle all bad expressions here, so that we can report the faulty
1111 instruction in the error message. */
1113 md_operand (expressionS * exp)
1115 if (in_my_get_expression)
1116 exp->X_op = O_illegal;
1119 /* Immediate values. */
1121 /* Generic immediate-value read function for use in directives.
1122 Accepts anything that 'expression' can fold to a constant.
1123 *val receives the number. */
1126 immediate_for_directive (int *val)
1129 exp.X_op = O_illegal;
1131 if (is_immediate_prefix (*input_line_pointer))
1133 input_line_pointer++;
1137 if (exp.X_op != O_constant)
1139 as_bad (_("expected #constant"));
1140 ignore_rest_of_line ();
1143 *val = exp.X_add_number;
1148 /* Register parsing. */
1150 /* Generic register parser. CCP points to what should be the
1151 beginning of a register name. If it is indeed a valid register
1152 name, advance CCP over it and return the reg_entry structure;
1153 otherwise return NULL. Does not issue diagnostics. */
1155 static struct reg_entry *
1156 arm_reg_parse_multi (char **ccp)
1160 struct reg_entry *reg;
1162 skip_whitespace (start);
1164 #ifdef REGISTER_PREFIX
1165 if (*start != REGISTER_PREFIX)
1169 #ifdef OPTIONAL_REGISTER_PREFIX
1170 if (*start == OPTIONAL_REGISTER_PREFIX)
1175 if (!ISALPHA (*p) || !is_name_beginner (*p))
1180 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1182 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1192 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1193 enum arm_reg_type type)
1195 /* Alternative syntaxes are accepted for a few register classes. */
1202 /* Generic coprocessor register names are allowed for these. */
1203 if (reg && reg->type == REG_TYPE_CN)
1208 /* For backward compatibility, a bare number is valid here. */
1210 unsigned long processor = strtoul (start, ccp, 10);
1211 if (*ccp != start && processor <= 15)
1215 case REG_TYPE_MMXWC:
1216 /* WC includes WCG. ??? I'm not sure this is true for all
1217 instructions that take WC registers. */
1218 if (reg && reg->type == REG_TYPE_MMXWCG)
1229 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1230 return value is the register number or FAIL. */
1233 arm_reg_parse (char **ccp, enum arm_reg_type type)
1236 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1239 /* Do not allow a scalar (reg+index) to parse as a register. */
1240 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1243 if (reg && reg->type == type)
1246 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1253 /* Parse a Neon type specifier. *STR should point at the leading '.'
1254 character. Does no verification at this stage that the type fits the opcode
1261 Can all be legally parsed by this function.
1263 Fills in neon_type struct pointer with parsed information, and updates STR
1264 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1265 type, FAIL if not. */
1268 parse_neon_type (struct neon_type *type, char **str)
1275 while (type->elems < NEON_MAX_TYPE_ELS)
1277 enum neon_el_type thistype = NT_untyped;
1278 unsigned thissize = -1u;
1285 /* Just a size without an explicit type. */
1289 switch (TOLOWER (*ptr))
1291 case 'i': thistype = NT_integer; break;
1292 case 'f': thistype = NT_float; break;
1293 case 'p': thistype = NT_poly; break;
1294 case 's': thistype = NT_signed; break;
1295 case 'u': thistype = NT_unsigned; break;
1297 thistype = NT_float;
1302 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1308 /* .f is an abbreviation for .f32. */
1309 if (thistype == NT_float && !ISDIGIT (*ptr))
1314 thissize = strtoul (ptr, &ptr, 10);
1316 if (thissize != 8 && thissize != 16 && thissize != 32
1319 as_bad (_("bad size %d in type specifier"), thissize);
1327 type->el[type->elems].type = thistype;
1328 type->el[type->elems].size = thissize;
1333 /* Empty/missing type is not a successful parse. */
1334 if (type->elems == 0)
1342 /* Errors may be set multiple times during parsing or bit encoding
1343 (particularly in the Neon bits), but usually the earliest error which is set
1344 will be the most meaningful. Avoid overwriting it with later (cascading)
1345 errors by calling this function. */
1348 first_error (const char *err)
1354 /* Parse a single type, e.g. ".s32", leading period included. */
1356 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1359 struct neon_type optype;
1363 if (parse_neon_type (&optype, &str) == SUCCESS)
1365 if (optype.elems == 1)
1366 *vectype = optype.el[0];
1369 first_error (_("only one type should be specified for operand"));
1375 first_error (_("vector type expected"));
1387 /* Special meanings for indices (which have a range of 0-7), which will fit into
1390 #define NEON_ALL_LANES 15
1391 #define NEON_INTERLEAVE_LANES 14
1393 /* Parse either a register or a scalar, with an optional type. Return the
1394 register number, and optionally fill in the actual type of the register
1395 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1396 type/index information in *TYPEINFO. */
1399 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1400 enum arm_reg_type *rtype,
1401 struct neon_typed_alias *typeinfo)
1404 struct reg_entry *reg = arm_reg_parse_multi (&str);
1405 struct neon_typed_alias atype;
1406 struct neon_type_el parsetype;
1410 atype.eltype.type = NT_invtype;
1411 atype.eltype.size = -1;
1413 /* Try alternate syntax for some types of register. Note these are mutually
1414 exclusive with the Neon syntax extensions. */
1417 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1425 /* Undo polymorphism when a set of register types may be accepted. */
1426 if ((type == REG_TYPE_NDQ
1427 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1428 || (type == REG_TYPE_VFSD
1429 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1430 || (type == REG_TYPE_NSDQ
1431 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1432 || reg->type == REG_TYPE_NQ))
1433 || (type == REG_TYPE_MMXWC
1434 && (reg->type == REG_TYPE_MMXWCG)))
1435 type = (enum arm_reg_type) reg->type;
1437 if (type != reg->type)
1443 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1445 if ((atype.defined & NTA_HASTYPE) != 0)
1447 first_error (_("can't redefine type for operand"));
1450 atype.defined |= NTA_HASTYPE;
1451 atype.eltype = parsetype;
1454 if (skip_past_char (&str, '[') == SUCCESS)
1456 if (type != REG_TYPE_VFD)
1458 first_error (_("only D registers may be indexed"));
1462 if ((atype.defined & NTA_HASINDEX) != 0)
1464 first_error (_("can't change index for operand"));
1468 atype.defined |= NTA_HASINDEX;
1470 if (skip_past_char (&str, ']') == SUCCESS)
1471 atype.index = NEON_ALL_LANES;
1476 my_get_expression (&exp, &str, GE_NO_PREFIX);
1478 if (exp.X_op != O_constant)
1480 first_error (_("constant expression required"));
1484 if (skip_past_char (&str, ']') == FAIL)
1487 atype.index = exp.X_add_number;
1502 /* Like arm_reg_parse, but allow allow the following extra features:
1503 - If RTYPE is non-zero, return the (possibly restricted) type of the
1504 register (e.g. Neon double or quad reg when either has been requested).
1505 - If this is a Neon vector type with additional type information, fill
1506 in the struct pointed to by VECTYPE (if non-NULL).
1507 This function will fault on encountering a scalar. */
1510 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1511 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1513 struct neon_typed_alias atype;
1515 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1520 /* Do not allow regname(... to parse as a register. */
1524 /* Do not allow a scalar (reg+index) to parse as a register. */
1525 if ((atype.defined & NTA_HASINDEX) != 0)
1527 first_error (_("register operand expected, but got scalar"));
1532 *vectype = atype.eltype;
1539 #define NEON_SCALAR_REG(X) ((X) >> 4)
1540 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1542 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1543 have enough information to be able to do a good job bounds-checking. So, we
1544 just do easy checks here, and do further checks later. */
1547 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1551 struct neon_typed_alias atype;
1553 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1555 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1558 if (atype.index == NEON_ALL_LANES)
1560 first_error (_("scalar must have an index"));
1563 else if (atype.index >= 64 / elsize)
1565 first_error (_("scalar index out of range"));
1570 *type = atype.eltype;
1574 return reg * 16 + atype.index;
1577 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1580 parse_reg_list (char ** strp)
1582 char * str = * strp;
1586 /* We come back here if we get ranges concatenated by '+' or '|'. */
1589 skip_whitespace (str);
1603 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1605 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1615 first_error (_("bad range in register list"));
1619 for (i = cur_reg + 1; i < reg; i++)
1621 if (range & (1 << i))
1623 (_("Warning: duplicated register (r%d) in register list"),
1631 if (range & (1 << reg))
1632 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1634 else if (reg <= cur_reg)
1635 as_tsktsk (_("Warning: register range not in ascending order"));
1640 while (skip_past_comma (&str) != FAIL
1641 || (in_range = 1, *str++ == '-'));
1646 first_error (_("missing `}'"));
1654 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1657 if (exp.X_op == O_constant)
1659 if (exp.X_add_number
1660 != (exp.X_add_number & 0x0000ffff))
1662 inst.error = _("invalid register mask");
1666 if ((range & exp.X_add_number) != 0)
1668 int regno = range & exp.X_add_number;
1671 regno = (1 << regno) - 1;
1673 (_("Warning: duplicated register (r%d) in register list"),
1677 range |= exp.X_add_number;
1681 if (inst.reloc.type != 0)
1683 inst.error = _("expression too complex");
1687 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1688 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1689 inst.reloc.pc_rel = 0;
1693 if (*str == '|' || *str == '+')
1699 while (another_range);
1705 /* Types of registers in a list. */
1714 /* Parse a VFP register list. If the string is invalid return FAIL.
1715 Otherwise return the number of registers, and set PBASE to the first
1716 register. Parses registers of type ETYPE.
1717 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1718 - Q registers can be used to specify pairs of D registers
1719 - { } can be omitted from around a singleton register list
1720 FIXME: This is not implemented, as it would require backtracking in
1723 This could be done (the meaning isn't really ambiguous), but doesn't
1724 fit in well with the current parsing framework.
1725 - 32 D registers may be used (also true for VFPv3).
1726 FIXME: Types are ignored in these register lists, which is probably a
1730 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1735 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1739 unsigned long mask = 0;
1742 if (skip_past_char (&str, '{') == FAIL)
1744 inst.error = _("expecting {");
1751 regtype = REG_TYPE_VFS;
1756 regtype = REG_TYPE_VFD;
1759 case REGLIST_NEON_D:
1760 regtype = REG_TYPE_NDQ;
1764 if (etype != REGLIST_VFP_S)
1766 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1767 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1771 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1774 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1781 base_reg = max_regs;
1785 int setmask = 1, addregs = 1;
1787 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1789 if (new_base == FAIL)
1791 first_error (_(reg_expected_msgs[regtype]));
1795 if (new_base >= max_regs)
1797 first_error (_("register out of range in list"));
1801 /* Note: a value of 2 * n is returned for the register Q<n>. */
1802 if (regtype == REG_TYPE_NQ)
1808 if (new_base < base_reg)
1809 base_reg = new_base;
1811 if (mask & (setmask << new_base))
1813 first_error (_("invalid register list"));
1817 if ((mask >> new_base) != 0 && ! warned)
1819 as_tsktsk (_("register list not in ascending order"));
1823 mask |= setmask << new_base;
1826 if (*str == '-') /* We have the start of a range expression */
1832 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1835 inst.error = gettext (reg_expected_msgs[regtype]);
1839 if (high_range >= max_regs)
1841 first_error (_("register out of range in list"));
1845 if (regtype == REG_TYPE_NQ)
1846 high_range = high_range + 1;
1848 if (high_range <= new_base)
1850 inst.error = _("register range not in ascending order");
1854 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1856 if (mask & (setmask << new_base))
1858 inst.error = _("invalid register list");
1862 mask |= setmask << new_base;
1867 while (skip_past_comma (&str) != FAIL);
1871 /* Sanity check -- should have raised a parse error above. */
1872 if (count == 0 || count > max_regs)
1877 /* Final test -- the registers must be consecutive. */
1879 for (i = 0; i < count; i++)
1881 if ((mask & (1u << i)) == 0)
1883 inst.error = _("non-contiguous register range");
1893 /* True if two alias types are the same. */
1896 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1904 if (a->defined != b->defined)
1907 if ((a->defined & NTA_HASTYPE) != 0
1908 && (a->eltype.type != b->eltype.type
1909 || a->eltype.size != b->eltype.size))
1912 if ((a->defined & NTA_HASINDEX) != 0
1913 && (a->index != b->index))
1919 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1920 The base register is put in *PBASE.
1921 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1923 The register stride (minus one) is put in bit 4 of the return value.
1924 Bits [6:5] encode the list length (minus one).
1925 The type of the list elements is put in *ELTYPE, if non-NULL. */
1927 #define NEON_LANE(X) ((X) & 0xf)
1928 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1929 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1932 parse_neon_el_struct_list (char **str, unsigned *pbase,
1933 struct neon_type_el *eltype)
1940 int leading_brace = 0;
1941 enum arm_reg_type rtype = REG_TYPE_NDQ;
1942 const char *const incr_error = _("register stride must be 1 or 2");
1943 const char *const type_error = _("mismatched element/structure types in list");
1944 struct neon_typed_alias firsttype;
1946 if (skip_past_char (&ptr, '{') == SUCCESS)
1951 struct neon_typed_alias atype;
1952 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1956 first_error (_(reg_expected_msgs[rtype]));
1963 if (rtype == REG_TYPE_NQ)
1969 else if (reg_incr == -1)
1971 reg_incr = getreg - base_reg;
1972 if (reg_incr < 1 || reg_incr > 2)
1974 first_error (_(incr_error));
1978 else if (getreg != base_reg + reg_incr * count)
1980 first_error (_(incr_error));
1984 if (! neon_alias_types_same (&atype, &firsttype))
1986 first_error (_(type_error));
1990 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1994 struct neon_typed_alias htype;
1995 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1997 lane = NEON_INTERLEAVE_LANES;
1998 else if (lane != NEON_INTERLEAVE_LANES)
2000 first_error (_(type_error));
2005 else if (reg_incr != 1)
2007 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2011 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2014 first_error (_(reg_expected_msgs[rtype]));
2017 if (! neon_alias_types_same (&htype, &firsttype))
2019 first_error (_(type_error));
2022 count += hireg + dregs - getreg;
2026 /* If we're using Q registers, we can't use [] or [n] syntax. */
2027 if (rtype == REG_TYPE_NQ)
2033 if ((atype.defined & NTA_HASINDEX) != 0)
2037 else if (lane != atype.index)
2039 first_error (_(type_error));
2043 else if (lane == -1)
2044 lane = NEON_INTERLEAVE_LANES;
2045 else if (lane != NEON_INTERLEAVE_LANES)
2047 first_error (_(type_error));
2052 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2054 /* No lane set by [x]. We must be interleaving structures. */
2056 lane = NEON_INTERLEAVE_LANES;
2059 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2060 || (count > 1 && reg_incr == -1))
2062 first_error (_("error parsing element/structure list"));
2066 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2068 first_error (_("expected }"));
2076 *eltype = firsttype.eltype;
2081 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2084 /* Parse an explicit relocation suffix on an expression. This is
2085 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2086 arm_reloc_hsh contains no entries, so this function can only
2087 succeed if there is no () after the word. Returns -1 on error,
2088 BFD_RELOC_UNUSED if there wasn't any suffix. */
2091 parse_reloc (char **str)
2093 struct reloc_entry *r;
2097 return BFD_RELOC_UNUSED;
2102 while (*q && *q != ')' && *q != ',')
2107 if ((r = (struct reloc_entry *)
2108 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2115 /* Directives: register aliases. */
2117 static struct reg_entry *
2118 insert_reg_alias (char *str, unsigned number, int type)
2120 struct reg_entry *new_reg;
2123 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2125 if (new_reg->builtin)
2126 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2128 /* Only warn about a redefinition if it's not defined as the
2130 else if (new_reg->number != number || new_reg->type != type)
2131 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2136 name = xstrdup (str);
2137 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2139 new_reg->name = name;
2140 new_reg->number = number;
2141 new_reg->type = type;
2142 new_reg->builtin = FALSE;
2143 new_reg->neon = NULL;
2145 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2152 insert_neon_reg_alias (char *str, int number, int type,
2153 struct neon_typed_alias *atype)
2155 struct reg_entry *reg = insert_reg_alias (str, number, type);
2159 first_error (_("attempt to redefine typed alias"));
2165 reg->neon = (struct neon_typed_alias *)
2166 xmalloc (sizeof (struct neon_typed_alias));
2167 *reg->neon = *atype;
2171 /* Look for the .req directive. This is of the form:
2173 new_register_name .req existing_register_name
2175 If we find one, or if it looks sufficiently like one that we want to
2176 handle any error here, return TRUE. Otherwise return FALSE. */
2179 create_register_alias (char * newname, char *p)
2181 struct reg_entry *old;
2182 char *oldname, *nbuf;
2185 /* The input scrubber ensures that whitespace after the mnemonic is
2186 collapsed to single spaces. */
2188 if (strncmp (oldname, " .req ", 6) != 0)
2192 if (*oldname == '\0')
2195 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2198 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2202 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2203 the desired alias name, and p points to its end. If not, then
2204 the desired alias name is in the global original_case_string. */
2205 #ifdef TC_CASE_SENSITIVE
2208 newname = original_case_string;
2209 nlen = strlen (newname);
2212 nbuf = (char *) alloca (nlen + 1);
2213 memcpy (nbuf, newname, nlen);
2216 /* Create aliases under the new name as stated; an all-lowercase
2217 version of the new name; and an all-uppercase version of the new
2219 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2221 for (p = nbuf; *p; p++)
2224 if (strncmp (nbuf, newname, nlen))
2226 /* If this attempt to create an additional alias fails, do not bother
2227 trying to create the all-lower case alias. We will fail and issue
2228 a second, duplicate error message. This situation arises when the
2229 programmer does something like:
2232 The second .req creates the "Foo" alias but then fails to create
2233 the artificial FOO alias because it has already been created by the
2235 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2239 for (p = nbuf; *p; p++)
2242 if (strncmp (nbuf, newname, nlen))
2243 insert_reg_alias (nbuf, old->number, old->type);
2249 /* Create a Neon typed/indexed register alias using directives, e.g.:
2254 These typed registers can be used instead of the types specified after the
2255 Neon mnemonic, so long as all operands given have types. Types can also be
2256 specified directly, e.g.:
2257 vadd d0.s32, d1.s32, d2.s32 */
2260 create_neon_reg_alias (char *newname, char *p)
2262 enum arm_reg_type basetype;
2263 struct reg_entry *basereg;
2264 struct reg_entry mybasereg;
2265 struct neon_type ntype;
2266 struct neon_typed_alias typeinfo;
2267 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2270 typeinfo.defined = 0;
2271 typeinfo.eltype.type = NT_invtype;
2272 typeinfo.eltype.size = -1;
2273 typeinfo.index = -1;
2277 if (strncmp (p, " .dn ", 5) == 0)
2278 basetype = REG_TYPE_VFD;
2279 else if (strncmp (p, " .qn ", 5) == 0)
2280 basetype = REG_TYPE_NQ;
2289 basereg = arm_reg_parse_multi (&p);
2291 if (basereg && basereg->type != basetype)
2293 as_bad (_("bad type for register"));
2297 if (basereg == NULL)
2300 /* Try parsing as an integer. */
2301 my_get_expression (&exp, &p, GE_NO_PREFIX);
2302 if (exp.X_op != O_constant)
2304 as_bad (_("expression must be constant"));
2307 basereg = &mybasereg;
2308 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2314 typeinfo = *basereg->neon;
2316 if (parse_neon_type (&ntype, &p) == SUCCESS)
2318 /* We got a type. */
2319 if (typeinfo.defined & NTA_HASTYPE)
2321 as_bad (_("can't redefine the type of a register alias"));
2325 typeinfo.defined |= NTA_HASTYPE;
2326 if (ntype.elems != 1)
2328 as_bad (_("you must specify a single type only"));
2331 typeinfo.eltype = ntype.el[0];
2334 if (skip_past_char (&p, '[') == SUCCESS)
2337 /* We got a scalar index. */
2339 if (typeinfo.defined & NTA_HASINDEX)
2341 as_bad (_("can't redefine the index of a scalar alias"));
2345 my_get_expression (&exp, &p, GE_NO_PREFIX);
2347 if (exp.X_op != O_constant)
2349 as_bad (_("scalar index must be constant"));
2353 typeinfo.defined |= NTA_HASINDEX;
2354 typeinfo.index = exp.X_add_number;
2356 if (skip_past_char (&p, ']') == FAIL)
2358 as_bad (_("expecting ]"));
2363 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2364 the desired alias name, and p points to its end. If not, then
2365 the desired alias name is in the global original_case_string. */
2366 #ifdef TC_CASE_SENSITIVE
2367 namelen = nameend - newname;
2369 newname = original_case_string;
2370 namelen = strlen (newname);
2373 namebuf = (char *) alloca (namelen + 1);
2374 strncpy (namebuf, newname, namelen);
2375 namebuf[namelen] = '\0';
2377 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2378 typeinfo.defined != 0 ? &typeinfo : NULL);
2380 /* Insert name in all uppercase. */
2381 for (p = namebuf; *p; p++)
2384 if (strncmp (namebuf, newname, namelen))
2385 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2386 typeinfo.defined != 0 ? &typeinfo : NULL);
2388 /* Insert name in all lowercase. */
2389 for (p = namebuf; *p; p++)
2392 if (strncmp (namebuf, newname, namelen))
2393 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2394 typeinfo.defined != 0 ? &typeinfo : NULL);
2399 /* Should never be called, as .req goes between the alias and the
2400 register name, not at the beginning of the line. */
2403 s_req (int a ATTRIBUTE_UNUSED)
2405 as_bad (_("invalid syntax for .req directive"));
2409 s_dn (int a ATTRIBUTE_UNUSED)
2411 as_bad (_("invalid syntax for .dn directive"));
2415 s_qn (int a ATTRIBUTE_UNUSED)
2417 as_bad (_("invalid syntax for .qn directive"));
2420 /* The .unreq directive deletes an alias which was previously defined
2421 by .req. For example:
2427 s_unreq (int a ATTRIBUTE_UNUSED)
2432 name = input_line_pointer;
2434 while (*input_line_pointer != 0
2435 && *input_line_pointer != ' '
2436 && *input_line_pointer != '\n')
2437 ++input_line_pointer;
2439 saved_char = *input_line_pointer;
2440 *input_line_pointer = 0;
2443 as_bad (_("invalid syntax for .unreq directive"));
2446 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2450 as_bad (_("unknown register alias '%s'"), name);
2451 else if (reg->builtin)
2452 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2459 hash_delete (arm_reg_hsh, name, FALSE);
2460 free ((char *) reg->name);
2465 /* Also locate the all upper case and all lower case versions.
2466 Do not complain if we cannot find one or the other as it
2467 was probably deleted above. */
2469 nbuf = strdup (name);
2470 for (p = nbuf; *p; p++)
2472 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2475 hash_delete (arm_reg_hsh, nbuf, FALSE);
2476 free ((char *) reg->name);
2482 for (p = nbuf; *p; p++)
2484 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2487 hash_delete (arm_reg_hsh, nbuf, FALSE);
2488 free ((char *) reg->name);
2498 *input_line_pointer = saved_char;
2499 demand_empty_rest_of_line ();
2502 /* Directives: Instruction set selection. */
2505 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2506 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2507 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2508 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2510 /* Create a new mapping symbol for the transition to STATE. */
2513 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2516 const char * symname;
2523 type = BSF_NO_FLAGS;
2527 type = BSF_NO_FLAGS;
2531 type = BSF_NO_FLAGS;
2537 symbolP = symbol_new (symname, now_seg, value, frag);
2538 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2543 THUMB_SET_FUNC (symbolP, 0);
2544 ARM_SET_THUMB (symbolP, 0);
2545 ARM_SET_INTERWORK (symbolP, support_interwork);
2549 THUMB_SET_FUNC (symbolP, 1);
2550 ARM_SET_THUMB (symbolP, 1);
2551 ARM_SET_INTERWORK (symbolP, support_interwork);
2559 /* Save the mapping symbols for future reference. Also check that
2560 we do not place two mapping symbols at the same offset within a
2561 frag. We'll handle overlap between frags in
2562 check_mapping_symbols.
2564 If .fill or other data filling directive generates zero sized data,
2565 the mapping symbol for the following code will have the same value
2566 as the one generated for the data filling directive. In this case,
2567 we replace the old symbol with the new one at the same address. */
2570 if (frag->tc_frag_data.first_map != NULL)
2572 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2573 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2575 frag->tc_frag_data.first_map = symbolP;
2577 if (frag->tc_frag_data.last_map != NULL)
2579 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2580 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2581 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2583 frag->tc_frag_data.last_map = symbolP;
2586 /* We must sometimes convert a region marked as code to data during
2587 code alignment, if an odd number of bytes have to be padded. The
2588 code mapping symbol is pushed to an aligned address. */
2591 insert_data_mapping_symbol (enum mstate state,
2592 valueT value, fragS *frag, offsetT bytes)
2594 /* If there was already a mapping symbol, remove it. */
2595 if (frag->tc_frag_data.last_map != NULL
2596 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2598 symbolS *symp = frag->tc_frag_data.last_map;
2602 know (frag->tc_frag_data.first_map == symp);
2603 frag->tc_frag_data.first_map = NULL;
2605 frag->tc_frag_data.last_map = NULL;
2606 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2609 make_mapping_symbol (MAP_DATA, value, frag);
2610 make_mapping_symbol (state, value + bytes, frag);
2613 static void mapping_state_2 (enum mstate state, int max_chars);
2615 /* Set the mapping state to STATE. Only call this when about to
2616 emit some STATE bytes to the file. */
2619 mapping_state (enum mstate state)
2621 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2623 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2625 if (mapstate == state)
2626 /* The mapping symbol has already been emitted.
2627 There is nothing else to do. */
2630 if (state == MAP_ARM || state == MAP_THUMB)
2632 All ARM instructions require 4-byte alignment.
2633 (Almost) all Thumb instructions require 2-byte alignment.
2635 When emitting instructions into any section, mark the section
2638 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2639 but themselves require 2-byte alignment; this applies to some
2640 PC- relative forms. However, these cases will invovle implicit
2641 literal pool generation or an explicit .align >=2, both of
2642 which will cause the section to me marked with sufficient
2643 alignment. Thus, we don't handle those cases here. */
2644 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2646 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2647 /* This case will be evaluated later in the next else. */
2649 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2650 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2652 /* Only add the symbol if the offset is > 0:
2653 if we're at the first frag, check it's size > 0;
2654 if we're not at the first frag, then for sure
2655 the offset is > 0. */
2656 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2657 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2660 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2663 mapping_state_2 (state, 0);
2667 /* Same as mapping_state, but MAX_CHARS bytes have already been
2668 allocated. Put the mapping symbol that far back. */
2671 mapping_state_2 (enum mstate state, int max_chars)
2673 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2675 if (!SEG_NORMAL (now_seg))
2678 if (mapstate == state)
2679 /* The mapping symbol has already been emitted.
2680 There is nothing else to do. */
2683 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2684 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2687 #define mapping_state(x) ((void)0)
2688 #define mapping_state_2(x, y) ((void)0)
2691 /* Find the real, Thumb encoded start of a Thumb function. */
2695 find_real_start (symbolS * symbolP)
2698 const char * name = S_GET_NAME (symbolP);
2699 symbolS * new_target;
2701 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2702 #define STUB_NAME ".real_start_of"
2707 /* The compiler may generate BL instructions to local labels because
2708 it needs to perform a branch to a far away location. These labels
2709 do not have a corresponding ".real_start_of" label. We check
2710 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2711 the ".real_start_of" convention for nonlocal branches. */
2712 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2715 real_start = ACONCAT ((STUB_NAME, name, NULL));
2716 new_target = symbol_find (real_start);
2718 if (new_target == NULL)
2720 as_warn (_("Failed to find real start of function: %s\n"), name);
2721 new_target = symbolP;
2729 opcode_select (int width)
2736 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2737 as_bad (_("selected processor does not support THUMB opcodes"));
2740 /* No need to force the alignment, since we will have been
2741 coming from ARM mode, which is word-aligned. */
2742 record_alignment (now_seg, 1);
2749 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2750 as_bad (_("selected processor does not support ARM opcodes"));
2755 frag_align (2, 0, 0);
2757 record_alignment (now_seg, 1);
2762 as_bad (_("invalid instruction size selected (%d)"), width);
2767 s_arm (int ignore ATTRIBUTE_UNUSED)
2770 demand_empty_rest_of_line ();
2774 s_thumb (int ignore ATTRIBUTE_UNUSED)
2777 demand_empty_rest_of_line ();
2781 s_code (int unused ATTRIBUTE_UNUSED)
2785 temp = get_absolute_expression ();
2790 opcode_select (temp);
2794 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2799 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2801 /* If we are not already in thumb mode go into it, EVEN if
2802 the target processor does not support thumb instructions.
2803 This is used by gcc/config/arm/lib1funcs.asm for example
2804 to compile interworking support functions even if the
2805 target processor should not support interworking. */
2809 record_alignment (now_seg, 1);
2812 demand_empty_rest_of_line ();
2816 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2820 /* The following label is the name/address of the start of a Thumb function.
2821 We need to know this for the interworking support. */
2822 label_is_thumb_function_name = TRUE;
2825 /* Perform a .set directive, but also mark the alias as
2826 being a thumb function. */
2829 s_thumb_set (int equiv)
2831 /* XXX the following is a duplicate of the code for s_set() in read.c
2832 We cannot just call that code as we need to get at the symbol that
2839 /* Especial apologies for the random logic:
2840 This just grew, and could be parsed much more simply!
2842 name = input_line_pointer;
2843 delim = get_symbol_end ();
2844 end_name = input_line_pointer;
2847 if (*input_line_pointer != ',')
2850 as_bad (_("expected comma after name \"%s\""), name);
2852 ignore_rest_of_line ();
2856 input_line_pointer++;
2859 if (name[0] == '.' && name[1] == '\0')
2861 /* XXX - this should not happen to .thumb_set. */
2865 if ((symbolP = symbol_find (name)) == NULL
2866 && (symbolP = md_undefined_symbol (name)) == NULL)
2869 /* When doing symbol listings, play games with dummy fragments living
2870 outside the normal fragment chain to record the file and line info
2872 if (listing & LISTING_SYMBOLS)
2874 extern struct list_info_struct * listing_tail;
2875 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2877 memset (dummy_frag, 0, sizeof (fragS));
2878 dummy_frag->fr_type = rs_fill;
2879 dummy_frag->line = listing_tail;
2880 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2881 dummy_frag->fr_symbol = symbolP;
2885 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2888 /* "set" symbols are local unless otherwise specified. */
2889 SF_SET_LOCAL (symbolP);
2890 #endif /* OBJ_COFF */
2891 } /* Make a new symbol. */
2893 symbol_table_insert (symbolP);
2898 && S_IS_DEFINED (symbolP)
2899 && S_GET_SEGMENT (symbolP) != reg_section)
2900 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2902 pseudo_set (symbolP);
2904 demand_empty_rest_of_line ();
2906 /* XXX Now we come to the Thumb specific bit of code. */
2908 THUMB_SET_FUNC (symbolP, 1);
2909 ARM_SET_THUMB (symbolP, 1);
2910 #if defined OBJ_ELF || defined OBJ_COFF
2911 ARM_SET_INTERWORK (symbolP, support_interwork);
2915 /* Directives: Mode selection. */
2917 /* .syntax [unified|divided] - choose the new unified syntax
2918 (same for Arm and Thumb encoding, modulo slight differences in what
2919 can be represented) or the old divergent syntax for each mode. */
2921 s_syntax (int unused ATTRIBUTE_UNUSED)
2925 name = input_line_pointer;
2926 delim = get_symbol_end ();
2928 if (!strcasecmp (name, "unified"))
2929 unified_syntax = TRUE;
2930 else if (!strcasecmp (name, "divided"))
2931 unified_syntax = FALSE;
2934 as_bad (_("unrecognized syntax mode \"%s\""), name);
2937 *input_line_pointer = delim;
2938 demand_empty_rest_of_line ();
2941 /* Directives: sectioning and alignment. */
2943 /* Same as s_align_ptwo but align 0 => align 2. */
2946 s_align (int unused ATTRIBUTE_UNUSED)
2951 long max_alignment = 15;
2953 temp = get_absolute_expression ();
2954 if (temp > max_alignment)
2955 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2958 as_bad (_("alignment negative. 0 assumed."));
2962 if (*input_line_pointer == ',')
2964 input_line_pointer++;
2965 temp_fill = get_absolute_expression ();
2977 /* Only make a frag if we HAVE to. */
2978 if (temp && !need_pass_2)
2980 if (!fill_p && subseg_text_p (now_seg))
2981 frag_align_code (temp, 0);
2983 frag_align (temp, (int) temp_fill, 0);
2985 demand_empty_rest_of_line ();
2987 record_alignment (now_seg, temp);
2991 s_bss (int ignore ATTRIBUTE_UNUSED)
2993 /* We don't support putting frags in the BSS segment, we fake it by
2994 marking in_bss, then looking at s_skip for clues. */
2995 subseg_set (bss_section, 0);
2996 demand_empty_rest_of_line ();
2998 #ifdef md_elf_section_change_hook
2999 md_elf_section_change_hook ();
3004 s_even (int ignore ATTRIBUTE_UNUSED)
3006 /* Never make frag if expect extra pass. */
3008 frag_align (1, 0, 0);
3010 record_alignment (now_seg, 1);
3012 demand_empty_rest_of_line ();
3015 /* Directives: Literal pools. */
3017 static literal_pool *
3018 find_literal_pool (void)
3020 literal_pool * pool;
3022 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3024 if (pool->section == now_seg
3025 && pool->sub_section == now_subseg)
3032 static literal_pool *
3033 find_or_make_literal_pool (void)
3035 /* Next literal pool ID number. */
3036 static unsigned int latest_pool_num = 1;
3037 literal_pool * pool;
3039 pool = find_literal_pool ();
3043 /* Create a new pool. */
3044 pool = (literal_pool *) xmalloc (sizeof (* pool));
3048 pool->next_free_entry = 0;
3049 pool->section = now_seg;
3050 pool->sub_section = now_subseg;
3051 pool->next = list_of_pools;
3052 pool->symbol = NULL;
3054 /* Add it to the list. */
3055 list_of_pools = pool;
3058 /* New pools, and emptied pools, will have a NULL symbol. */
3059 if (pool->symbol == NULL)
3061 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3062 (valueT) 0, &zero_address_frag);
3063 pool->id = latest_pool_num ++;
3070 /* Add the literal in the global 'inst'
3071 structure to the relevant literal pool. */
3074 add_to_lit_pool (void)
3076 literal_pool * pool;
3079 pool = find_or_make_literal_pool ();
3081 /* Check if this literal value is already in the pool. */
3082 for (entry = 0; entry < pool->next_free_entry; entry ++)
3084 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3085 && (inst.reloc.exp.X_op == O_constant)
3086 && (pool->literals[entry].X_add_number
3087 == inst.reloc.exp.X_add_number)
3088 && (pool->literals[entry].X_unsigned
3089 == inst.reloc.exp.X_unsigned))
3092 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3093 && (inst.reloc.exp.X_op == O_symbol)
3094 && (pool->literals[entry].X_add_number
3095 == inst.reloc.exp.X_add_number)
3096 && (pool->literals[entry].X_add_symbol
3097 == inst.reloc.exp.X_add_symbol)
3098 && (pool->literals[entry].X_op_symbol
3099 == inst.reloc.exp.X_op_symbol))
3103 /* Do we need to create a new entry? */
3104 if (entry == pool->next_free_entry)
3106 if (entry >= MAX_LITERAL_POOL_SIZE)
3108 inst.error = _("literal pool overflow");
3112 pool->literals[entry] = inst.reloc.exp;
3114 /* PR ld/12974: Record the location of the first source line to reference
3115 this entry in the literal pool. If it turns out during linking that the
3116 symbol does not exist we will be able to give an accurate line number for
3117 the (first use of the) missing reference. */
3118 if (debug_type == DEBUG_DWARF2)
3119 dwarf2_where (pool->locs + entry);
3121 pool->next_free_entry += 1;
3124 inst.reloc.exp.X_op = O_symbol;
3125 inst.reloc.exp.X_add_number = ((int) entry) * 4;
3126 inst.reloc.exp.X_add_symbol = pool->symbol;
3131 /* Can't use symbol_new here, so have to create a symbol and then at
3132 a later date assign it a value. Thats what these functions do. */
3135 symbol_locate (symbolS * symbolP,
3136 const char * name, /* It is copied, the caller can modify. */
3137 segT segment, /* Segment identifier (SEG_<something>). */
3138 valueT valu, /* Symbol value. */
3139 fragS * frag) /* Associated fragment. */
3141 unsigned int name_length;
3142 char * preserved_copy_of_name;
3144 name_length = strlen (name) + 1; /* +1 for \0. */
3145 obstack_grow (¬es, name, name_length);
3146 preserved_copy_of_name = (char *) obstack_finish (¬es);
3148 #ifdef tc_canonicalize_symbol_name
3149 preserved_copy_of_name =
3150 tc_canonicalize_symbol_name (preserved_copy_of_name);
3153 S_SET_NAME (symbolP, preserved_copy_of_name);
3155 S_SET_SEGMENT (symbolP, segment);
3156 S_SET_VALUE (symbolP, valu);
3157 symbol_clear_list_pointers (symbolP);
3159 symbol_set_frag (symbolP, frag);
3161 /* Link to end of symbol chain. */
3163 extern int symbol_table_frozen;
3165 if (symbol_table_frozen)
3169 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3171 obj_symbol_new_hook (symbolP);
3173 #ifdef tc_symbol_new_hook
3174 tc_symbol_new_hook (symbolP);
3178 verify_symbol_chain (symbol_rootP, symbol_lastP);
3179 #endif /* DEBUG_SYMS */
3184 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3187 literal_pool * pool;
3190 pool = find_literal_pool ();
3192 || pool->symbol == NULL
3193 || pool->next_free_entry == 0)
3196 mapping_state (MAP_DATA);
3198 /* Align pool as you have word accesses.
3199 Only make a frag if we have to. */
3201 frag_align (2, 0, 0);
3203 record_alignment (now_seg, 2);
3205 sprintf (sym_name, "$$lit_\002%x", pool->id);
3207 symbol_locate (pool->symbol, sym_name, now_seg,
3208 (valueT) frag_now_fix (), frag_now);
3209 symbol_table_insert (pool->symbol);
3211 ARM_SET_THUMB (pool->symbol, thumb_mode);
3213 #if defined OBJ_COFF || defined OBJ_ELF
3214 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3217 for (entry = 0; entry < pool->next_free_entry; entry ++)
3220 if (debug_type == DEBUG_DWARF2)
3221 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3223 /* First output the expression in the instruction to the pool. */
3224 emit_expr (&(pool->literals[entry]), 4); /* .word */
3227 /* Mark the pool as empty. */
3228 pool->next_free_entry = 0;
3229 pool->symbol = NULL;
3233 /* Forward declarations for functions below, in the MD interface
3235 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3236 static valueT create_unwind_entry (int);
3237 static void start_unwind_section (const segT, int);
3238 static void add_unwind_opcode (valueT, int);
3239 static void flush_pending_unwind (void);
3241 /* Directives: Data. */
3244 s_arm_elf_cons (int nbytes)
3248 #ifdef md_flush_pending_output
3249 md_flush_pending_output ();
3252 if (is_it_end_of_statement ())
3254 demand_empty_rest_of_line ();
3258 #ifdef md_cons_align
3259 md_cons_align (nbytes);
3262 mapping_state (MAP_DATA);
3266 char *base = input_line_pointer;
3270 if (exp.X_op != O_symbol)
3271 emit_expr (&exp, (unsigned int) nbytes);
3274 char *before_reloc = input_line_pointer;
3275 reloc = parse_reloc (&input_line_pointer);
3278 as_bad (_("unrecognized relocation suffix"));
3279 ignore_rest_of_line ();
3282 else if (reloc == BFD_RELOC_UNUSED)
3283 emit_expr (&exp, (unsigned int) nbytes);
3286 reloc_howto_type *howto = (reloc_howto_type *)
3287 bfd_reloc_type_lookup (stdoutput,
3288 (bfd_reloc_code_real_type) reloc);
3289 int size = bfd_get_reloc_size (howto);
3291 if (reloc == BFD_RELOC_ARM_PLT32)
3293 as_bad (_("(plt) is only valid on branch targets"));
3294 reloc = BFD_RELOC_UNUSED;
3299 as_bad (_("%s relocations do not fit in %d bytes"),
3300 howto->name, nbytes);
3303 /* We've parsed an expression stopping at O_symbol.
3304 But there may be more expression left now that we
3305 have parsed the relocation marker. Parse it again.
3306 XXX Surely there is a cleaner way to do this. */
3307 char *p = input_line_pointer;
3309 char *save_buf = (char *) alloca (input_line_pointer - base);
3310 memcpy (save_buf, base, input_line_pointer - base);
3311 memmove (base + (input_line_pointer - before_reloc),
3312 base, before_reloc - base);
3314 input_line_pointer = base + (input_line_pointer-before_reloc);
3316 memcpy (base, save_buf, p - base);
3318 offset = nbytes - size;
3319 p = frag_more ((int) nbytes);
3320 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3321 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3326 while (*input_line_pointer++ == ',');
3328 /* Put terminator back into stream. */
3329 input_line_pointer --;
3330 demand_empty_rest_of_line ();
3333 /* Emit an expression containing a 32-bit thumb instruction.
3334 Implementation based on put_thumb32_insn. */
3337 emit_thumb32_expr (expressionS * exp)
3339 expressionS exp_high = *exp;
3341 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3342 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3343 exp->X_add_number &= 0xffff;
3344 emit_expr (exp, (unsigned int) THUMB_SIZE);
3347 /* Guess the instruction size based on the opcode. */
3350 thumb_insn_size (int opcode)
3352 if ((unsigned int) opcode < 0xe800u)
3354 else if ((unsigned int) opcode >= 0xe8000000u)
3361 emit_insn (expressionS *exp, int nbytes)
3365 if (exp->X_op == O_constant)
3370 size = thumb_insn_size (exp->X_add_number);
3374 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3376 as_bad (_(".inst.n operand too big. "\
3377 "Use .inst.w instead"));
3382 if (now_it.state == AUTOMATIC_IT_BLOCK)
3383 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3385 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3387 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3388 emit_thumb32_expr (exp);
3390 emit_expr (exp, (unsigned int) size);
3392 it_fsm_post_encode ();
3396 as_bad (_("cannot determine Thumb instruction size. " \
3397 "Use .inst.n/.inst.w instead"));
3400 as_bad (_("constant expression required"));
3405 /* Like s_arm_elf_cons but do not use md_cons_align and
3406 set the mapping state to MAP_ARM/MAP_THUMB. */
3409 s_arm_elf_inst (int nbytes)
3411 if (is_it_end_of_statement ())
3413 demand_empty_rest_of_line ();
3417 /* Calling mapping_state () here will not change ARM/THUMB,
3418 but will ensure not to be in DATA state. */
3421 mapping_state (MAP_THUMB);
3426 as_bad (_("width suffixes are invalid in ARM mode"));
3427 ignore_rest_of_line ();
3433 mapping_state (MAP_ARM);
3442 if (! emit_insn (& exp, nbytes))
3444 ignore_rest_of_line ();
3448 while (*input_line_pointer++ == ',');
3450 /* Put terminator back into stream. */
3451 input_line_pointer --;
3452 demand_empty_rest_of_line ();
3455 /* Parse a .rel31 directive. */
3458 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3465 if (*input_line_pointer == '1')
3466 highbit = 0x80000000;
3467 else if (*input_line_pointer != '0')
3468 as_bad (_("expected 0 or 1"));
3470 input_line_pointer++;
3471 if (*input_line_pointer != ',')
3472 as_bad (_("missing comma"));
3473 input_line_pointer++;
3475 #ifdef md_flush_pending_output
3476 md_flush_pending_output ();
3479 #ifdef md_cons_align
3483 mapping_state (MAP_DATA);
3488 md_number_to_chars (p, highbit, 4);
3489 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3490 BFD_RELOC_ARM_PREL31);
3492 demand_empty_rest_of_line ();
3495 /* Directives: AEABI stack-unwind tables. */
3497 /* Parse an unwind_fnstart directive. Simply records the current location. */
3500 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3502 demand_empty_rest_of_line ();
3503 if (unwind.proc_start)
3505 as_bad (_("duplicate .fnstart directive"));
3509 /* Mark the start of the function. */
3510 unwind.proc_start = expr_build_dot ();
3512 /* Reset the rest of the unwind info. */
3513 unwind.opcode_count = 0;
3514 unwind.table_entry = NULL;
3515 unwind.personality_routine = NULL;
3516 unwind.personality_index = -1;
3517 unwind.frame_size = 0;
3518 unwind.fp_offset = 0;
3519 unwind.fp_reg = REG_SP;
3521 unwind.sp_restored = 0;
3525 /* Parse a handlerdata directive. Creates the exception handling table entry
3526 for the function. */
3529 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3531 demand_empty_rest_of_line ();
3532 if (!unwind.proc_start)
3533 as_bad (MISSING_FNSTART);
3535 if (unwind.table_entry)
3536 as_bad (_("duplicate .handlerdata directive"));
3538 create_unwind_entry (1);
3541 /* Parse an unwind_fnend directive. Generates the index table entry. */
3544 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3549 unsigned int marked_pr_dependency;
3551 demand_empty_rest_of_line ();
3553 if (!unwind.proc_start)
3555 as_bad (_(".fnend directive without .fnstart"));
3559 /* Add eh table entry. */
3560 if (unwind.table_entry == NULL)
3561 val = create_unwind_entry (0);
3565 /* Add index table entry. This is two words. */
3566 start_unwind_section (unwind.saved_seg, 1);
3567 frag_align (2, 0, 0);
3568 record_alignment (now_seg, 2);
3570 ptr = frag_more (8);
3572 where = frag_now_fix () - 8;
3574 /* Self relative offset of the function start. */
3575 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3576 BFD_RELOC_ARM_PREL31);
3578 /* Indicate dependency on EHABI-defined personality routines to the
3579 linker, if it hasn't been done already. */
3580 marked_pr_dependency
3581 = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3582 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3583 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3585 static const char *const name[] =
3587 "__aeabi_unwind_cpp_pr0",
3588 "__aeabi_unwind_cpp_pr1",
3589 "__aeabi_unwind_cpp_pr2"
3591 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3592 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3593 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3594 |= 1 << unwind.personality_index;
3598 /* Inline exception table entry. */
3599 md_number_to_chars (ptr + 4, val, 4);
3601 /* Self relative offset of the table entry. */
3602 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3603 BFD_RELOC_ARM_PREL31);
3605 /* Restore the original section. */
3606 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3608 unwind.proc_start = NULL;
3612 /* Parse an unwind_cantunwind directive. */
3615 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3617 demand_empty_rest_of_line ();
3618 if (!unwind.proc_start)
3619 as_bad (MISSING_FNSTART);
3621 if (unwind.personality_routine || unwind.personality_index != -1)
3622 as_bad (_("personality routine specified for cantunwind frame"));
3624 unwind.personality_index = -2;
3628 /* Parse a personalityindex directive. */
3631 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3635 if (!unwind.proc_start)
3636 as_bad (MISSING_FNSTART);
3638 if (unwind.personality_routine || unwind.personality_index != -1)
3639 as_bad (_("duplicate .personalityindex directive"));
3643 if (exp.X_op != O_constant
3644 || exp.X_add_number < 0 || exp.X_add_number > 15)
3646 as_bad (_("bad personality routine number"));
3647 ignore_rest_of_line ();
3651 unwind.personality_index = exp.X_add_number;
3653 demand_empty_rest_of_line ();
3657 /* Parse a personality directive. */
3660 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3664 if (!unwind.proc_start)
3665 as_bad (MISSING_FNSTART);
3667 if (unwind.personality_routine || unwind.personality_index != -1)
3668 as_bad (_("duplicate .personality directive"));
3670 name = input_line_pointer;
3671 c = get_symbol_end ();
3672 p = input_line_pointer;
3673 unwind.personality_routine = symbol_find_or_make (name);
3675 demand_empty_rest_of_line ();
3679 /* Parse a directive saving core registers. */
3682 s_arm_unwind_save_core (void)
3688 range = parse_reg_list (&input_line_pointer);
3691 as_bad (_("expected register list"));
3692 ignore_rest_of_line ();
3696 demand_empty_rest_of_line ();
3698 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3699 into .unwind_save {..., sp...}. We aren't bothered about the value of
3700 ip because it is clobbered by calls. */
3701 if (unwind.sp_restored && unwind.fp_reg == 12
3702 && (range & 0x3000) == 0x1000)
3704 unwind.opcode_count--;
3705 unwind.sp_restored = 0;
3706 range = (range | 0x2000) & ~0x1000;
3707 unwind.pending_offset = 0;
3713 /* See if we can use the short opcodes. These pop a block of up to 8
3714 registers starting with r4, plus maybe r14. */
3715 for (n = 0; n < 8; n++)
3717 /* Break at the first non-saved register. */
3718 if ((range & (1 << (n + 4))) == 0)
3721 /* See if there are any other bits set. */
3722 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3724 /* Use the long form. */
3725 op = 0x8000 | ((range >> 4) & 0xfff);
3726 add_unwind_opcode (op, 2);
3730 /* Use the short form. */
3732 op = 0xa8; /* Pop r14. */
3734 op = 0xa0; /* Do not pop r14. */
3736 add_unwind_opcode (op, 1);
3743 op = 0xb100 | (range & 0xf);
3744 add_unwind_opcode (op, 2);
3747 /* Record the number of bytes pushed. */
3748 for (n = 0; n < 16; n++)
3750 if (range & (1 << n))
3751 unwind.frame_size += 4;
3756 /* Parse a directive saving FPA registers. */
3759 s_arm_unwind_save_fpa (int reg)
3765 /* Get Number of registers to transfer. */
3766 if (skip_past_comma (&input_line_pointer) != FAIL)
3769 exp.X_op = O_illegal;
3771 if (exp.X_op != O_constant)
3773 as_bad (_("expected , <constant>"));
3774 ignore_rest_of_line ();
3778 num_regs = exp.X_add_number;
3780 if (num_regs < 1 || num_regs > 4)
3782 as_bad (_("number of registers must be in the range [1:4]"));
3783 ignore_rest_of_line ();
3787 demand_empty_rest_of_line ();
3792 op = 0xb4 | (num_regs - 1);
3793 add_unwind_opcode (op, 1);
3798 op = 0xc800 | (reg << 4) | (num_regs - 1);
3799 add_unwind_opcode (op, 2);
3801 unwind.frame_size += num_regs * 12;
3805 /* Parse a directive saving VFP registers for ARMv6 and above. */
3808 s_arm_unwind_save_vfp_armv6 (void)
3813 int num_vfpv3_regs = 0;
3814 int num_regs_below_16;
3816 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3819 as_bad (_("expected register list"));
3820 ignore_rest_of_line ();
3824 demand_empty_rest_of_line ();
3826 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3827 than FSTMX/FLDMX-style ones). */
3829 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3831 num_vfpv3_regs = count;
3832 else if (start + count > 16)
3833 num_vfpv3_regs = start + count - 16;
3835 if (num_vfpv3_regs > 0)
3837 int start_offset = start > 16 ? start - 16 : 0;
3838 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3839 add_unwind_opcode (op, 2);
3842 /* Generate opcode for registers numbered in the range 0 .. 15. */
3843 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3844 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3845 if (num_regs_below_16 > 0)
3847 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3848 add_unwind_opcode (op, 2);
3851 unwind.frame_size += count * 8;
3855 /* Parse a directive saving VFP registers for pre-ARMv6. */
3858 s_arm_unwind_save_vfp (void)
3864 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
3867 as_bad (_("expected register list"));
3868 ignore_rest_of_line ();
3872 demand_empty_rest_of_line ();
3877 op = 0xb8 | (count - 1);
3878 add_unwind_opcode (op, 1);
3883 op = 0xb300 | (reg << 4) | (count - 1);
3884 add_unwind_opcode (op, 2);
3886 unwind.frame_size += count * 8 + 4;
3890 /* Parse a directive saving iWMMXt data registers. */
3893 s_arm_unwind_save_mmxwr (void)
3901 if (*input_line_pointer == '{')
3902 input_line_pointer++;
3906 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3910 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3915 as_tsktsk (_("register list not in ascending order"));
3918 if (*input_line_pointer == '-')
3920 input_line_pointer++;
3921 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3924 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3927 else if (reg >= hi_reg)
3929 as_bad (_("bad register range"));
3932 for (; reg < hi_reg; reg++)
3936 while (skip_past_comma (&input_line_pointer) != FAIL);
3938 if (*input_line_pointer == '}')
3939 input_line_pointer++;
3941 demand_empty_rest_of_line ();
3943 /* Generate any deferred opcodes because we're going to be looking at
3945 flush_pending_unwind ();
3947 for (i = 0; i < 16; i++)
3949 if (mask & (1 << i))
3950 unwind.frame_size += 8;
3953 /* Attempt to combine with a previous opcode. We do this because gcc
3954 likes to output separate unwind directives for a single block of
3956 if (unwind.opcode_count > 0)
3958 i = unwind.opcodes[unwind.opcode_count - 1];
3959 if ((i & 0xf8) == 0xc0)
3962 /* Only merge if the blocks are contiguous. */
3965 if ((mask & 0xfe00) == (1 << 9))
3967 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3968 unwind.opcode_count--;
3971 else if (i == 6 && unwind.opcode_count >= 2)
3973 i = unwind.opcodes[unwind.opcode_count - 2];
3977 op = 0xffff << (reg - 1);
3979 && ((mask & op) == (1u << (reg - 1))))
3981 op = (1 << (reg + i + 1)) - 1;
3982 op &= ~((1 << reg) - 1);
3984 unwind.opcode_count -= 2;
3991 /* We want to generate opcodes in the order the registers have been
3992 saved, ie. descending order. */
3993 for (reg = 15; reg >= -1; reg--)
3995 /* Save registers in blocks. */
3997 || !(mask & (1 << reg)))
3999 /* We found an unsaved reg. Generate opcodes to save the
4006 op = 0xc0 | (hi_reg - 10);
4007 add_unwind_opcode (op, 1);
4012 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4013 add_unwind_opcode (op, 2);
4022 ignore_rest_of_line ();
4026 s_arm_unwind_save_mmxwcg (void)
4033 if (*input_line_pointer == '{')
4034 input_line_pointer++;
4036 skip_whitespace (input_line_pointer);
4040 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4044 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4050 as_tsktsk (_("register list not in ascending order"));
4053 if (*input_line_pointer == '-')
4055 input_line_pointer++;
4056 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4059 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4062 else if (reg >= hi_reg)
4064 as_bad (_("bad register range"));
4067 for (; reg < hi_reg; reg++)
4071 while (skip_past_comma (&input_line_pointer) != FAIL);
4073 if (*input_line_pointer == '}')
4074 input_line_pointer++;
4076 demand_empty_rest_of_line ();
4078 /* Generate any deferred opcodes because we're going to be looking at
4080 flush_pending_unwind ();
4082 for (reg = 0; reg < 16; reg++)
4084 if (mask & (1 << reg))
4085 unwind.frame_size += 4;
4088 add_unwind_opcode (op, 2);
4091 ignore_rest_of_line ();
4095 /* Parse an unwind_save directive.
4096 If the argument is non-zero, this is a .vsave directive. */
4099 s_arm_unwind_save (int arch_v6)
4102 struct reg_entry *reg;
4103 bfd_boolean had_brace = FALSE;
4105 if (!unwind.proc_start)
4106 as_bad (MISSING_FNSTART);
4108 /* Figure out what sort of save we have. */
4109 peek = input_line_pointer;
4117 reg = arm_reg_parse_multi (&peek);
4121 as_bad (_("register expected"));
4122 ignore_rest_of_line ();
4131 as_bad (_("FPA .unwind_save does not take a register list"));
4132 ignore_rest_of_line ();
4135 input_line_pointer = peek;
4136 s_arm_unwind_save_fpa (reg->number);
4139 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4142 s_arm_unwind_save_vfp_armv6 ();
4144 s_arm_unwind_save_vfp ();
4146 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4147 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4150 as_bad (_(".unwind_save does not support this kind of register"));
4151 ignore_rest_of_line ();
4156 /* Parse an unwind_movsp directive. */
4159 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4165 if (!unwind.proc_start)
4166 as_bad (MISSING_FNSTART);
4168 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4171 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4172 ignore_rest_of_line ();
4176 /* Optional constant. */
4177 if (skip_past_comma (&input_line_pointer) != FAIL)
4179 if (immediate_for_directive (&offset) == FAIL)
4185 demand_empty_rest_of_line ();
4187 if (reg == REG_SP || reg == REG_PC)
4189 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4193 if (unwind.fp_reg != REG_SP)
4194 as_bad (_("unexpected .unwind_movsp directive"));
4196 /* Generate opcode to restore the value. */
4198 add_unwind_opcode (op, 1);
4200 /* Record the information for later. */
4201 unwind.fp_reg = reg;
4202 unwind.fp_offset = unwind.frame_size - offset;
4203 unwind.sp_restored = 1;
4206 /* Parse an unwind_pad directive. */
4209 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4213 if (!unwind.proc_start)
4214 as_bad (MISSING_FNSTART);
4216 if (immediate_for_directive (&offset) == FAIL)
4221 as_bad (_("stack increment must be multiple of 4"));
4222 ignore_rest_of_line ();
4226 /* Don't generate any opcodes, just record the details for later. */
4227 unwind.frame_size += offset;
4228 unwind.pending_offset += offset;
4230 demand_empty_rest_of_line ();
4233 /* Parse an unwind_setfp directive. */
4236 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4242 if (!unwind.proc_start)
4243 as_bad (MISSING_FNSTART);
4245 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4246 if (skip_past_comma (&input_line_pointer) == FAIL)
4249 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4251 if (fp_reg == FAIL || sp_reg == FAIL)
4253 as_bad (_("expected <reg>, <reg>"));
4254 ignore_rest_of_line ();
4258 /* Optional constant. */
4259 if (skip_past_comma (&input_line_pointer) != FAIL)
4261 if (immediate_for_directive (&offset) == FAIL)
4267 demand_empty_rest_of_line ();
4269 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4271 as_bad (_("register must be either sp or set by a previous"
4272 "unwind_movsp directive"));
4276 /* Don't generate any opcodes, just record the information for later. */
4277 unwind.fp_reg = fp_reg;
4279 if (sp_reg == REG_SP)
4280 unwind.fp_offset = unwind.frame_size - offset;
4282 unwind.fp_offset -= offset;
4285 /* Parse an unwind_raw directive. */
4288 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4291 /* This is an arbitrary limit. */
4292 unsigned char op[16];
4295 if (!unwind.proc_start)
4296 as_bad (MISSING_FNSTART);
4299 if (exp.X_op == O_constant
4300 && skip_past_comma (&input_line_pointer) != FAIL)
4302 unwind.frame_size += exp.X_add_number;
4306 exp.X_op = O_illegal;
4308 if (exp.X_op != O_constant)
4310 as_bad (_("expected <offset>, <opcode>"));
4311 ignore_rest_of_line ();
4317 /* Parse the opcode. */
4322 as_bad (_("unwind opcode too long"));
4323 ignore_rest_of_line ();
4325 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4327 as_bad (_("invalid unwind opcode"));
4328 ignore_rest_of_line ();
4331 op[count++] = exp.X_add_number;
4333 /* Parse the next byte. */
4334 if (skip_past_comma (&input_line_pointer) == FAIL)
4340 /* Add the opcode bytes in reverse order. */
4342 add_unwind_opcode (op[count], 1);
4344 demand_empty_rest_of_line ();
4348 /* Parse a .eabi_attribute directive. */
4351 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4353 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4355 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4356 attributes_set_explicitly[tag] = 1;
4359 /* Emit a tls fix for the symbol. */
4362 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4366 #ifdef md_flush_pending_output
4367 md_flush_pending_output ();
4370 #ifdef md_cons_align
4374 /* Since we're just labelling the code, there's no need to define a
4377 p = obstack_next_free (&frchain_now->frch_obstack);
4378 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4379 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4380 : BFD_RELOC_ARM_TLS_DESCSEQ);
4382 #endif /* OBJ_ELF */
4384 static void s_arm_arch (int);
4385 static void s_arm_object_arch (int);
4386 static void s_arm_cpu (int);
4387 static void s_arm_fpu (int);
4388 static void s_arm_arch_extension (int);
4393 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4400 if (exp.X_op == O_symbol)
4401 exp.X_op = O_secrel;
4403 emit_expr (&exp, 4);
4405 while (*input_line_pointer++ == ',');
4407 input_line_pointer--;
4408 demand_empty_rest_of_line ();
4412 /* This table describes all the machine specific pseudo-ops the assembler
4413 has to support. The fields are:
4414 pseudo-op name without dot
4415 function to call to execute this pseudo-op
4416 Integer arg to pass to the function. */
4418 const pseudo_typeS md_pseudo_table[] =
4420 /* Never called because '.req' does not start a line. */
4421 { "req", s_req, 0 },
4422 /* Following two are likewise never called. */
4425 { "unreq", s_unreq, 0 },
4426 { "bss", s_bss, 0 },
4427 { "align", s_align, 0 },
4428 { "arm", s_arm, 0 },
4429 { "thumb", s_thumb, 0 },
4430 { "code", s_code, 0 },
4431 { "force_thumb", s_force_thumb, 0 },
4432 { "thumb_func", s_thumb_func, 0 },
4433 { "thumb_set", s_thumb_set, 0 },
4434 { "even", s_even, 0 },
4435 { "ltorg", s_ltorg, 0 },
4436 { "pool", s_ltorg, 0 },
4437 { "syntax", s_syntax, 0 },
4438 { "cpu", s_arm_cpu, 0 },
4439 { "arch", s_arm_arch, 0 },
4440 { "object_arch", s_arm_object_arch, 0 },
4441 { "fpu", s_arm_fpu, 0 },
4442 { "arch_extension", s_arm_arch_extension, 0 },
4444 { "word", s_arm_elf_cons, 4 },
4445 { "long", s_arm_elf_cons, 4 },
4446 { "inst.n", s_arm_elf_inst, 2 },
4447 { "inst.w", s_arm_elf_inst, 4 },
4448 { "inst", s_arm_elf_inst, 0 },
4449 { "rel31", s_arm_rel31, 0 },
4450 { "fnstart", s_arm_unwind_fnstart, 0 },
4451 { "fnend", s_arm_unwind_fnend, 0 },
4452 { "cantunwind", s_arm_unwind_cantunwind, 0 },
4453 { "personality", s_arm_unwind_personality, 0 },
4454 { "personalityindex", s_arm_unwind_personalityindex, 0 },
4455 { "handlerdata", s_arm_unwind_handlerdata, 0 },
4456 { "save", s_arm_unwind_save, 0 },
4457 { "vsave", s_arm_unwind_save, 1 },
4458 { "movsp", s_arm_unwind_movsp, 0 },
4459 { "pad", s_arm_unwind_pad, 0 },
4460 { "setfp", s_arm_unwind_setfp, 0 },
4461 { "unwind_raw", s_arm_unwind_raw, 0 },
4462 { "eabi_attribute", s_arm_eabi_attribute, 0 },
4463 { "tlsdescseq", s_arm_tls_descseq, 0 },
4467 /* These are used for dwarf. */
4471 /* These are used for dwarf2. */
4472 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4473 { "loc", dwarf2_directive_loc, 0 },
4474 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4476 { "extend", float_cons, 'x' },
4477 { "ldouble", float_cons, 'x' },
4478 { "packed", float_cons, 'p' },
4480 {"secrel32", pe_directive_secrel, 0},
4485 /* Parser functions used exclusively in instruction operands. */
4487 /* Generic immediate-value read function for use in insn parsing.
4488 STR points to the beginning of the immediate (the leading #);
4489 VAL receives the value; if the value is outside [MIN, MAX]
4490 issue an error. PREFIX_OPT is true if the immediate prefix is
4494 parse_immediate (char **str, int *val, int min, int max,
4495 bfd_boolean prefix_opt)
4498 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4499 if (exp.X_op != O_constant)
4501 inst.error = _("constant expression required");
4505 if (exp.X_add_number < min || exp.X_add_number > max)
4507 inst.error = _("immediate value out of range");
4511 *val = exp.X_add_number;
4515 /* Less-generic immediate-value read function with the possibility of loading a
4516 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4517 instructions. Puts the result directly in inst.operands[i]. */
4520 parse_big_immediate (char **str, int i)
4525 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4527 if (exp.X_op == O_constant)
4529 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4530 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4531 O_constant. We have to be careful not to break compilation for
4532 32-bit X_add_number, though. */
4533 if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4535 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4536 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4537 inst.operands[i].regisimm = 1;
4540 else if (exp.X_op == O_big
4541 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4543 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4545 /* Bignums have their least significant bits in
4546 generic_bignum[0]. Make sure we put 32 bits in imm and
4547 32 bits in reg, in a (hopefully) portable way. */
4548 gas_assert (parts != 0);
4550 /* Make sure that the number is not too big.
4551 PR 11972: Bignums can now be sign-extended to the
4552 size of a .octa so check that the out of range bits
4553 are all zero or all one. */
4554 if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4556 LITTLENUM_TYPE m = -1;
4558 if (generic_bignum[parts * 2] != 0
4559 && generic_bignum[parts * 2] != m)
4562 for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4563 if (generic_bignum[j] != generic_bignum[j-1])
4567 inst.operands[i].imm = 0;
4568 for (j = 0; j < parts; j++, idx++)
4569 inst.operands[i].imm |= generic_bignum[idx]
4570 << (LITTLENUM_NUMBER_OF_BITS * j);
4571 inst.operands[i].reg = 0;
4572 for (j = 0; j < parts; j++, idx++)
4573 inst.operands[i].reg |= generic_bignum[idx]
4574 << (LITTLENUM_NUMBER_OF_BITS * j);
4575 inst.operands[i].regisimm = 1;
4585 /* Returns the pseudo-register number of an FPA immediate constant,
4586 or FAIL if there isn't a valid constant here. */
4589 parse_fpa_immediate (char ** str)
4591 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4597 /* First try and match exact strings, this is to guarantee
4598 that some formats will work even for cross assembly. */
4600 for (i = 0; fp_const[i]; i++)
4602 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4606 *str += strlen (fp_const[i]);
4607 if (is_end_of_line[(unsigned char) **str])
4613 /* Just because we didn't get a match doesn't mean that the constant
4614 isn't valid, just that it is in a format that we don't
4615 automatically recognize. Try parsing it with the standard
4616 expression routines. */
4618 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4620 /* Look for a raw floating point number. */
4621 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4622 && is_end_of_line[(unsigned char) *save_in])
4624 for (i = 0; i < NUM_FLOAT_VALS; i++)
4626 for (j = 0; j < MAX_LITTLENUMS; j++)
4628 if (words[j] != fp_values[i][j])
4632 if (j == MAX_LITTLENUMS)
4640 /* Try and parse a more complex expression, this will probably fail
4641 unless the code uses a floating point prefix (eg "0f"). */
4642 save_in = input_line_pointer;
4643 input_line_pointer = *str;
4644 if (expression (&exp) == absolute_section
4645 && exp.X_op == O_big
4646 && exp.X_add_number < 0)
4648 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4650 if (gen_to_words (words, 5, (long) 15) == 0)
4652 for (i = 0; i < NUM_FLOAT_VALS; i++)
4654 for (j = 0; j < MAX_LITTLENUMS; j++)
4656 if (words[j] != fp_values[i][j])
4660 if (j == MAX_LITTLENUMS)
4662 *str = input_line_pointer;
4663 input_line_pointer = save_in;
4670 *str = input_line_pointer;
4671 input_line_pointer = save_in;
4672 inst.error = _("invalid FPA immediate expression");
4676 /* Returns 1 if a number has "quarter-precision" float format
4677 0baBbbbbbc defgh000 00000000 00000000. */
4680 is_quarter_float (unsigned imm)
4682 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4683 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4686 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4687 0baBbbbbbc defgh000 00000000 00000000.
4688 The zero and minus-zero cases need special handling, since they can't be
4689 encoded in the "quarter-precision" float format, but can nonetheless be
4690 loaded as integer constants. */
4693 parse_qfloat_immediate (char **ccp, int *immed)
4697 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4698 int found_fpchar = 0;
4700 skip_past_char (&str, '#');
4702 /* We must not accidentally parse an integer as a floating-point number. Make
4703 sure that the value we parse is not an integer by checking for special
4704 characters '.' or 'e'.
4705 FIXME: This is a horrible hack, but doing better is tricky because type
4706 information isn't in a very usable state at parse time. */
4708 skip_whitespace (fpnum);
4710 if (strncmp (fpnum, "0x", 2) == 0)
4714 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4715 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4725 if ((str = atof_ieee (str, 's', words)) != NULL)
4727 unsigned fpword = 0;
4730 /* Our FP word must be 32 bits (single-precision FP). */
4731 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4733 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4737 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4750 /* Shift operands. */
4753 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4756 struct asm_shift_name
4759 enum shift_kind kind;
4762 /* Third argument to parse_shift. */
4763 enum parse_shift_mode
4765 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4766 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4767 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4768 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4769 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4772 /* Parse a <shift> specifier on an ARM data processing instruction.
4773 This has three forms:
4775 (LSL|LSR|ASL|ASR|ROR) Rs
4776 (LSL|LSR|ASL|ASR|ROR) #imm
4779 Note that ASL is assimilated to LSL in the instruction encoding, and
4780 RRX to ROR #0 (which cannot be written as such). */
4783 parse_shift (char **str, int i, enum parse_shift_mode mode)
4785 const struct asm_shift_name *shift_name;
4786 enum shift_kind shift;
4791 for (p = *str; ISALPHA (*p); p++)
4796 inst.error = _("shift expression expected");
4800 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4803 if (shift_name == NULL)
4805 inst.error = _("shift expression expected");
4809 shift = shift_name->kind;
4813 case NO_SHIFT_RESTRICT:
4814 case SHIFT_IMMEDIATE: break;
4816 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4817 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4819 inst.error = _("'LSL' or 'ASR' required");
4824 case SHIFT_LSL_IMMEDIATE:
4825 if (shift != SHIFT_LSL)
4827 inst.error = _("'LSL' required");
4832 case SHIFT_ASR_IMMEDIATE:
4833 if (shift != SHIFT_ASR)
4835 inst.error = _("'ASR' required");
4843 if (shift != SHIFT_RRX)
4845 /* Whitespace can appear here if the next thing is a bare digit. */
4846 skip_whitespace (p);
4848 if (mode == NO_SHIFT_RESTRICT
4849 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4851 inst.operands[i].imm = reg;
4852 inst.operands[i].immisreg = 1;
4854 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4857 inst.operands[i].shift_kind = shift;
4858 inst.operands[i].shifted = 1;
4863 /* Parse a <shifter_operand> for an ARM data processing instruction:
4866 #<immediate>, <rotate>
4870 where <shift> is defined by parse_shift above, and <rotate> is a
4871 multiple of 2 between 0 and 30. Validation of immediate operands
4872 is deferred to md_apply_fix. */
4875 parse_shifter_operand (char **str, int i)
4880 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4882 inst.operands[i].reg = value;
4883 inst.operands[i].isreg = 1;
4885 /* parse_shift will override this if appropriate */
4886 inst.reloc.exp.X_op = O_constant;
4887 inst.reloc.exp.X_add_number = 0;
4889 if (skip_past_comma (str) == FAIL)
4892 /* Shift operation on register. */
4893 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4896 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4899 if (skip_past_comma (str) == SUCCESS)
4901 /* #x, y -- ie explicit rotation by Y. */
4902 if (my_get_expression (&exp, str, GE_NO_PREFIX))
4905 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4907 inst.error = _("constant expression expected");
4911 value = exp.X_add_number;
4912 if (value < 0 || value > 30 || value % 2 != 0)
4914 inst.error = _("invalid rotation");
4917 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4919 inst.error = _("invalid constant");
4923 /* Encode as specified. */
4924 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
4928 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4929 inst.reloc.pc_rel = 0;
4933 /* Group relocation information. Each entry in the table contains the
4934 textual name of the relocation as may appear in assembler source
4935 and must end with a colon.
4936 Along with this textual name are the relocation codes to be used if
4937 the corresponding instruction is an ALU instruction (ADD or SUB only),
4938 an LDR, an LDRS, or an LDC. */
4940 struct group_reloc_table_entry
4951 /* Varieties of non-ALU group relocation. */
4958 static struct group_reloc_table_entry group_reloc_table[] =
4959 { /* Program counter relative: */
4961 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4966 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4967 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4968 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4969 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4971 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4976 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4977 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4978 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4979 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4981 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4982 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4983 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4984 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4985 /* Section base relative */
4987 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4992 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4993 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4994 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4995 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4997 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
5002 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
5003 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
5004 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
5005 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
5007 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
5008 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
5009 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
5010 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
5012 /* Given the address of a pointer pointing to the textual name of a group
5013 relocation as may appear in assembler source, attempt to find its details
5014 in group_reloc_table. The pointer will be updated to the character after
5015 the trailing colon. On failure, FAIL will be returned; SUCCESS
5016 otherwise. On success, *entry will be updated to point at the relevant
5017 group_reloc_table entry. */
5020 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5023 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5025 int length = strlen (group_reloc_table[i].name);
5027 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5028 && (*str)[length] == ':')
5030 *out = &group_reloc_table[i];
5031 *str += (length + 1);
5039 /* Parse a <shifter_operand> for an ARM data processing instruction
5040 (as for parse_shifter_operand) where group relocations are allowed:
5043 #<immediate>, <rotate>
5044 #:<group_reloc>:<expression>
5048 where <group_reloc> is one of the strings defined in group_reloc_table.
5049 The hashes are optional.
5051 Everything else is as for parse_shifter_operand. */
5053 static parse_operand_result
5054 parse_shifter_operand_group_reloc (char **str, int i)
5056 /* Determine if we have the sequence of characters #: or just :
5057 coming next. If we do, then we check for a group relocation.
5058 If we don't, punt the whole lot to parse_shifter_operand. */
5060 if (((*str)[0] == '#' && (*str)[1] == ':')
5061 || (*str)[0] == ':')
5063 struct group_reloc_table_entry *entry;
5065 if ((*str)[0] == '#')
5070 /* Try to parse a group relocation. Anything else is an error. */
5071 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5073 inst.error = _("unknown group relocation");
5074 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5077 /* We now have the group relocation table entry corresponding to
5078 the name in the assembler source. Next, we parse the expression. */
5079 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5080 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5082 /* Record the relocation type (always the ALU variant here). */
5083 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5084 gas_assert (inst.reloc.type != 0);
5086 return PARSE_OPERAND_SUCCESS;
5089 return parse_shifter_operand (str, i) == SUCCESS
5090 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5092 /* Never reached. */
5095 /* Parse a Neon alignment expression. Information is written to
5096 inst.operands[i]. We assume the initial ':' has been skipped.
5098 align .imm = align << 8, .immisalign=1, .preind=0 */
5099 static parse_operand_result
5100 parse_neon_alignment (char **str, int i)
5105 my_get_expression (&exp, &p, GE_NO_PREFIX);
5107 if (exp.X_op != O_constant)
5109 inst.error = _("alignment must be constant");
5110 return PARSE_OPERAND_FAIL;
5113 inst.operands[i].imm = exp.X_add_number << 8;
5114 inst.operands[i].immisalign = 1;
5115 /* Alignments are not pre-indexes. */
5116 inst.operands[i].preind = 0;
5119 return PARSE_OPERAND_SUCCESS;
5122 /* Parse all forms of an ARM address expression. Information is written
5123 to inst.operands[i] and/or inst.reloc.
5125 Preindexed addressing (.preind=1):
5127 [Rn, #offset] .reg=Rn .reloc.exp=offset
5128 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5129 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5130 .shift_kind=shift .reloc.exp=shift_imm
5132 These three may have a trailing ! which causes .writeback to be set also.
5134 Postindexed addressing (.postind=1, .writeback=1):
5136 [Rn], #offset .reg=Rn .reloc.exp=offset
5137 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5138 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5139 .shift_kind=shift .reloc.exp=shift_imm
5141 Unindexed addressing (.preind=0, .postind=0):
5143 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5147 [Rn]{!} shorthand for [Rn,#0]{!}
5148 =immediate .isreg=0 .reloc.exp=immediate
5149 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5151 It is the caller's responsibility to check for addressing modes not
5152 supported by the instruction, and to set inst.reloc.type. */
5154 static parse_operand_result
5155 parse_address_main (char **str, int i, int group_relocations,
5156 group_reloc_type group_type)
5161 if (skip_past_char (&p, '[') == FAIL)
5163 if (skip_past_char (&p, '=') == FAIL)
5165 /* Bare address - translate to PC-relative offset. */
5166 inst.reloc.pc_rel = 1;
5167 inst.operands[i].reg = REG_PC;
5168 inst.operands[i].isreg = 1;
5169 inst.operands[i].preind = 1;
5171 /* Otherwise a load-constant pseudo op, no special treatment needed here. */
5173 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5174 return PARSE_OPERAND_FAIL;
5177 return PARSE_OPERAND_SUCCESS;
5180 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5181 skip_whitespace (p);
5183 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5185 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5186 return PARSE_OPERAND_FAIL;
5188 inst.operands[i].reg = reg;
5189 inst.operands[i].isreg = 1;
5191 if (skip_past_comma (&p) == SUCCESS)
5193 inst.operands[i].preind = 1;
5196 else if (*p == '-') p++, inst.operands[i].negative = 1;
5198 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5200 inst.operands[i].imm = reg;
5201 inst.operands[i].immisreg = 1;
5203 if (skip_past_comma (&p) == SUCCESS)
5204 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5205 return PARSE_OPERAND_FAIL;
5207 else if (skip_past_char (&p, ':') == SUCCESS)
5209 /* FIXME: '@' should be used here, but it's filtered out by generic
5210 code before we get to see it here. This may be subject to
5212 parse_operand_result result = parse_neon_alignment (&p, i);
5214 if (result != PARSE_OPERAND_SUCCESS)
5219 if (inst.operands[i].negative)
5221 inst.operands[i].negative = 0;
5225 if (group_relocations
5226 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5228 struct group_reloc_table_entry *entry;
5230 /* Skip over the #: or : sequence. */
5236 /* Try to parse a group relocation. Anything else is an
5238 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5240 inst.error = _("unknown group relocation");
5241 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5244 /* We now have the group relocation table entry corresponding to
5245 the name in the assembler source. Next, we parse the
5247 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5248 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5250 /* Record the relocation type. */
5254 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5258 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5262 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5269 if (inst.reloc.type == 0)
5271 inst.error = _("this group relocation is not allowed on this instruction");
5272 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5278 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5279 return PARSE_OPERAND_FAIL;
5280 /* If the offset is 0, find out if it's a +0 or -0. */
5281 if (inst.reloc.exp.X_op == O_constant
5282 && inst.reloc.exp.X_add_number == 0)
5284 skip_whitespace (q);
5288 skip_whitespace (q);
5291 inst.operands[i].negative = 1;
5296 else if (skip_past_char (&p, ':') == SUCCESS)
5298 /* FIXME: '@' should be used here, but it's filtered out by generic code
5299 before we get to see it here. This may be subject to change. */
5300 parse_operand_result result = parse_neon_alignment (&p, i);
5302 if (result != PARSE_OPERAND_SUCCESS)
5306 if (skip_past_char (&p, ']') == FAIL)
5308 inst.error = _("']' expected");
5309 return PARSE_OPERAND_FAIL;
5312 if (skip_past_char (&p, '!') == SUCCESS)
5313 inst.operands[i].writeback = 1;
5315 else if (skip_past_comma (&p) == SUCCESS)
5317 if (skip_past_char (&p, '{') == SUCCESS)
5319 /* [Rn], {expr} - unindexed, with option */
5320 if (parse_immediate (&p, &inst.operands[i].imm,
5321 0, 255, TRUE) == FAIL)
5322 return PARSE_OPERAND_FAIL;
5324 if (skip_past_char (&p, '}') == FAIL)
5326 inst.error = _("'}' expected at end of 'option' field");
5327 return PARSE_OPERAND_FAIL;
5329 if (inst.operands[i].preind)
5331 inst.error = _("cannot combine index with option");
5332 return PARSE_OPERAND_FAIL;
5335 return PARSE_OPERAND_SUCCESS;
5339 inst.operands[i].postind = 1;
5340 inst.operands[i].writeback = 1;
5342 if (inst.operands[i].preind)
5344 inst.error = _("cannot combine pre- and post-indexing");
5345 return PARSE_OPERAND_FAIL;
5349 else if (*p == '-') p++, inst.operands[i].negative = 1;
5351 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5353 /* We might be using the immediate for alignment already. If we
5354 are, OR the register number into the low-order bits. */
5355 if (inst.operands[i].immisalign)
5356 inst.operands[i].imm |= reg;
5358 inst.operands[i].imm = reg;
5359 inst.operands[i].immisreg = 1;
5361 if (skip_past_comma (&p) == SUCCESS)
5362 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5363 return PARSE_OPERAND_FAIL;
5368 if (inst.operands[i].negative)
5370 inst.operands[i].negative = 0;
5373 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5374 return PARSE_OPERAND_FAIL;
5375 /* If the offset is 0, find out if it's a +0 or -0. */
5376 if (inst.reloc.exp.X_op == O_constant
5377 && inst.reloc.exp.X_add_number == 0)
5379 skip_whitespace (q);
5383 skip_whitespace (q);
5386 inst.operands[i].negative = 1;
5392 /* If at this point neither .preind nor .postind is set, we have a
5393 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5394 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5396 inst.operands[i].preind = 1;
5397 inst.reloc.exp.X_op = O_constant;
5398 inst.reloc.exp.X_add_number = 0;
5401 return PARSE_OPERAND_SUCCESS;
5405 parse_address (char **str, int i)
5407 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5411 static parse_operand_result
5412 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5414 return parse_address_main (str, i, 1, type);
5417 /* Parse an operand for a MOVW or MOVT instruction. */
5419 parse_half (char **str)
5424 skip_past_char (&p, '#');
5425 if (strncasecmp (p, ":lower16:", 9) == 0)
5426 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5427 else if (strncasecmp (p, ":upper16:", 9) == 0)
5428 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5430 if (inst.reloc.type != BFD_RELOC_UNUSED)
5433 skip_whitespace (p);
5436 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5439 if (inst.reloc.type == BFD_RELOC_UNUSED)
5441 if (inst.reloc.exp.X_op != O_constant)
5443 inst.error = _("constant expression expected");
5446 if (inst.reloc.exp.X_add_number < 0
5447 || inst.reloc.exp.X_add_number > 0xffff)
5449 inst.error = _("immediate value out of range");
5457 /* Miscellaneous. */
5459 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5460 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5462 parse_psr (char **str, bfd_boolean lhs)
5465 unsigned long psr_field;
5466 const struct asm_psr *psr;
5468 bfd_boolean is_apsr = FALSE;
5469 bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5471 /* PR gas/12698: If the user has specified -march=all then m_profile will
5472 be TRUE, but we want to ignore it in this case as we are building for any
5473 CPU type, including non-m variants. */
5474 if (selected_cpu.core == arm_arch_any.core)
5477 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5478 feature for ease of use and backwards compatibility. */
5480 if (strncasecmp (p, "SPSR", 4) == 0)
5483 goto unsupported_psr;
5485 psr_field = SPSR_BIT;
5487 else if (strncasecmp (p, "CPSR", 4) == 0)
5490 goto unsupported_psr;
5494 else if (strncasecmp (p, "APSR", 4) == 0)
5496 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5497 and ARMv7-R architecture CPUs. */
5506 while (ISALNUM (*p) || *p == '_');
5508 if (strncasecmp (start, "iapsr", 5) == 0
5509 || strncasecmp (start, "eapsr", 5) == 0
5510 || strncasecmp (start, "xpsr", 4) == 0
5511 || strncasecmp (start, "psr", 3) == 0)
5512 p = start + strcspn (start, "rR") + 1;
5514 psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5520 /* If APSR is being written, a bitfield may be specified. Note that
5521 APSR itself is handled above. */
5522 if (psr->field <= 3)
5524 psr_field = psr->field;
5530 /* M-profile MSR instructions have the mask field set to "10", except
5531 *PSR variants which modify APSR, which may use a different mask (and
5532 have been handled already). Do that by setting the PSR_f field
5534 return psr->field | (lhs ? PSR_f : 0);
5537 goto unsupported_psr;
5543 /* A suffix follows. */
5549 while (ISALNUM (*p) || *p == '_');
5553 /* APSR uses a notation for bits, rather than fields. */
5554 unsigned int nzcvq_bits = 0;
5555 unsigned int g_bit = 0;
5558 for (bit = start; bit != p; bit++)
5560 switch (TOLOWER (*bit))
5563 nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5567 nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5571 nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5575 nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5579 nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5583 g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5587 inst.error = _("unexpected bit specified after APSR");
5592 if (nzcvq_bits == 0x1f)
5597 if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5599 inst.error = _("selected processor does not "
5600 "support DSP extension");
5607 if ((nzcvq_bits & 0x20) != 0
5608 || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5609 || (g_bit & 0x2) != 0)
5611 inst.error = _("bad bitmask specified after APSR");
5617 psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5622 psr_field |= psr->field;
5628 goto error; /* Garbage after "[CS]PSR". */
5630 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5631 is deprecated, but allow it anyway. */
5635 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5638 else if (!m_profile)
5639 /* These bits are never right for M-profile devices: don't set them
5640 (only code paths which read/write APSR reach here). */
5641 psr_field |= (PSR_c | PSR_f);
5647 inst.error = _("selected processor does not support requested special "
5648 "purpose register");
5652 inst.error = _("flag for {c}psr instruction expected");
5656 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5657 value suitable for splatting into the AIF field of the instruction. */
5660 parse_cps_flags (char **str)
5669 case '\0': case ',':
5672 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5673 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5674 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5677 inst.error = _("unrecognized CPS flag");
5682 if (saw_a_flag == 0)
5684 inst.error = _("missing CPS flags");
5692 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5693 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5696 parse_endian_specifier (char **str)
5701 if (strncasecmp (s, "BE", 2))
5703 else if (strncasecmp (s, "LE", 2))
5707 inst.error = _("valid endian specifiers are be or le");
5711 if (ISALNUM (s[2]) || s[2] == '_')
5713 inst.error = _("valid endian specifiers are be or le");
5718 return little_endian;
5721 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5722 value suitable for poking into the rotate field of an sxt or sxta
5723 instruction, or FAIL on error. */
5726 parse_ror (char **str)
5731 if (strncasecmp (s, "ROR", 3) == 0)
5735 inst.error = _("missing rotation field after comma");
5739 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5744 case 0: *str = s; return 0x0;
5745 case 8: *str = s; return 0x1;
5746 case 16: *str = s; return 0x2;
5747 case 24: *str = s; return 0x3;
5750 inst.error = _("rotation can only be 0, 8, 16, or 24");
5755 /* Parse a conditional code (from conds[] below). The value returned is in the
5756 range 0 .. 14, or FAIL. */
5758 parse_cond (char **str)
5761 const struct asm_cond *c;
5763 /* Condition codes are always 2 characters, so matching up to
5764 3 characters is sufficient. */
5769 while (ISALPHA (*q) && n < 3)
5771 cond[n] = TOLOWER (*q);
5776 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5779 inst.error = _("condition required");
5787 /* If the given feature available in the selected CPU, mark it as used.
5788 Returns TRUE iff feature is available. */
5790 mark_feature_used (const arm_feature_set *feature)
5792 /* Ensure the option is valid on the current architecture. */
5793 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
5796 /* Add the appropriate architecture feature for the barrier option used.
5799 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
5801 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
5806 /* Parse an option for a barrier instruction. Returns the encoding for the
5809 parse_barrier (char **str)
5812 const struct asm_barrier_opt *o;
5815 while (ISALPHA (*q))
5818 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5823 if (!mark_feature_used (&o->arch))
5830 /* Parse the operands of a table branch instruction. Similar to a memory
5833 parse_tb (char **str)
5838 if (skip_past_char (&p, '[') == FAIL)
5840 inst.error = _("'[' expected");
5844 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5846 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5849 inst.operands[0].reg = reg;
5851 if (skip_past_comma (&p) == FAIL)
5853 inst.error = _("',' expected");
5857 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5859 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5862 inst.operands[0].imm = reg;
5864 if (skip_past_comma (&p) == SUCCESS)
5866 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5868 if (inst.reloc.exp.X_add_number != 1)
5870 inst.error = _("invalid shift");
5873 inst.operands[0].shifted = 1;
5876 if (skip_past_char (&p, ']') == FAIL)
5878 inst.error = _("']' expected");
5885 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5886 information on the types the operands can take and how they are encoded.
5887 Up to four operands may be read; this function handles setting the
5888 ".present" field for each read operand itself.
5889 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5890 else returns FAIL. */
5893 parse_neon_mov (char **str, int *which_operand)
5895 int i = *which_operand, val;
5896 enum arm_reg_type rtype;
5898 struct neon_type_el optype;
5900 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5902 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5903 inst.operands[i].reg = val;
5904 inst.operands[i].isscalar = 1;
5905 inst.operands[i].vectype = optype;
5906 inst.operands[i++].present = 1;
5908 if (skip_past_comma (&ptr) == FAIL)
5911 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5914 inst.operands[i].reg = val;
5915 inst.operands[i].isreg = 1;
5916 inst.operands[i].present = 1;
5918 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5921 /* Cases 0, 1, 2, 3, 5 (D only). */
5922 if (skip_past_comma (&ptr) == FAIL)
5925 inst.operands[i].reg = val;
5926 inst.operands[i].isreg = 1;
5927 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5928 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5929 inst.operands[i].isvec = 1;
5930 inst.operands[i].vectype = optype;
5931 inst.operands[i++].present = 1;
5933 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5935 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5936 Case 13: VMOV <Sd>, <Rm> */
5937 inst.operands[i].reg = val;
5938 inst.operands[i].isreg = 1;
5939 inst.operands[i].present = 1;
5941 if (rtype == REG_TYPE_NQ)
5943 first_error (_("can't use Neon quad register here"));
5946 else if (rtype != REG_TYPE_VFS)
5949 if (skip_past_comma (&ptr) == FAIL)
5951 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5953 inst.operands[i].reg = val;
5954 inst.operands[i].isreg = 1;
5955 inst.operands[i].present = 1;
5958 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5961 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5962 Case 1: VMOV<c><q> <Dd>, <Dm>
5963 Case 8: VMOV.F32 <Sd>, <Sm>
5964 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5966 inst.operands[i].reg = val;
5967 inst.operands[i].isreg = 1;
5968 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5969 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5970 inst.operands[i].isvec = 1;
5971 inst.operands[i].vectype = optype;
5972 inst.operands[i].present = 1;
5974 if (skip_past_comma (&ptr) == SUCCESS)
5979 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5982 inst.operands[i].reg = val;
5983 inst.operands[i].isreg = 1;
5984 inst.operands[i++].present = 1;
5986 if (skip_past_comma (&ptr) == FAIL)
5989 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5992 inst.operands[i].reg = val;
5993 inst.operands[i].isreg = 1;
5994 inst.operands[i].present = 1;
5997 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5998 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5999 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6000 Case 10: VMOV.F32 <Sd>, #<imm>
6001 Case 11: VMOV.F64 <Dd>, #<imm> */
6002 inst.operands[i].immisfloat = 1;
6003 else if (parse_big_immediate (&ptr, i) == SUCCESS)
6004 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6005 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6009 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6013 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6016 inst.operands[i].reg = val;
6017 inst.operands[i].isreg = 1;
6018 inst.operands[i++].present = 1;
6020 if (skip_past_comma (&ptr) == FAIL)
6023 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6025 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6026 inst.operands[i].reg = val;
6027 inst.operands[i].isscalar = 1;
6028 inst.operands[i].present = 1;
6029 inst.operands[i].vectype = optype;
6031 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6033 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6034 inst.operands[i].reg = val;
6035 inst.operands[i].isreg = 1;
6036 inst.operands[i++].present = 1;
6038 if (skip_past_comma (&ptr) == FAIL)
6041 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6044 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6048 inst.operands[i].reg = val;
6049 inst.operands[i].isreg = 1;
6050 inst.operands[i].isvec = 1;
6051 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6052 inst.operands[i].vectype = optype;
6053 inst.operands[i].present = 1;
6055 if (rtype == REG_TYPE_VFS)
6059 if (skip_past_comma (&ptr) == FAIL)
6061 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6064 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6067 inst.operands[i].reg = val;
6068 inst.operands[i].isreg = 1;
6069 inst.operands[i].isvec = 1;
6070 inst.operands[i].issingle = 1;
6071 inst.operands[i].vectype = optype;
6072 inst.operands[i].present = 1;
6075 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6079 inst.operands[i].reg = val;
6080 inst.operands[i].isreg = 1;
6081 inst.operands[i].isvec = 1;
6082 inst.operands[i].issingle = 1;
6083 inst.operands[i].vectype = optype;
6084 inst.operands[i].present = 1;
6089 first_error (_("parse error"));
6093 /* Successfully parsed the operands. Update args. */
6099 first_error (_("expected comma"));
6103 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6107 /* Use this macro when the operand constraints are different
6108 for ARM and THUMB (e.g. ldrd). */
6109 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6110 ((arm_operand) | ((thumb_operand) << 16))
6112 /* Matcher codes for parse_operands. */
6113 enum operand_parse_code
6115 OP_stop, /* end of line */
6117 OP_RR, /* ARM register */
6118 OP_RRnpc, /* ARM register, not r15 */
6119 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6120 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6121 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6122 optional trailing ! */
6123 OP_RRw, /* ARM register, not r15, optional trailing ! */
6124 OP_RCP, /* Coprocessor number */
6125 OP_RCN, /* Coprocessor register */
6126 OP_RF, /* FPA register */
6127 OP_RVS, /* VFP single precision register */
6128 OP_RVD, /* VFP double precision register (0..15) */
6129 OP_RND, /* Neon double precision register (0..31) */
6130 OP_RNQ, /* Neon quad precision register */
6131 OP_RVSD, /* VFP single or double precision register */
6132 OP_RNDQ, /* Neon double or quad precision register */
6133 OP_RNSDQ, /* Neon single, double or quad precision register */
6134 OP_RNSC, /* Neon scalar D[X] */
6135 OP_RVC, /* VFP control register */
6136 OP_RMF, /* Maverick F register */
6137 OP_RMD, /* Maverick D register */
6138 OP_RMFX, /* Maverick FX register */
6139 OP_RMDX, /* Maverick DX register */
6140 OP_RMAX, /* Maverick AX register */
6141 OP_RMDS, /* Maverick DSPSC register */
6142 OP_RIWR, /* iWMMXt wR register */
6143 OP_RIWC, /* iWMMXt wC register */
6144 OP_RIWG, /* iWMMXt wCG register */
6145 OP_RXA, /* XScale accumulator register */
6147 OP_REGLST, /* ARM register list */
6148 OP_VRSLST, /* VFP single-precision register list */
6149 OP_VRDLST, /* VFP double-precision register list */
6150 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6151 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6152 OP_NSTRLST, /* Neon element/structure list */
6154 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6155 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6156 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6157 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6158 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6159 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6160 OP_VMOV, /* Neon VMOV operands. */
6161 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6162 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6163 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6165 OP_I0, /* immediate zero */
6166 OP_I7, /* immediate value 0 .. 7 */
6167 OP_I15, /* 0 .. 15 */
6168 OP_I16, /* 1 .. 16 */
6169 OP_I16z, /* 0 .. 16 */
6170 OP_I31, /* 0 .. 31 */
6171 OP_I31w, /* 0 .. 31, optional trailing ! */
6172 OP_I32, /* 1 .. 32 */
6173 OP_I32z, /* 0 .. 32 */
6174 OP_I63, /* 0 .. 63 */
6175 OP_I63s, /* -64 .. 63 */
6176 OP_I64, /* 1 .. 64 */
6177 OP_I64z, /* 0 .. 64 */
6178 OP_I255, /* 0 .. 255 */
6180 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6181 OP_I7b, /* 0 .. 7 */
6182 OP_I15b, /* 0 .. 15 */
6183 OP_I31b, /* 0 .. 31 */
6185 OP_SH, /* shifter operand */
6186 OP_SHG, /* shifter operand with possible group relocation */
6187 OP_ADDR, /* Memory address expression (any mode) */
6188 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6189 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6190 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6191 OP_EXP, /* arbitrary expression */
6192 OP_EXPi, /* same, with optional immediate prefix */
6193 OP_EXPr, /* same, with optional relocation suffix */
6194 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6196 OP_CPSF, /* CPS flags */
6197 OP_ENDI, /* Endianness specifier */
6198 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6199 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6200 OP_COND, /* conditional code */
6201 OP_TB, /* Table branch. */
6203 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6205 OP_RRnpc_I0, /* ARM register or literal 0 */
6206 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
6207 OP_RR_EXi, /* ARM register or expression with imm prefix */
6208 OP_RF_IF, /* FPA register or immediate */
6209 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6210 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6212 /* Optional operands. */
6213 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6214 OP_oI31b, /* 0 .. 31 */
6215 OP_oI32b, /* 1 .. 32 */
6216 OP_oI32z, /* 0 .. 32 */
6217 OP_oIffffb, /* 0 .. 65535 */
6218 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6220 OP_oRR, /* ARM register */
6221 OP_oRRnpc, /* ARM register, not the PC */
6222 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6223 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6224 OP_oRND, /* Optional Neon double precision register */
6225 OP_oRNQ, /* Optional Neon quad precision register */
6226 OP_oRNDQ, /* Optional Neon double or quad precision register */
6227 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6228 OP_oSHll, /* LSL immediate */
6229 OP_oSHar, /* ASR immediate */
6230 OP_oSHllar, /* LSL or ASR immediate */
6231 OP_oROR, /* ROR 0/8/16/24 */
6232 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6234 /* Some pre-defined mixed (ARM/THUMB) operands. */
6235 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6236 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6237 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6239 OP_FIRST_OPTIONAL = OP_oI7b
6242 /* Generic instruction operand parser. This does no encoding and no
6243 semantic validation; it merely squirrels values away in the inst
6244 structure. Returns SUCCESS or FAIL depending on whether the
6245 specified grammar matched. */
6247 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6249 unsigned const int *upat = pattern;
6250 char *backtrack_pos = 0;
6251 const char *backtrack_error = 0;
6252 int i, val = 0, backtrack_index = 0;
6253 enum arm_reg_type rtype;
6254 parse_operand_result result;
6255 unsigned int op_parse_code;
6257 #define po_char_or_fail(chr) \
6260 if (skip_past_char (&str, chr) == FAIL) \
6265 #define po_reg_or_fail(regtype) \
6268 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6269 & inst.operands[i].vectype); \
6272 first_error (_(reg_expected_msgs[regtype])); \
6275 inst.operands[i].reg = val; \
6276 inst.operands[i].isreg = 1; \
6277 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6278 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6279 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6280 || rtype == REG_TYPE_VFD \
6281 || rtype == REG_TYPE_NQ); \
6285 #define po_reg_or_goto(regtype, label) \
6288 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6289 & inst.operands[i].vectype); \
6293 inst.operands[i].reg = val; \
6294 inst.operands[i].isreg = 1; \
6295 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6296 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6297 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6298 || rtype == REG_TYPE_VFD \
6299 || rtype == REG_TYPE_NQ); \
6303 #define po_imm_or_fail(min, max, popt) \
6306 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6308 inst.operands[i].imm = val; \
6312 #define po_scalar_or_goto(elsz, label) \
6315 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6318 inst.operands[i].reg = val; \
6319 inst.operands[i].isscalar = 1; \
6323 #define po_misc_or_fail(expr) \
6331 #define po_misc_or_fail_no_backtrack(expr) \
6335 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6336 backtrack_pos = 0; \
6337 if (result != PARSE_OPERAND_SUCCESS) \
6342 #define po_barrier_or_imm(str) \
6345 val = parse_barrier (&str); \
6346 if (val == FAIL && ! ISALPHA (*str)) \
6349 /* ISB can only take SY as an option. */ \
6350 || ((inst.instruction & 0xf0) == 0x60 \
6353 inst.error = _("invalid barrier type"); \
6354 backtrack_pos = 0; \
6360 skip_whitespace (str);
6362 for (i = 0; upat[i] != OP_stop; i++)
6364 op_parse_code = upat[i];
6365 if (op_parse_code >= 1<<16)
6366 op_parse_code = thumb ? (op_parse_code >> 16)
6367 : (op_parse_code & ((1<<16)-1));
6369 if (op_parse_code >= OP_FIRST_OPTIONAL)
6371 /* Remember where we are in case we need to backtrack. */
6372 gas_assert (!backtrack_pos);
6373 backtrack_pos = str;
6374 backtrack_error = inst.error;
6375 backtrack_index = i;
6378 if (i > 0 && (i > 1 || inst.operands[0].present))
6379 po_char_or_fail (',');
6381 switch (op_parse_code)
6389 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6390 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6391 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6392 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6393 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6394 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6396 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6398 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6400 /* Also accept generic coprocessor regs for unknown registers. */
6402 po_reg_or_fail (REG_TYPE_CN);
6404 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6405 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6406 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6407 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6408 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6409 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6410 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6411 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6412 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6413 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6415 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6417 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6418 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6420 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6422 /* Neon scalar. Using an element size of 8 means that some invalid
6423 scalars are accepted here, so deal with those in later code. */
6424 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6428 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6431 po_imm_or_fail (0, 0, TRUE);
6436 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6441 po_scalar_or_goto (8, try_rr);
6444 po_reg_or_fail (REG_TYPE_RN);
6450 po_scalar_or_goto (8, try_nsdq);
6453 po_reg_or_fail (REG_TYPE_NSDQ);
6459 po_scalar_or_goto (8, try_ndq);
6462 po_reg_or_fail (REG_TYPE_NDQ);
6468 po_scalar_or_goto (8, try_vfd);
6471 po_reg_or_fail (REG_TYPE_VFD);
6476 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6477 not careful then bad things might happen. */
6478 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6483 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6486 /* There's a possibility of getting a 64-bit immediate here, so
6487 we need special handling. */
6488 if (parse_big_immediate (&str, i) == FAIL)
6490 inst.error = _("immediate value is out of range");
6498 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6501 po_imm_or_fail (0, 63, TRUE);
6506 po_char_or_fail ('[');
6507 po_reg_or_fail (REG_TYPE_RN);
6508 po_char_or_fail (']');
6514 po_reg_or_fail (REG_TYPE_RN);
6515 if (skip_past_char (&str, '!') == SUCCESS)
6516 inst.operands[i].writeback = 1;
6520 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6521 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6522 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6523 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6524 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6525 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6526 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6527 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6528 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6529 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6530 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6531 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6533 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6535 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6536 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6538 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6539 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6540 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6541 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6543 /* Immediate variants */
6545 po_char_or_fail ('{');
6546 po_imm_or_fail (0, 255, TRUE);
6547 po_char_or_fail ('}');
6551 /* The expression parser chokes on a trailing !, so we have
6552 to find it first and zap it. */
6555 while (*s && *s != ',')
6560 inst.operands[i].writeback = 1;
6562 po_imm_or_fail (0, 31, TRUE);
6570 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6575 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6580 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6582 if (inst.reloc.exp.X_op == O_symbol)
6584 val = parse_reloc (&str);
6587 inst.error = _("unrecognized relocation suffix");
6590 else if (val != BFD_RELOC_UNUSED)
6592 inst.operands[i].imm = val;
6593 inst.operands[i].hasreloc = 1;
6598 /* Operand for MOVW or MOVT. */
6600 po_misc_or_fail (parse_half (&str));
6603 /* Register or expression. */
6604 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6605 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6607 /* Register or immediate. */
6608 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6609 I0: po_imm_or_fail (0, 0, FALSE); break;
6611 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6613 if (!is_immediate_prefix (*str))
6616 val = parse_fpa_immediate (&str);
6619 /* FPA immediates are encoded as registers 8-15.
6620 parse_fpa_immediate has already applied the offset. */
6621 inst.operands[i].reg = val;
6622 inst.operands[i].isreg = 1;
6625 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6626 I32z: po_imm_or_fail (0, 32, FALSE); break;
6628 /* Two kinds of register. */
6631 struct reg_entry *rege = arm_reg_parse_multi (&str);
6633 || (rege->type != REG_TYPE_MMXWR
6634 && rege->type != REG_TYPE_MMXWC
6635 && rege->type != REG_TYPE_MMXWCG))
6637 inst.error = _("iWMMXt data or control register expected");
6640 inst.operands[i].reg = rege->number;
6641 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6647 struct reg_entry *rege = arm_reg_parse_multi (&str);
6649 || (rege->type != REG_TYPE_MMXWC
6650 && rege->type != REG_TYPE_MMXWCG))
6652 inst.error = _("iWMMXt control register expected");
6655 inst.operands[i].reg = rege->number;
6656 inst.operands[i].isreg = 1;
6661 case OP_CPSF: val = parse_cps_flags (&str); break;
6662 case OP_ENDI: val = parse_endian_specifier (&str); break;
6663 case OP_oROR: val = parse_ror (&str); break;
6664 case OP_COND: val = parse_cond (&str); break;
6665 case OP_oBARRIER_I15:
6666 po_barrier_or_imm (str); break;
6668 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6674 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6675 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6677 inst.error = _("Banked registers are not available with this "
6683 val = parse_psr (&str, op_parse_code == OP_wPSR);
6687 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6690 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6692 if (strncasecmp (str, "APSR_", 5) == 0)
6699 case 'c': found = (found & 1) ? 16 : found | 1; break;
6700 case 'n': found = (found & 2) ? 16 : found | 2; break;
6701 case 'z': found = (found & 4) ? 16 : found | 4; break;
6702 case 'v': found = (found & 8) ? 16 : found | 8; break;
6703 default: found = 16;
6707 inst.operands[i].isvec = 1;
6708 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
6709 inst.operands[i].reg = REG_PC;
6716 po_misc_or_fail (parse_tb (&str));
6719 /* Register lists. */
6721 val = parse_reg_list (&str);
6724 inst.operands[1].writeback = 1;
6730 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6734 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6738 /* Allow Q registers too. */
6739 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6744 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6746 inst.operands[i].issingle = 1;
6751 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6756 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6757 &inst.operands[i].vectype);
6760 /* Addressing modes */
6762 po_misc_or_fail (parse_address (&str, i));
6766 po_misc_or_fail_no_backtrack (
6767 parse_address_group_reloc (&str, i, GROUP_LDR));
6771 po_misc_or_fail_no_backtrack (
6772 parse_address_group_reloc (&str, i, GROUP_LDRS));
6776 po_misc_or_fail_no_backtrack (
6777 parse_address_group_reloc (&str, i, GROUP_LDC));
6781 po_misc_or_fail (parse_shifter_operand (&str, i));
6785 po_misc_or_fail_no_backtrack (
6786 parse_shifter_operand_group_reloc (&str, i));
6790 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6794 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6798 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6802 as_fatal (_("unhandled operand code %d"), op_parse_code);
6805 /* Various value-based sanity checks and shared operations. We
6806 do not signal immediate failures for the register constraints;
6807 this allows a syntax error to take precedence. */
6808 switch (op_parse_code)
6816 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6817 inst.error = BAD_PC;
6822 if (inst.operands[i].isreg)
6824 if (inst.operands[i].reg == REG_PC)
6825 inst.error = BAD_PC;
6826 else if (inst.operands[i].reg == REG_SP)
6827 inst.error = BAD_SP;
6832 if (inst.operands[i].isreg
6833 && inst.operands[i].reg == REG_PC
6834 && (inst.operands[i].writeback || thumb))
6835 inst.error = BAD_PC;
6844 case OP_oBARRIER_I15:
6853 inst.operands[i].imm = val;
6860 /* If we get here, this operand was successfully parsed. */
6861 inst.operands[i].present = 1;
6865 inst.error = BAD_ARGS;
6870 /* The parse routine should already have set inst.error, but set a
6871 default here just in case. */
6873 inst.error = _("syntax error");
6877 /* Do not backtrack over a trailing optional argument that
6878 absorbed some text. We will only fail again, with the
6879 'garbage following instruction' error message, which is
6880 probably less helpful than the current one. */
6881 if (backtrack_index == i && backtrack_pos != str
6882 && upat[i+1] == OP_stop)
6885 inst.error = _("syntax error");
6889 /* Try again, skipping the optional argument at backtrack_pos. */
6890 str = backtrack_pos;
6891 inst.error = backtrack_error;
6892 inst.operands[backtrack_index].present = 0;
6893 i = backtrack_index;
6897 /* Check that we have parsed all the arguments. */
6898 if (*str != '\0' && !inst.error)
6899 inst.error = _("garbage following instruction");
6901 return inst.error ? FAIL : SUCCESS;
6904 #undef po_char_or_fail
6905 #undef po_reg_or_fail
6906 #undef po_reg_or_goto
6907 #undef po_imm_or_fail
6908 #undef po_scalar_or_fail
6909 #undef po_barrier_or_imm
6911 /* Shorthand macro for instruction encoding functions issuing errors. */
6912 #define constraint(expr, err) \
6923 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
6924 instructions are unpredictable if these registers are used. This
6925 is the BadReg predicate in ARM's Thumb-2 documentation. */
6926 #define reject_bad_reg(reg) \
6928 if (reg == REG_SP || reg == REG_PC) \
6930 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
6935 /* If REG is R13 (the stack pointer), warn that its use is
6937 #define warn_deprecated_sp(reg) \
6939 if (warn_on_deprecated && reg == REG_SP) \
6940 as_warn (_("use of r13 is deprecated")); \
6943 /* Functions for operand encoding. ARM, then Thumb. */
6945 #define rotate_left(v, n) (v << n | v >> (32 - n))
6947 /* If VAL can be encoded in the immediate field of an ARM instruction,
6948 return the encoded form. Otherwise, return FAIL. */
6951 encode_arm_immediate (unsigned int val)
6955 for (i = 0; i < 32; i += 2)
6956 if ((a = rotate_left (val, i)) <= 0xff)
6957 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6962 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6963 return the encoded form. Otherwise, return FAIL. */
6965 encode_thumb32_immediate (unsigned int val)
6972 for (i = 1; i <= 24; i++)
6975 if ((val & ~(0xff << i)) == 0)
6976 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6980 if (val == ((a << 16) | a))
6982 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6986 if (val == ((a << 16) | a))
6987 return 0x200 | (a >> 8);
6991 /* Encode a VFP SP or DP register number into inst.instruction. */
6994 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6996 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6999 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7002 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7005 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7010 first_error (_("D register out of range for selected VFP version"));
7018 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7022 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7026 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7030 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7034 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7038 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7046 /* Encode a <shift> in an ARM-format instruction. The immediate,
7047 if any, is handled by md_apply_fix. */
7049 encode_arm_shift (int i)
7051 if (inst.operands[i].shift_kind == SHIFT_RRX)
7052 inst.instruction |= SHIFT_ROR << 5;
7055 inst.instruction |= inst.operands[i].shift_kind << 5;
7056 if (inst.operands[i].immisreg)
7058 inst.instruction |= SHIFT_BY_REG;
7059 inst.instruction |= inst.operands[i].imm << 8;
7062 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7067 encode_arm_shifter_operand (int i)
7069 if (inst.operands[i].isreg)
7071 inst.instruction |= inst.operands[i].reg;
7072 encode_arm_shift (i);
7076 inst.instruction |= INST_IMMEDIATE;
7077 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7078 inst.instruction |= inst.operands[i].imm;
7082 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7084 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7087 Generate an error if the operand is not a register. */
7088 constraint (!inst.operands[i].isreg,
7089 _("Instruction does not support =N addresses"));
7091 inst.instruction |= inst.operands[i].reg << 16;
7093 if (inst.operands[i].preind)
7097 inst.error = _("instruction does not accept preindexed addressing");
7100 inst.instruction |= PRE_INDEX;
7101 if (inst.operands[i].writeback)
7102 inst.instruction |= WRITE_BACK;
7105 else if (inst.operands[i].postind)
7107 gas_assert (inst.operands[i].writeback);
7109 inst.instruction |= WRITE_BACK;
7111 else /* unindexed - only for coprocessor */
7113 inst.error = _("instruction does not accept unindexed addressing");
7117 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7118 && (((inst.instruction & 0x000f0000) >> 16)
7119 == ((inst.instruction & 0x0000f000) >> 12)))
7120 as_warn ((inst.instruction & LOAD_BIT)
7121 ? _("destination register same as write-back base")
7122 : _("source register same as write-back base"));
7125 /* inst.operands[i] was set up by parse_address. Encode it into an
7126 ARM-format mode 2 load or store instruction. If is_t is true,
7127 reject forms that cannot be used with a T instruction (i.e. not
7130 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7132 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7134 encode_arm_addr_mode_common (i, is_t);
7136 if (inst.operands[i].immisreg)
7138 constraint ((inst.operands[i].imm == REG_PC
7139 || (is_pc && inst.operands[i].writeback)),
7141 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7142 inst.instruction |= inst.operands[i].imm;
7143 if (!inst.operands[i].negative)
7144 inst.instruction |= INDEX_UP;
7145 if (inst.operands[i].shifted)
7147 if (inst.operands[i].shift_kind == SHIFT_RRX)
7148 inst.instruction |= SHIFT_ROR << 5;
7151 inst.instruction |= inst.operands[i].shift_kind << 5;
7152 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7156 else /* immediate offset in inst.reloc */
7158 if (is_pc && !inst.reloc.pc_rel)
7160 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7162 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7163 cannot use PC in addressing.
7164 PC cannot be used in writeback addressing, either. */
7165 constraint ((is_t || inst.operands[i].writeback),
7168 /* Use of PC in str is deprecated for ARMv7. */
7169 if (warn_on_deprecated
7171 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7172 as_warn (_("use of PC in this instruction is deprecated"));
7175 if (inst.reloc.type == BFD_RELOC_UNUSED)
7177 /* Prefer + for zero encoded value. */
7178 if (!inst.operands[i].negative)
7179 inst.instruction |= INDEX_UP;
7180 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7185 /* inst.operands[i] was set up by parse_address. Encode it into an
7186 ARM-format mode 3 load or store instruction. Reject forms that
7187 cannot be used with such instructions. If is_t is true, reject
7188 forms that cannot be used with a T instruction (i.e. not
7191 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7193 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7195 inst.error = _("instruction does not accept scaled register index");
7199 encode_arm_addr_mode_common (i, is_t);
7201 if (inst.operands[i].immisreg)
7203 constraint ((inst.operands[i].imm == REG_PC
7204 || (is_t && inst.operands[i].reg == REG_PC)),
7206 constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7208 inst.instruction |= inst.operands[i].imm;
7209 if (!inst.operands[i].negative)
7210 inst.instruction |= INDEX_UP;
7212 else /* immediate offset in inst.reloc */
7214 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7215 && inst.operands[i].writeback),
7217 inst.instruction |= HWOFFSET_IMM;
7218 if (inst.reloc.type == BFD_RELOC_UNUSED)
7220 /* Prefer + for zero encoded value. */
7221 if (!inst.operands[i].negative)
7222 inst.instruction |= INDEX_UP;
7224 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7229 /* inst.operands[i] was set up by parse_address. Encode it into an
7230 ARM-format instruction. Reject all forms which cannot be encoded
7231 into a coprocessor load/store instruction. If wb_ok is false,
7232 reject use of writeback; if unind_ok is false, reject use of
7233 unindexed addressing. If reloc_override is not 0, use it instead
7234 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7235 (in which case it is preserved). */
7238 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7240 inst.instruction |= inst.operands[i].reg << 16;
7242 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7244 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7246 gas_assert (!inst.operands[i].writeback);
7249 inst.error = _("instruction does not support unindexed addressing");
7252 inst.instruction |= inst.operands[i].imm;
7253 inst.instruction |= INDEX_UP;
7257 if (inst.operands[i].preind)
7258 inst.instruction |= PRE_INDEX;
7260 if (inst.operands[i].writeback)
7262 if (inst.operands[i].reg == REG_PC)
7264 inst.error = _("pc may not be used with write-back");
7269 inst.error = _("instruction does not support writeback");
7272 inst.instruction |= WRITE_BACK;
7276 inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7277 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7278 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7279 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7282 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7284 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7287 /* Prefer + for zero encoded value. */
7288 if (!inst.operands[i].negative)
7289 inst.instruction |= INDEX_UP;
7294 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7295 Determine whether it can be performed with a move instruction; if
7296 it can, convert inst.instruction to that move instruction and
7297 return TRUE; if it can't, convert inst.instruction to a literal-pool
7298 load and return FALSE. If this is not a valid thing to do in the
7299 current context, set inst.error and return TRUE.
7301 inst.operands[i] describes the destination register. */
7304 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7309 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7313 if ((inst.instruction & tbit) == 0)
7315 inst.error = _("invalid pseudo operation");
7318 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7320 inst.error = _("constant expression expected");
7323 if (inst.reloc.exp.X_op == O_constant)
7327 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7329 /* This can be done with a mov(1) instruction. */
7330 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7331 inst.instruction |= inst.reloc.exp.X_add_number;
7337 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7340 /* This can be done with a mov instruction. */
7341 inst.instruction &= LITERAL_MASK;
7342 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7343 inst.instruction |= value & 0xfff;
7347 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7350 /* This can be done with a mvn instruction. */
7351 inst.instruction &= LITERAL_MASK;
7352 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7353 inst.instruction |= value & 0xfff;
7359 if (add_to_lit_pool () == FAIL)
7361 inst.error = _("literal pool insertion failed");
7364 inst.operands[1].reg = REG_PC;
7365 inst.operands[1].isreg = 1;
7366 inst.operands[1].preind = 1;
7367 inst.reloc.pc_rel = 1;
7368 inst.reloc.type = (thumb_p
7369 ? BFD_RELOC_ARM_THUMB_OFFSET
7371 ? BFD_RELOC_ARM_HWLITERAL
7372 : BFD_RELOC_ARM_LITERAL));
7376 /* Functions for instruction encoding, sorted by sub-architecture.
7377 First some generics; their names are taken from the conventional
7378 bit positions for register arguments in ARM format instructions. */
7388 inst.instruction |= inst.operands[0].reg << 12;
7394 inst.instruction |= inst.operands[0].reg << 12;
7395 inst.instruction |= inst.operands[1].reg;
7401 inst.instruction |= inst.operands[0].reg;
7402 inst.instruction |= inst.operands[1].reg << 16;
7408 inst.instruction |= inst.operands[0].reg << 12;
7409 inst.instruction |= inst.operands[1].reg << 16;
7415 inst.instruction |= inst.operands[0].reg << 16;
7416 inst.instruction |= inst.operands[1].reg << 12;
7420 check_obsolete (const arm_feature_set *feature, const char *msg)
7422 if (ARM_CPU_IS_ANY (cpu_variant))
7424 as_warn ("%s", msg);
7427 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
7439 unsigned Rn = inst.operands[2].reg;
7440 /* Enforce restrictions on SWP instruction. */
7441 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7443 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7444 _("Rn must not overlap other operands"));
7446 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
7448 if (!check_obsolete (&arm_ext_v8,
7449 _("swp{b} use is obsoleted for ARMv8 and later"))
7450 && warn_on_deprecated
7451 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
7452 as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
7455 inst.instruction |= inst.operands[0].reg << 12;
7456 inst.instruction |= inst.operands[1].reg;
7457 inst.instruction |= Rn << 16;
7463 inst.instruction |= inst.operands[0].reg << 12;
7464 inst.instruction |= inst.operands[1].reg << 16;
7465 inst.instruction |= inst.operands[2].reg;
7471 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7472 constraint (((inst.reloc.exp.X_op != O_constant
7473 && inst.reloc.exp.X_op != O_illegal)
7474 || inst.reloc.exp.X_add_number != 0),
7476 inst.instruction |= inst.operands[0].reg;
7477 inst.instruction |= inst.operands[1].reg << 12;
7478 inst.instruction |= inst.operands[2].reg << 16;
7484 inst.instruction |= inst.operands[0].imm;
7490 inst.instruction |= inst.operands[0].reg << 12;
7491 encode_arm_cp_address (1, TRUE, TRUE, 0);
7494 /* ARM instructions, in alphabetical order by function name (except
7495 that wrapper functions appear immediately after the function they
7498 /* This is a pseudo-op of the form "adr rd, label" to be converted
7499 into a relative address of the form "add rd, pc, #label-.-8". */
7504 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7506 /* Frag hacking will turn this into a sub instruction if the offset turns
7507 out to be negative. */
7508 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7509 inst.reloc.pc_rel = 1;
7510 inst.reloc.exp.X_add_number -= 8;
7513 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7514 into a relative address of the form:
7515 add rd, pc, #low(label-.-8)"
7516 add rd, rd, #high(label-.-8)" */
7521 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7523 /* Frag hacking will turn this into a sub instruction if the offset turns
7524 out to be negative. */
7525 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7526 inst.reloc.pc_rel = 1;
7527 inst.size = INSN_SIZE * 2;
7528 inst.reloc.exp.X_add_number -= 8;
7534 if (!inst.operands[1].present)
7535 inst.operands[1].reg = inst.operands[0].reg;
7536 inst.instruction |= inst.operands[0].reg << 12;
7537 inst.instruction |= inst.operands[1].reg << 16;
7538 encode_arm_shifter_operand (2);
7544 if (inst.operands[0].present)
7545 inst.instruction |= inst.operands[0].imm;
7547 inst.instruction |= 0xf;
7553 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7554 constraint (msb > 32, _("bit-field extends past end of register"));
7555 /* The instruction encoding stores the LSB and MSB,
7556 not the LSB and width. */
7557 inst.instruction |= inst.operands[0].reg << 12;
7558 inst.instruction |= inst.operands[1].imm << 7;
7559 inst.instruction |= (msb - 1) << 16;
7567 /* #0 in second position is alternative syntax for bfc, which is
7568 the same instruction but with REG_PC in the Rm field. */
7569 if (!inst.operands[1].isreg)
7570 inst.operands[1].reg = REG_PC;
7572 msb = inst.operands[2].imm + inst.operands[3].imm;
7573 constraint (msb > 32, _("bit-field extends past end of register"));
7574 /* The instruction encoding stores the LSB and MSB,
7575 not the LSB and width. */
7576 inst.instruction |= inst.operands[0].reg << 12;
7577 inst.instruction |= inst.operands[1].reg;
7578 inst.instruction |= inst.operands[2].imm << 7;
7579 inst.instruction |= (msb - 1) << 16;
7585 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7586 _("bit-field extends past end of register"));
7587 inst.instruction |= inst.operands[0].reg << 12;
7588 inst.instruction |= inst.operands[1].reg;
7589 inst.instruction |= inst.operands[2].imm << 7;
7590 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7593 /* ARM V5 breakpoint instruction (argument parse)
7594 BKPT <16 bit unsigned immediate>
7595 Instruction is not conditional.
7596 The bit pattern given in insns[] has the COND_ALWAYS condition,
7597 and it is an error if the caller tried to override that. */
7602 /* Top 12 of 16 bits to bits 19:8. */
7603 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7605 /* Bottom 4 of 16 bits to bits 3:0. */
7606 inst.instruction |= inst.operands[0].imm & 0xf;
7610 encode_branch (int default_reloc)
7612 if (inst.operands[0].hasreloc)
7614 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7615 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7616 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7617 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7618 ? BFD_RELOC_ARM_PLT32
7619 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7622 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7623 inst.reloc.pc_rel = 1;
7630 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7631 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7634 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7641 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7643 if (inst.cond == COND_ALWAYS)
7644 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7646 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7650 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7653 /* ARM V5 branch-link-exchange instruction (argument parse)
7654 BLX <target_addr> ie BLX(1)
7655 BLX{<condition>} <Rm> ie BLX(2)
7656 Unfortunately, there are two different opcodes for this mnemonic.
7657 So, the insns[].value is not used, and the code here zaps values
7658 into inst.instruction.
7659 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7664 if (inst.operands[0].isreg)
7666 /* Arg is a register; the opcode provided by insns[] is correct.
7667 It is not illegal to do "blx pc", just useless. */
7668 if (inst.operands[0].reg == REG_PC)
7669 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7671 inst.instruction |= inst.operands[0].reg;
7675 /* Arg is an address; this instruction cannot be executed
7676 conditionally, and the opcode must be adjusted.
7677 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7678 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
7679 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7680 inst.instruction = 0xfa000000;
7681 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7688 bfd_boolean want_reloc;
7690 if (inst.operands[0].reg == REG_PC)
7691 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7693 inst.instruction |= inst.operands[0].reg;
7694 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
7695 it is for ARMv4t or earlier. */
7696 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7697 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7701 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7706 inst.reloc.type = BFD_RELOC_ARM_V4BX;
7710 /* ARM v5TEJ. Jump to Jazelle code. */
7715 if (inst.operands[0].reg == REG_PC)
7716 as_tsktsk (_("use of r15 in bxj is not really useful"));
7718 inst.instruction |= inst.operands[0].reg;
7721 /* Co-processor data operation:
7722 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7723 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7727 inst.instruction |= inst.operands[0].reg << 8;
7728 inst.instruction |= inst.operands[1].imm << 20;
7729 inst.instruction |= inst.operands[2].reg << 12;
7730 inst.instruction |= inst.operands[3].reg << 16;
7731 inst.instruction |= inst.operands[4].reg;
7732 inst.instruction |= inst.operands[5].imm << 5;
7738 inst.instruction |= inst.operands[0].reg << 16;
7739 encode_arm_shifter_operand (1);
7742 /* Transfer between coprocessor and ARM registers.
7743 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7748 No special properties. */
7750 struct deprecated_coproc_regs_s
7757 arm_feature_set deprecated;
7758 arm_feature_set obsoleted;
7759 const char *dep_msg;
7760 const char *obs_msg;
7763 #define DEPR_ACCESS_V8 \
7764 N_("This coprocessor register access is deprecated in ARMv8")
7766 /* Table of all deprecated coprocessor registers. */
7767 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
7769 {15, 0, 7, 10, 5, /* CP15DMB. */
7770 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7771 DEPR_ACCESS_V8, NULL},
7772 {15, 0, 7, 10, 4, /* CP15DSB. */
7773 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7774 DEPR_ACCESS_V8, NULL},
7775 {15, 0, 7, 5, 4, /* CP15ISB. */
7776 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7777 DEPR_ACCESS_V8, NULL},
7778 {14, 6, 1, 0, 0, /* TEEHBR. */
7779 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7780 DEPR_ACCESS_V8, NULL},
7781 {14, 6, 0, 0, 0, /* TEECR. */
7782 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7783 DEPR_ACCESS_V8, NULL},
7786 #undef DEPR_ACCESS_V8
7788 static const size_t deprecated_coproc_reg_count =
7789 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
7797 Rd = inst.operands[2].reg;
7800 if (inst.instruction == 0xee000010
7801 || inst.instruction == 0xfe000010)
7803 reject_bad_reg (Rd);
7806 constraint (Rd == REG_SP, BAD_SP);
7811 if (inst.instruction == 0xe000010)
7812 constraint (Rd == REG_PC, BAD_PC);
7815 for (i = 0; i < deprecated_coproc_reg_count; ++i)
7817 const struct deprecated_coproc_regs_s *r =
7818 deprecated_coproc_regs + i;
7820 if (inst.operands[0].reg == r->cp
7821 && inst.operands[1].imm == r->opc1
7822 && inst.operands[3].reg == r->crn
7823 && inst.operands[4].reg == r->crm
7824 && inst.operands[5].imm == r->opc2)
7826 if (! ARM_CPU_IS_ANY (cpu_variant)
7827 && warn_on_deprecated
7828 && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
7829 as_warn ("%s", r->dep_msg);
7833 inst.instruction |= inst.operands[0].reg << 8;
7834 inst.instruction |= inst.operands[1].imm << 21;
7835 inst.instruction |= Rd << 12;
7836 inst.instruction |= inst.operands[3].reg << 16;
7837 inst.instruction |= inst.operands[4].reg;
7838 inst.instruction |= inst.operands[5].imm << 5;
7841 /* Transfer between coprocessor register and pair of ARM registers.
7842 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7847 Two XScale instructions are special cases of these:
7849 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7850 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7852 Result unpredictable if Rd or Rn is R15. */
7859 Rd = inst.operands[2].reg;
7860 Rn = inst.operands[3].reg;
7864 reject_bad_reg (Rd);
7865 reject_bad_reg (Rn);
7869 constraint (Rd == REG_PC, BAD_PC);
7870 constraint (Rn == REG_PC, BAD_PC);
7873 inst.instruction |= inst.operands[0].reg << 8;
7874 inst.instruction |= inst.operands[1].imm << 4;
7875 inst.instruction |= Rd << 12;
7876 inst.instruction |= Rn << 16;
7877 inst.instruction |= inst.operands[4].reg;
7883 inst.instruction |= inst.operands[0].imm << 6;
7884 if (inst.operands[1].present)
7886 inst.instruction |= CPSI_MMOD;
7887 inst.instruction |= inst.operands[1].imm;
7894 inst.instruction |= inst.operands[0].imm;
7900 unsigned Rd, Rn, Rm;
7902 Rd = inst.operands[0].reg;
7903 Rn = (inst.operands[1].present
7904 ? inst.operands[1].reg : Rd);
7905 Rm = inst.operands[2].reg;
7907 constraint ((Rd == REG_PC), BAD_PC);
7908 constraint ((Rn == REG_PC), BAD_PC);
7909 constraint ((Rm == REG_PC), BAD_PC);
7911 inst.instruction |= Rd << 16;
7912 inst.instruction |= Rn << 0;
7913 inst.instruction |= Rm << 8;
7919 /* There is no IT instruction in ARM mode. We
7920 process it to do the validation as if in
7921 thumb mode, just in case the code gets
7922 assembled for thumb using the unified syntax. */
7927 set_it_insn_type (IT_INSN);
7928 now_it.mask = (inst.instruction & 0xf) | 0x10;
7929 now_it.cc = inst.operands[0].imm;
7933 /* If there is only one register in the register list,
7934 then return its register number. Otherwise return -1. */
7936 only_one_reg_in_list (int range)
7938 int i = ffs (range) - 1;
7939 return (i > 15 || range != (1 << i)) ? -1 : i;
7943 encode_ldmstm(int from_push_pop_mnem)
7945 int base_reg = inst.operands[0].reg;
7946 int range = inst.operands[1].imm;
7949 inst.instruction |= base_reg << 16;
7950 inst.instruction |= range;
7952 if (inst.operands[1].writeback)
7953 inst.instruction |= LDM_TYPE_2_OR_3;
7955 if (inst.operands[0].writeback)
7957 inst.instruction |= WRITE_BACK;
7958 /* Check for unpredictable uses of writeback. */
7959 if (inst.instruction & LOAD_BIT)
7961 /* Not allowed in LDM type 2. */
7962 if ((inst.instruction & LDM_TYPE_2_OR_3)
7963 && ((range & (1 << REG_PC)) == 0))
7964 as_warn (_("writeback of base register is UNPREDICTABLE"));
7965 /* Only allowed if base reg not in list for other types. */
7966 else if (range & (1 << base_reg))
7967 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7971 /* Not allowed for type 2. */
7972 if (inst.instruction & LDM_TYPE_2_OR_3)
7973 as_warn (_("writeback of base register is UNPREDICTABLE"));
7974 /* Only allowed if base reg not in list, or first in list. */
7975 else if ((range & (1 << base_reg))
7976 && (range & ((1 << base_reg) - 1)))
7977 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7981 /* If PUSH/POP has only one register, then use the A2 encoding. */
7982 one_reg = only_one_reg_in_list (range);
7983 if (from_push_pop_mnem && one_reg >= 0)
7985 int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
7987 inst.instruction &= A_COND_MASK;
7988 inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
7989 inst.instruction |= one_reg << 12;
7996 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
7999 /* ARMv5TE load-consecutive (argument parse)
8008 constraint (inst.operands[0].reg % 2 != 0,
8009 _("first transfer register must be even"));
8010 constraint (inst.operands[1].present
8011 && inst.operands[1].reg != inst.operands[0].reg + 1,
8012 _("can only transfer two consecutive registers"));
8013 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8014 constraint (!inst.operands[2].isreg, _("'[' expected"));
8016 if (!inst.operands[1].present)
8017 inst.operands[1].reg = inst.operands[0].reg + 1;
8019 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8020 register and the first register written; we have to diagnose
8021 overlap between the base and the second register written here. */
8023 if (inst.operands[2].reg == inst.operands[1].reg
8024 && (inst.operands[2].writeback || inst.operands[2].postind))
8025 as_warn (_("base register written back, and overlaps "
8026 "second transfer register"));
8028 if (!(inst.instruction & V4_STR_BIT))
8030 /* For an index-register load, the index register must not overlap the
8031 destination (even if not write-back). */
8032 if (inst.operands[2].immisreg
8033 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8034 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8035 as_warn (_("index register overlaps transfer register"));
8037 inst.instruction |= inst.operands[0].reg << 12;
8038 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8044 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8045 || inst.operands[1].postind || inst.operands[1].writeback
8046 || inst.operands[1].immisreg || inst.operands[1].shifted
8047 || inst.operands[1].negative
8048 /* This can arise if the programmer has written
8050 or if they have mistakenly used a register name as the last
8053 It is very difficult to distinguish between these two cases
8054 because "rX" might actually be a label. ie the register
8055 name has been occluded by a symbol of the same name. So we
8056 just generate a general 'bad addressing mode' type error
8057 message and leave it up to the programmer to discover the
8058 true cause and fix their mistake. */
8059 || (inst.operands[1].reg == REG_PC),
8062 constraint (inst.reloc.exp.X_op != O_constant
8063 || inst.reloc.exp.X_add_number != 0,
8064 _("offset must be zero in ARM encoding"));
8066 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8068 inst.instruction |= inst.operands[0].reg << 12;
8069 inst.instruction |= inst.operands[1].reg << 16;
8070 inst.reloc.type = BFD_RELOC_UNUSED;
8076 constraint (inst.operands[0].reg % 2 != 0,
8077 _("even register required"));
8078 constraint (inst.operands[1].present
8079 && inst.operands[1].reg != inst.operands[0].reg + 1,
8080 _("can only load two consecutive registers"));
8081 /* If op 1 were present and equal to PC, this function wouldn't
8082 have been called in the first place. */
8083 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8085 inst.instruction |= inst.operands[0].reg << 12;
8086 inst.instruction |= inst.operands[2].reg << 16;
8089 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8090 which is not a multiple of four is UNPREDICTABLE. */
8092 check_ldr_r15_aligned (void)
8094 constraint (!(inst.operands[1].immisreg)
8095 && (inst.operands[0].reg == REG_PC
8096 && inst.operands[1].reg == REG_PC
8097 && (inst.reloc.exp.X_add_number & 0x3)),
8098 _("ldr to register 15 must be 4-byte alligned"));
8104 inst.instruction |= inst.operands[0].reg << 12;
8105 if (!inst.operands[1].isreg)
8106 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
8108 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8109 check_ldr_r15_aligned ();
8115 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8117 if (inst.operands[1].preind)
8119 constraint (inst.reloc.exp.X_op != O_constant
8120 || inst.reloc.exp.X_add_number != 0,
8121 _("this instruction requires a post-indexed address"));
8123 inst.operands[1].preind = 0;
8124 inst.operands[1].postind = 1;
8125 inst.operands[1].writeback = 1;
8127 inst.instruction |= inst.operands[0].reg << 12;
8128 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8131 /* Halfword and signed-byte load/store operations. */
8136 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8137 inst.instruction |= inst.operands[0].reg << 12;
8138 if (!inst.operands[1].isreg)
8139 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
8141 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8147 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8149 if (inst.operands[1].preind)
8151 constraint (inst.reloc.exp.X_op != O_constant
8152 || inst.reloc.exp.X_add_number != 0,
8153 _("this instruction requires a post-indexed address"));
8155 inst.operands[1].preind = 0;
8156 inst.operands[1].postind = 1;
8157 inst.operands[1].writeback = 1;
8159 inst.instruction |= inst.operands[0].reg << 12;
8160 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8163 /* Co-processor register load/store.
8164 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8168 inst.instruction |= inst.operands[0].reg << 8;
8169 inst.instruction |= inst.operands[1].reg << 12;
8170 encode_arm_cp_address (2, TRUE, TRUE, 0);
8176 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8177 if (inst.operands[0].reg == inst.operands[1].reg
8178 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8179 && !(inst.instruction & 0x00400000))
8180 as_tsktsk (_("Rd and Rm should be different in mla"));
8182 inst.instruction |= inst.operands[0].reg << 16;
8183 inst.instruction |= inst.operands[1].reg;
8184 inst.instruction |= inst.operands[2].reg << 8;
8185 inst.instruction |= inst.operands[3].reg << 12;
8191 inst.instruction |= inst.operands[0].reg << 12;
8192 encode_arm_shifter_operand (1);
8195 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8202 top = (inst.instruction & 0x00400000) != 0;
8203 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8204 _(":lower16: not allowed this instruction"));
8205 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8206 _(":upper16: not allowed instruction"));
8207 inst.instruction |= inst.operands[0].reg << 12;
8208 if (inst.reloc.type == BFD_RELOC_UNUSED)
8210 imm = inst.reloc.exp.X_add_number;
8211 /* The value is in two pieces: 0:11, 16:19. */
8212 inst.instruction |= (imm & 0x00000fff);
8213 inst.instruction |= (imm & 0x0000f000) << 4;
8217 static void do_vfp_nsyn_opcode (const char *);
8220 do_vfp_nsyn_mrs (void)
8222 if (inst.operands[0].isvec)
8224 if (inst.operands[1].reg != 1)
8225 first_error (_("operand 1 must be FPSCR"));
8226 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8227 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8228 do_vfp_nsyn_opcode ("fmstat");
8230 else if (inst.operands[1].isvec)
8231 do_vfp_nsyn_opcode ("fmrx");
8239 do_vfp_nsyn_msr (void)
8241 if (inst.operands[0].isvec)
8242 do_vfp_nsyn_opcode ("fmxr");
8252 unsigned Rt = inst.operands[0].reg;
8254 if (thumb_mode && Rt == REG_SP)
8256 inst.error = BAD_SP;
8260 /* APSR_ sets isvec. All other refs to PC are illegal. */
8261 if (!inst.operands[0].isvec && Rt == REG_PC)
8263 inst.error = BAD_PC;
8267 /* If we get through parsing the register name, we just insert the number
8268 generated into the instruction without further validation. */
8269 inst.instruction |= (inst.operands[1].reg << 16);
8270 inst.instruction |= (Rt << 12);
8276 unsigned Rt = inst.operands[1].reg;
8279 reject_bad_reg (Rt);
8280 else if (Rt == REG_PC)
8282 inst.error = BAD_PC;
8286 /* If we get through parsing the register name, we just insert the number
8287 generated into the instruction without further validation. */
8288 inst.instruction |= (inst.operands[0].reg << 16);
8289 inst.instruction |= (Rt << 12);
8297 if (do_vfp_nsyn_mrs () == SUCCESS)
8300 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8301 inst.instruction |= inst.operands[0].reg << 12;
8303 if (inst.operands[1].isreg)
8305 br = inst.operands[1].reg;
8306 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8307 as_bad (_("bad register for mrs"));
8311 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8312 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8314 _("'APSR', 'CPSR' or 'SPSR' expected"));
8315 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8318 inst.instruction |= br;
8321 /* Two possible forms:
8322 "{C|S}PSR_<field>, Rm",
8323 "{C|S}PSR_f, #expression". */
8328 if (do_vfp_nsyn_msr () == SUCCESS)
8331 inst.instruction |= inst.operands[0].imm;
8332 if (inst.operands[1].isreg)
8333 inst.instruction |= inst.operands[1].reg;
8336 inst.instruction |= INST_IMMEDIATE;
8337 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8338 inst.reloc.pc_rel = 0;
8345 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8347 if (!inst.operands[2].present)
8348 inst.operands[2].reg = inst.operands[0].reg;
8349 inst.instruction |= inst.operands[0].reg << 16;
8350 inst.instruction |= inst.operands[1].reg;
8351 inst.instruction |= inst.operands[2].reg << 8;
8353 if (inst.operands[0].reg == inst.operands[1].reg
8354 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8355 as_tsktsk (_("Rd and Rm should be different in mul"));
8358 /* Long Multiply Parser
8359 UMULL RdLo, RdHi, Rm, Rs
8360 SMULL RdLo, RdHi, Rm, Rs
8361 UMLAL RdLo, RdHi, Rm, Rs
8362 SMLAL RdLo, RdHi, Rm, Rs. */
8367 inst.instruction |= inst.operands[0].reg << 12;
8368 inst.instruction |= inst.operands[1].reg << 16;
8369 inst.instruction |= inst.operands[2].reg;
8370 inst.instruction |= inst.operands[3].reg << 8;
8372 /* rdhi and rdlo must be different. */
8373 if (inst.operands[0].reg == inst.operands[1].reg)
8374 as_tsktsk (_("rdhi and rdlo must be different"));
8376 /* rdhi, rdlo and rm must all be different before armv6. */
8377 if ((inst.operands[0].reg == inst.operands[2].reg
8378 || inst.operands[1].reg == inst.operands[2].reg)
8379 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8380 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8386 if (inst.operands[0].present
8387 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8389 /* Architectural NOP hints are CPSR sets with no bits selected. */
8390 inst.instruction &= 0xf0000000;
8391 inst.instruction |= 0x0320f000;
8392 if (inst.operands[0].present)
8393 inst.instruction |= inst.operands[0].imm;
8397 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8398 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8399 Condition defaults to COND_ALWAYS.
8400 Error if Rd, Rn or Rm are R15. */
8405 inst.instruction |= inst.operands[0].reg << 12;
8406 inst.instruction |= inst.operands[1].reg << 16;
8407 inst.instruction |= inst.operands[2].reg;
8408 if (inst.operands[3].present)
8409 encode_arm_shift (3);
8412 /* ARM V6 PKHTB (Argument Parse). */
8417 if (!inst.operands[3].present)
8419 /* If the shift specifier is omitted, turn the instruction
8420 into pkhbt rd, rm, rn. */
8421 inst.instruction &= 0xfff00010;
8422 inst.instruction |= inst.operands[0].reg << 12;
8423 inst.instruction |= inst.operands[1].reg;
8424 inst.instruction |= inst.operands[2].reg << 16;
8428 inst.instruction |= inst.operands[0].reg << 12;
8429 inst.instruction |= inst.operands[1].reg << 16;
8430 inst.instruction |= inst.operands[2].reg;
8431 encode_arm_shift (3);
8435 /* ARMv5TE: Preload-Cache
8436 MP Extensions: Preload for write
8440 Syntactically, like LDR with B=1, W=0, L=1. */
8445 constraint (!inst.operands[0].isreg,
8446 _("'[' expected after PLD mnemonic"));
8447 constraint (inst.operands[0].postind,
8448 _("post-indexed expression used in preload instruction"));
8449 constraint (inst.operands[0].writeback,
8450 _("writeback used in preload instruction"));
8451 constraint (!inst.operands[0].preind,
8452 _("unindexed addressing used in preload instruction"));
8453 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8456 /* ARMv7: PLI <addr_mode> */
8460 constraint (!inst.operands[0].isreg,
8461 _("'[' expected after PLI mnemonic"));
8462 constraint (inst.operands[0].postind,
8463 _("post-indexed expression used in preload instruction"));
8464 constraint (inst.operands[0].writeback,
8465 _("writeback used in preload instruction"));
8466 constraint (!inst.operands[0].preind,
8467 _("unindexed addressing used in preload instruction"));
8468 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8469 inst.instruction &= ~PRE_INDEX;
8475 inst.operands[1] = inst.operands[0];
8476 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8477 inst.operands[0].isreg = 1;
8478 inst.operands[0].writeback = 1;
8479 inst.operands[0].reg = REG_SP;
8480 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
8483 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8484 word at the specified address and the following word
8486 Unconditionally executed.
8487 Error if Rn is R15. */
8492 inst.instruction |= inst.operands[0].reg << 16;
8493 if (inst.operands[0].writeback)
8494 inst.instruction |= WRITE_BACK;
8497 /* ARM V6 ssat (argument parse). */
8502 inst.instruction |= inst.operands[0].reg << 12;
8503 inst.instruction |= (inst.operands[1].imm - 1) << 16;
8504 inst.instruction |= inst.operands[2].reg;
8506 if (inst.operands[3].present)
8507 encode_arm_shift (3);
8510 /* ARM V6 usat (argument parse). */
8515 inst.instruction |= inst.operands[0].reg << 12;
8516 inst.instruction |= inst.operands[1].imm << 16;
8517 inst.instruction |= inst.operands[2].reg;
8519 if (inst.operands[3].present)
8520 encode_arm_shift (3);
8523 /* ARM V6 ssat16 (argument parse). */
8528 inst.instruction |= inst.operands[0].reg << 12;
8529 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8530 inst.instruction |= inst.operands[2].reg;
8536 inst.instruction |= inst.operands[0].reg << 12;
8537 inst.instruction |= inst.operands[1].imm << 16;
8538 inst.instruction |= inst.operands[2].reg;
8541 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8542 preserving the other bits.
8544 setend <endian_specifier>, where <endian_specifier> is either
8550 if (warn_on_deprecated
8551 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8552 as_warn (_("setend use is deprecated for ARMv8"));
8554 if (inst.operands[0].imm)
8555 inst.instruction |= 0x200;
8561 unsigned int Rm = (inst.operands[1].present
8562 ? inst.operands[1].reg
8563 : inst.operands[0].reg);
8565 inst.instruction |= inst.operands[0].reg << 12;
8566 inst.instruction |= Rm;
8567 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
8569 inst.instruction |= inst.operands[2].reg << 8;
8570 inst.instruction |= SHIFT_BY_REG;
8571 /* PR 12854: Error on extraneous shifts. */
8572 constraint (inst.operands[2].shifted,
8573 _("extraneous shift as part of operand to shift insn"));
8576 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8582 inst.reloc.type = BFD_RELOC_ARM_SMC;
8583 inst.reloc.pc_rel = 0;
8589 inst.reloc.type = BFD_RELOC_ARM_HVC;
8590 inst.reloc.pc_rel = 0;
8596 inst.reloc.type = BFD_RELOC_ARM_SWI;
8597 inst.reloc.pc_rel = 0;
8600 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8601 SMLAxy{cond} Rd,Rm,Rs,Rn
8602 SMLAWy{cond} Rd,Rm,Rs,Rn
8603 Error if any register is R15. */
8608 inst.instruction |= inst.operands[0].reg << 16;
8609 inst.instruction |= inst.operands[1].reg;
8610 inst.instruction |= inst.operands[2].reg << 8;
8611 inst.instruction |= inst.operands[3].reg << 12;
8614 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8615 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8616 Error if any register is R15.
8617 Warning if Rdlo == Rdhi. */
8622 inst.instruction |= inst.operands[0].reg << 12;
8623 inst.instruction |= inst.operands[1].reg << 16;
8624 inst.instruction |= inst.operands[2].reg;
8625 inst.instruction |= inst.operands[3].reg << 8;
8627 if (inst.operands[0].reg == inst.operands[1].reg)
8628 as_tsktsk (_("rdhi and rdlo must be different"));
8631 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8632 SMULxy{cond} Rd,Rm,Rs
8633 Error if any register is R15. */
8638 inst.instruction |= inst.operands[0].reg << 16;
8639 inst.instruction |= inst.operands[1].reg;
8640 inst.instruction |= inst.operands[2].reg << 8;
8643 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8644 the same for both ARM and Thumb-2. */
8651 if (inst.operands[0].present)
8653 reg = inst.operands[0].reg;
8654 constraint (reg != REG_SP, _("SRS base register must be r13"));
8659 inst.instruction |= reg << 16;
8660 inst.instruction |= inst.operands[1].imm;
8661 if (inst.operands[0].writeback || inst.operands[1].writeback)
8662 inst.instruction |= WRITE_BACK;
8665 /* ARM V6 strex (argument parse). */
8670 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8671 || inst.operands[2].postind || inst.operands[2].writeback
8672 || inst.operands[2].immisreg || inst.operands[2].shifted
8673 || inst.operands[2].negative
8674 /* See comment in do_ldrex(). */
8675 || (inst.operands[2].reg == REG_PC),
8678 constraint (inst.operands[0].reg == inst.operands[1].reg
8679 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8681 constraint (inst.reloc.exp.X_op != O_constant
8682 || inst.reloc.exp.X_add_number != 0,
8683 _("offset must be zero in ARM encoding"));
8685 inst.instruction |= inst.operands[0].reg << 12;
8686 inst.instruction |= inst.operands[1].reg;
8687 inst.instruction |= inst.operands[2].reg << 16;
8688 inst.reloc.type = BFD_RELOC_UNUSED;
8694 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8695 || inst.operands[2].postind || inst.operands[2].writeback
8696 || inst.operands[2].immisreg || inst.operands[2].shifted
8697 || inst.operands[2].negative,
8700 constraint (inst.operands[0].reg == inst.operands[1].reg
8701 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8709 constraint (inst.operands[1].reg % 2 != 0,
8710 _("even register required"));
8711 constraint (inst.operands[2].present
8712 && inst.operands[2].reg != inst.operands[1].reg + 1,
8713 _("can only store two consecutive registers"));
8714 /* If op 2 were present and equal to PC, this function wouldn't
8715 have been called in the first place. */
8716 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8718 constraint (inst.operands[0].reg == inst.operands[1].reg
8719 || inst.operands[0].reg == inst.operands[1].reg + 1
8720 || inst.operands[0].reg == inst.operands[3].reg,
8723 inst.instruction |= inst.operands[0].reg << 12;
8724 inst.instruction |= inst.operands[1].reg;
8725 inst.instruction |= inst.operands[3].reg << 16;
8732 constraint (inst.operands[0].reg == inst.operands[1].reg
8733 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8741 constraint (inst.operands[0].reg == inst.operands[1].reg
8742 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8747 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8748 extends it to 32-bits, and adds the result to a value in another
8749 register. You can specify a rotation by 0, 8, 16, or 24 bits
8750 before extracting the 16-bit value.
8751 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8752 Condition defaults to COND_ALWAYS.
8753 Error if any register uses R15. */
8758 inst.instruction |= inst.operands[0].reg << 12;
8759 inst.instruction |= inst.operands[1].reg << 16;
8760 inst.instruction |= inst.operands[2].reg;
8761 inst.instruction |= inst.operands[3].imm << 10;
8766 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8767 Condition defaults to COND_ALWAYS.
8768 Error if any register uses R15. */
8773 inst.instruction |= inst.operands[0].reg << 12;
8774 inst.instruction |= inst.operands[1].reg;
8775 inst.instruction |= inst.operands[2].imm << 10;
8778 /* VFP instructions. In a logical order: SP variant first, monad
8779 before dyad, arithmetic then move then load/store. */
8782 do_vfp_sp_monadic (void)
8784 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8785 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8789 do_vfp_sp_dyadic (void)
8791 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8792 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8793 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8797 do_vfp_sp_compare_z (void)
8799 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8803 do_vfp_dp_sp_cvt (void)
8805 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8806 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8810 do_vfp_sp_dp_cvt (void)
8812 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8813 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8817 do_vfp_reg_from_sp (void)
8819 inst.instruction |= inst.operands[0].reg << 12;
8820 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8824 do_vfp_reg2_from_sp2 (void)
8826 constraint (inst.operands[2].imm != 2,
8827 _("only two consecutive VFP SP registers allowed here"));
8828 inst.instruction |= inst.operands[0].reg << 12;
8829 inst.instruction |= inst.operands[1].reg << 16;
8830 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8834 do_vfp_sp_from_reg (void)
8836 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8837 inst.instruction |= inst.operands[1].reg << 12;
8841 do_vfp_sp2_from_reg2 (void)
8843 constraint (inst.operands[0].imm != 2,
8844 _("only two consecutive VFP SP registers allowed here"));
8845 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8846 inst.instruction |= inst.operands[1].reg << 12;
8847 inst.instruction |= inst.operands[2].reg << 16;
8851 do_vfp_sp_ldst (void)
8853 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8854 encode_arm_cp_address (1, FALSE, TRUE, 0);
8858 do_vfp_dp_ldst (void)
8860 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8861 encode_arm_cp_address (1, FALSE, TRUE, 0);
8866 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8868 if (inst.operands[0].writeback)
8869 inst.instruction |= WRITE_BACK;
8871 constraint (ldstm_type != VFP_LDSTMIA,
8872 _("this addressing mode requires base-register writeback"));
8873 inst.instruction |= inst.operands[0].reg << 16;
8874 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8875 inst.instruction |= inst.operands[1].imm;
8879 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8883 if (inst.operands[0].writeback)
8884 inst.instruction |= WRITE_BACK;
8886 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8887 _("this addressing mode requires base-register writeback"));
8889 inst.instruction |= inst.operands[0].reg << 16;
8890 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8892 count = inst.operands[1].imm << 1;
8893 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8896 inst.instruction |= count;
8900 do_vfp_sp_ldstmia (void)
8902 vfp_sp_ldstm (VFP_LDSTMIA);
8906 do_vfp_sp_ldstmdb (void)
8908 vfp_sp_ldstm (VFP_LDSTMDB);
8912 do_vfp_dp_ldstmia (void)
8914 vfp_dp_ldstm (VFP_LDSTMIA);
8918 do_vfp_dp_ldstmdb (void)
8920 vfp_dp_ldstm (VFP_LDSTMDB);
8924 do_vfp_xp_ldstmia (void)
8926 vfp_dp_ldstm (VFP_LDSTMIAX);
8930 do_vfp_xp_ldstmdb (void)
8932 vfp_dp_ldstm (VFP_LDSTMDBX);
8936 do_vfp_dp_rd_rm (void)
8938 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8939 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8943 do_vfp_dp_rn_rd (void)
8945 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8946 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8950 do_vfp_dp_rd_rn (void)
8952 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8953 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8957 do_vfp_dp_rd_rn_rm (void)
8959 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8960 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8961 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8967 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8971 do_vfp_dp_rm_rd_rn (void)
8973 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8974 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8975 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8978 /* VFPv3 instructions. */
8980 do_vfp_sp_const (void)
8982 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8983 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8984 inst.instruction |= (inst.operands[1].imm & 0x0f);
8988 do_vfp_dp_const (void)
8990 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8991 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8992 inst.instruction |= (inst.operands[1].imm & 0x0f);
8996 vfp_conv (int srcsize)
8998 int immbits = srcsize - inst.operands[1].imm;
9000 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9002 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9003 i.e. immbits must be in range 0 - 16. */
9004 inst.error = _("immediate value out of range, expected range [0, 16]");
9007 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9009 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9010 i.e. immbits must be in range 0 - 31. */
9011 inst.error = _("immediate value out of range, expected range [1, 32]");
9015 inst.instruction |= (immbits & 1) << 5;
9016 inst.instruction |= (immbits >> 1);
9020 do_vfp_sp_conv_16 (void)
9022 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9027 do_vfp_dp_conv_16 (void)
9029 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9034 do_vfp_sp_conv_32 (void)
9036 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9041 do_vfp_dp_conv_32 (void)
9043 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9047 /* FPA instructions. Also in a logical order. */
9052 inst.instruction |= inst.operands[0].reg << 16;
9053 inst.instruction |= inst.operands[1].reg;
9057 do_fpa_ldmstm (void)
9059 inst.instruction |= inst.operands[0].reg << 12;
9060 switch (inst.operands[1].imm)
9062 case 1: inst.instruction |= CP_T_X; break;
9063 case 2: inst.instruction |= CP_T_Y; break;
9064 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9069 if (inst.instruction & (PRE_INDEX | INDEX_UP))
9071 /* The instruction specified "ea" or "fd", so we can only accept
9072 [Rn]{!}. The instruction does not really support stacking or
9073 unstacking, so we have to emulate these by setting appropriate
9074 bits and offsets. */
9075 constraint (inst.reloc.exp.X_op != O_constant
9076 || inst.reloc.exp.X_add_number != 0,
9077 _("this instruction does not support indexing"));
9079 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9080 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9082 if (!(inst.instruction & INDEX_UP))
9083 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9085 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9087 inst.operands[2].preind = 0;
9088 inst.operands[2].postind = 1;
9092 encode_arm_cp_address (2, TRUE, TRUE, 0);
9095 /* iWMMXt instructions: strictly in alphabetical order. */
9098 do_iwmmxt_tandorc (void)
9100 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9104 do_iwmmxt_textrc (void)
9106 inst.instruction |= inst.operands[0].reg << 12;
9107 inst.instruction |= inst.operands[1].imm;
9111 do_iwmmxt_textrm (void)
9113 inst.instruction |= inst.operands[0].reg << 12;
9114 inst.instruction |= inst.operands[1].reg << 16;
9115 inst.instruction |= inst.operands[2].imm;
9119 do_iwmmxt_tinsr (void)
9121 inst.instruction |= inst.operands[0].reg << 16;
9122 inst.instruction |= inst.operands[1].reg << 12;
9123 inst.instruction |= inst.operands[2].imm;
9127 do_iwmmxt_tmia (void)
9129 inst.instruction |= inst.operands[0].reg << 5;
9130 inst.instruction |= inst.operands[1].reg;
9131 inst.instruction |= inst.operands[2].reg << 12;
9135 do_iwmmxt_waligni (void)
9137 inst.instruction |= inst.operands[0].reg << 12;
9138 inst.instruction |= inst.operands[1].reg << 16;
9139 inst.instruction |= inst.operands[2].reg;
9140 inst.instruction |= inst.operands[3].imm << 20;
9144 do_iwmmxt_wmerge (void)
9146 inst.instruction |= inst.operands[0].reg << 12;
9147 inst.instruction |= inst.operands[1].reg << 16;
9148 inst.instruction |= inst.operands[2].reg;
9149 inst.instruction |= inst.operands[3].imm << 21;
9153 do_iwmmxt_wmov (void)
9155 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9156 inst.instruction |= inst.operands[0].reg << 12;
9157 inst.instruction |= inst.operands[1].reg << 16;
9158 inst.instruction |= inst.operands[1].reg;
9162 do_iwmmxt_wldstbh (void)
9165 inst.instruction |= inst.operands[0].reg << 12;
9167 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9169 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9170 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9174 do_iwmmxt_wldstw (void)
9176 /* RIWR_RIWC clears .isreg for a control register. */
9177 if (!inst.operands[0].isreg)
9179 constraint (inst.cond != COND_ALWAYS, BAD_COND);
9180 inst.instruction |= 0xf0000000;
9183 inst.instruction |= inst.operands[0].reg << 12;
9184 encode_arm_cp_address (1, TRUE, TRUE, 0);
9188 do_iwmmxt_wldstd (void)
9190 inst.instruction |= inst.operands[0].reg << 12;
9191 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9192 && inst.operands[1].immisreg)
9194 inst.instruction &= ~0x1a000ff;
9195 inst.instruction |= (0xf << 28);
9196 if (inst.operands[1].preind)
9197 inst.instruction |= PRE_INDEX;
9198 if (!inst.operands[1].negative)
9199 inst.instruction |= INDEX_UP;
9200 if (inst.operands[1].writeback)
9201 inst.instruction |= WRITE_BACK;
9202 inst.instruction |= inst.operands[1].reg << 16;
9203 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9204 inst.instruction |= inst.operands[1].imm;
9207 encode_arm_cp_address (1, TRUE, FALSE, 0);
9211 do_iwmmxt_wshufh (void)
9213 inst.instruction |= inst.operands[0].reg << 12;
9214 inst.instruction |= inst.operands[1].reg << 16;
9215 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9216 inst.instruction |= (inst.operands[2].imm & 0x0f);
9220 do_iwmmxt_wzero (void)
9222 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9223 inst.instruction |= inst.operands[0].reg;
9224 inst.instruction |= inst.operands[0].reg << 12;
9225 inst.instruction |= inst.operands[0].reg << 16;
9229 do_iwmmxt_wrwrwr_or_imm5 (void)
9231 if (inst.operands[2].isreg)
9234 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9235 _("immediate operand requires iWMMXt2"));
9237 if (inst.operands[2].imm == 0)
9239 switch ((inst.instruction >> 20) & 0xf)
9245 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9246 inst.operands[2].imm = 16;
9247 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9253 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9254 inst.operands[2].imm = 32;
9255 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9262 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9264 wrn = (inst.instruction >> 16) & 0xf;
9265 inst.instruction &= 0xff0fff0f;
9266 inst.instruction |= wrn;
9267 /* Bail out here; the instruction is now assembled. */
9272 /* Map 32 -> 0, etc. */
9273 inst.operands[2].imm &= 0x1f;
9274 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9278 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9279 operations first, then control, shift, and load/store. */
9281 /* Insns like "foo X,Y,Z". */
9284 do_mav_triple (void)
9286 inst.instruction |= inst.operands[0].reg << 16;
9287 inst.instruction |= inst.operands[1].reg;
9288 inst.instruction |= inst.operands[2].reg << 12;
9291 /* Insns like "foo W,X,Y,Z".
9292 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9297 inst.instruction |= inst.operands[0].reg << 5;
9298 inst.instruction |= inst.operands[1].reg << 12;
9299 inst.instruction |= inst.operands[2].reg << 16;
9300 inst.instruction |= inst.operands[3].reg;
9303 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
9307 inst.instruction |= inst.operands[1].reg << 12;
9310 /* Maverick shift immediate instructions.
9311 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9312 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9317 int imm = inst.operands[2].imm;
9319 inst.instruction |= inst.operands[0].reg << 12;
9320 inst.instruction |= inst.operands[1].reg << 16;
9322 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9323 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9324 Bit 4 should be 0. */
9325 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9327 inst.instruction |= imm;
9330 /* XScale instructions. Also sorted arithmetic before move. */
9332 /* Xscale multiply-accumulate (argument parse)
9335 MIAxycc acc0,Rm,Rs. */
9340 inst.instruction |= inst.operands[1].reg;
9341 inst.instruction |= inst.operands[2].reg << 12;
9344 /* Xscale move-accumulator-register (argument parse)
9346 MARcc acc0,RdLo,RdHi. */
9351 inst.instruction |= inst.operands[1].reg << 12;
9352 inst.instruction |= inst.operands[2].reg << 16;
9355 /* Xscale move-register-accumulator (argument parse)
9357 MRAcc RdLo,RdHi,acc0. */
9362 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9363 inst.instruction |= inst.operands[0].reg << 12;
9364 inst.instruction |= inst.operands[1].reg << 16;
9367 /* Encoding functions relevant only to Thumb. */
9369 /* inst.operands[i] is a shifted-register operand; encode
9370 it into inst.instruction in the format used by Thumb32. */
9373 encode_thumb32_shifted_operand (int i)
9375 unsigned int value = inst.reloc.exp.X_add_number;
9376 unsigned int shift = inst.operands[i].shift_kind;
9378 constraint (inst.operands[i].immisreg,
9379 _("shift by register not allowed in thumb mode"));
9380 inst.instruction |= inst.operands[i].reg;
9381 if (shift == SHIFT_RRX)
9382 inst.instruction |= SHIFT_ROR << 4;
9385 constraint (inst.reloc.exp.X_op != O_constant,
9386 _("expression too complex"));
9388 constraint (value > 32
9389 || (value == 32 && (shift == SHIFT_LSL
9390 || shift == SHIFT_ROR)),
9391 _("shift expression is too large"));
9395 else if (value == 32)
9398 inst.instruction |= shift << 4;
9399 inst.instruction |= (value & 0x1c) << 10;
9400 inst.instruction |= (value & 0x03) << 6;
9405 /* inst.operands[i] was set up by parse_address. Encode it into a
9406 Thumb32 format load or store instruction. Reject forms that cannot
9407 be used with such instructions. If is_t is true, reject forms that
9408 cannot be used with a T instruction; if is_d is true, reject forms
9409 that cannot be used with a D instruction. If it is a store insn,
9413 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9415 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9417 constraint (!inst.operands[i].isreg,
9418 _("Instruction does not support =N addresses"));
9420 inst.instruction |= inst.operands[i].reg << 16;
9421 if (inst.operands[i].immisreg)
9423 constraint (is_pc, BAD_PC_ADDRESSING);
9424 constraint (is_t || is_d, _("cannot use register index with this instruction"));
9425 constraint (inst.operands[i].negative,
9426 _("Thumb does not support negative register indexing"));
9427 constraint (inst.operands[i].postind,
9428 _("Thumb does not support register post-indexing"));
9429 constraint (inst.operands[i].writeback,
9430 _("Thumb does not support register indexing with writeback"));
9431 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9432 _("Thumb supports only LSL in shifted register indexing"));
9434 inst.instruction |= inst.operands[i].imm;
9435 if (inst.operands[i].shifted)
9437 constraint (inst.reloc.exp.X_op != O_constant,
9438 _("expression too complex"));
9439 constraint (inst.reloc.exp.X_add_number < 0
9440 || inst.reloc.exp.X_add_number > 3,
9441 _("shift out of range"));
9442 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9444 inst.reloc.type = BFD_RELOC_UNUSED;
9446 else if (inst.operands[i].preind)
9448 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9449 constraint (is_t && inst.operands[i].writeback,
9450 _("cannot use writeback with this instruction"));
9451 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
9456 inst.instruction |= 0x01000000;
9457 if (inst.operands[i].writeback)
9458 inst.instruction |= 0x00200000;
9462 inst.instruction |= 0x00000c00;
9463 if (inst.operands[i].writeback)
9464 inst.instruction |= 0x00000100;
9466 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9468 else if (inst.operands[i].postind)
9470 gas_assert (inst.operands[i].writeback);
9471 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9472 constraint (is_t, _("cannot use post-indexing with this instruction"));
9475 inst.instruction |= 0x00200000;
9477 inst.instruction |= 0x00000900;
9478 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9480 else /* unindexed - only for coprocessor */
9481 inst.error = _("instruction does not accept unindexed addressing");
9484 /* Table of Thumb instructions which exist in both 16- and 32-bit
9485 encodings (the latter only in post-V6T2 cores). The index is the
9486 value used in the insns table below. When there is more than one
9487 possible 16-bit encoding for the instruction, this table always
9489 Also contains several pseudo-instructions used during relaxation. */
9490 #define T16_32_TAB \
9491 X(_adc, 4140, eb400000), \
9492 X(_adcs, 4140, eb500000), \
9493 X(_add, 1c00, eb000000), \
9494 X(_adds, 1c00, eb100000), \
9495 X(_addi, 0000, f1000000), \
9496 X(_addis, 0000, f1100000), \
9497 X(_add_pc,000f, f20f0000), \
9498 X(_add_sp,000d, f10d0000), \
9499 X(_adr, 000f, f20f0000), \
9500 X(_and, 4000, ea000000), \
9501 X(_ands, 4000, ea100000), \
9502 X(_asr, 1000, fa40f000), \
9503 X(_asrs, 1000, fa50f000), \
9504 X(_b, e000, f000b000), \
9505 X(_bcond, d000, f0008000), \
9506 X(_bic, 4380, ea200000), \
9507 X(_bics, 4380, ea300000), \
9508 X(_cmn, 42c0, eb100f00), \
9509 X(_cmp, 2800, ebb00f00), \
9510 X(_cpsie, b660, f3af8400), \
9511 X(_cpsid, b670, f3af8600), \
9512 X(_cpy, 4600, ea4f0000), \
9513 X(_dec_sp,80dd, f1ad0d00), \
9514 X(_eor, 4040, ea800000), \
9515 X(_eors, 4040, ea900000), \
9516 X(_inc_sp,00dd, f10d0d00), \
9517 X(_ldmia, c800, e8900000), \
9518 X(_ldr, 6800, f8500000), \
9519 X(_ldrb, 7800, f8100000), \
9520 X(_ldrh, 8800, f8300000), \
9521 X(_ldrsb, 5600, f9100000), \
9522 X(_ldrsh, 5e00, f9300000), \
9523 X(_ldr_pc,4800, f85f0000), \
9524 X(_ldr_pc2,4800, f85f0000), \
9525 X(_ldr_sp,9800, f85d0000), \
9526 X(_lsl, 0000, fa00f000), \
9527 X(_lsls, 0000, fa10f000), \
9528 X(_lsr, 0800, fa20f000), \
9529 X(_lsrs, 0800, fa30f000), \
9530 X(_mov, 2000, ea4f0000), \
9531 X(_movs, 2000, ea5f0000), \
9532 X(_mul, 4340, fb00f000), \
9533 X(_muls, 4340, ffffffff), /* no 32b muls */ \
9534 X(_mvn, 43c0, ea6f0000), \
9535 X(_mvns, 43c0, ea7f0000), \
9536 X(_neg, 4240, f1c00000), /* rsb #0 */ \
9537 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
9538 X(_orr, 4300, ea400000), \
9539 X(_orrs, 4300, ea500000), \
9540 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
9541 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
9542 X(_rev, ba00, fa90f080), \
9543 X(_rev16, ba40, fa90f090), \
9544 X(_revsh, bac0, fa90f0b0), \
9545 X(_ror, 41c0, fa60f000), \
9546 X(_rors, 41c0, fa70f000), \
9547 X(_sbc, 4180, eb600000), \
9548 X(_sbcs, 4180, eb700000), \
9549 X(_stmia, c000, e8800000), \
9550 X(_str, 6000, f8400000), \
9551 X(_strb, 7000, f8000000), \
9552 X(_strh, 8000, f8200000), \
9553 X(_str_sp,9000, f84d0000), \
9554 X(_sub, 1e00, eba00000), \
9555 X(_subs, 1e00, ebb00000), \
9556 X(_subi, 8000, f1a00000), \
9557 X(_subis, 8000, f1b00000), \
9558 X(_sxtb, b240, fa4ff080), \
9559 X(_sxth, b200, fa0ff080), \
9560 X(_tst, 4200, ea100f00), \
9561 X(_uxtb, b2c0, fa5ff080), \
9562 X(_uxth, b280, fa1ff080), \
9563 X(_nop, bf00, f3af8000), \
9564 X(_yield, bf10, f3af8001), \
9565 X(_wfe, bf20, f3af8002), \
9566 X(_wfi, bf30, f3af8003), \
9567 X(_sev, bf40, f3af8004), \
9568 X(_sevl, bf50, f3af8005)
9570 /* To catch errors in encoding functions, the codes are all offset by
9571 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9572 as 16-bit instructions. */
9573 #define X(a,b,c) T_MNEM##a
9574 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
9577 #define X(a,b,c) 0x##b
9578 static const unsigned short thumb_op16[] = { T16_32_TAB };
9579 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9582 #define X(a,b,c) 0x##c
9583 static const unsigned int thumb_op32[] = { T16_32_TAB };
9584 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9585 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
9589 /* Thumb instruction encoders, in alphabetical order. */
9594 do_t_add_sub_w (void)
9598 Rd = inst.operands[0].reg;
9599 Rn = inst.operands[1].reg;
9601 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9602 is the SP-{plus,minus}-immediate form of the instruction. */
9604 constraint (Rd == REG_PC, BAD_PC);
9606 reject_bad_reg (Rd);
9608 inst.instruction |= (Rn << 16) | (Rd << 8);
9609 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9612 /* Parse an add or subtract instruction. We get here with inst.instruction
9613 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
9620 Rd = inst.operands[0].reg;
9621 Rs = (inst.operands[1].present
9622 ? inst.operands[1].reg /* Rd, Rs, foo */
9623 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9626 set_it_insn_type_last ();
9634 flags = (inst.instruction == T_MNEM_adds
9635 || inst.instruction == T_MNEM_subs);
9637 narrow = !in_it_block ();
9639 narrow = in_it_block ();
9640 if (!inst.operands[2].isreg)
9644 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9646 add = (inst.instruction == T_MNEM_add
9647 || inst.instruction == T_MNEM_adds);
9649 if (inst.size_req != 4)
9651 /* Attempt to use a narrow opcode, with relaxation if
9653 if (Rd == REG_SP && Rs == REG_SP && !flags)
9654 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9655 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9656 opcode = T_MNEM_add_sp;
9657 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9658 opcode = T_MNEM_add_pc;
9659 else if (Rd <= 7 && Rs <= 7 && narrow)
9662 opcode = add ? T_MNEM_addis : T_MNEM_subis;
9664 opcode = add ? T_MNEM_addi : T_MNEM_subi;
9668 inst.instruction = THUMB_OP16(opcode);
9669 inst.instruction |= (Rd << 4) | Rs;
9670 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9671 if (inst.size_req != 2)
9672 inst.relax = opcode;
9675 constraint (inst.size_req == 2, BAD_HIREG);
9677 if (inst.size_req == 4
9678 || (inst.size_req != 2 && !opcode))
9682 constraint (add, BAD_PC);
9683 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9684 _("only SUBS PC, LR, #const allowed"));
9685 constraint (inst.reloc.exp.X_op != O_constant,
9686 _("expression too complex"));
9687 constraint (inst.reloc.exp.X_add_number < 0
9688 || inst.reloc.exp.X_add_number > 0xff,
9689 _("immediate value out of range"));
9690 inst.instruction = T2_SUBS_PC_LR
9691 | inst.reloc.exp.X_add_number;
9692 inst.reloc.type = BFD_RELOC_UNUSED;
9695 else if (Rs == REG_PC)
9697 /* Always use addw/subw. */
9698 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9699 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9703 inst.instruction = THUMB_OP32 (inst.instruction);
9704 inst.instruction = (inst.instruction & 0xe1ffffff)
9707 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9709 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9711 inst.instruction |= Rd << 8;
9712 inst.instruction |= Rs << 16;
9717 unsigned int value = inst.reloc.exp.X_add_number;
9718 unsigned int shift = inst.operands[2].shift_kind;
9720 Rn = inst.operands[2].reg;
9721 /* See if we can do this with a 16-bit instruction. */
9722 if (!inst.operands[2].shifted && inst.size_req != 4)
9724 if (Rd > 7 || Rs > 7 || Rn > 7)
9729 inst.instruction = ((inst.instruction == T_MNEM_adds
9730 || inst.instruction == T_MNEM_add)
9733 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9737 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9739 /* Thumb-1 cores (except v6-M) require at least one high
9740 register in a narrow non flag setting add. */
9741 if (Rd > 7 || Rn > 7
9742 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9743 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
9750 inst.instruction = T_OPCODE_ADD_HI;
9751 inst.instruction |= (Rd & 8) << 4;
9752 inst.instruction |= (Rd & 7);
9753 inst.instruction |= Rn << 3;
9759 constraint (Rd == REG_PC, BAD_PC);
9760 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9761 constraint (Rs == REG_PC, BAD_PC);
9762 reject_bad_reg (Rn);
9764 /* If we get here, it can't be done in 16 bits. */
9765 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9766 _("shift must be constant"));
9767 inst.instruction = THUMB_OP32 (inst.instruction);
9768 inst.instruction |= Rd << 8;
9769 inst.instruction |= Rs << 16;
9770 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9771 _("shift value over 3 not allowed in thumb mode"));
9772 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9773 _("only LSL shift allowed in thumb mode"));
9774 encode_thumb32_shifted_operand (2);
9779 constraint (inst.instruction == T_MNEM_adds
9780 || inst.instruction == T_MNEM_subs,
9783 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9785 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9786 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9789 inst.instruction = (inst.instruction == T_MNEM_add
9791 inst.instruction |= (Rd << 4) | Rs;
9792 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9796 Rn = inst.operands[2].reg;
9797 constraint (inst.operands[2].shifted, _("unshifted register required"));
9799 /* We now have Rd, Rs, and Rn set to registers. */
9800 if (Rd > 7 || Rs > 7 || Rn > 7)
9802 /* Can't do this for SUB. */
9803 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9804 inst.instruction = T_OPCODE_ADD_HI;
9805 inst.instruction |= (Rd & 8) << 4;
9806 inst.instruction |= (Rd & 7);
9808 inst.instruction |= Rn << 3;
9810 inst.instruction |= Rs << 3;
9812 constraint (1, _("dest must overlap one source register"));
9816 inst.instruction = (inst.instruction == T_MNEM_add
9817 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9818 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9828 Rd = inst.operands[0].reg;
9829 reject_bad_reg (Rd);
9831 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9833 /* Defer to section relaxation. */
9834 inst.relax = inst.instruction;
9835 inst.instruction = THUMB_OP16 (inst.instruction);
9836 inst.instruction |= Rd << 4;
9838 else if (unified_syntax && inst.size_req != 2)
9840 /* Generate a 32-bit opcode. */
9841 inst.instruction = THUMB_OP32 (inst.instruction);
9842 inst.instruction |= Rd << 8;
9843 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9844 inst.reloc.pc_rel = 1;
9848 /* Generate a 16-bit opcode. */
9849 inst.instruction = THUMB_OP16 (inst.instruction);
9850 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9851 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
9852 inst.reloc.pc_rel = 1;
9854 inst.instruction |= Rd << 4;
9858 /* Arithmetic instructions for which there is just one 16-bit
9859 instruction encoding, and it allows only two low registers.
9860 For maximal compatibility with ARM syntax, we allow three register
9861 operands even when Thumb-32 instructions are not available, as long
9862 as the first two are identical. For instance, both "sbc r0,r1" and
9863 "sbc r0,r0,r1" are allowed. */
9869 Rd = inst.operands[0].reg;
9870 Rs = (inst.operands[1].present
9871 ? inst.operands[1].reg /* Rd, Rs, foo */
9872 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9873 Rn = inst.operands[2].reg;
9875 reject_bad_reg (Rd);
9876 reject_bad_reg (Rs);
9877 if (inst.operands[2].isreg)
9878 reject_bad_reg (Rn);
9882 if (!inst.operands[2].isreg)
9884 /* For an immediate, we always generate a 32-bit opcode;
9885 section relaxation will shrink it later if possible. */
9886 inst.instruction = THUMB_OP32 (inst.instruction);
9887 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9888 inst.instruction |= Rd << 8;
9889 inst.instruction |= Rs << 16;
9890 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9896 /* See if we can do this with a 16-bit instruction. */
9897 if (THUMB_SETS_FLAGS (inst.instruction))
9898 narrow = !in_it_block ();
9900 narrow = in_it_block ();
9902 if (Rd > 7 || Rn > 7 || Rs > 7)
9904 if (inst.operands[2].shifted)
9906 if (inst.size_req == 4)
9912 inst.instruction = THUMB_OP16 (inst.instruction);
9913 inst.instruction |= Rd;
9914 inst.instruction |= Rn << 3;
9918 /* If we get here, it can't be done in 16 bits. */
9919 constraint (inst.operands[2].shifted
9920 && inst.operands[2].immisreg,
9921 _("shift must be constant"));
9922 inst.instruction = THUMB_OP32 (inst.instruction);
9923 inst.instruction |= Rd << 8;
9924 inst.instruction |= Rs << 16;
9925 encode_thumb32_shifted_operand (2);
9930 /* On its face this is a lie - the instruction does set the
9931 flags. However, the only supported mnemonic in this mode
9933 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9935 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9936 _("unshifted register required"));
9937 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9938 constraint (Rd != Rs,
9939 _("dest and source1 must be the same register"));
9941 inst.instruction = THUMB_OP16 (inst.instruction);
9942 inst.instruction |= Rd;
9943 inst.instruction |= Rn << 3;
9947 /* Similarly, but for instructions where the arithmetic operation is
9948 commutative, so we can allow either of them to be different from
9949 the destination operand in a 16-bit instruction. For instance, all
9950 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9957 Rd = inst.operands[0].reg;
9958 Rs = (inst.operands[1].present
9959 ? inst.operands[1].reg /* Rd, Rs, foo */
9960 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9961 Rn = inst.operands[2].reg;
9963 reject_bad_reg (Rd);
9964 reject_bad_reg (Rs);
9965 if (inst.operands[2].isreg)
9966 reject_bad_reg (Rn);
9970 if (!inst.operands[2].isreg)
9972 /* For an immediate, we always generate a 32-bit opcode;
9973 section relaxation will shrink it later if possible. */
9974 inst.instruction = THUMB_OP32 (inst.instruction);
9975 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9976 inst.instruction |= Rd << 8;
9977 inst.instruction |= Rs << 16;
9978 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9984 /* See if we can do this with a 16-bit instruction. */
9985 if (THUMB_SETS_FLAGS (inst.instruction))
9986 narrow = !in_it_block ();
9988 narrow = in_it_block ();
9990 if (Rd > 7 || Rn > 7 || Rs > 7)
9992 if (inst.operands[2].shifted)
9994 if (inst.size_req == 4)
10001 inst.instruction = THUMB_OP16 (inst.instruction);
10002 inst.instruction |= Rd;
10003 inst.instruction |= Rn << 3;
10008 inst.instruction = THUMB_OP16 (inst.instruction);
10009 inst.instruction |= Rd;
10010 inst.instruction |= Rs << 3;
10015 /* If we get here, it can't be done in 16 bits. */
10016 constraint (inst.operands[2].shifted
10017 && inst.operands[2].immisreg,
10018 _("shift must be constant"));
10019 inst.instruction = THUMB_OP32 (inst.instruction);
10020 inst.instruction |= Rd << 8;
10021 inst.instruction |= Rs << 16;
10022 encode_thumb32_shifted_operand (2);
10027 /* On its face this is a lie - the instruction does set the
10028 flags. However, the only supported mnemonic in this mode
10029 says it doesn't. */
10030 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10032 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10033 _("unshifted register required"));
10034 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10036 inst.instruction = THUMB_OP16 (inst.instruction);
10037 inst.instruction |= Rd;
10040 inst.instruction |= Rn << 3;
10042 inst.instruction |= Rs << 3;
10044 constraint (1, _("dest must overlap one source register"));
10052 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10053 constraint (msb > 32, _("bit-field extends past end of register"));
10054 /* The instruction encoding stores the LSB and MSB,
10055 not the LSB and width. */
10056 Rd = inst.operands[0].reg;
10057 reject_bad_reg (Rd);
10058 inst.instruction |= Rd << 8;
10059 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10060 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10061 inst.instruction |= msb - 1;
10070 Rd = inst.operands[0].reg;
10071 reject_bad_reg (Rd);
10073 /* #0 in second position is alternative syntax for bfc, which is
10074 the same instruction but with REG_PC in the Rm field. */
10075 if (!inst.operands[1].isreg)
10079 Rn = inst.operands[1].reg;
10080 reject_bad_reg (Rn);
10083 msb = inst.operands[2].imm + inst.operands[3].imm;
10084 constraint (msb > 32, _("bit-field extends past end of register"));
10085 /* The instruction encoding stores the LSB and MSB,
10086 not the LSB and width. */
10087 inst.instruction |= Rd << 8;
10088 inst.instruction |= Rn << 16;
10089 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10090 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10091 inst.instruction |= msb - 1;
10099 Rd = inst.operands[0].reg;
10100 Rn = inst.operands[1].reg;
10102 reject_bad_reg (Rd);
10103 reject_bad_reg (Rn);
10105 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10106 _("bit-field extends past end of register"));
10107 inst.instruction |= Rd << 8;
10108 inst.instruction |= Rn << 16;
10109 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10110 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10111 inst.instruction |= inst.operands[3].imm - 1;
10114 /* ARM V5 Thumb BLX (argument parse)
10115 BLX <target_addr> which is BLX(1)
10116 BLX <Rm> which is BLX(2)
10117 Unfortunately, there are two different opcodes for this mnemonic.
10118 So, the insns[].value is not used, and the code here zaps values
10119 into inst.instruction.
10121 ??? How to take advantage of the additional two bits of displacement
10122 available in Thumb32 mode? Need new relocation? */
10127 set_it_insn_type_last ();
10129 if (inst.operands[0].isreg)
10131 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10132 /* We have a register, so this is BLX(2). */
10133 inst.instruction |= inst.operands[0].reg << 3;
10137 /* No register. This must be BLX(1). */
10138 inst.instruction = 0xf000e800;
10139 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10151 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10153 if (in_it_block ())
10155 /* Conditional branches inside IT blocks are encoded as unconditional
10157 cond = COND_ALWAYS;
10162 if (cond != COND_ALWAYS)
10163 opcode = T_MNEM_bcond;
10165 opcode = inst.instruction;
10168 && (inst.size_req == 4
10169 || (inst.size_req != 2
10170 && (inst.operands[0].hasreloc
10171 || inst.reloc.exp.X_op == O_constant))))
10173 inst.instruction = THUMB_OP32(opcode);
10174 if (cond == COND_ALWAYS)
10175 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10178 gas_assert (cond != 0xF);
10179 inst.instruction |= cond << 22;
10180 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10185 inst.instruction = THUMB_OP16(opcode);
10186 if (cond == COND_ALWAYS)
10187 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10190 inst.instruction |= cond << 8;
10191 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10193 /* Allow section relaxation. */
10194 if (unified_syntax && inst.size_req != 2)
10195 inst.relax = opcode;
10197 inst.reloc.type = reloc;
10198 inst.reloc.pc_rel = 1;
10201 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10202 between the two is the maximum immediate allowed - which is passed in
10205 do_t_bkpt_hlt1 (int range)
10207 constraint (inst.cond != COND_ALWAYS,
10208 _("instruction is always unconditional"));
10209 if (inst.operands[0].present)
10211 constraint (inst.operands[0].imm > range,
10212 _("immediate value out of range"));
10213 inst.instruction |= inst.operands[0].imm;
10216 set_it_insn_type (NEUTRAL_IT_INSN);
10222 do_t_bkpt_hlt1 (63);
10228 do_t_bkpt_hlt1 (255);
10232 do_t_branch23 (void)
10234 set_it_insn_type_last ();
10235 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10237 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10238 this file. We used to simply ignore the PLT reloc type here --
10239 the branch encoding is now needed to deal with TLSCALL relocs.
10240 So if we see a PLT reloc now, put it back to how it used to be to
10241 keep the preexisting behaviour. */
10242 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10243 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10245 #if defined(OBJ_COFF)
10246 /* If the destination of the branch is a defined symbol which does not have
10247 the THUMB_FUNC attribute, then we must be calling a function which has
10248 the (interfacearm) attribute. We look for the Thumb entry point to that
10249 function and change the branch to refer to that function instead. */
10250 if ( inst.reloc.exp.X_op == O_symbol
10251 && inst.reloc.exp.X_add_symbol != NULL
10252 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10253 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10254 inst.reloc.exp.X_add_symbol =
10255 find_real_start (inst.reloc.exp.X_add_symbol);
10262 set_it_insn_type_last ();
10263 inst.instruction |= inst.operands[0].reg << 3;
10264 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10265 should cause the alignment to be checked once it is known. This is
10266 because BX PC only works if the instruction is word aligned. */
10274 set_it_insn_type_last ();
10275 Rm = inst.operands[0].reg;
10276 reject_bad_reg (Rm);
10277 inst.instruction |= Rm << 16;
10286 Rd = inst.operands[0].reg;
10287 Rm = inst.operands[1].reg;
10289 reject_bad_reg (Rd);
10290 reject_bad_reg (Rm);
10292 inst.instruction |= Rd << 8;
10293 inst.instruction |= Rm << 16;
10294 inst.instruction |= Rm;
10300 set_it_insn_type (OUTSIDE_IT_INSN);
10301 inst.instruction |= inst.operands[0].imm;
10307 set_it_insn_type (OUTSIDE_IT_INSN);
10309 && (inst.operands[1].present || inst.size_req == 4)
10310 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10312 unsigned int imod = (inst.instruction & 0x0030) >> 4;
10313 inst.instruction = 0xf3af8000;
10314 inst.instruction |= imod << 9;
10315 inst.instruction |= inst.operands[0].imm << 5;
10316 if (inst.operands[1].present)
10317 inst.instruction |= 0x100 | inst.operands[1].imm;
10321 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10322 && (inst.operands[0].imm & 4),
10323 _("selected processor does not support 'A' form "
10324 "of this instruction"));
10325 constraint (inst.operands[1].present || inst.size_req == 4,
10326 _("Thumb does not support the 2-argument "
10327 "form of this instruction"));
10328 inst.instruction |= inst.operands[0].imm;
10332 /* THUMB CPY instruction (argument parse). */
10337 if (inst.size_req == 4)
10339 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10340 inst.instruction |= inst.operands[0].reg << 8;
10341 inst.instruction |= inst.operands[1].reg;
10345 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10346 inst.instruction |= (inst.operands[0].reg & 0x7);
10347 inst.instruction |= inst.operands[1].reg << 3;
10354 set_it_insn_type (OUTSIDE_IT_INSN);
10355 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10356 inst.instruction |= inst.operands[0].reg;
10357 inst.reloc.pc_rel = 1;
10358 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10364 inst.instruction |= inst.operands[0].imm;
10370 unsigned Rd, Rn, Rm;
10372 Rd = inst.operands[0].reg;
10373 Rn = (inst.operands[1].present
10374 ? inst.operands[1].reg : Rd);
10375 Rm = inst.operands[2].reg;
10377 reject_bad_reg (Rd);
10378 reject_bad_reg (Rn);
10379 reject_bad_reg (Rm);
10381 inst.instruction |= Rd << 8;
10382 inst.instruction |= Rn << 16;
10383 inst.instruction |= Rm;
10389 if (unified_syntax && inst.size_req == 4)
10390 inst.instruction = THUMB_OP32 (inst.instruction);
10392 inst.instruction = THUMB_OP16 (inst.instruction);
10398 unsigned int cond = inst.operands[0].imm;
10400 set_it_insn_type (IT_INSN);
10401 now_it.mask = (inst.instruction & 0xf) | 0x10;
10403 now_it.warn_deprecated = FALSE;
10405 /* If the condition is a negative condition, invert the mask. */
10406 if ((cond & 0x1) == 0x0)
10408 unsigned int mask = inst.instruction & 0x000f;
10410 if ((mask & 0x7) == 0)
10412 /* No conversion needed. */
10413 now_it.block_length = 1;
10415 else if ((mask & 0x3) == 0)
10418 now_it.block_length = 2;
10420 else if ((mask & 0x1) == 0)
10423 now_it.block_length = 3;
10428 now_it.block_length = 4;
10431 inst.instruction &= 0xfff0;
10432 inst.instruction |= mask;
10435 inst.instruction |= cond << 4;
10438 /* Helper function used for both push/pop and ldm/stm. */
10440 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10444 load = (inst.instruction & (1 << 20)) != 0;
10446 if (mask & (1 << 13))
10447 inst.error = _("SP not allowed in register list");
10449 if ((mask & (1 << base)) != 0
10451 inst.error = _("having the base register in the register list when "
10452 "using write back is UNPREDICTABLE");
10456 if (mask & (1 << 15))
10458 if (mask & (1 << 14))
10459 inst.error = _("LR and PC should not both be in register list");
10461 set_it_insn_type_last ();
10466 if (mask & (1 << 15))
10467 inst.error = _("PC not allowed in register list");
10470 if ((mask & (mask - 1)) == 0)
10472 /* Single register transfers implemented as str/ldr. */
10475 if (inst.instruction & (1 << 23))
10476 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10478 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10482 if (inst.instruction & (1 << 23))
10483 inst.instruction = 0x00800000; /* ia -> [base] */
10485 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10488 inst.instruction |= 0xf8400000;
10490 inst.instruction |= 0x00100000;
10492 mask = ffs (mask) - 1;
10495 else if (writeback)
10496 inst.instruction |= WRITE_BACK;
10498 inst.instruction |= mask;
10499 inst.instruction |= base << 16;
10505 /* This really doesn't seem worth it. */
10506 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10507 _("expression too complex"));
10508 constraint (inst.operands[1].writeback,
10509 _("Thumb load/store multiple does not support {reglist}^"));
10511 if (unified_syntax)
10513 bfd_boolean narrow;
10517 /* See if we can use a 16-bit instruction. */
10518 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10519 && inst.size_req != 4
10520 && !(inst.operands[1].imm & ~0xff))
10522 mask = 1 << inst.operands[0].reg;
10524 if (inst.operands[0].reg <= 7)
10526 if (inst.instruction == T_MNEM_stmia
10527 ? inst.operands[0].writeback
10528 : (inst.operands[0].writeback
10529 == !(inst.operands[1].imm & mask)))
10531 if (inst.instruction == T_MNEM_stmia
10532 && (inst.operands[1].imm & mask)
10533 && (inst.operands[1].imm & (mask - 1)))
10534 as_warn (_("value stored for r%d is UNKNOWN"),
10535 inst.operands[0].reg);
10537 inst.instruction = THUMB_OP16 (inst.instruction);
10538 inst.instruction |= inst.operands[0].reg << 8;
10539 inst.instruction |= inst.operands[1].imm;
10542 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10544 /* This means 1 register in reg list one of 3 situations:
10545 1. Instruction is stmia, but without writeback.
10546 2. lmdia without writeback, but with Rn not in
10548 3. ldmia with writeback, but with Rn in reglist.
10549 Case 3 is UNPREDICTABLE behaviour, so we handle
10550 case 1 and 2 which can be converted into a 16-bit
10551 str or ldr. The SP cases are handled below. */
10552 unsigned long opcode;
10553 /* First, record an error for Case 3. */
10554 if (inst.operands[1].imm & mask
10555 && inst.operands[0].writeback)
10557 _("having the base register in the register list when "
10558 "using write back is UNPREDICTABLE");
10560 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10562 inst.instruction = THUMB_OP16 (opcode);
10563 inst.instruction |= inst.operands[0].reg << 3;
10564 inst.instruction |= (ffs (inst.operands[1].imm)-1);
10568 else if (inst.operands[0] .reg == REG_SP)
10570 if (inst.operands[0].writeback)
10573 THUMB_OP16 (inst.instruction == T_MNEM_stmia
10574 ? T_MNEM_push : T_MNEM_pop);
10575 inst.instruction |= inst.operands[1].imm;
10578 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10581 THUMB_OP16 (inst.instruction == T_MNEM_stmia
10582 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10583 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
10591 if (inst.instruction < 0xffff)
10592 inst.instruction = THUMB_OP32 (inst.instruction);
10594 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10595 inst.operands[0].writeback);
10600 constraint (inst.operands[0].reg > 7
10601 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10602 constraint (inst.instruction != T_MNEM_ldmia
10603 && inst.instruction != T_MNEM_stmia,
10604 _("Thumb-2 instruction only valid in unified syntax"));
10605 if (inst.instruction == T_MNEM_stmia)
10607 if (!inst.operands[0].writeback)
10608 as_warn (_("this instruction will write back the base register"));
10609 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10610 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10611 as_warn (_("value stored for r%d is UNKNOWN"),
10612 inst.operands[0].reg);
10616 if (!inst.operands[0].writeback
10617 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10618 as_warn (_("this instruction will write back the base register"));
10619 else if (inst.operands[0].writeback
10620 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10621 as_warn (_("this instruction will not write back the base register"));
10624 inst.instruction = THUMB_OP16 (inst.instruction);
10625 inst.instruction |= inst.operands[0].reg << 8;
10626 inst.instruction |= inst.operands[1].imm;
10633 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10634 || inst.operands[1].postind || inst.operands[1].writeback
10635 || inst.operands[1].immisreg || inst.operands[1].shifted
10636 || inst.operands[1].negative,
10639 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10641 inst.instruction |= inst.operands[0].reg << 12;
10642 inst.instruction |= inst.operands[1].reg << 16;
10643 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10649 if (!inst.operands[1].present)
10651 constraint (inst.operands[0].reg == REG_LR,
10652 _("r14 not allowed as first register "
10653 "when second register is omitted"));
10654 inst.operands[1].reg = inst.operands[0].reg + 1;
10656 constraint (inst.operands[0].reg == inst.operands[1].reg,
10659 inst.instruction |= inst.operands[0].reg << 12;
10660 inst.instruction |= inst.operands[1].reg << 8;
10661 inst.instruction |= inst.operands[2].reg << 16;
10667 unsigned long opcode;
10670 if (inst.operands[0].isreg
10671 && !inst.operands[0].preind
10672 && inst.operands[0].reg == REG_PC)
10673 set_it_insn_type_last ();
10675 opcode = inst.instruction;
10676 if (unified_syntax)
10678 if (!inst.operands[1].isreg)
10680 if (opcode <= 0xffff)
10681 inst.instruction = THUMB_OP32 (opcode);
10682 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10685 if (inst.operands[1].isreg
10686 && !inst.operands[1].writeback
10687 && !inst.operands[1].shifted && !inst.operands[1].postind
10688 && !inst.operands[1].negative && inst.operands[0].reg <= 7
10689 && opcode <= 0xffff
10690 && inst.size_req != 4)
10692 /* Insn may have a 16-bit form. */
10693 Rn = inst.operands[1].reg;
10694 if (inst.operands[1].immisreg)
10696 inst.instruction = THUMB_OP16 (opcode);
10698 if (Rn <= 7 && inst.operands[1].imm <= 7)
10700 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10701 reject_bad_reg (inst.operands[1].imm);
10703 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10704 && opcode != T_MNEM_ldrsb)
10705 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10706 || (Rn == REG_SP && opcode == T_MNEM_str))
10713 if (inst.reloc.pc_rel)
10714 opcode = T_MNEM_ldr_pc2;
10716 opcode = T_MNEM_ldr_pc;
10720 if (opcode == T_MNEM_ldr)
10721 opcode = T_MNEM_ldr_sp;
10723 opcode = T_MNEM_str_sp;
10725 inst.instruction = inst.operands[0].reg << 8;
10729 inst.instruction = inst.operands[0].reg;
10730 inst.instruction |= inst.operands[1].reg << 3;
10732 inst.instruction |= THUMB_OP16 (opcode);
10733 if (inst.size_req == 2)
10734 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10736 inst.relax = opcode;
10740 /* Definitely a 32-bit variant. */
10742 /* Warning for Erratum 752419. */
10743 if (opcode == T_MNEM_ldr
10744 && inst.operands[0].reg == REG_SP
10745 && inst.operands[1].writeback == 1
10746 && !inst.operands[1].immisreg)
10748 if (no_cpu_selected ()
10749 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10750 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10751 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10752 as_warn (_("This instruction may be unpredictable "
10753 "if executed on M-profile cores "
10754 "with interrupts enabled."));
10757 /* Do some validations regarding addressing modes. */
10758 if (inst.operands[1].immisreg)
10759 reject_bad_reg (inst.operands[1].imm);
10761 constraint (inst.operands[1].writeback == 1
10762 && inst.operands[0].reg == inst.operands[1].reg,
10765 inst.instruction = THUMB_OP32 (opcode);
10766 inst.instruction |= inst.operands[0].reg << 12;
10767 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10768 check_ldr_r15_aligned ();
10772 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10774 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10776 /* Only [Rn,Rm] is acceptable. */
10777 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10778 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10779 || inst.operands[1].postind || inst.operands[1].shifted
10780 || inst.operands[1].negative,
10781 _("Thumb does not support this addressing mode"));
10782 inst.instruction = THUMB_OP16 (inst.instruction);
10786 inst.instruction = THUMB_OP16 (inst.instruction);
10787 if (!inst.operands[1].isreg)
10788 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10791 constraint (!inst.operands[1].preind
10792 || inst.operands[1].shifted
10793 || inst.operands[1].writeback,
10794 _("Thumb does not support this addressing mode"));
10795 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10797 constraint (inst.instruction & 0x0600,
10798 _("byte or halfword not valid for base register"));
10799 constraint (inst.operands[1].reg == REG_PC
10800 && !(inst.instruction & THUMB_LOAD_BIT),
10801 _("r15 based store not allowed"));
10802 constraint (inst.operands[1].immisreg,
10803 _("invalid base register for register offset"));
10805 if (inst.operands[1].reg == REG_PC)
10806 inst.instruction = T_OPCODE_LDR_PC;
10807 else if (inst.instruction & THUMB_LOAD_BIT)
10808 inst.instruction = T_OPCODE_LDR_SP;
10810 inst.instruction = T_OPCODE_STR_SP;
10812 inst.instruction |= inst.operands[0].reg << 8;
10813 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10817 constraint (inst.operands[1].reg > 7, BAD_HIREG);
10818 if (!inst.operands[1].immisreg)
10820 /* Immediate offset. */
10821 inst.instruction |= inst.operands[0].reg;
10822 inst.instruction |= inst.operands[1].reg << 3;
10823 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10827 /* Register offset. */
10828 constraint (inst.operands[1].imm > 7, BAD_HIREG);
10829 constraint (inst.operands[1].negative,
10830 _("Thumb does not support this addressing mode"));
10833 switch (inst.instruction)
10835 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10836 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10837 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10838 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10839 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10840 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10841 case 0x5600 /* ldrsb */:
10842 case 0x5e00 /* ldrsh */: break;
10846 inst.instruction |= inst.operands[0].reg;
10847 inst.instruction |= inst.operands[1].reg << 3;
10848 inst.instruction |= inst.operands[1].imm << 6;
10854 if (!inst.operands[1].present)
10856 inst.operands[1].reg = inst.operands[0].reg + 1;
10857 constraint (inst.operands[0].reg == REG_LR,
10858 _("r14 not allowed here"));
10859 constraint (inst.operands[0].reg == REG_R12,
10860 _("r12 not allowed here"));
10863 if (inst.operands[2].writeback
10864 && (inst.operands[0].reg == inst.operands[2].reg
10865 || inst.operands[1].reg == inst.operands[2].reg))
10866 as_warn (_("base register written back, and overlaps "
10867 "one of transfer registers"));
10869 inst.instruction |= inst.operands[0].reg << 12;
10870 inst.instruction |= inst.operands[1].reg << 8;
10871 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10877 inst.instruction |= inst.operands[0].reg << 12;
10878 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10884 unsigned Rd, Rn, Rm, Ra;
10886 Rd = inst.operands[0].reg;
10887 Rn = inst.operands[1].reg;
10888 Rm = inst.operands[2].reg;
10889 Ra = inst.operands[3].reg;
10891 reject_bad_reg (Rd);
10892 reject_bad_reg (Rn);
10893 reject_bad_reg (Rm);
10894 reject_bad_reg (Ra);
10896 inst.instruction |= Rd << 8;
10897 inst.instruction |= Rn << 16;
10898 inst.instruction |= Rm;
10899 inst.instruction |= Ra << 12;
10905 unsigned RdLo, RdHi, Rn, Rm;
10907 RdLo = inst.operands[0].reg;
10908 RdHi = inst.operands[1].reg;
10909 Rn = inst.operands[2].reg;
10910 Rm = inst.operands[3].reg;
10912 reject_bad_reg (RdLo);
10913 reject_bad_reg (RdHi);
10914 reject_bad_reg (Rn);
10915 reject_bad_reg (Rm);
10917 inst.instruction |= RdLo << 12;
10918 inst.instruction |= RdHi << 8;
10919 inst.instruction |= Rn << 16;
10920 inst.instruction |= Rm;
10924 do_t_mov_cmp (void)
10928 Rn = inst.operands[0].reg;
10929 Rm = inst.operands[1].reg;
10932 set_it_insn_type_last ();
10934 if (unified_syntax)
10936 int r0off = (inst.instruction == T_MNEM_mov
10937 || inst.instruction == T_MNEM_movs) ? 8 : 16;
10938 unsigned long opcode;
10939 bfd_boolean narrow;
10940 bfd_boolean low_regs;
10942 low_regs = (Rn <= 7 && Rm <= 7);
10943 opcode = inst.instruction;
10944 if (in_it_block ())
10945 narrow = opcode != T_MNEM_movs;
10947 narrow = opcode != T_MNEM_movs || low_regs;
10948 if (inst.size_req == 4
10949 || inst.operands[1].shifted)
10952 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10953 if (opcode == T_MNEM_movs && inst.operands[1].isreg
10954 && !inst.operands[1].shifted
10958 inst.instruction = T2_SUBS_PC_LR;
10962 if (opcode == T_MNEM_cmp)
10964 constraint (Rn == REG_PC, BAD_PC);
10967 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10969 warn_deprecated_sp (Rm);
10970 /* R15 was documented as a valid choice for Rm in ARMv6,
10971 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10972 tools reject R15, so we do too. */
10973 constraint (Rm == REG_PC, BAD_PC);
10976 reject_bad_reg (Rm);
10978 else if (opcode == T_MNEM_mov
10979 || opcode == T_MNEM_movs)
10981 if (inst.operands[1].isreg)
10983 if (opcode == T_MNEM_movs)
10985 reject_bad_reg (Rn);
10986 reject_bad_reg (Rm);
10990 /* This is mov.n. */
10991 if ((Rn == REG_SP || Rn == REG_PC)
10992 && (Rm == REG_SP || Rm == REG_PC))
10994 as_warn (_("Use of r%u as a source register is "
10995 "deprecated when r%u is the destination "
10996 "register."), Rm, Rn);
11001 /* This is mov.w. */
11002 constraint (Rn == REG_PC, BAD_PC);
11003 constraint (Rm == REG_PC, BAD_PC);
11004 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11008 reject_bad_reg (Rn);
11011 if (!inst.operands[1].isreg)
11013 /* Immediate operand. */
11014 if (!in_it_block () && opcode == T_MNEM_mov)
11016 if (low_regs && narrow)
11018 inst.instruction = THUMB_OP16 (opcode);
11019 inst.instruction |= Rn << 8;
11020 if (inst.size_req == 2)
11021 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11023 inst.relax = opcode;
11027 inst.instruction = THUMB_OP32 (inst.instruction);
11028 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11029 inst.instruction |= Rn << r0off;
11030 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11033 else if (inst.operands[1].shifted && inst.operands[1].immisreg
11034 && (inst.instruction == T_MNEM_mov
11035 || inst.instruction == T_MNEM_movs))
11037 /* Register shifts are encoded as separate shift instructions. */
11038 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11040 if (in_it_block ())
11045 if (inst.size_req == 4)
11048 if (!low_regs || inst.operands[1].imm > 7)
11054 switch (inst.operands[1].shift_kind)
11057 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11060 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11063 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11066 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11072 inst.instruction = opcode;
11075 inst.instruction |= Rn;
11076 inst.instruction |= inst.operands[1].imm << 3;
11081 inst.instruction |= CONDS_BIT;
11083 inst.instruction |= Rn << 8;
11084 inst.instruction |= Rm << 16;
11085 inst.instruction |= inst.operands[1].imm;
11090 /* Some mov with immediate shift have narrow variants.
11091 Register shifts are handled above. */
11092 if (low_regs && inst.operands[1].shifted
11093 && (inst.instruction == T_MNEM_mov
11094 || inst.instruction == T_MNEM_movs))
11096 if (in_it_block ())
11097 narrow = (inst.instruction == T_MNEM_mov);
11099 narrow = (inst.instruction == T_MNEM_movs);
11104 switch (inst.operands[1].shift_kind)
11106 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11107 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11108 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11109 default: narrow = FALSE; break;
11115 inst.instruction |= Rn;
11116 inst.instruction |= Rm << 3;
11117 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11121 inst.instruction = THUMB_OP32 (inst.instruction);
11122 inst.instruction |= Rn << r0off;
11123 encode_thumb32_shifted_operand (1);
11127 switch (inst.instruction)
11130 /* In v4t or v5t a move of two lowregs produces unpredictable
11131 results. Don't allow this. */
11134 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11135 "MOV Rd, Rs with two low registers is not "
11136 "permitted on this architecture");
11137 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11141 inst.instruction = T_OPCODE_MOV_HR;
11142 inst.instruction |= (Rn & 0x8) << 4;
11143 inst.instruction |= (Rn & 0x7);
11144 inst.instruction |= Rm << 3;
11148 /* We know we have low registers at this point.
11149 Generate LSLS Rd, Rs, #0. */
11150 inst.instruction = T_OPCODE_LSL_I;
11151 inst.instruction |= Rn;
11152 inst.instruction |= Rm << 3;
11158 inst.instruction = T_OPCODE_CMP_LR;
11159 inst.instruction |= Rn;
11160 inst.instruction |= Rm << 3;
11164 inst.instruction = T_OPCODE_CMP_HR;
11165 inst.instruction |= (Rn & 0x8) << 4;
11166 inst.instruction |= (Rn & 0x7);
11167 inst.instruction |= Rm << 3;
11174 inst.instruction = THUMB_OP16 (inst.instruction);
11176 /* PR 10443: Do not silently ignore shifted operands. */
11177 constraint (inst.operands[1].shifted,
11178 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11180 if (inst.operands[1].isreg)
11182 if (Rn < 8 && Rm < 8)
11184 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11185 since a MOV instruction produces unpredictable results. */
11186 if (inst.instruction == T_OPCODE_MOV_I8)
11187 inst.instruction = T_OPCODE_ADD_I3;
11189 inst.instruction = T_OPCODE_CMP_LR;
11191 inst.instruction |= Rn;
11192 inst.instruction |= Rm << 3;
11196 if (inst.instruction == T_OPCODE_MOV_I8)
11197 inst.instruction = T_OPCODE_MOV_HR;
11199 inst.instruction = T_OPCODE_CMP_HR;
11205 constraint (Rn > 7,
11206 _("only lo regs allowed with immediate"));
11207 inst.instruction |= Rn << 8;
11208 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11219 top = (inst.instruction & 0x00800000) != 0;
11220 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11222 constraint (top, _(":lower16: not allowed this instruction"));
11223 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11225 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11227 constraint (!top, _(":upper16: not allowed this instruction"));
11228 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11231 Rd = inst.operands[0].reg;
11232 reject_bad_reg (Rd);
11234 inst.instruction |= Rd << 8;
11235 if (inst.reloc.type == BFD_RELOC_UNUSED)
11237 imm = inst.reloc.exp.X_add_number;
11238 inst.instruction |= (imm & 0xf000) << 4;
11239 inst.instruction |= (imm & 0x0800) << 15;
11240 inst.instruction |= (imm & 0x0700) << 4;
11241 inst.instruction |= (imm & 0x00ff);
11246 do_t_mvn_tst (void)
11250 Rn = inst.operands[0].reg;
11251 Rm = inst.operands[1].reg;
11253 if (inst.instruction == T_MNEM_cmp
11254 || inst.instruction == T_MNEM_cmn)
11255 constraint (Rn == REG_PC, BAD_PC);
11257 reject_bad_reg (Rn);
11258 reject_bad_reg (Rm);
11260 if (unified_syntax)
11262 int r0off = (inst.instruction == T_MNEM_mvn
11263 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11264 bfd_boolean narrow;
11266 if (inst.size_req == 4
11267 || inst.instruction > 0xffff
11268 || inst.operands[1].shifted
11269 || Rn > 7 || Rm > 7)
11271 else if (inst.instruction == T_MNEM_cmn)
11273 else if (THUMB_SETS_FLAGS (inst.instruction))
11274 narrow = !in_it_block ();
11276 narrow = in_it_block ();
11278 if (!inst.operands[1].isreg)
11280 /* For an immediate, we always generate a 32-bit opcode;
11281 section relaxation will shrink it later if possible. */
11282 if (inst.instruction < 0xffff)
11283 inst.instruction = THUMB_OP32 (inst.instruction);
11284 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11285 inst.instruction |= Rn << r0off;
11286 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11290 /* See if we can do this with a 16-bit instruction. */
11293 inst.instruction = THUMB_OP16 (inst.instruction);
11294 inst.instruction |= Rn;
11295 inst.instruction |= Rm << 3;
11299 constraint (inst.operands[1].shifted
11300 && inst.operands[1].immisreg,
11301 _("shift must be constant"));
11302 if (inst.instruction < 0xffff)
11303 inst.instruction = THUMB_OP32 (inst.instruction);
11304 inst.instruction |= Rn << r0off;
11305 encode_thumb32_shifted_operand (1);
11311 constraint (inst.instruction > 0xffff
11312 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11313 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11314 _("unshifted register required"));
11315 constraint (Rn > 7 || Rm > 7,
11318 inst.instruction = THUMB_OP16 (inst.instruction);
11319 inst.instruction |= Rn;
11320 inst.instruction |= Rm << 3;
11329 if (do_vfp_nsyn_mrs () == SUCCESS)
11332 Rd = inst.operands[0].reg;
11333 reject_bad_reg (Rd);
11334 inst.instruction |= Rd << 8;
11336 if (inst.operands[1].isreg)
11338 unsigned br = inst.operands[1].reg;
11339 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11340 as_bad (_("bad register for mrs"));
11342 inst.instruction |= br & (0xf << 16);
11343 inst.instruction |= (br & 0x300) >> 4;
11344 inst.instruction |= (br & SPSR_BIT) >> 2;
11348 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11350 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11352 /* PR gas/12698: The constraint is only applied for m_profile.
11353 If the user has specified -march=all, we want to ignore it as
11354 we are building for any CPU type, including non-m variants. */
11355 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11356 constraint ((flags != 0) && m_profile, _("selected processor does "
11357 "not support requested special purpose register"));
11360 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11362 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11363 _("'APSR', 'CPSR' or 'SPSR' expected"));
11365 inst.instruction |= (flags & SPSR_BIT) >> 2;
11366 inst.instruction |= inst.operands[1].imm & 0xff;
11367 inst.instruction |= 0xf0000;
11377 if (do_vfp_nsyn_msr () == SUCCESS)
11380 constraint (!inst.operands[1].isreg,
11381 _("Thumb encoding does not support an immediate here"));
11383 if (inst.operands[0].isreg)
11384 flags = (int)(inst.operands[0].reg);
11386 flags = inst.operands[0].imm;
11388 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11390 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11392 /* PR gas/12698: The constraint is only applied for m_profile.
11393 If the user has specified -march=all, we want to ignore it as
11394 we are building for any CPU type, including non-m variants. */
11395 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11396 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11397 && (bits & ~(PSR_s | PSR_f)) != 0)
11398 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11399 && bits != PSR_f)) && m_profile,
11400 _("selected processor does not support requested special "
11401 "purpose register"));
11404 constraint ((flags & 0xff) != 0, _("selected processor does not support "
11405 "requested special purpose register"));
11407 Rn = inst.operands[1].reg;
11408 reject_bad_reg (Rn);
11410 inst.instruction |= (flags & SPSR_BIT) >> 2;
11411 inst.instruction |= (flags & 0xf0000) >> 8;
11412 inst.instruction |= (flags & 0x300) >> 4;
11413 inst.instruction |= (flags & 0xff);
11414 inst.instruction |= Rn << 16;
11420 bfd_boolean narrow;
11421 unsigned Rd, Rn, Rm;
11423 if (!inst.operands[2].present)
11424 inst.operands[2].reg = inst.operands[0].reg;
11426 Rd = inst.operands[0].reg;
11427 Rn = inst.operands[1].reg;
11428 Rm = inst.operands[2].reg;
11430 if (unified_syntax)
11432 if (inst.size_req == 4
11438 else if (inst.instruction == T_MNEM_muls)
11439 narrow = !in_it_block ();
11441 narrow = in_it_block ();
11445 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11446 constraint (Rn > 7 || Rm > 7,
11453 /* 16-bit MULS/Conditional MUL. */
11454 inst.instruction = THUMB_OP16 (inst.instruction);
11455 inst.instruction |= Rd;
11458 inst.instruction |= Rm << 3;
11460 inst.instruction |= Rn << 3;
11462 constraint (1, _("dest must overlap one source register"));
11466 constraint (inst.instruction != T_MNEM_mul,
11467 _("Thumb-2 MUL must not set flags"));
11469 inst.instruction = THUMB_OP32 (inst.instruction);
11470 inst.instruction |= Rd << 8;
11471 inst.instruction |= Rn << 16;
11472 inst.instruction |= Rm << 0;
11474 reject_bad_reg (Rd);
11475 reject_bad_reg (Rn);
11476 reject_bad_reg (Rm);
11483 unsigned RdLo, RdHi, Rn, Rm;
11485 RdLo = inst.operands[0].reg;
11486 RdHi = inst.operands[1].reg;
11487 Rn = inst.operands[2].reg;
11488 Rm = inst.operands[3].reg;
11490 reject_bad_reg (RdLo);
11491 reject_bad_reg (RdHi);
11492 reject_bad_reg (Rn);
11493 reject_bad_reg (Rm);
11495 inst.instruction |= RdLo << 12;
11496 inst.instruction |= RdHi << 8;
11497 inst.instruction |= Rn << 16;
11498 inst.instruction |= Rm;
11501 as_tsktsk (_("rdhi and rdlo must be different"));
11507 set_it_insn_type (NEUTRAL_IT_INSN);
11509 if (unified_syntax)
11511 if (inst.size_req == 4 || inst.operands[0].imm > 15)
11513 inst.instruction = THUMB_OP32 (inst.instruction);
11514 inst.instruction |= inst.operands[0].imm;
11518 /* PR9722: Check for Thumb2 availability before
11519 generating a thumb2 nop instruction. */
11520 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11522 inst.instruction = THUMB_OP16 (inst.instruction);
11523 inst.instruction |= inst.operands[0].imm << 4;
11526 inst.instruction = 0x46c0;
11531 constraint (inst.operands[0].present,
11532 _("Thumb does not support NOP with hints"));
11533 inst.instruction = 0x46c0;
11540 if (unified_syntax)
11542 bfd_boolean narrow;
11544 if (THUMB_SETS_FLAGS (inst.instruction))
11545 narrow = !in_it_block ();
11547 narrow = in_it_block ();
11548 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11550 if (inst.size_req == 4)
11555 inst.instruction = THUMB_OP32 (inst.instruction);
11556 inst.instruction |= inst.operands[0].reg << 8;
11557 inst.instruction |= inst.operands[1].reg << 16;
11561 inst.instruction = THUMB_OP16 (inst.instruction);
11562 inst.instruction |= inst.operands[0].reg;
11563 inst.instruction |= inst.operands[1].reg << 3;
11568 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11570 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11572 inst.instruction = THUMB_OP16 (inst.instruction);
11573 inst.instruction |= inst.operands[0].reg;
11574 inst.instruction |= inst.operands[1].reg << 3;
11583 Rd = inst.operands[0].reg;
11584 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11586 reject_bad_reg (Rd);
11587 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
11588 reject_bad_reg (Rn);
11590 inst.instruction |= Rd << 8;
11591 inst.instruction |= Rn << 16;
11593 if (!inst.operands[2].isreg)
11595 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11596 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11602 Rm = inst.operands[2].reg;
11603 reject_bad_reg (Rm);
11605 constraint (inst.operands[2].shifted
11606 && inst.operands[2].immisreg,
11607 _("shift must be constant"));
11608 encode_thumb32_shifted_operand (2);
11615 unsigned Rd, Rn, Rm;
11617 Rd = inst.operands[0].reg;
11618 Rn = inst.operands[1].reg;
11619 Rm = inst.operands[2].reg;
11621 reject_bad_reg (Rd);
11622 reject_bad_reg (Rn);
11623 reject_bad_reg (Rm);
11625 inst.instruction |= Rd << 8;
11626 inst.instruction |= Rn << 16;
11627 inst.instruction |= Rm;
11628 if (inst.operands[3].present)
11630 unsigned int val = inst.reloc.exp.X_add_number;
11631 constraint (inst.reloc.exp.X_op != O_constant,
11632 _("expression too complex"));
11633 inst.instruction |= (val & 0x1c) << 10;
11634 inst.instruction |= (val & 0x03) << 6;
11641 if (!inst.operands[3].present)
11645 inst.instruction &= ~0x00000020;
11647 /* PR 10168. Swap the Rm and Rn registers. */
11648 Rtmp = inst.operands[1].reg;
11649 inst.operands[1].reg = inst.operands[2].reg;
11650 inst.operands[2].reg = Rtmp;
11658 if (inst.operands[0].immisreg)
11659 reject_bad_reg (inst.operands[0].imm);
11661 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11665 do_t_push_pop (void)
11669 constraint (inst.operands[0].writeback,
11670 _("push/pop do not support {reglist}^"));
11671 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11672 _("expression too complex"));
11674 mask = inst.operands[0].imm;
11675 if ((mask & ~0xff) == 0)
11676 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11677 else if ((inst.instruction == T_MNEM_push
11678 && (mask & ~0xff) == 1 << REG_LR)
11679 || (inst.instruction == T_MNEM_pop
11680 && (mask & ~0xff) == 1 << REG_PC))
11682 inst.instruction = THUMB_OP16 (inst.instruction);
11683 inst.instruction |= THUMB_PP_PC_LR;
11684 inst.instruction |= mask & 0xff;
11686 else if (unified_syntax)
11688 inst.instruction = THUMB_OP32 (inst.instruction);
11689 encode_thumb2_ldmstm (13, mask, TRUE);
11693 inst.error = _("invalid register list to push/pop instruction");
11703 Rd = inst.operands[0].reg;
11704 Rm = inst.operands[1].reg;
11706 reject_bad_reg (Rd);
11707 reject_bad_reg (Rm);
11709 inst.instruction |= Rd << 8;
11710 inst.instruction |= Rm << 16;
11711 inst.instruction |= Rm;
11719 Rd = inst.operands[0].reg;
11720 Rm = inst.operands[1].reg;
11722 reject_bad_reg (Rd);
11723 reject_bad_reg (Rm);
11725 if (Rd <= 7 && Rm <= 7
11726 && inst.size_req != 4)
11728 inst.instruction = THUMB_OP16 (inst.instruction);
11729 inst.instruction |= Rd;
11730 inst.instruction |= Rm << 3;
11732 else if (unified_syntax)
11734 inst.instruction = THUMB_OP32 (inst.instruction);
11735 inst.instruction |= Rd << 8;
11736 inst.instruction |= Rm << 16;
11737 inst.instruction |= Rm;
11740 inst.error = BAD_HIREG;
11748 Rd = inst.operands[0].reg;
11749 Rm = inst.operands[1].reg;
11751 reject_bad_reg (Rd);
11752 reject_bad_reg (Rm);
11754 inst.instruction |= Rd << 8;
11755 inst.instruction |= Rm;
11763 Rd = inst.operands[0].reg;
11764 Rs = (inst.operands[1].present
11765 ? inst.operands[1].reg /* Rd, Rs, foo */
11766 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
11768 reject_bad_reg (Rd);
11769 reject_bad_reg (Rs);
11770 if (inst.operands[2].isreg)
11771 reject_bad_reg (inst.operands[2].reg);
11773 inst.instruction |= Rd << 8;
11774 inst.instruction |= Rs << 16;
11775 if (!inst.operands[2].isreg)
11777 bfd_boolean narrow;
11779 if ((inst.instruction & 0x00100000) != 0)
11780 narrow = !in_it_block ();
11782 narrow = in_it_block ();
11784 if (Rd > 7 || Rs > 7)
11787 if (inst.size_req == 4 || !unified_syntax)
11790 if (inst.reloc.exp.X_op != O_constant
11791 || inst.reloc.exp.X_add_number != 0)
11794 /* Turn rsb #0 into 16-bit neg. We should probably do this via
11795 relaxation, but it doesn't seem worth the hassle. */
11798 inst.reloc.type = BFD_RELOC_UNUSED;
11799 inst.instruction = THUMB_OP16 (T_MNEM_negs);
11800 inst.instruction |= Rs << 3;
11801 inst.instruction |= Rd;
11805 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11806 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11810 encode_thumb32_shifted_operand (2);
11816 if (warn_on_deprecated
11817 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11818 as_warn (_("setend use is deprecated for ARMv8"));
11820 set_it_insn_type (OUTSIDE_IT_INSN);
11821 if (inst.operands[0].imm)
11822 inst.instruction |= 0x8;
11828 if (!inst.operands[1].present)
11829 inst.operands[1].reg = inst.operands[0].reg;
11831 if (unified_syntax)
11833 bfd_boolean narrow;
11836 switch (inst.instruction)
11839 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11841 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11843 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11845 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11849 if (THUMB_SETS_FLAGS (inst.instruction))
11850 narrow = !in_it_block ();
11852 narrow = in_it_block ();
11853 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11855 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11857 if (inst.operands[2].isreg
11858 && (inst.operands[1].reg != inst.operands[0].reg
11859 || inst.operands[2].reg > 7))
11861 if (inst.size_req == 4)
11864 reject_bad_reg (inst.operands[0].reg);
11865 reject_bad_reg (inst.operands[1].reg);
11869 if (inst.operands[2].isreg)
11871 reject_bad_reg (inst.operands[2].reg);
11872 inst.instruction = THUMB_OP32 (inst.instruction);
11873 inst.instruction |= inst.operands[0].reg << 8;
11874 inst.instruction |= inst.operands[1].reg << 16;
11875 inst.instruction |= inst.operands[2].reg;
11877 /* PR 12854: Error on extraneous shifts. */
11878 constraint (inst.operands[2].shifted,
11879 _("extraneous shift as part of operand to shift insn"));
11883 inst.operands[1].shifted = 1;
11884 inst.operands[1].shift_kind = shift_kind;
11885 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11886 ? T_MNEM_movs : T_MNEM_mov);
11887 inst.instruction |= inst.operands[0].reg << 8;
11888 encode_thumb32_shifted_operand (1);
11889 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
11890 inst.reloc.type = BFD_RELOC_UNUSED;
11895 if (inst.operands[2].isreg)
11897 switch (shift_kind)
11899 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11900 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11901 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11902 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11906 inst.instruction |= inst.operands[0].reg;
11907 inst.instruction |= inst.operands[2].reg << 3;
11909 /* PR 12854: Error on extraneous shifts. */
11910 constraint (inst.operands[2].shifted,
11911 _("extraneous shift as part of operand to shift insn"));
11915 switch (shift_kind)
11917 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11918 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11919 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11922 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11923 inst.instruction |= inst.operands[0].reg;
11924 inst.instruction |= inst.operands[1].reg << 3;
11930 constraint (inst.operands[0].reg > 7
11931 || inst.operands[1].reg > 7, BAD_HIREG);
11932 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11934 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
11936 constraint (inst.operands[2].reg > 7, BAD_HIREG);
11937 constraint (inst.operands[0].reg != inst.operands[1].reg,
11938 _("source1 and dest must be same register"));
11940 switch (inst.instruction)
11942 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11943 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11944 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11945 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11949 inst.instruction |= inst.operands[0].reg;
11950 inst.instruction |= inst.operands[2].reg << 3;
11952 /* PR 12854: Error on extraneous shifts. */
11953 constraint (inst.operands[2].shifted,
11954 _("extraneous shift as part of operand to shift insn"));
11958 switch (inst.instruction)
11960 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11961 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11962 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11963 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11966 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11967 inst.instruction |= inst.operands[0].reg;
11968 inst.instruction |= inst.operands[1].reg << 3;
11976 unsigned Rd, Rn, Rm;
11978 Rd = inst.operands[0].reg;
11979 Rn = inst.operands[1].reg;
11980 Rm = inst.operands[2].reg;
11982 reject_bad_reg (Rd);
11983 reject_bad_reg (Rn);
11984 reject_bad_reg (Rm);
11986 inst.instruction |= Rd << 8;
11987 inst.instruction |= Rn << 16;
11988 inst.instruction |= Rm;
11994 unsigned Rd, Rn, Rm;
11996 Rd = inst.operands[0].reg;
11997 Rm = inst.operands[1].reg;
11998 Rn = inst.operands[2].reg;
12000 reject_bad_reg (Rd);
12001 reject_bad_reg (Rn);
12002 reject_bad_reg (Rm);
12004 inst.instruction |= Rd << 8;
12005 inst.instruction |= Rn << 16;
12006 inst.instruction |= Rm;
12012 unsigned int value = inst.reloc.exp.X_add_number;
12013 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12014 _("SMC is not permitted on this architecture"));
12015 constraint (inst.reloc.exp.X_op != O_constant,
12016 _("expression too complex"));
12017 inst.reloc.type = BFD_RELOC_UNUSED;
12018 inst.instruction |= (value & 0xf000) >> 12;
12019 inst.instruction |= (value & 0x0ff0);
12020 inst.instruction |= (value & 0x000f) << 16;
12021 /* PR gas/15623: SMC instructions must be last in an IT block. */
12022 set_it_insn_type_last ();
12028 unsigned int value = inst.reloc.exp.X_add_number;
12030 inst.reloc.type = BFD_RELOC_UNUSED;
12031 inst.instruction |= (value & 0x0fff);
12032 inst.instruction |= (value & 0xf000) << 4;
12036 do_t_ssat_usat (int bias)
12040 Rd = inst.operands[0].reg;
12041 Rn = inst.operands[2].reg;
12043 reject_bad_reg (Rd);
12044 reject_bad_reg (Rn);
12046 inst.instruction |= Rd << 8;
12047 inst.instruction |= inst.operands[1].imm - bias;
12048 inst.instruction |= Rn << 16;
12050 if (inst.operands[3].present)
12052 offsetT shift_amount = inst.reloc.exp.X_add_number;
12054 inst.reloc.type = BFD_RELOC_UNUSED;
12056 constraint (inst.reloc.exp.X_op != O_constant,
12057 _("expression too complex"));
12059 if (shift_amount != 0)
12061 constraint (shift_amount > 31,
12062 _("shift expression is too large"));
12064 if (inst.operands[3].shift_kind == SHIFT_ASR)
12065 inst.instruction |= 0x00200000; /* sh bit. */
12067 inst.instruction |= (shift_amount & 0x1c) << 10;
12068 inst.instruction |= (shift_amount & 0x03) << 6;
12076 do_t_ssat_usat (1);
12084 Rd = inst.operands[0].reg;
12085 Rn = inst.operands[2].reg;
12087 reject_bad_reg (Rd);
12088 reject_bad_reg (Rn);
12090 inst.instruction |= Rd << 8;
12091 inst.instruction |= inst.operands[1].imm - 1;
12092 inst.instruction |= Rn << 16;
12098 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12099 || inst.operands[2].postind || inst.operands[2].writeback
12100 || inst.operands[2].immisreg || inst.operands[2].shifted
12101 || inst.operands[2].negative,
12104 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12106 inst.instruction |= inst.operands[0].reg << 8;
12107 inst.instruction |= inst.operands[1].reg << 12;
12108 inst.instruction |= inst.operands[2].reg << 16;
12109 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12115 if (!inst.operands[2].present)
12116 inst.operands[2].reg = inst.operands[1].reg + 1;
12118 constraint (inst.operands[0].reg == inst.operands[1].reg
12119 || inst.operands[0].reg == inst.operands[2].reg
12120 || inst.operands[0].reg == inst.operands[3].reg,
12123 inst.instruction |= inst.operands[0].reg;
12124 inst.instruction |= inst.operands[1].reg << 12;
12125 inst.instruction |= inst.operands[2].reg << 8;
12126 inst.instruction |= inst.operands[3].reg << 16;
12132 unsigned Rd, Rn, Rm;
12134 Rd = inst.operands[0].reg;
12135 Rn = inst.operands[1].reg;
12136 Rm = inst.operands[2].reg;
12138 reject_bad_reg (Rd);
12139 reject_bad_reg (Rn);
12140 reject_bad_reg (Rm);
12142 inst.instruction |= Rd << 8;
12143 inst.instruction |= Rn << 16;
12144 inst.instruction |= Rm;
12145 inst.instruction |= inst.operands[3].imm << 4;
12153 Rd = inst.operands[0].reg;
12154 Rm = inst.operands[1].reg;
12156 reject_bad_reg (Rd);
12157 reject_bad_reg (Rm);
12159 if (inst.instruction <= 0xffff
12160 && inst.size_req != 4
12161 && Rd <= 7 && Rm <= 7
12162 && (!inst.operands[2].present || inst.operands[2].imm == 0))
12164 inst.instruction = THUMB_OP16 (inst.instruction);
12165 inst.instruction |= Rd;
12166 inst.instruction |= Rm << 3;
12168 else if (unified_syntax)
12170 if (inst.instruction <= 0xffff)
12171 inst.instruction = THUMB_OP32 (inst.instruction);
12172 inst.instruction |= Rd << 8;
12173 inst.instruction |= Rm;
12174 inst.instruction |= inst.operands[2].imm << 4;
12178 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12179 _("Thumb encoding does not support rotation"));
12180 constraint (1, BAD_HIREG);
12187 /* We have to do the following check manually as ARM_EXT_OS only applies
12189 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12191 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12192 /* This only applies to the v6m howver, not later architectures. */
12193 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12194 as_bad (_("SVC is not permitted on this architecture"));
12195 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12198 inst.reloc.type = BFD_RELOC_ARM_SWI;
12207 half = (inst.instruction & 0x10) != 0;
12208 set_it_insn_type_last ();
12209 constraint (inst.operands[0].immisreg,
12210 _("instruction requires register index"));
12212 Rn = inst.operands[0].reg;
12213 Rm = inst.operands[0].imm;
12215 constraint (Rn == REG_SP, BAD_SP);
12216 reject_bad_reg (Rm);
12218 constraint (!half && inst.operands[0].shifted,
12219 _("instruction does not allow shifted index"));
12220 inst.instruction |= (Rn << 16) | Rm;
12226 do_t_ssat_usat (0);
12234 Rd = inst.operands[0].reg;
12235 Rn = inst.operands[2].reg;
12237 reject_bad_reg (Rd);
12238 reject_bad_reg (Rn);
12240 inst.instruction |= Rd << 8;
12241 inst.instruction |= inst.operands[1].imm;
12242 inst.instruction |= Rn << 16;
12245 /* Neon instruction encoder helpers. */
12247 /* Encodings for the different types for various Neon opcodes. */
12249 /* An "invalid" code for the following tables. */
12252 struct neon_tab_entry
12255 unsigned float_or_poly;
12256 unsigned scalar_or_imm;
12259 /* Map overloaded Neon opcodes to their respective encodings. */
12260 #define NEON_ENC_TAB \
12261 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12262 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12263 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12264 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12265 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12266 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12267 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12268 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12269 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12270 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12271 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12272 /* Register variants of the following two instructions are encoded as
12273 vcge / vcgt with the operands reversed. */ \
12274 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12275 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12276 X(vfma, N_INV, 0x0000c10, N_INV), \
12277 X(vfms, N_INV, 0x0200c10, N_INV), \
12278 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12279 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12280 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12281 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12282 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12283 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12284 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12285 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12286 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12287 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12288 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12289 X(vshl, 0x0000400, N_INV, 0x0800510), \
12290 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12291 X(vand, 0x0000110, N_INV, 0x0800030), \
12292 X(vbic, 0x0100110, N_INV, 0x0800030), \
12293 X(veor, 0x1000110, N_INV, N_INV), \
12294 X(vorn, 0x0300110, N_INV, 0x0800010), \
12295 X(vorr, 0x0200110, N_INV, 0x0800010), \
12296 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12297 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12298 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12299 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12300 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12301 X(vst1, 0x0000000, 0x0800000, N_INV), \
12302 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12303 X(vst2, 0x0000100, 0x0800100, N_INV), \
12304 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12305 X(vst3, 0x0000200, 0x0800200, N_INV), \
12306 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12307 X(vst4, 0x0000300, 0x0800300, N_INV), \
12308 X(vmovn, 0x1b20200, N_INV, N_INV), \
12309 X(vtrn, 0x1b20080, N_INV, N_INV), \
12310 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12311 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12312 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12313 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12314 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12315 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12316 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12317 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12318 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12319 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12320 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
12321 X(vseleq, 0xe000a00, N_INV, N_INV), \
12322 X(vselvs, 0xe100a00, N_INV, N_INV), \
12323 X(vselge, 0xe200a00, N_INV, N_INV), \
12324 X(vselgt, 0xe300a00, N_INV, N_INV), \
12325 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
12326 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
12327 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
12328 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
12329 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
12330 X(aes, 0x3b00300, N_INV, N_INV), \
12331 X(sha3op, 0x2000c00, N_INV, N_INV), \
12332 X(sha1h, 0x3b902c0, N_INV, N_INV), \
12333 X(sha2op, 0x3ba0380, N_INV, N_INV)
12337 #define X(OPC,I,F,S) N_MNEM_##OPC
12342 static const struct neon_tab_entry neon_enc_tab[] =
12344 #define X(OPC,I,F,S) { (I), (F), (S) }
12349 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12350 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12351 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12352 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12353 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12354 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12355 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12356 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12357 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12358 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12359 #define NEON_ENC_SINGLE_(X) \
12360 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12361 #define NEON_ENC_DOUBLE_(X) \
12362 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12363 #define NEON_ENC_FPV8_(X) \
12364 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
12366 #define NEON_ENCODE(type, inst) \
12369 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12370 inst.is_neon = 1; \
12374 #define check_neon_suffixes \
12377 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
12379 as_bad (_("invalid neon suffix for non neon instruction")); \
12385 /* Define shapes for instruction operands. The following mnemonic characters
12386 are used in this table:
12388 F - VFP S<n> register
12389 D - Neon D<n> register
12390 Q - Neon Q<n> register
12394 L - D<n> register list
12396 This table is used to generate various data:
12397 - enumerations of the form NS_DDR to be used as arguments to
12399 - a table classifying shapes into single, double, quad, mixed.
12400 - a table used to drive neon_select_shape. */
12402 #define NEON_SHAPE_DEF \
12403 X(3, (D, D, D), DOUBLE), \
12404 X(3, (Q, Q, Q), QUAD), \
12405 X(3, (D, D, I), DOUBLE), \
12406 X(3, (Q, Q, I), QUAD), \
12407 X(3, (D, D, S), DOUBLE), \
12408 X(3, (Q, Q, S), QUAD), \
12409 X(2, (D, D), DOUBLE), \
12410 X(2, (Q, Q), QUAD), \
12411 X(2, (D, S), DOUBLE), \
12412 X(2, (Q, S), QUAD), \
12413 X(2, (D, R), DOUBLE), \
12414 X(2, (Q, R), QUAD), \
12415 X(2, (D, I), DOUBLE), \
12416 X(2, (Q, I), QUAD), \
12417 X(3, (D, L, D), DOUBLE), \
12418 X(2, (D, Q), MIXED), \
12419 X(2, (Q, D), MIXED), \
12420 X(3, (D, Q, I), MIXED), \
12421 X(3, (Q, D, I), MIXED), \
12422 X(3, (Q, D, D), MIXED), \
12423 X(3, (D, Q, Q), MIXED), \
12424 X(3, (Q, Q, D), MIXED), \
12425 X(3, (Q, D, S), MIXED), \
12426 X(3, (D, Q, S), MIXED), \
12427 X(4, (D, D, D, I), DOUBLE), \
12428 X(4, (Q, Q, Q, I), QUAD), \
12429 X(2, (F, F), SINGLE), \
12430 X(3, (F, F, F), SINGLE), \
12431 X(2, (F, I), SINGLE), \
12432 X(2, (F, D), MIXED), \
12433 X(2, (D, F), MIXED), \
12434 X(3, (F, F, I), MIXED), \
12435 X(4, (R, R, F, F), SINGLE), \
12436 X(4, (F, F, R, R), SINGLE), \
12437 X(3, (D, R, R), DOUBLE), \
12438 X(3, (R, R, D), DOUBLE), \
12439 X(2, (S, R), SINGLE), \
12440 X(2, (R, S), SINGLE), \
12441 X(2, (F, R), SINGLE), \
12442 X(2, (R, F), SINGLE)
12444 #define S2(A,B) NS_##A##B
12445 #define S3(A,B,C) NS_##A##B##C
12446 #define S4(A,B,C,D) NS_##A##B##C##D
12448 #define X(N, L, C) S##N L
12461 enum neon_shape_class
12469 #define X(N, L, C) SC_##C
12471 static enum neon_shape_class neon_shape_class[] =
12489 /* Register widths of above. */
12490 static unsigned neon_shape_el_size[] =
12501 struct neon_shape_info
12504 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
12507 #define S2(A,B) { SE_##A, SE_##B }
12508 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
12509 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
12511 #define X(N, L, C) { N, S##N L }
12513 static struct neon_shape_info neon_shape_tab[] =
12523 /* Bit masks used in type checking given instructions.
12524 'N_EQK' means the type must be the same as (or based on in some way) the key
12525 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
12526 set, various other bits can be set as well in order to modify the meaning of
12527 the type constraint. */
12529 enum neon_type_mask
12553 N_KEY = 0x1000000, /* Key element (main type specifier). */
12554 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
12555 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
12556 N_UNT = 0x8000000, /* Must be explicitly untyped. */
12557 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
12558 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
12559 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
12560 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
12561 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
12562 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
12563 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
12565 N_MAX_NONSPECIAL = N_P64
12568 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
12570 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
12571 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
12572 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
12573 #define N_SUF_32 (N_SU_32 | N_F32)
12574 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
12575 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
12577 /* Pass this as the first type argument to neon_check_type to ignore types
12579 #define N_IGNORE_TYPE (N_KEY | N_EQK)
12581 /* Select a "shape" for the current instruction (describing register types or
12582 sizes) from a list of alternatives. Return NS_NULL if the current instruction
12583 doesn't fit. For non-polymorphic shapes, checking is usually done as a
12584 function of operand parsing, so this function doesn't need to be called.
12585 Shapes should be listed in order of decreasing length. */
12587 static enum neon_shape
12588 neon_select_shape (enum neon_shape shape, ...)
12591 enum neon_shape first_shape = shape;
12593 /* Fix missing optional operands. FIXME: we don't know at this point how
12594 many arguments we should have, so this makes the assumption that we have
12595 > 1. This is true of all current Neon opcodes, I think, but may not be
12596 true in the future. */
12597 if (!inst.operands[1].present)
12598 inst.operands[1] = inst.operands[0];
12600 va_start (ap, shape);
12602 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
12607 for (j = 0; j < neon_shape_tab[shape].els; j++)
12609 if (!inst.operands[j].present)
12615 switch (neon_shape_tab[shape].el[j])
12618 if (!(inst.operands[j].isreg
12619 && inst.operands[j].isvec
12620 && inst.operands[j].issingle
12621 && !inst.operands[j].isquad))
12626 if (!(inst.operands[j].isreg
12627 && inst.operands[j].isvec
12628 && !inst.operands[j].isquad
12629 && !inst.operands[j].issingle))
12634 if (!(inst.operands[j].isreg
12635 && !inst.operands[j].isvec))
12640 if (!(inst.operands[j].isreg
12641 && inst.operands[j].isvec
12642 && inst.operands[j].isquad
12643 && !inst.operands[j].issingle))
12648 if (!(!inst.operands[j].isreg
12649 && !inst.operands[j].isscalar))
12654 if (!(!inst.operands[j].isreg
12655 && inst.operands[j].isscalar))
12665 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
12666 /* We've matched all the entries in the shape table, and we don't
12667 have any left over operands which have not been matched. */
12673 if (shape == NS_NULL && first_shape != NS_NULL)
12674 first_error (_("invalid instruction shape"));
12679 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12680 means the Q bit should be set). */
12683 neon_quad (enum neon_shape shape)
12685 return neon_shape_class[shape] == SC_QUAD;
12689 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12692 /* Allow modification to be made to types which are constrained to be
12693 based on the key element, based on bits set alongside N_EQK. */
12694 if ((typebits & N_EQK) != 0)
12696 if ((typebits & N_HLF) != 0)
12698 else if ((typebits & N_DBL) != 0)
12700 if ((typebits & N_SGN) != 0)
12701 *g_type = NT_signed;
12702 else if ((typebits & N_UNS) != 0)
12703 *g_type = NT_unsigned;
12704 else if ((typebits & N_INT) != 0)
12705 *g_type = NT_integer;
12706 else if ((typebits & N_FLT) != 0)
12707 *g_type = NT_float;
12708 else if ((typebits & N_SIZ) != 0)
12709 *g_type = NT_untyped;
12713 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12714 operand type, i.e. the single type specified in a Neon instruction when it
12715 is the only one given. */
12717 static struct neon_type_el
12718 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12720 struct neon_type_el dest = *key;
12722 gas_assert ((thisarg & N_EQK) != 0);
12724 neon_modify_type_size (thisarg, &dest.type, &dest.size);
12729 /* Convert Neon type and size into compact bitmask representation. */
12731 static enum neon_type_mask
12732 type_chk_of_el_type (enum neon_el_type type, unsigned size)
12739 case 8: return N_8;
12740 case 16: return N_16;
12741 case 32: return N_32;
12742 case 64: return N_64;
12750 case 8: return N_I8;
12751 case 16: return N_I16;
12752 case 32: return N_I32;
12753 case 64: return N_I64;
12761 case 16: return N_F16;
12762 case 32: return N_F32;
12763 case 64: return N_F64;
12771 case 8: return N_P8;
12772 case 16: return N_P16;
12773 case 64: return N_P64;
12781 case 8: return N_S8;
12782 case 16: return N_S16;
12783 case 32: return N_S32;
12784 case 64: return N_S64;
12792 case 8: return N_U8;
12793 case 16: return N_U16;
12794 case 32: return N_U32;
12795 case 64: return N_U64;
12806 /* Convert compact Neon bitmask type representation to a type and size. Only
12807 handles the case where a single bit is set in the mask. */
12810 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12811 enum neon_type_mask mask)
12813 if ((mask & N_EQK) != 0)
12816 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12818 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
12820 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12822 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
12827 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12829 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12830 *type = NT_unsigned;
12831 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12832 *type = NT_integer;
12833 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12834 *type = NT_untyped;
12835 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
12837 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
12845 /* Modify a bitmask of allowed types. This is only needed for type
12849 modify_types_allowed (unsigned allowed, unsigned mods)
12852 enum neon_el_type type;
12858 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12860 if (el_type_of_type_chk (&type, &size,
12861 (enum neon_type_mask) (allowed & i)) == SUCCESS)
12863 neon_modify_type_size (mods, &type, &size);
12864 destmask |= type_chk_of_el_type (type, size);
12871 /* Check type and return type classification.
12872 The manual states (paraphrase): If one datatype is given, it indicates the
12874 - the second operand, if there is one
12875 - the operand, if there is no second operand
12876 - the result, if there are no operands.
12877 This isn't quite good enough though, so we use a concept of a "key" datatype
12878 which is set on a per-instruction basis, which is the one which matters when
12879 only one data type is written.
12880 Note: this function has side-effects (e.g. filling in missing operands). All
12881 Neon instructions should call it before performing bit encoding. */
12883 static struct neon_type_el
12884 neon_check_type (unsigned els, enum neon_shape ns, ...)
12887 unsigned i, pass, key_el = 0;
12888 unsigned types[NEON_MAX_TYPE_ELS];
12889 enum neon_el_type k_type = NT_invtype;
12890 unsigned k_size = -1u;
12891 struct neon_type_el badtype = {NT_invtype, -1};
12892 unsigned key_allowed = 0;
12894 /* Optional registers in Neon instructions are always (not) in operand 1.
12895 Fill in the missing operand here, if it was omitted. */
12896 if (els > 1 && !inst.operands[1].present)
12897 inst.operands[1] = inst.operands[0];
12899 /* Suck up all the varargs. */
12901 for (i = 0; i < els; i++)
12903 unsigned thisarg = va_arg (ap, unsigned);
12904 if (thisarg == N_IGNORE_TYPE)
12909 types[i] = thisarg;
12910 if ((thisarg & N_KEY) != 0)
12915 if (inst.vectype.elems > 0)
12916 for (i = 0; i < els; i++)
12917 if (inst.operands[i].vectype.type != NT_invtype)
12919 first_error (_("types specified in both the mnemonic and operands"));
12923 /* Duplicate inst.vectype elements here as necessary.
12924 FIXME: No idea if this is exactly the same as the ARM assembler,
12925 particularly when an insn takes one register and one non-register
12927 if (inst.vectype.elems == 1 && els > 1)
12930 inst.vectype.elems = els;
12931 inst.vectype.el[key_el] = inst.vectype.el[0];
12932 for (j = 0; j < els; j++)
12934 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12937 else if (inst.vectype.elems == 0 && els > 0)
12940 /* No types were given after the mnemonic, so look for types specified
12941 after each operand. We allow some flexibility here; as long as the
12942 "key" operand has a type, we can infer the others. */
12943 for (j = 0; j < els; j++)
12944 if (inst.operands[j].vectype.type != NT_invtype)
12945 inst.vectype.el[j] = inst.operands[j].vectype;
12947 if (inst.operands[key_el].vectype.type != NT_invtype)
12949 for (j = 0; j < els; j++)
12950 if (inst.operands[j].vectype.type == NT_invtype)
12951 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12956 first_error (_("operand types can't be inferred"));
12960 else if (inst.vectype.elems != els)
12962 first_error (_("type specifier has the wrong number of parts"));
12966 for (pass = 0; pass < 2; pass++)
12968 for (i = 0; i < els; i++)
12970 unsigned thisarg = types[i];
12971 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
12972 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
12973 enum neon_el_type g_type = inst.vectype.el[i].type;
12974 unsigned g_size = inst.vectype.el[i].size;
12976 /* Decay more-specific signed & unsigned types to sign-insensitive
12977 integer types if sign-specific variants are unavailable. */
12978 if ((g_type == NT_signed || g_type == NT_unsigned)
12979 && (types_allowed & N_SU_ALL) == 0)
12980 g_type = NT_integer;
12982 /* If only untyped args are allowed, decay any more specific types to
12983 them. Some instructions only care about signs for some element
12984 sizes, so handle that properly. */
12985 if (((types_allowed & N_UNT) == 0)
12986 && ((g_size == 8 && (types_allowed & N_8) != 0)
12987 || (g_size == 16 && (types_allowed & N_16) != 0)
12988 || (g_size == 32 && (types_allowed & N_32) != 0)
12989 || (g_size == 64 && (types_allowed & N_64) != 0)))
12990 g_type = NT_untyped;
12994 if ((thisarg & N_KEY) != 0)
12998 key_allowed = thisarg & ~N_KEY;
13003 if ((thisarg & N_VFP) != 0)
13005 enum neon_shape_el regshape;
13006 unsigned regwidth, match;
13008 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13011 first_error (_("invalid instruction shape"));
13014 regshape = neon_shape_tab[ns].el[i];
13015 regwidth = neon_shape_el_size[regshape];
13017 /* In VFP mode, operands must match register widths. If we
13018 have a key operand, use its width, else use the width of
13019 the current operand. */
13025 if (regwidth != match)
13027 first_error (_("operand size must match register width"));
13032 if ((thisarg & N_EQK) == 0)
13034 unsigned given_type = type_chk_of_el_type (g_type, g_size);
13036 if ((given_type & types_allowed) == 0)
13038 first_error (_("bad type in Neon instruction"));
13044 enum neon_el_type mod_k_type = k_type;
13045 unsigned mod_k_size = k_size;
13046 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
13047 if (g_type != mod_k_type || g_size != mod_k_size)
13049 first_error (_("inconsistent types in Neon instruction"));
13057 return inst.vectype.el[key_el];
13060 /* Neon-style VFP instruction forwarding. */
13062 /* Thumb VFP instructions have 0xE in the condition field. */
13065 do_vfp_cond_or_thumb (void)
13070 inst.instruction |= 0xe0000000;
13072 inst.instruction |= inst.cond << 28;
13075 /* Look up and encode a simple mnemonic, for use as a helper function for the
13076 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13077 etc. It is assumed that operand parsing has already been done, and that the
13078 operands are in the form expected by the given opcode (this isn't necessarily
13079 the same as the form in which they were parsed, hence some massaging must
13080 take place before this function is called).
13081 Checks current arch version against that in the looked-up opcode. */
13084 do_vfp_nsyn_opcode (const char *opname)
13086 const struct asm_opcode *opcode;
13088 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13093 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13094 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13101 inst.instruction = opcode->tvalue;
13102 opcode->tencode ();
13106 inst.instruction = (inst.cond << 28) | opcode->avalue;
13107 opcode->aencode ();
13112 do_vfp_nsyn_add_sub (enum neon_shape rs)
13114 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13119 do_vfp_nsyn_opcode ("fadds");
13121 do_vfp_nsyn_opcode ("fsubs");
13126 do_vfp_nsyn_opcode ("faddd");
13128 do_vfp_nsyn_opcode ("fsubd");
13132 /* Check operand types to see if this is a VFP instruction, and if so call
13136 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13138 enum neon_shape rs;
13139 struct neon_type_el et;
13144 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13145 et = neon_check_type (2, rs,
13146 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13150 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13151 et = neon_check_type (3, rs,
13152 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13159 if (et.type != NT_invtype)
13170 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13172 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13177 do_vfp_nsyn_opcode ("fmacs");
13179 do_vfp_nsyn_opcode ("fnmacs");
13184 do_vfp_nsyn_opcode ("fmacd");
13186 do_vfp_nsyn_opcode ("fnmacd");
13191 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13193 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13198 do_vfp_nsyn_opcode ("ffmas");
13200 do_vfp_nsyn_opcode ("ffnmas");
13205 do_vfp_nsyn_opcode ("ffmad");
13207 do_vfp_nsyn_opcode ("ffnmad");
13212 do_vfp_nsyn_mul (enum neon_shape rs)
13215 do_vfp_nsyn_opcode ("fmuls");
13217 do_vfp_nsyn_opcode ("fmuld");
13221 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13223 int is_neg = (inst.instruction & 0x80) != 0;
13224 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13229 do_vfp_nsyn_opcode ("fnegs");
13231 do_vfp_nsyn_opcode ("fabss");
13236 do_vfp_nsyn_opcode ("fnegd");
13238 do_vfp_nsyn_opcode ("fabsd");
13242 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13243 insns belong to Neon, and are handled elsewhere. */
13246 do_vfp_nsyn_ldm_stm (int is_dbmode)
13248 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13252 do_vfp_nsyn_opcode ("fldmdbs");
13254 do_vfp_nsyn_opcode ("fldmias");
13259 do_vfp_nsyn_opcode ("fstmdbs");
13261 do_vfp_nsyn_opcode ("fstmias");
13266 do_vfp_nsyn_sqrt (void)
13268 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13269 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13272 do_vfp_nsyn_opcode ("fsqrts");
13274 do_vfp_nsyn_opcode ("fsqrtd");
13278 do_vfp_nsyn_div (void)
13280 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13281 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13282 N_F32 | N_F64 | N_KEY | N_VFP);
13285 do_vfp_nsyn_opcode ("fdivs");
13287 do_vfp_nsyn_opcode ("fdivd");
13291 do_vfp_nsyn_nmul (void)
13293 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13294 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13295 N_F32 | N_F64 | N_KEY | N_VFP);
13299 NEON_ENCODE (SINGLE, inst);
13300 do_vfp_sp_dyadic ();
13304 NEON_ENCODE (DOUBLE, inst);
13305 do_vfp_dp_rd_rn_rm ();
13307 do_vfp_cond_or_thumb ();
13311 do_vfp_nsyn_cmp (void)
13313 if (inst.operands[1].isreg)
13315 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13316 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13320 NEON_ENCODE (SINGLE, inst);
13321 do_vfp_sp_monadic ();
13325 NEON_ENCODE (DOUBLE, inst);
13326 do_vfp_dp_rd_rm ();
13331 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13332 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13334 switch (inst.instruction & 0x0fffffff)
13337 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13340 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13348 NEON_ENCODE (SINGLE, inst);
13349 do_vfp_sp_compare_z ();
13353 NEON_ENCODE (DOUBLE, inst);
13357 do_vfp_cond_or_thumb ();
13361 nsyn_insert_sp (void)
13363 inst.operands[1] = inst.operands[0];
13364 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13365 inst.operands[0].reg = REG_SP;
13366 inst.operands[0].isreg = 1;
13367 inst.operands[0].writeback = 1;
13368 inst.operands[0].present = 1;
13372 do_vfp_nsyn_push (void)
13375 if (inst.operands[1].issingle)
13376 do_vfp_nsyn_opcode ("fstmdbs");
13378 do_vfp_nsyn_opcode ("fstmdbd");
13382 do_vfp_nsyn_pop (void)
13385 if (inst.operands[1].issingle)
13386 do_vfp_nsyn_opcode ("fldmias");
13388 do_vfp_nsyn_opcode ("fldmiad");
13391 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13392 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13395 neon_dp_fixup (struct arm_it* insn)
13397 unsigned int i = insn->instruction;
13402 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13413 insn->instruction = i;
13416 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13420 neon_logbits (unsigned x)
13422 return ffs (x) - 4;
13425 #define LOW4(R) ((R) & 0xf)
13426 #define HI1(R) (((R) >> 4) & 1)
13428 /* Encode insns with bit pattern:
13430 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13431 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
13433 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13434 different meaning for some instruction. */
13437 neon_three_same (int isquad, int ubit, int size)
13439 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13440 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13441 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13442 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13443 inst.instruction |= LOW4 (inst.operands[2].reg);
13444 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13445 inst.instruction |= (isquad != 0) << 6;
13446 inst.instruction |= (ubit != 0) << 24;
13448 inst.instruction |= neon_logbits (size) << 20;
13450 neon_dp_fixup (&inst);
13453 /* Encode instructions of the form:
13455 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
13456 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
13458 Don't write size if SIZE == -1. */
13461 neon_two_same (int qbit, int ubit, int size)
13463 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13464 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13465 inst.instruction |= LOW4 (inst.operands[1].reg);
13466 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13467 inst.instruction |= (qbit != 0) << 6;
13468 inst.instruction |= (ubit != 0) << 24;
13471 inst.instruction |= neon_logbits (size) << 18;
13473 neon_dp_fixup (&inst);
13476 /* Neon instruction encoders, in approximate order of appearance. */
13479 do_neon_dyadic_i_su (void)
13481 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13482 struct neon_type_el et = neon_check_type (3, rs,
13483 N_EQK, N_EQK, N_SU_32 | N_KEY);
13484 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13488 do_neon_dyadic_i64_su (void)
13490 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13491 struct neon_type_el et = neon_check_type (3, rs,
13492 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13493 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13497 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13500 unsigned size = et.size >> 3;
13501 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13502 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13503 inst.instruction |= LOW4 (inst.operands[1].reg);
13504 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13505 inst.instruction |= (isquad != 0) << 6;
13506 inst.instruction |= immbits << 16;
13507 inst.instruction |= (size >> 3) << 7;
13508 inst.instruction |= (size & 0x7) << 19;
13510 inst.instruction |= (uval != 0) << 24;
13512 neon_dp_fixup (&inst);
13516 do_neon_shl_imm (void)
13518 if (!inst.operands[2].isreg)
13520 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13521 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13522 NEON_ENCODE (IMMED, inst);
13523 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13527 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13528 struct neon_type_el et = neon_check_type (3, rs,
13529 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13532 /* VSHL/VQSHL 3-register variants have syntax such as:
13534 whereas other 3-register operations encoded by neon_three_same have
13537 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13539 tmp = inst.operands[2].reg;
13540 inst.operands[2].reg = inst.operands[1].reg;
13541 inst.operands[1].reg = tmp;
13542 NEON_ENCODE (INTEGER, inst);
13543 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13548 do_neon_qshl_imm (void)
13550 if (!inst.operands[2].isreg)
13552 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13553 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13555 NEON_ENCODE (IMMED, inst);
13556 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13557 inst.operands[2].imm);
13561 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13562 struct neon_type_el et = neon_check_type (3, rs,
13563 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13566 /* See note in do_neon_shl_imm. */
13567 tmp = inst.operands[2].reg;
13568 inst.operands[2].reg = inst.operands[1].reg;
13569 inst.operands[1].reg = tmp;
13570 NEON_ENCODE (INTEGER, inst);
13571 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13576 do_neon_rshl (void)
13578 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13579 struct neon_type_el et = neon_check_type (3, rs,
13580 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13583 tmp = inst.operands[2].reg;
13584 inst.operands[2].reg = inst.operands[1].reg;
13585 inst.operands[1].reg = tmp;
13586 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13590 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13592 /* Handle .I8 pseudo-instructions. */
13595 /* Unfortunately, this will make everything apart from zero out-of-range.
13596 FIXME is this the intended semantics? There doesn't seem much point in
13597 accepting .I8 if so. */
13598 immediate |= immediate << 8;
13604 if (immediate == (immediate & 0x000000ff))
13606 *immbits = immediate;
13609 else if (immediate == (immediate & 0x0000ff00))
13611 *immbits = immediate >> 8;
13614 else if (immediate == (immediate & 0x00ff0000))
13616 *immbits = immediate >> 16;
13619 else if (immediate == (immediate & 0xff000000))
13621 *immbits = immediate >> 24;
13624 if ((immediate & 0xffff) != (immediate >> 16))
13625 goto bad_immediate;
13626 immediate &= 0xffff;
13629 if (immediate == (immediate & 0x000000ff))
13631 *immbits = immediate;
13634 else if (immediate == (immediate & 0x0000ff00))
13636 *immbits = immediate >> 8;
13641 first_error (_("immediate value out of range"));
13645 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13649 neon_bits_same_in_bytes (unsigned imm)
13651 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13652 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13653 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13654 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13657 /* For immediate of above form, return 0bABCD. */
13660 neon_squash_bits (unsigned imm)
13662 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13663 | ((imm & 0x01000000) >> 21);
13666 /* Compress quarter-float representation to 0b...000 abcdefgh. */
13669 neon_qfloat_bits (unsigned imm)
13671 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13674 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13675 the instruction. *OP is passed as the initial value of the op field, and
13676 may be set to a different value depending on the constant (i.e.
13677 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13678 MVN). If the immediate looks like a repeated pattern then also
13679 try smaller element sizes. */
13682 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13683 unsigned *immbits, int *op, int size,
13684 enum neon_el_type type)
13686 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13688 if (type == NT_float && !float_p)
13691 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13693 if (size != 32 || *op == 1)
13695 *immbits = neon_qfloat_bits (immlo);
13701 if (neon_bits_same_in_bytes (immhi)
13702 && neon_bits_same_in_bytes (immlo))
13706 *immbits = (neon_squash_bits (immhi) << 4)
13707 | neon_squash_bits (immlo);
13712 if (immhi != immlo)
13718 if (immlo == (immlo & 0x000000ff))
13723 else if (immlo == (immlo & 0x0000ff00))
13725 *immbits = immlo >> 8;
13728 else if (immlo == (immlo & 0x00ff0000))
13730 *immbits = immlo >> 16;
13733 else if (immlo == (immlo & 0xff000000))
13735 *immbits = immlo >> 24;
13738 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13740 *immbits = (immlo >> 8) & 0xff;
13743 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13745 *immbits = (immlo >> 16) & 0xff;
13749 if ((immlo & 0xffff) != (immlo >> 16))
13756 if (immlo == (immlo & 0x000000ff))
13761 else if (immlo == (immlo & 0x0000ff00))
13763 *immbits = immlo >> 8;
13767 if ((immlo & 0xff) != (immlo >> 8))
13772 if (immlo == (immlo & 0x000000ff))
13774 /* Don't allow MVN with 8-bit immediate. */
13784 /* Write immediate bits [7:0] to the following locations:
13786 |28/24|23 19|18 16|15 4|3 0|
13787 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13789 This function is used by VMOV/VMVN/VORR/VBIC. */
13792 neon_write_immbits (unsigned immbits)
13794 inst.instruction |= immbits & 0xf;
13795 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13796 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13799 /* Invert low-order SIZE bits of XHI:XLO. */
13802 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
13804 unsigned immlo = xlo ? *xlo : 0;
13805 unsigned immhi = xhi ? *xhi : 0;
13810 immlo = (~immlo) & 0xff;
13814 immlo = (~immlo) & 0xffff;
13818 immhi = (~immhi) & 0xffffffff;
13819 /* fall through. */
13822 immlo = (~immlo) & 0xffffffff;
13837 do_neon_logic (void)
13839 if (inst.operands[2].present && inst.operands[2].isreg)
13841 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13842 neon_check_type (3, rs, N_IGNORE_TYPE);
13843 /* U bit and size field were set as part of the bitmask. */
13844 NEON_ENCODE (INTEGER, inst);
13845 neon_three_same (neon_quad (rs), 0, -1);
13849 const int three_ops_form = (inst.operands[2].present
13850 && !inst.operands[2].isreg);
13851 const int immoperand = (three_ops_form ? 2 : 1);
13852 enum neon_shape rs = (three_ops_form
13853 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13854 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13855 struct neon_type_el et = neon_check_type (2, rs,
13856 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13857 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13861 if (et.type == NT_invtype)
13864 if (three_ops_form)
13865 constraint (inst.operands[0].reg != inst.operands[1].reg,
13866 _("first and second operands shall be the same register"));
13868 NEON_ENCODE (IMMED, inst);
13870 immbits = inst.operands[immoperand].imm;
13873 /* .i64 is a pseudo-op, so the immediate must be a repeating
13875 if (immbits != (inst.operands[immoperand].regisimm ?
13876 inst.operands[immoperand].reg : 0))
13878 /* Set immbits to an invalid constant. */
13879 immbits = 0xdeadbeef;
13886 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13890 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13894 /* Pseudo-instruction for VBIC. */
13895 neon_invert_size (&immbits, 0, et.size);
13896 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13900 /* Pseudo-instruction for VORR. */
13901 neon_invert_size (&immbits, 0, et.size);
13902 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13912 inst.instruction |= neon_quad (rs) << 6;
13913 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13914 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13915 inst.instruction |= cmode << 8;
13916 neon_write_immbits (immbits);
13918 neon_dp_fixup (&inst);
13923 do_neon_bitfield (void)
13925 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13926 neon_check_type (3, rs, N_IGNORE_TYPE);
13927 neon_three_same (neon_quad (rs), 0, -1);
13931 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13934 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13935 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13937 if (et.type == NT_float)
13939 NEON_ENCODE (FLOAT, inst);
13940 neon_three_same (neon_quad (rs), 0, -1);
13944 NEON_ENCODE (INTEGER, inst);
13945 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13950 do_neon_dyadic_if_su (void)
13952 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13956 do_neon_dyadic_if_su_d (void)
13958 /* This version only allow D registers, but that constraint is enforced during
13959 operand parsing so we don't need to do anything extra here. */
13960 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13964 do_neon_dyadic_if_i_d (void)
13966 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13967 affected if we specify unsigned args. */
13968 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13971 enum vfp_or_neon_is_neon_bits
13974 NEON_CHECK_ARCH = 2,
13975 NEON_CHECK_ARCH8 = 4
13978 /* Call this function if an instruction which may have belonged to the VFP or
13979 Neon instruction sets, but turned out to be a Neon instruction (due to the
13980 operand types involved, etc.). We have to check and/or fix-up a couple of
13983 - Make sure the user hasn't attempted to make a Neon instruction
13985 - Alter the value in the condition code field if necessary.
13986 - Make sure that the arch supports Neon instructions.
13988 Which of these operations take place depends on bits from enum
13989 vfp_or_neon_is_neon_bits.
13991 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13992 current instruction's condition is COND_ALWAYS, the condition field is
13993 changed to inst.uncond_value. This is necessary because instructions shared
13994 between VFP and Neon may be conditional for the VFP variants only, and the
13995 unconditional Neon version must have, e.g., 0xF in the condition field. */
13998 vfp_or_neon_is_neon (unsigned check)
14000 /* Conditions are always legal in Thumb mode (IT blocks). */
14001 if (!thumb_mode && (check & NEON_CHECK_CC))
14003 if (inst.cond != COND_ALWAYS)
14005 first_error (_(BAD_COND));
14008 if (inst.uncond_value != -1)
14009 inst.instruction |= inst.uncond_value << 28;
14012 if ((check & NEON_CHECK_ARCH)
14013 && !mark_feature_used (&fpu_neon_ext_v1))
14015 first_error (_(BAD_FPU));
14019 if ((check & NEON_CHECK_ARCH8)
14020 && !mark_feature_used (&fpu_neon_ext_armv8))
14022 first_error (_(BAD_FPU));
14030 do_neon_addsub_if_i (void)
14032 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14035 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14038 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14039 affected if we specify unsigned args. */
14040 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14043 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14045 V<op> A,B (A is operand 0, B is operand 2)
14050 so handle that case specially. */
14053 neon_exchange_operands (void)
14055 void *scratch = alloca (sizeof (inst.operands[0]));
14056 if (inst.operands[1].present)
14058 /* Swap operands[1] and operands[2]. */
14059 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14060 inst.operands[1] = inst.operands[2];
14061 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14065 inst.operands[1] = inst.operands[2];
14066 inst.operands[2] = inst.operands[0];
14071 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14073 if (inst.operands[2].isreg)
14076 neon_exchange_operands ();
14077 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14081 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14082 struct neon_type_el et = neon_check_type (2, rs,
14083 N_EQK | N_SIZ, immtypes | N_KEY);
14085 NEON_ENCODE (IMMED, inst);
14086 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14087 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14088 inst.instruction |= LOW4 (inst.operands[1].reg);
14089 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14090 inst.instruction |= neon_quad (rs) << 6;
14091 inst.instruction |= (et.type == NT_float) << 10;
14092 inst.instruction |= neon_logbits (et.size) << 18;
14094 neon_dp_fixup (&inst);
14101 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14105 do_neon_cmp_inv (void)
14107 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14113 neon_compare (N_IF_32, N_IF_32, FALSE);
14116 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14117 scalars, which are encoded in 5 bits, M : Rm.
14118 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14119 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14123 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
14125 unsigned regno = NEON_SCALAR_REG (scalar);
14126 unsigned elno = NEON_SCALAR_INDEX (scalar);
14131 if (regno > 7 || elno > 3)
14133 return regno | (elno << 3);
14136 if (regno > 15 || elno > 1)
14138 return regno | (elno << 4);
14142 first_error (_("scalar out of range for multiply instruction"));
14148 /* Encode multiply / multiply-accumulate scalar instructions. */
14151 neon_mul_mac (struct neon_type_el et, int ubit)
14155 /* Give a more helpful error message if we have an invalid type. */
14156 if (et.type == NT_invtype)
14159 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14160 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14161 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14162 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14163 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14164 inst.instruction |= LOW4 (scalar);
14165 inst.instruction |= HI1 (scalar) << 5;
14166 inst.instruction |= (et.type == NT_float) << 8;
14167 inst.instruction |= neon_logbits (et.size) << 20;
14168 inst.instruction |= (ubit != 0) << 24;
14170 neon_dp_fixup (&inst);
14174 do_neon_mac_maybe_scalar (void)
14176 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14179 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14182 if (inst.operands[2].isscalar)
14184 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14185 struct neon_type_el et = neon_check_type (3, rs,
14186 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14187 NEON_ENCODE (SCALAR, inst);
14188 neon_mul_mac (et, neon_quad (rs));
14192 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14193 affected if we specify unsigned args. */
14194 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14199 do_neon_fmac (void)
14201 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14204 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14207 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14213 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14214 struct neon_type_el et = neon_check_type (3, rs,
14215 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14216 neon_three_same (neon_quad (rs), 0, et.size);
14219 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14220 same types as the MAC equivalents. The polynomial type for this instruction
14221 is encoded the same as the integer type. */
14226 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14229 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14232 if (inst.operands[2].isscalar)
14233 do_neon_mac_maybe_scalar ();
14235 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14239 do_neon_qdmulh (void)
14241 if (inst.operands[2].isscalar)
14243 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14244 struct neon_type_el et = neon_check_type (3, rs,
14245 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14246 NEON_ENCODE (SCALAR, inst);
14247 neon_mul_mac (et, neon_quad (rs));
14251 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14252 struct neon_type_el et = neon_check_type (3, rs,
14253 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14254 NEON_ENCODE (INTEGER, inst);
14255 /* The U bit (rounding) comes from bit mask. */
14256 neon_three_same (neon_quad (rs), 0, et.size);
14261 do_neon_fcmp_absolute (void)
14263 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14264 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14265 /* Size field comes from bit mask. */
14266 neon_three_same (neon_quad (rs), 1, -1);
14270 do_neon_fcmp_absolute_inv (void)
14272 neon_exchange_operands ();
14273 do_neon_fcmp_absolute ();
14277 do_neon_step (void)
14279 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14280 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14281 neon_three_same (neon_quad (rs), 0, -1);
14285 do_neon_abs_neg (void)
14287 enum neon_shape rs;
14288 struct neon_type_el et;
14290 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14293 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14296 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14297 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14299 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14300 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14301 inst.instruction |= LOW4 (inst.operands[1].reg);
14302 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14303 inst.instruction |= neon_quad (rs) << 6;
14304 inst.instruction |= (et.type == NT_float) << 10;
14305 inst.instruction |= neon_logbits (et.size) << 18;
14307 neon_dp_fixup (&inst);
14313 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14314 struct neon_type_el et = neon_check_type (2, rs,
14315 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14316 int imm = inst.operands[2].imm;
14317 constraint (imm < 0 || (unsigned)imm >= et.size,
14318 _("immediate out of range for insert"));
14319 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14325 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14326 struct neon_type_el et = neon_check_type (2, rs,
14327 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14328 int imm = inst.operands[2].imm;
14329 constraint (imm < 1 || (unsigned)imm > et.size,
14330 _("immediate out of range for insert"));
14331 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14335 do_neon_qshlu_imm (void)
14337 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14338 struct neon_type_el et = neon_check_type (2, rs,
14339 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14340 int imm = inst.operands[2].imm;
14341 constraint (imm < 0 || (unsigned)imm >= et.size,
14342 _("immediate out of range for shift"));
14343 /* Only encodes the 'U present' variant of the instruction.
14344 In this case, signed types have OP (bit 8) set to 0.
14345 Unsigned types have OP set to 1. */
14346 inst.instruction |= (et.type == NT_unsigned) << 8;
14347 /* The rest of the bits are the same as other immediate shifts. */
14348 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14352 do_neon_qmovn (void)
14354 struct neon_type_el et = neon_check_type (2, NS_DQ,
14355 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14356 /* Saturating move where operands can be signed or unsigned, and the
14357 destination has the same signedness. */
14358 NEON_ENCODE (INTEGER, inst);
14359 if (et.type == NT_unsigned)
14360 inst.instruction |= 0xc0;
14362 inst.instruction |= 0x80;
14363 neon_two_same (0, 1, et.size / 2);
14367 do_neon_qmovun (void)
14369 struct neon_type_el et = neon_check_type (2, NS_DQ,
14370 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14371 /* Saturating move with unsigned results. Operands must be signed. */
14372 NEON_ENCODE (INTEGER, inst);
14373 neon_two_same (0, 1, et.size / 2);
14377 do_neon_rshift_sat_narrow (void)
14379 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14380 or unsigned. If operands are unsigned, results must also be unsigned. */
14381 struct neon_type_el et = neon_check_type (2, NS_DQI,
14382 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14383 int imm = inst.operands[2].imm;
14384 /* This gets the bounds check, size encoding and immediate bits calculation
14388 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14389 VQMOVN.I<size> <Dd>, <Qm>. */
14392 inst.operands[2].present = 0;
14393 inst.instruction = N_MNEM_vqmovn;
14398 constraint (imm < 1 || (unsigned)imm > et.size,
14399 _("immediate out of range"));
14400 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14404 do_neon_rshift_sat_narrow_u (void)
14406 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14407 or unsigned. If operands are unsigned, results must also be unsigned. */
14408 struct neon_type_el et = neon_check_type (2, NS_DQI,
14409 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14410 int imm = inst.operands[2].imm;
14411 /* This gets the bounds check, size encoding and immediate bits calculation
14415 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14416 VQMOVUN.I<size> <Dd>, <Qm>. */
14419 inst.operands[2].present = 0;
14420 inst.instruction = N_MNEM_vqmovun;
14425 constraint (imm < 1 || (unsigned)imm > et.size,
14426 _("immediate out of range"));
14427 /* FIXME: The manual is kind of unclear about what value U should have in
14428 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14430 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14434 do_neon_movn (void)
14436 struct neon_type_el et = neon_check_type (2, NS_DQ,
14437 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14438 NEON_ENCODE (INTEGER, inst);
14439 neon_two_same (0, 1, et.size / 2);
14443 do_neon_rshift_narrow (void)
14445 struct neon_type_el et = neon_check_type (2, NS_DQI,
14446 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14447 int imm = inst.operands[2].imm;
14448 /* This gets the bounds check, size encoding and immediate bits calculation
14452 /* If immediate is zero then we are a pseudo-instruction for
14453 VMOVN.I<size> <Dd>, <Qm> */
14456 inst.operands[2].present = 0;
14457 inst.instruction = N_MNEM_vmovn;
14462 constraint (imm < 1 || (unsigned)imm > et.size,
14463 _("immediate out of range for narrowing operation"));
14464 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14468 do_neon_shll (void)
14470 /* FIXME: Type checking when lengthening. */
14471 struct neon_type_el et = neon_check_type (2, NS_QDI,
14472 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14473 unsigned imm = inst.operands[2].imm;
14475 if (imm == et.size)
14477 /* Maximum shift variant. */
14478 NEON_ENCODE (INTEGER, inst);
14479 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14480 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14481 inst.instruction |= LOW4 (inst.operands[1].reg);
14482 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14483 inst.instruction |= neon_logbits (et.size) << 18;
14485 neon_dp_fixup (&inst);
14489 /* A more-specific type check for non-max versions. */
14490 et = neon_check_type (2, NS_QDI,
14491 N_EQK | N_DBL, N_SU_32 | N_KEY);
14492 NEON_ENCODE (IMMED, inst);
14493 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14497 /* Check the various types for the VCVT instruction, and return which version
14498 the current instruction is. */
14500 #define CVT_FLAVOUR_VAR \
14501 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
14502 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
14503 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
14504 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
14505 /* Half-precision conversions. */ \
14506 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
14507 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
14508 /* VFP instructions. */ \
14509 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
14510 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
14511 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
14512 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
14513 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
14514 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
14515 /* VFP instructions with bitshift. */ \
14516 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
14517 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
14518 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
14519 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
14520 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
14521 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
14522 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
14523 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
14525 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
14526 neon_cvt_flavour_##C,
14528 /* The different types of conversions we can do. */
14529 enum neon_cvt_flavour
14532 neon_cvt_flavour_invalid,
14533 neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
14538 static enum neon_cvt_flavour
14539 get_neon_cvt_flavour (enum neon_shape rs)
14541 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
14542 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
14543 if (et.type != NT_invtype) \
14545 inst.error = NULL; \
14546 return (neon_cvt_flavour_##C); \
14549 struct neon_type_el et;
14550 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14551 || rs == NS_FF) ? N_VFP : 0;
14552 /* The instruction versions which take an immediate take one register
14553 argument, which is extended to the width of the full register. Thus the
14554 "source" and "destination" registers must have the same width. Hack that
14555 here by making the size equal to the key (wider, in this case) operand. */
14556 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14560 return neon_cvt_flavour_invalid;
14575 /* Neon-syntax VFP conversions. */
14578 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
14580 const char *opname = 0;
14582 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14584 /* Conversions with immediate bitshift. */
14585 const char *enc[] =
14587 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
14593 if (flavour < (int) ARRAY_SIZE (enc))
14595 opname = enc[flavour];
14596 constraint (inst.operands[0].reg != inst.operands[1].reg,
14597 _("operands 0 and 1 must be the same register"));
14598 inst.operands[1] = inst.operands[2];
14599 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14604 /* Conversions without bitshift. */
14605 const char *enc[] =
14607 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
14613 if (flavour < (int) ARRAY_SIZE (enc))
14614 opname = enc[flavour];
14618 do_vfp_nsyn_opcode (opname);
14622 do_vfp_nsyn_cvtz (void)
14624 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14625 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
14626 const char *enc[] =
14628 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
14634 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14635 do_vfp_nsyn_opcode (enc[flavour]);
14639 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
14640 enum neon_cvt_mode mode)
14645 set_it_insn_type (OUTSIDE_IT_INSN);
14649 case neon_cvt_flavour_s32_f64:
14653 case neon_cvt_flavour_s32_f32:
14657 case neon_cvt_flavour_u32_f64:
14661 case neon_cvt_flavour_u32_f32:
14666 first_error (_("invalid instruction shape"));
14672 case neon_cvt_mode_a: rm = 0; break;
14673 case neon_cvt_mode_n: rm = 1; break;
14674 case neon_cvt_mode_p: rm = 2; break;
14675 case neon_cvt_mode_m: rm = 3; break;
14676 default: first_error (_("invalid rounding mode")); return;
14679 NEON_ENCODE (FPV8, inst);
14680 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14681 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
14682 inst.instruction |= sz << 8;
14683 inst.instruction |= op << 7;
14684 inst.instruction |= rm << 16;
14685 inst.instruction |= 0xf0000000;
14686 inst.is_neon = TRUE;
14690 do_neon_cvt_1 (enum neon_cvt_mode mode)
14692 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14693 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14694 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
14696 /* PR11109: Handle round-to-zero for VCVT conversions. */
14697 if (mode == neon_cvt_mode_z
14698 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14699 && (flavour == neon_cvt_flavour_s32_f32
14700 || flavour == neon_cvt_flavour_u32_f32
14701 || flavour == neon_cvt_flavour_s32_f64
14702 || flavour == neon_cvt_flavour_u32_f64)
14703 && (rs == NS_FD || rs == NS_FF))
14705 do_vfp_nsyn_cvtz ();
14709 /* VFP rather than Neon conversions. */
14710 if (flavour >= neon_cvt_flavour_first_fp)
14712 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
14713 do_vfp_nsyn_cvt (rs, flavour);
14715 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
14726 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14728 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14731 /* Fixed-point conversion with #0 immediate is encoded as an
14732 integer conversion. */
14733 if (inst.operands[2].present && inst.operands[2].imm == 0)
14735 immbits = 32 - inst.operands[2].imm;
14736 NEON_ENCODE (IMMED, inst);
14737 if (flavour != neon_cvt_flavour_invalid)
14738 inst.instruction |= enctab[flavour];
14739 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14740 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14741 inst.instruction |= LOW4 (inst.operands[1].reg);
14742 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14743 inst.instruction |= neon_quad (rs) << 6;
14744 inst.instruction |= 1 << 21;
14745 inst.instruction |= immbits << 16;
14747 neon_dp_fixup (&inst);
14753 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
14755 NEON_ENCODE (FLOAT, inst);
14756 set_it_insn_type (OUTSIDE_IT_INSN);
14758 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
14761 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14762 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14763 inst.instruction |= LOW4 (inst.operands[1].reg);
14764 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14765 inst.instruction |= neon_quad (rs) << 6;
14766 inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
14767 inst.instruction |= mode << 8;
14769 inst.instruction |= 0xfc000000;
14771 inst.instruction |= 0xf0000000;
14777 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14779 NEON_ENCODE (INTEGER, inst);
14781 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14784 if (flavour != neon_cvt_flavour_invalid)
14785 inst.instruction |= enctab[flavour];
14787 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14788 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14789 inst.instruction |= LOW4 (inst.operands[1].reg);
14790 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14791 inst.instruction |= neon_quad (rs) << 6;
14792 inst.instruction |= 2 << 18;
14794 neon_dp_fixup (&inst);
14799 /* Half-precision conversions for Advanced SIMD -- neon. */
14804 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14806 as_bad (_("operand size must match register width"));
14811 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14813 as_bad (_("operand size must match register width"));
14818 inst.instruction = 0x3b60600;
14820 inst.instruction = 0x3b60700;
14822 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14823 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14824 inst.instruction |= LOW4 (inst.operands[1].reg);
14825 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14826 neon_dp_fixup (&inst);
14830 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
14831 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
14832 do_vfp_nsyn_cvt (rs, flavour);
14834 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
14839 do_neon_cvtr (void)
14841 do_neon_cvt_1 (neon_cvt_mode_x);
14847 do_neon_cvt_1 (neon_cvt_mode_z);
14851 do_neon_cvta (void)
14853 do_neon_cvt_1 (neon_cvt_mode_a);
14857 do_neon_cvtn (void)
14859 do_neon_cvt_1 (neon_cvt_mode_n);
14863 do_neon_cvtp (void)
14865 do_neon_cvt_1 (neon_cvt_mode_p);
14869 do_neon_cvtm (void)
14871 do_neon_cvt_1 (neon_cvt_mode_m);
14875 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
14878 mark_feature_used (&fpu_vfp_ext_armv8);
14880 encode_arm_vfp_reg (inst.operands[0].reg,
14881 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
14882 encode_arm_vfp_reg (inst.operands[1].reg,
14883 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
14884 inst.instruction |= to ? 0x10000 : 0;
14885 inst.instruction |= t ? 0x80 : 0;
14886 inst.instruction |= is_double ? 0x100 : 0;
14887 do_vfp_cond_or_thumb ();
14891 do_neon_cvttb_1 (bfd_boolean t)
14893 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
14897 else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
14900 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
14902 else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
14905 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
14907 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
14910 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
14912 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
14915 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
14922 do_neon_cvtb (void)
14924 do_neon_cvttb_1 (FALSE);
14929 do_neon_cvtt (void)
14931 do_neon_cvttb_1 (TRUE);
14935 neon_move_immediate (void)
14937 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14938 struct neon_type_el et = neon_check_type (2, rs,
14939 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14940 unsigned immlo, immhi = 0, immbits;
14941 int op, cmode, float_p;
14943 constraint (et.type == NT_invtype,
14944 _("operand size must be specified for immediate VMOV"));
14946 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
14947 op = (inst.instruction & (1 << 5)) != 0;
14949 immlo = inst.operands[1].imm;
14950 if (inst.operands[1].regisimm)
14951 immhi = inst.operands[1].reg;
14953 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14954 _("immediate has bits set outside the operand size"));
14956 float_p = inst.operands[1].immisfloat;
14958 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14959 et.size, et.type)) == FAIL)
14961 /* Invert relevant bits only. */
14962 neon_invert_size (&immlo, &immhi, et.size);
14963 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14964 with one or the other; those cases are caught by
14965 neon_cmode_for_move_imm. */
14967 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14968 &op, et.size, et.type)) == FAIL)
14970 first_error (_("immediate out of range"));
14975 inst.instruction &= ~(1 << 5);
14976 inst.instruction |= op << 5;
14978 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14979 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14980 inst.instruction |= neon_quad (rs) << 6;
14981 inst.instruction |= cmode << 8;
14983 neon_write_immbits (immbits);
14989 if (inst.operands[1].isreg)
14991 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14993 NEON_ENCODE (INTEGER, inst);
14994 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14995 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14996 inst.instruction |= LOW4 (inst.operands[1].reg);
14997 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14998 inst.instruction |= neon_quad (rs) << 6;
15002 NEON_ENCODE (IMMED, inst);
15003 neon_move_immediate ();
15006 neon_dp_fixup (&inst);
15009 /* Encode instructions of form:
15011 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15012 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15015 neon_mixed_length (struct neon_type_el et, unsigned size)
15017 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15018 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15019 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15020 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15021 inst.instruction |= LOW4 (inst.operands[2].reg);
15022 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15023 inst.instruction |= (et.type == NT_unsigned) << 24;
15024 inst.instruction |= neon_logbits (size) << 20;
15026 neon_dp_fixup (&inst);
15030 do_neon_dyadic_long (void)
15032 /* FIXME: Type checking for lengthening op. */
15033 struct neon_type_el et = neon_check_type (3, NS_QDD,
15034 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15035 neon_mixed_length (et, et.size);
15039 do_neon_abal (void)
15041 struct neon_type_el et = neon_check_type (3, NS_QDD,
15042 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15043 neon_mixed_length (et, et.size);
15047 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15049 if (inst.operands[2].isscalar)
15051 struct neon_type_el et = neon_check_type (3, NS_QDS,
15052 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15053 NEON_ENCODE (SCALAR, inst);
15054 neon_mul_mac (et, et.type == NT_unsigned);
15058 struct neon_type_el et = neon_check_type (3, NS_QDD,
15059 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15060 NEON_ENCODE (INTEGER, inst);
15061 neon_mixed_length (et, et.size);
15066 do_neon_mac_maybe_scalar_long (void)
15068 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
15072 do_neon_dyadic_wide (void)
15074 struct neon_type_el et = neon_check_type (3, NS_QQD,
15075 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15076 neon_mixed_length (et, et.size);
15080 do_neon_dyadic_narrow (void)
15082 struct neon_type_el et = neon_check_type (3, NS_QDD,
15083 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15084 /* Operand sign is unimportant, and the U bit is part of the opcode,
15085 so force the operand type to integer. */
15086 et.type = NT_integer;
15087 neon_mixed_length (et, et.size / 2);
15091 do_neon_mul_sat_scalar_long (void)
15093 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
15097 do_neon_vmull (void)
15099 if (inst.operands[2].isscalar)
15100 do_neon_mac_maybe_scalar_long ();
15103 struct neon_type_el et = neon_check_type (3, NS_QDD,
15104 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
15106 if (et.type == NT_poly)
15107 NEON_ENCODE (POLY, inst);
15109 NEON_ENCODE (INTEGER, inst);
15111 /* For polynomial encoding the U bit must be zero, and the size must
15112 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15113 obviously, as 0b10). */
15116 /* Check we're on the correct architecture. */
15117 if (!mark_feature_used (&fpu_crypto_ext_armv8))
15119 _("Instruction form not available on this architecture.");
15124 neon_mixed_length (et, et.size);
15131 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
15132 struct neon_type_el et = neon_check_type (3, rs,
15133 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15134 unsigned imm = (inst.operands[3].imm * et.size) / 8;
15136 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
15137 _("shift out of range"));
15138 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15139 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15140 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15141 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15142 inst.instruction |= LOW4 (inst.operands[2].reg);
15143 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15144 inst.instruction |= neon_quad (rs) << 6;
15145 inst.instruction |= imm << 8;
15147 neon_dp_fixup (&inst);
15153 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15154 struct neon_type_el et = neon_check_type (2, rs,
15155 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15156 unsigned op = (inst.instruction >> 7) & 3;
15157 /* N (width of reversed regions) is encoded as part of the bitmask. We
15158 extract it here to check the elements to be reversed are smaller.
15159 Otherwise we'd get a reserved instruction. */
15160 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
15161 gas_assert (elsize != 0);
15162 constraint (et.size >= elsize,
15163 _("elements must be smaller than reversal region"));
15164 neon_two_same (neon_quad (rs), 1, et.size);
15170 if (inst.operands[1].isscalar)
15172 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
15173 struct neon_type_el et = neon_check_type (2, rs,
15174 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15175 unsigned sizebits = et.size >> 3;
15176 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
15177 int logsize = neon_logbits (et.size);
15178 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
15180 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
15183 NEON_ENCODE (SCALAR, inst);
15184 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15185 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15186 inst.instruction |= LOW4 (dm);
15187 inst.instruction |= HI1 (dm) << 5;
15188 inst.instruction |= neon_quad (rs) << 6;
15189 inst.instruction |= x << 17;
15190 inst.instruction |= sizebits << 16;
15192 neon_dp_fixup (&inst);
15196 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
15197 struct neon_type_el et = neon_check_type (2, rs,
15198 N_8 | N_16 | N_32 | N_KEY, N_EQK);
15199 /* Duplicate ARM register to lanes of vector. */
15200 NEON_ENCODE (ARMREG, inst);
15203 case 8: inst.instruction |= 0x400000; break;
15204 case 16: inst.instruction |= 0x000020; break;
15205 case 32: inst.instruction |= 0x000000; break;
15208 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15209 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
15210 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
15211 inst.instruction |= neon_quad (rs) << 21;
15212 /* The encoding for this instruction is identical for the ARM and Thumb
15213 variants, except for the condition field. */
15214 do_vfp_cond_or_thumb ();
15218 /* VMOV has particularly many variations. It can be one of:
15219 0. VMOV<c><q> <Qd>, <Qm>
15220 1. VMOV<c><q> <Dd>, <Dm>
15221 (Register operations, which are VORR with Rm = Rn.)
15222 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15223 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15225 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15226 (ARM register to scalar.)
15227 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15228 (Two ARM registers to vector.)
15229 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15230 (Scalar to ARM register.)
15231 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15232 (Vector to two ARM registers.)
15233 8. VMOV.F32 <Sd>, <Sm>
15234 9. VMOV.F64 <Dd>, <Dm>
15235 (VFP register moves.)
15236 10. VMOV.F32 <Sd>, #imm
15237 11. VMOV.F64 <Dd>, #imm
15238 (VFP float immediate load.)
15239 12. VMOV <Rd>, <Sm>
15240 (VFP single to ARM reg.)
15241 13. VMOV <Sd>, <Rm>
15242 (ARM reg to VFP single.)
15243 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15244 (Two ARM regs to two VFP singles.)
15245 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15246 (Two VFP singles to two ARM regs.)
15248 These cases can be disambiguated using neon_select_shape, except cases 1/9
15249 and 3/11 which depend on the operand type too.
15251 All the encoded bits are hardcoded by this function.
15253 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15254 Cases 5, 7 may be used with VFPv2 and above.
15256 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15257 can specify a type where it doesn't make sense to, and is ignored). */
15262 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
15263 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
15265 struct neon_type_el et;
15266 const char *ldconst = 0;
15270 case NS_DD: /* case 1/9. */
15271 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15272 /* It is not an error here if no type is given. */
15274 if (et.type == NT_float && et.size == 64)
15276 do_vfp_nsyn_opcode ("fcpyd");
15279 /* fall through. */
15281 case NS_QQ: /* case 0/1. */
15283 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15285 /* The architecture manual I have doesn't explicitly state which
15286 value the U bit should have for register->register moves, but
15287 the equivalent VORR instruction has U = 0, so do that. */
15288 inst.instruction = 0x0200110;
15289 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15290 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15291 inst.instruction |= LOW4 (inst.operands[1].reg);
15292 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15293 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15294 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15295 inst.instruction |= neon_quad (rs) << 6;
15297 neon_dp_fixup (&inst);
15301 case NS_DI: /* case 3/11. */
15302 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15304 if (et.type == NT_float && et.size == 64)
15306 /* case 11 (fconstd). */
15307 ldconst = "fconstd";
15308 goto encode_fconstd;
15310 /* fall through. */
15312 case NS_QI: /* case 2/3. */
15313 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15315 inst.instruction = 0x0800010;
15316 neon_move_immediate ();
15317 neon_dp_fixup (&inst);
15320 case NS_SR: /* case 4. */
15322 unsigned bcdebits = 0;
15324 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15325 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15327 /* .<size> is optional here, defaulting to .32. */
15328 if (inst.vectype.elems == 0
15329 && inst.operands[0].vectype.type == NT_invtype
15330 && inst.operands[1].vectype.type == NT_invtype)
15332 inst.vectype.el[0].type = NT_untyped;
15333 inst.vectype.el[0].size = 32;
15334 inst.vectype.elems = 1;
15337 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15338 logsize = neon_logbits (et.size);
15340 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15342 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15343 && et.size != 32, _(BAD_FPU));
15344 constraint (et.type == NT_invtype, _("bad type for scalar"));
15345 constraint (x >= 64 / et.size, _("scalar index out of range"));
15349 case 8: bcdebits = 0x8; break;
15350 case 16: bcdebits = 0x1; break;
15351 case 32: bcdebits = 0x0; break;
15355 bcdebits |= x << logsize;
15357 inst.instruction = 0xe000b10;
15358 do_vfp_cond_or_thumb ();
15359 inst.instruction |= LOW4 (dn) << 16;
15360 inst.instruction |= HI1 (dn) << 7;
15361 inst.instruction |= inst.operands[1].reg << 12;
15362 inst.instruction |= (bcdebits & 3) << 5;
15363 inst.instruction |= (bcdebits >> 2) << 21;
15367 case NS_DRR: /* case 5 (fmdrr). */
15368 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15371 inst.instruction = 0xc400b10;
15372 do_vfp_cond_or_thumb ();
15373 inst.instruction |= LOW4 (inst.operands[0].reg);
15374 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15375 inst.instruction |= inst.operands[1].reg << 12;
15376 inst.instruction |= inst.operands[2].reg << 16;
15379 case NS_RS: /* case 6. */
15382 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15383 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15384 unsigned abcdebits = 0;
15386 /* .<dt> is optional here, defaulting to .32. */
15387 if (inst.vectype.elems == 0
15388 && inst.operands[0].vectype.type == NT_invtype
15389 && inst.operands[1].vectype.type == NT_invtype)
15391 inst.vectype.el[0].type = NT_untyped;
15392 inst.vectype.el[0].size = 32;
15393 inst.vectype.elems = 1;
15396 et = neon_check_type (2, NS_NULL,
15397 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15398 logsize = neon_logbits (et.size);
15400 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15402 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15403 && et.size != 32, _(BAD_FPU));
15404 constraint (et.type == NT_invtype, _("bad type for scalar"));
15405 constraint (x >= 64 / et.size, _("scalar index out of range"));
15409 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15410 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15411 case 32: abcdebits = 0x00; break;
15415 abcdebits |= x << logsize;
15416 inst.instruction = 0xe100b10;
15417 do_vfp_cond_or_thumb ();
15418 inst.instruction |= LOW4 (dn) << 16;
15419 inst.instruction |= HI1 (dn) << 7;
15420 inst.instruction |= inst.operands[0].reg << 12;
15421 inst.instruction |= (abcdebits & 3) << 5;
15422 inst.instruction |= (abcdebits >> 2) << 21;
15426 case NS_RRD: /* case 7 (fmrrd). */
15427 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15430 inst.instruction = 0xc500b10;
15431 do_vfp_cond_or_thumb ();
15432 inst.instruction |= inst.operands[0].reg << 12;
15433 inst.instruction |= inst.operands[1].reg << 16;
15434 inst.instruction |= LOW4 (inst.operands[2].reg);
15435 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15438 case NS_FF: /* case 8 (fcpys). */
15439 do_vfp_nsyn_opcode ("fcpys");
15442 case NS_FI: /* case 10 (fconsts). */
15443 ldconst = "fconsts";
15445 if (is_quarter_float (inst.operands[1].imm))
15447 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15448 do_vfp_nsyn_opcode (ldconst);
15451 first_error (_("immediate out of range"));
15454 case NS_RF: /* case 12 (fmrs). */
15455 do_vfp_nsyn_opcode ("fmrs");
15458 case NS_FR: /* case 13 (fmsr). */
15459 do_vfp_nsyn_opcode ("fmsr");
15462 /* The encoders for the fmrrs and fmsrr instructions expect three operands
15463 (one of which is a list), but we have parsed four. Do some fiddling to
15464 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15466 case NS_RRFF: /* case 14 (fmrrs). */
15467 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15468 _("VFP registers must be adjacent"));
15469 inst.operands[2].imm = 2;
15470 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15471 do_vfp_nsyn_opcode ("fmrrs");
15474 case NS_FFRR: /* case 15 (fmsrr). */
15475 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15476 _("VFP registers must be adjacent"));
15477 inst.operands[1] = inst.operands[2];
15478 inst.operands[2] = inst.operands[3];
15479 inst.operands[0].imm = 2;
15480 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15481 do_vfp_nsyn_opcode ("fmsrr");
15485 /* neon_select_shape has determined that the instruction
15486 shape is wrong and has already set the error message. */
15495 do_neon_rshift_round_imm (void)
15497 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15498 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15499 int imm = inst.operands[2].imm;
15501 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15504 inst.operands[2].present = 0;
15509 constraint (imm < 1 || (unsigned)imm > et.size,
15510 _("immediate out of range for shift"));
15511 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15516 do_neon_movl (void)
15518 struct neon_type_el et = neon_check_type (2, NS_QD,
15519 N_EQK | N_DBL, N_SU_32 | N_KEY);
15520 unsigned sizebits = et.size >> 3;
15521 inst.instruction |= sizebits << 19;
15522 neon_two_same (0, et.type == NT_unsigned, -1);
15528 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15529 struct neon_type_el et = neon_check_type (2, rs,
15530 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15531 NEON_ENCODE (INTEGER, inst);
15532 neon_two_same (neon_quad (rs), 1, et.size);
15536 do_neon_zip_uzp (void)
15538 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15539 struct neon_type_el et = neon_check_type (2, rs,
15540 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15541 if (rs == NS_DD && et.size == 32)
15543 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15544 inst.instruction = N_MNEM_vtrn;
15548 neon_two_same (neon_quad (rs), 1, et.size);
15552 do_neon_sat_abs_neg (void)
15554 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15555 struct neon_type_el et = neon_check_type (2, rs,
15556 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15557 neon_two_same (neon_quad (rs), 1, et.size);
15561 do_neon_pair_long (void)
15563 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15564 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15565 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
15566 inst.instruction |= (et.type == NT_unsigned) << 7;
15567 neon_two_same (neon_quad (rs), 1, et.size);
15571 do_neon_recip_est (void)
15573 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15574 struct neon_type_el et = neon_check_type (2, rs,
15575 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15576 inst.instruction |= (et.type == NT_float) << 8;
15577 neon_two_same (neon_quad (rs), 1, et.size);
15583 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15584 struct neon_type_el et = neon_check_type (2, rs,
15585 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15586 neon_two_same (neon_quad (rs), 1, et.size);
15592 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15593 struct neon_type_el et = neon_check_type (2, rs,
15594 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15595 neon_two_same (neon_quad (rs), 1, et.size);
15601 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15602 struct neon_type_el et = neon_check_type (2, rs,
15603 N_EQK | N_INT, N_8 | N_KEY);
15604 neon_two_same (neon_quad (rs), 1, et.size);
15610 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15611 neon_two_same (neon_quad (rs), 1, -1);
15615 do_neon_tbl_tbx (void)
15617 unsigned listlenbits;
15618 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15620 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15622 first_error (_("bad list length for table lookup"));
15626 listlenbits = inst.operands[1].imm - 1;
15627 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15628 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15629 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15630 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15631 inst.instruction |= LOW4 (inst.operands[2].reg);
15632 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15633 inst.instruction |= listlenbits << 8;
15635 neon_dp_fixup (&inst);
15639 do_neon_ldm_stm (void)
15641 /* P, U and L bits are part of bitmask. */
15642 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15643 unsigned offsetbits = inst.operands[1].imm * 2;
15645 if (inst.operands[1].issingle)
15647 do_vfp_nsyn_ldm_stm (is_dbmode);
15651 constraint (is_dbmode && !inst.operands[0].writeback,
15652 _("writeback (!) must be used for VLDMDB and VSTMDB"));
15654 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15655 _("register list must contain at least 1 and at most 16 "
15658 inst.instruction |= inst.operands[0].reg << 16;
15659 inst.instruction |= inst.operands[0].writeback << 21;
15660 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15661 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15663 inst.instruction |= offsetbits;
15665 do_vfp_cond_or_thumb ();
15669 do_neon_ldr_str (void)
15671 int is_ldr = (inst.instruction & (1 << 20)) != 0;
15673 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
15674 And is UNPREDICTABLE in thumb mode. */
15676 && inst.operands[1].reg == REG_PC
15677 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
15680 inst.error = _("Use of PC here is UNPREDICTABLE");
15681 else if (warn_on_deprecated)
15682 as_warn (_("Use of PC here is deprecated"));
15685 if (inst.operands[0].issingle)
15688 do_vfp_nsyn_opcode ("flds");
15690 do_vfp_nsyn_opcode ("fsts");
15695 do_vfp_nsyn_opcode ("fldd");
15697 do_vfp_nsyn_opcode ("fstd");
15701 /* "interleave" version also handles non-interleaving register VLD1/VST1
15705 do_neon_ld_st_interleave (void)
15707 struct neon_type_el et = neon_check_type (1, NS_NULL,
15708 N_8 | N_16 | N_32 | N_64);
15709 unsigned alignbits = 0;
15711 /* The bits in this table go:
15712 0: register stride of one (0) or two (1)
15713 1,2: register list length, minus one (1, 2, 3, 4).
15714 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15715 We use -1 for invalid entries. */
15716 const int typetable[] =
15718 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
15719 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
15720 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
15721 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
15725 if (et.type == NT_invtype)
15728 if (inst.operands[1].immisalign)
15729 switch (inst.operands[1].imm >> 8)
15731 case 64: alignbits = 1; break;
15733 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15734 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15735 goto bad_alignment;
15739 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15740 goto bad_alignment;
15745 first_error (_("bad alignment"));
15749 inst.instruction |= alignbits << 4;
15750 inst.instruction |= neon_logbits (et.size) << 6;
15752 /* Bits [4:6] of the immediate in a list specifier encode register stride
15753 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15754 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15755 up the right value for "type" in a table based on this value and the given
15756 list style, then stick it back. */
15757 idx = ((inst.operands[0].imm >> 4) & 7)
15758 | (((inst.instruction >> 8) & 3) << 3);
15760 typebits = typetable[idx];
15762 constraint (typebits == -1, _("bad list type for instruction"));
15764 inst.instruction &= ~0xf00;
15765 inst.instruction |= typebits << 8;
15768 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15769 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15770 otherwise. The variable arguments are a list of pairs of legal (size, align)
15771 values, terminated with -1. */
15774 neon_alignment_bit (int size, int align, int *do_align, ...)
15777 int result = FAIL, thissize, thisalign;
15779 if (!inst.operands[1].immisalign)
15785 va_start (ap, do_align);
15789 thissize = va_arg (ap, int);
15790 if (thissize == -1)
15792 thisalign = va_arg (ap, int);
15794 if (size == thissize && align == thisalign)
15797 while (result != SUCCESS);
15801 if (result == SUCCESS)
15804 first_error (_("unsupported alignment for instruction"));
15810 do_neon_ld_st_lane (void)
15812 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15813 int align_good, do_align = 0;
15814 int logsize = neon_logbits (et.size);
15815 int align = inst.operands[1].imm >> 8;
15816 int n = (inst.instruction >> 8) & 3;
15817 int max_el = 64 / et.size;
15819 if (et.type == NT_invtype)
15822 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15823 _("bad list length"));
15824 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15825 _("scalar index out of range"));
15826 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15828 _("stride of 2 unavailable when element size is 8"));
15832 case 0: /* VLD1 / VST1. */
15833 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15835 if (align_good == FAIL)
15839 unsigned alignbits = 0;
15842 case 16: alignbits = 0x1; break;
15843 case 32: alignbits = 0x3; break;
15846 inst.instruction |= alignbits << 4;
15850 case 1: /* VLD2 / VST2. */
15851 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15853 if (align_good == FAIL)
15856 inst.instruction |= 1 << 4;
15859 case 2: /* VLD3 / VST3. */
15860 constraint (inst.operands[1].immisalign,
15861 _("can't use alignment with this instruction"));
15864 case 3: /* VLD4 / VST4. */
15865 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15866 16, 64, 32, 64, 32, 128, -1);
15867 if (align_good == FAIL)
15871 unsigned alignbits = 0;
15874 case 8: alignbits = 0x1; break;
15875 case 16: alignbits = 0x1; break;
15876 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15879 inst.instruction |= alignbits << 4;
15886 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
15887 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15888 inst.instruction |= 1 << (4 + logsize);
15890 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15891 inst.instruction |= logsize << 10;
15894 /* Encode single n-element structure to all lanes VLD<n> instructions. */
15897 do_neon_ld_dup (void)
15899 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15900 int align_good, do_align = 0;
15902 if (et.type == NT_invtype)
15905 switch ((inst.instruction >> 8) & 3)
15907 case 0: /* VLD1. */
15908 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15909 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15910 &do_align, 16, 16, 32, 32, -1);
15911 if (align_good == FAIL)
15913 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15916 case 2: inst.instruction |= 1 << 5; break;
15917 default: first_error (_("bad list length")); return;
15919 inst.instruction |= neon_logbits (et.size) << 6;
15922 case 1: /* VLD2. */
15923 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15924 &do_align, 8, 16, 16, 32, 32, 64, -1);
15925 if (align_good == FAIL)
15927 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15928 _("bad list length"));
15929 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15930 inst.instruction |= 1 << 5;
15931 inst.instruction |= neon_logbits (et.size) << 6;
15934 case 2: /* VLD3. */
15935 constraint (inst.operands[1].immisalign,
15936 _("can't use alignment with this instruction"));
15937 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15938 _("bad list length"));
15939 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15940 inst.instruction |= 1 << 5;
15941 inst.instruction |= neon_logbits (et.size) << 6;
15944 case 3: /* VLD4. */
15946 int align = inst.operands[1].imm >> 8;
15947 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15948 16, 64, 32, 64, 32, 128, -1);
15949 if (align_good == FAIL)
15951 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15952 _("bad list length"));
15953 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15954 inst.instruction |= 1 << 5;
15955 if (et.size == 32 && align == 128)
15956 inst.instruction |= 0x3 << 6;
15958 inst.instruction |= neon_logbits (et.size) << 6;
15965 inst.instruction |= do_align << 4;
15968 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15969 apart from bits [11:4]. */
15972 do_neon_ldx_stx (void)
15974 if (inst.operands[1].isreg)
15975 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15977 switch (NEON_LANE (inst.operands[0].imm))
15979 case NEON_INTERLEAVE_LANES:
15980 NEON_ENCODE (INTERLV, inst);
15981 do_neon_ld_st_interleave ();
15984 case NEON_ALL_LANES:
15985 NEON_ENCODE (DUP, inst);
15986 if (inst.instruction == N_INV)
15988 first_error ("only loads support such operands");
15995 NEON_ENCODE (LANE, inst);
15996 do_neon_ld_st_lane ();
15999 /* L bit comes from bit mask. */
16000 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16001 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16002 inst.instruction |= inst.operands[1].reg << 16;
16004 if (inst.operands[1].postind)
16006 int postreg = inst.operands[1].imm & 0xf;
16007 constraint (!inst.operands[1].immisreg,
16008 _("post-index must be a register"));
16009 constraint (postreg == 0xd || postreg == 0xf,
16010 _("bad register for post-index"));
16011 inst.instruction |= postreg;
16013 else if (inst.operands[1].writeback)
16015 inst.instruction |= 0xd;
16018 inst.instruction |= 0xf;
16021 inst.instruction |= 0xf9000000;
16023 inst.instruction |= 0xf4000000;
16028 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16030 NEON_ENCODE (FPV8, inst);
16033 do_vfp_sp_dyadic ();
16035 do_vfp_dp_rd_rn_rm ();
16038 inst.instruction |= 0x100;
16040 inst.instruction |= 0xf0000000;
16046 set_it_insn_type (OUTSIDE_IT_INSN);
16048 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16049 first_error (_("invalid instruction shape"));
16055 set_it_insn_type (OUTSIDE_IT_INSN);
16057 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16060 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16063 neon_dyadic_misc (NT_untyped, N_F32, 0);
16067 do_vrint_1 (enum neon_cvt_mode mode)
16069 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
16070 struct neon_type_el et;
16075 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
16076 if (et.type != NT_invtype)
16078 /* VFP encodings. */
16079 if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
16080 || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
16081 set_it_insn_type (OUTSIDE_IT_INSN);
16083 NEON_ENCODE (FPV8, inst);
16085 do_vfp_sp_monadic ();
16087 do_vfp_dp_rd_rm ();
16091 case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
16092 case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
16093 case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
16094 case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
16095 case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
16096 case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
16097 case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
16101 inst.instruction |= (rs == NS_DD) << 8;
16102 do_vfp_cond_or_thumb ();
16106 /* Neon encodings (or something broken...). */
16108 et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
16110 if (et.type == NT_invtype)
16113 set_it_insn_type (OUTSIDE_IT_INSN);
16114 NEON_ENCODE (FLOAT, inst);
16116 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16119 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16120 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16121 inst.instruction |= LOW4 (inst.operands[1].reg);
16122 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16123 inst.instruction |= neon_quad (rs) << 6;
16126 case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
16127 case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
16128 case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
16129 case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
16130 case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
16131 case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
16132 case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
16137 inst.instruction |= 0xfc000000;
16139 inst.instruction |= 0xf0000000;
16146 do_vrint_1 (neon_cvt_mode_x);
16152 do_vrint_1 (neon_cvt_mode_z);
16158 do_vrint_1 (neon_cvt_mode_r);
16164 do_vrint_1 (neon_cvt_mode_a);
16170 do_vrint_1 (neon_cvt_mode_n);
16176 do_vrint_1 (neon_cvt_mode_p);
16182 do_vrint_1 (neon_cvt_mode_m);
16185 /* Crypto v1 instructions. */
16187 do_crypto_2op_1 (unsigned elttype, int op)
16189 set_it_insn_type (OUTSIDE_IT_INSN);
16191 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16197 NEON_ENCODE (INTEGER, inst);
16198 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16199 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16200 inst.instruction |= LOW4 (inst.operands[1].reg);
16201 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16203 inst.instruction |= op << 6;
16206 inst.instruction |= 0xfc000000;
16208 inst.instruction |= 0xf0000000;
16212 do_crypto_3op_1 (int u, int op)
16214 set_it_insn_type (OUTSIDE_IT_INSN);
16216 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16217 N_32 | N_UNT | N_KEY).type == NT_invtype)
16222 NEON_ENCODE (INTEGER, inst);
16223 neon_three_same (1, u, 8 << op);
16229 do_crypto_2op_1 (N_8, 0);
16235 do_crypto_2op_1 (N_8, 1);
16241 do_crypto_2op_1 (N_8, 2);
16247 do_crypto_2op_1 (N_8, 3);
16253 do_crypto_3op_1 (0, 0);
16259 do_crypto_3op_1 (0, 1);
16265 do_crypto_3op_1 (0, 2);
16271 do_crypto_3op_1 (0, 3);
16277 do_crypto_3op_1 (1, 0);
16283 do_crypto_3op_1 (1, 1);
16287 do_sha256su1 (void)
16289 do_crypto_3op_1 (1, 2);
16295 do_crypto_2op_1 (N_32, -1);
16301 do_crypto_2op_1 (N_32, 0);
16305 do_sha256su0 (void)
16307 do_crypto_2op_1 (N_32, 1);
16311 do_crc32_1 (unsigned int poly, unsigned int sz)
16313 unsigned int Rd = inst.operands[0].reg;
16314 unsigned int Rn = inst.operands[1].reg;
16315 unsigned int Rm = inst.operands[2].reg;
16317 set_it_insn_type (OUTSIDE_IT_INSN);
16318 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16319 inst.instruction |= LOW4 (Rn) << 16;
16320 inst.instruction |= LOW4 (Rm);
16321 inst.instruction |= sz << (thumb_mode ? 4 : 21);
16322 inst.instruction |= poly << (thumb_mode ? 20 : 9);
16324 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16325 as_warn (UNPRED_REG ("r15"));
16326 if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16327 as_warn (UNPRED_REG ("r13"));
16367 /* Overall per-instruction processing. */
16369 /* We need to be able to fix up arbitrary expressions in some statements.
16370 This is so that we can handle symbols that are an arbitrary distance from
16371 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16372 which returns part of an address in a form which will be valid for
16373 a data instruction. We do this by pushing the expression into a symbol
16374 in the expr_section, and creating a fix for that. */
16377 fix_new_arm (fragS * frag,
16391 /* Create an absolute valued symbol, so we have something to
16392 refer to in the object file. Unfortunately for us, gas's
16393 generic expression parsing will already have folded out
16394 any use of .set foo/.type foo %function that may have
16395 been used to set type information of the target location,
16396 that's being specified symbolically. We have to presume
16397 the user knows what they are doing. */
16401 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
16403 symbol = symbol_find_or_make (name);
16404 S_SET_SEGMENT (symbol, absolute_section);
16405 symbol_set_frag (symbol, &zero_address_frag);
16406 S_SET_VALUE (symbol, exp->X_add_number);
16407 exp->X_op = O_symbol;
16408 exp->X_add_symbol = symbol;
16409 exp->X_add_number = 0;
16415 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
16416 (enum bfd_reloc_code_real) reloc);
16420 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
16421 pc_rel, (enum bfd_reloc_code_real) reloc);
16425 /* Mark whether the fix is to a THUMB instruction, or an ARM
16427 new_fix->tc_fix_data = thumb_mode;
16430 /* Create a frg for an instruction requiring relaxation. */
16432 output_relax_insn (void)
16438 /* The size of the instruction is unknown, so tie the debug info to the
16439 start of the instruction. */
16440 dwarf2_emit_insn (0);
16442 switch (inst.reloc.exp.X_op)
16445 sym = inst.reloc.exp.X_add_symbol;
16446 offset = inst.reloc.exp.X_add_number;
16450 offset = inst.reloc.exp.X_add_number;
16453 sym = make_expr_symbol (&inst.reloc.exp);
16457 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
16458 inst.relax, sym, offset, NULL/*offset, opcode*/);
16459 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
16462 /* Write a 32-bit thumb instruction to buf. */
16464 put_thumb32_insn (char * buf, unsigned long insn)
16466 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16467 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
16471 output_inst (const char * str)
16477 as_bad ("%s -- `%s'", inst.error, str);
16482 output_relax_insn ();
16485 if (inst.size == 0)
16488 to = frag_more (inst.size);
16489 /* PR 9814: Record the thumb mode into the current frag so that we know
16490 what type of NOP padding to use, if necessary. We override any previous
16491 setting so that if the mode has changed then the NOPS that we use will
16492 match the encoding of the last instruction in the frag. */
16493 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
16495 if (thumb_mode && (inst.size > THUMB_SIZE))
16497 gas_assert (inst.size == (2 * THUMB_SIZE));
16498 put_thumb32_insn (to, inst.instruction);
16500 else if (inst.size > INSN_SIZE)
16502 gas_assert (inst.size == (2 * INSN_SIZE));
16503 md_number_to_chars (to, inst.instruction, INSN_SIZE);
16504 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
16507 md_number_to_chars (to, inst.instruction, inst.size);
16509 if (inst.reloc.type != BFD_RELOC_UNUSED)
16510 fix_new_arm (frag_now, to - frag_now->fr_literal,
16511 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
16514 dwarf2_emit_insn (inst.size);
16518 output_it_inst (int cond, int mask, char * to)
16520 unsigned long instruction = 0xbf00;
16523 instruction |= mask;
16524 instruction |= cond << 4;
16528 to = frag_more (2);
16530 dwarf2_emit_insn (2);
16534 md_number_to_chars (to, instruction, 2);
16539 /* Tag values used in struct asm_opcode's tag field. */
16542 OT_unconditional, /* Instruction cannot be conditionalized.
16543 The ARM condition field is still 0xE. */
16544 OT_unconditionalF, /* Instruction cannot be conditionalized
16545 and carries 0xF in its ARM condition field. */
16546 OT_csuffix, /* Instruction takes a conditional suffix. */
16547 OT_csuffixF, /* Some forms of the instruction take a conditional
16548 suffix, others place 0xF where the condition field
16550 OT_cinfix3, /* Instruction takes a conditional infix,
16551 beginning at character index 3. (In
16552 unified mode, it becomes a suffix.) */
16553 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
16554 tsts, cmps, cmns, and teqs. */
16555 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
16556 character index 3, even in unified mode. Used for
16557 legacy instructions where suffix and infix forms
16558 may be ambiguous. */
16559 OT_csuf_or_in3, /* Instruction takes either a conditional
16560 suffix or an infix at character index 3. */
16561 OT_odd_infix_unc, /* This is the unconditional variant of an
16562 instruction that takes a conditional infix
16563 at an unusual position. In unified mode,
16564 this variant will accept a suffix. */
16565 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
16566 are the conditional variants of instructions that
16567 take conditional infixes in unusual positions.
16568 The infix appears at character index
16569 (tag - OT_odd_infix_0). These are not accepted
16570 in unified mode. */
16573 /* Subroutine of md_assemble, responsible for looking up the primary
16574 opcode from the mnemonic the user wrote. STR points to the
16575 beginning of the mnemonic.
16577 This is not simply a hash table lookup, because of conditional
16578 variants. Most instructions have conditional variants, which are
16579 expressed with a _conditional affix_ to the mnemonic. If we were
16580 to encode each conditional variant as a literal string in the opcode
16581 table, it would have approximately 20,000 entries.
16583 Most mnemonics take this affix as a suffix, and in unified syntax,
16584 'most' is upgraded to 'all'. However, in the divided syntax, some
16585 instructions take the affix as an infix, notably the s-variants of
16586 the arithmetic instructions. Of those instructions, all but six
16587 have the infix appear after the third character of the mnemonic.
16589 Accordingly, the algorithm for looking up primary opcodes given
16592 1. Look up the identifier in the opcode table.
16593 If we find a match, go to step U.
16595 2. Look up the last two characters of the identifier in the
16596 conditions table. If we find a match, look up the first N-2
16597 characters of the identifier in the opcode table. If we
16598 find a match, go to step CE.
16600 3. Look up the fourth and fifth characters of the identifier in
16601 the conditions table. If we find a match, extract those
16602 characters from the identifier, and look up the remaining
16603 characters in the opcode table. If we find a match, go
16608 U. Examine the tag field of the opcode structure, in case this is
16609 one of the six instructions with its conditional infix in an
16610 unusual place. If it is, the tag tells us where to find the
16611 infix; look it up in the conditions table and set inst.cond
16612 accordingly. Otherwise, this is an unconditional instruction.
16613 Again set inst.cond accordingly. Return the opcode structure.
16615 CE. Examine the tag field to make sure this is an instruction that
16616 should receive a conditional suffix. If it is not, fail.
16617 Otherwise, set inst.cond from the suffix we already looked up,
16618 and return the opcode structure.
16620 CM. Examine the tag field to make sure this is an instruction that
16621 should receive a conditional infix after the third character.
16622 If it is not, fail. Otherwise, undo the edits to the current
16623 line of input and proceed as for case CE. */
16625 static const struct asm_opcode *
16626 opcode_lookup (char **str)
16630 const struct asm_opcode *opcode;
16631 const struct asm_cond *cond;
16634 /* Scan up to the end of the mnemonic, which must end in white space,
16635 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
16636 for (base = end = *str; *end != '\0'; end++)
16637 if (*end == ' ' || *end == '.')
16643 /* Handle a possible width suffix and/or Neon type suffix. */
16648 /* The .w and .n suffixes are only valid if the unified syntax is in
16650 if (unified_syntax && end[1] == 'w')
16652 else if (unified_syntax && end[1] == 'n')
16657 inst.vectype.elems = 0;
16659 *str = end + offset;
16661 if (end[offset] == '.')
16663 /* See if we have a Neon type suffix (possible in either unified or
16664 non-unified ARM syntax mode). */
16665 if (parse_neon_type (&inst.vectype, str) == FAIL)
16668 else if (end[offset] != '\0' && end[offset] != ' ')
16674 /* Look for unaffixed or special-case affixed mnemonic. */
16675 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16680 if (opcode->tag < OT_odd_infix_0)
16682 inst.cond = COND_ALWAYS;
16686 if (warn_on_deprecated && unified_syntax)
16687 as_warn (_("conditional infixes are deprecated in unified syntax"));
16688 affix = base + (opcode->tag - OT_odd_infix_0);
16689 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16692 inst.cond = cond->value;
16696 /* Cannot have a conditional suffix on a mnemonic of less than two
16698 if (end - base < 3)
16701 /* Look for suffixed mnemonic. */
16703 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16704 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16706 if (opcode && cond)
16709 switch (opcode->tag)
16711 case OT_cinfix3_legacy:
16712 /* Ignore conditional suffixes matched on infix only mnemonics. */
16716 case OT_cinfix3_deprecated:
16717 case OT_odd_infix_unc:
16718 if (!unified_syntax)
16720 /* else fall through */
16724 case OT_csuf_or_in3:
16725 inst.cond = cond->value;
16728 case OT_unconditional:
16729 case OT_unconditionalF:
16731 inst.cond = cond->value;
16734 /* Delayed diagnostic. */
16735 inst.error = BAD_COND;
16736 inst.cond = COND_ALWAYS;
16745 /* Cannot have a usual-position infix on a mnemonic of less than
16746 six characters (five would be a suffix). */
16747 if (end - base < 6)
16750 /* Look for infixed mnemonic in the usual position. */
16752 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16756 memcpy (save, affix, 2);
16757 memmove (affix, affix + 2, (end - affix) - 2);
16758 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16760 memmove (affix + 2, affix, (end - affix) - 2);
16761 memcpy (affix, save, 2);
16764 && (opcode->tag == OT_cinfix3
16765 || opcode->tag == OT_cinfix3_deprecated
16766 || opcode->tag == OT_csuf_or_in3
16767 || opcode->tag == OT_cinfix3_legacy))
16770 if (warn_on_deprecated && unified_syntax
16771 && (opcode->tag == OT_cinfix3
16772 || opcode->tag == OT_cinfix3_deprecated))
16773 as_warn (_("conditional infixes are deprecated in unified syntax"));
16775 inst.cond = cond->value;
16782 /* This function generates an initial IT instruction, leaving its block
16783 virtually open for the new instructions. Eventually,
16784 the mask will be updated by now_it_add_mask () each time
16785 a new instruction needs to be included in the IT block.
16786 Finally, the block is closed with close_automatic_it_block ().
16787 The block closure can be requested either from md_assemble (),
16788 a tencode (), or due to a label hook. */
16791 new_automatic_it_block (int cond)
16793 now_it.state = AUTOMATIC_IT_BLOCK;
16794 now_it.mask = 0x18;
16796 now_it.block_length = 1;
16797 mapping_state (MAP_THUMB);
16798 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
16799 now_it.warn_deprecated = FALSE;
16800 now_it.insn_cond = TRUE;
16803 /* Close an automatic IT block.
16804 See comments in new_automatic_it_block (). */
16807 close_automatic_it_block (void)
16809 now_it.mask = 0x10;
16810 now_it.block_length = 0;
16813 /* Update the mask of the current automatically-generated IT
16814 instruction. See comments in new_automatic_it_block (). */
16817 now_it_add_mask (int cond)
16819 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
16820 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
16821 | ((bitvalue) << (nbit)))
16822 const int resulting_bit = (cond & 1);
16824 now_it.mask &= 0xf;
16825 now_it.mask = SET_BIT_VALUE (now_it.mask,
16827 (5 - now_it.block_length));
16828 now_it.mask = SET_BIT_VALUE (now_it.mask,
16830 ((5 - now_it.block_length) - 1) );
16831 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16834 #undef SET_BIT_VALUE
16837 /* The IT blocks handling machinery is accessed through the these functions:
16838 it_fsm_pre_encode () from md_assemble ()
16839 set_it_insn_type () optional, from the tencode functions
16840 set_it_insn_type_last () ditto
16841 in_it_block () ditto
16842 it_fsm_post_encode () from md_assemble ()
16843 force_automatic_it_block_close () from label habdling functions
16846 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16847 initializing the IT insn type with a generic initial value depending
16848 on the inst.condition.
16849 2) During the tencode function, two things may happen:
16850 a) The tencode function overrides the IT insn type by
16851 calling either set_it_insn_type (type) or set_it_insn_type_last ().
16852 b) The tencode function queries the IT block state by
16853 calling in_it_block () (i.e. to determine narrow/not narrow mode).
16855 Both set_it_insn_type and in_it_block run the internal FSM state
16856 handling function (handle_it_state), because: a) setting the IT insn
16857 type may incur in an invalid state (exiting the function),
16858 and b) querying the state requires the FSM to be updated.
16859 Specifically we want to avoid creating an IT block for conditional
16860 branches, so it_fsm_pre_encode is actually a guess and we can't
16861 determine whether an IT block is required until the tencode () routine
16862 has decided what type of instruction this actually it.
16863 Because of this, if set_it_insn_type and in_it_block have to be used,
16864 set_it_insn_type has to be called first.
16866 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16867 determines the insn IT type depending on the inst.cond code.
16868 When a tencode () routine encodes an instruction that can be
16869 either outside an IT block, or, in the case of being inside, has to be
16870 the last one, set_it_insn_type_last () will determine the proper
16871 IT instruction type based on the inst.cond code. Otherwise,
16872 set_it_insn_type can be called for overriding that logic or
16873 for covering other cases.
16875 Calling handle_it_state () may not transition the IT block state to
16876 OUTSIDE_IT_BLOCK immediatelly, since the (current) state could be
16877 still queried. Instead, if the FSM determines that the state should
16878 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16879 after the tencode () function: that's what it_fsm_post_encode () does.
16881 Since in_it_block () calls the state handling function to get an
16882 updated state, an error may occur (due to invalid insns combination).
16883 In that case, inst.error is set.
16884 Therefore, inst.error has to be checked after the execution of
16885 the tencode () routine.
16887 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16888 any pending state change (if any) that didn't take place in
16889 handle_it_state () as explained above. */
16892 it_fsm_pre_encode (void)
16894 if (inst.cond != COND_ALWAYS)
16895 inst.it_insn_type = INSIDE_IT_INSN;
16897 inst.it_insn_type = OUTSIDE_IT_INSN;
16899 now_it.state_handled = 0;
16902 /* IT state FSM handling function. */
16905 handle_it_state (void)
16907 now_it.state_handled = 1;
16908 now_it.insn_cond = FALSE;
16910 switch (now_it.state)
16912 case OUTSIDE_IT_BLOCK:
16913 switch (inst.it_insn_type)
16915 case OUTSIDE_IT_INSN:
16918 case INSIDE_IT_INSN:
16919 case INSIDE_IT_LAST_INSN:
16920 if (thumb_mode == 0)
16923 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16924 as_tsktsk (_("Warning: conditional outside an IT block"\
16929 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16930 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16932 /* Automatically generate the IT instruction. */
16933 new_automatic_it_block (inst.cond);
16934 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16935 close_automatic_it_block ();
16939 inst.error = BAD_OUT_IT;
16945 case IF_INSIDE_IT_LAST_INSN:
16946 case NEUTRAL_IT_INSN:
16950 now_it.state = MANUAL_IT_BLOCK;
16951 now_it.block_length = 0;
16956 case AUTOMATIC_IT_BLOCK:
16957 /* Three things may happen now:
16958 a) We should increment current it block size;
16959 b) We should close current it block (closing insn or 4 insns);
16960 c) We should close current it block and start a new one (due
16961 to incompatible conditions or
16962 4 insns-length block reached). */
16964 switch (inst.it_insn_type)
16966 case OUTSIDE_IT_INSN:
16967 /* The closure of the block shall happen immediatelly,
16968 so any in_it_block () call reports the block as closed. */
16969 force_automatic_it_block_close ();
16972 case INSIDE_IT_INSN:
16973 case INSIDE_IT_LAST_INSN:
16974 case IF_INSIDE_IT_LAST_INSN:
16975 now_it.block_length++;
16977 if (now_it.block_length > 4
16978 || !now_it_compatible (inst.cond))
16980 force_automatic_it_block_close ();
16981 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16982 new_automatic_it_block (inst.cond);
16986 now_it.insn_cond = TRUE;
16987 now_it_add_mask (inst.cond);
16990 if (now_it.state == AUTOMATIC_IT_BLOCK
16991 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16992 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16993 close_automatic_it_block ();
16996 case NEUTRAL_IT_INSN:
16997 now_it.block_length++;
16998 now_it.insn_cond = TRUE;
17000 if (now_it.block_length > 4)
17001 force_automatic_it_block_close ();
17003 now_it_add_mask (now_it.cc & 1);
17007 close_automatic_it_block ();
17008 now_it.state = MANUAL_IT_BLOCK;
17013 case MANUAL_IT_BLOCK:
17015 /* Check conditional suffixes. */
17016 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
17019 now_it.mask &= 0x1f;
17020 is_last = (now_it.mask == 0x10);
17021 now_it.insn_cond = TRUE;
17023 switch (inst.it_insn_type)
17025 case OUTSIDE_IT_INSN:
17026 inst.error = BAD_NOT_IT;
17029 case INSIDE_IT_INSN:
17030 if (cond != inst.cond)
17032 inst.error = BAD_IT_COND;
17037 case INSIDE_IT_LAST_INSN:
17038 case IF_INSIDE_IT_LAST_INSN:
17039 if (cond != inst.cond)
17041 inst.error = BAD_IT_COND;
17046 inst.error = BAD_BRANCH;
17051 case NEUTRAL_IT_INSN:
17052 /* The BKPT instruction is unconditional even in an IT block. */
17056 inst.error = BAD_IT_IT;
17066 struct depr_insn_mask
17068 unsigned long pattern;
17069 unsigned long mask;
17070 const char* description;
17073 /* List of 16-bit instruction patterns deprecated in an IT block in
17075 static const struct depr_insn_mask depr_it_insns[] = {
17076 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17077 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17078 { 0xa000, 0xb800, N_("ADR") },
17079 { 0x4800, 0xf800, N_("Literal loads") },
17080 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17081 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17086 it_fsm_post_encode (void)
17090 if (!now_it.state_handled)
17091 handle_it_state ();
17093 if (now_it.insn_cond
17094 && !now_it.warn_deprecated
17095 && warn_on_deprecated
17096 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17098 if (inst.instruction >= 0x10000)
17100 as_warn (_("IT blocks containing 32-bit Thumb instructions are "
17101 "deprecated in ARMv8"));
17102 now_it.warn_deprecated = TRUE;
17106 const struct depr_insn_mask *p = depr_it_insns;
17108 while (p->mask != 0)
17110 if ((inst.instruction & p->mask) == p->pattern)
17112 as_warn (_("IT blocks containing 16-bit Thumb instructions "
17113 "of the following class are deprecated in ARMv8: "
17114 "%s"), p->description);
17115 now_it.warn_deprecated = TRUE;
17123 if (now_it.block_length > 1)
17125 as_warn (_("IT blocks containing more than one conditional "
17126 "instruction are deprecated in ARMv8"));
17127 now_it.warn_deprecated = TRUE;
17131 is_last = (now_it.mask == 0x10);
17134 now_it.state = OUTSIDE_IT_BLOCK;
17140 force_automatic_it_block_close (void)
17142 if (now_it.state == AUTOMATIC_IT_BLOCK)
17144 close_automatic_it_block ();
17145 now_it.state = OUTSIDE_IT_BLOCK;
17153 if (!now_it.state_handled)
17154 handle_it_state ();
17156 return now_it.state != OUTSIDE_IT_BLOCK;
17160 md_assemble (char *str)
17163 const struct asm_opcode * opcode;
17165 /* Align the previous label if needed. */
17166 if (last_label_seen != NULL)
17168 symbol_set_frag (last_label_seen, frag_now);
17169 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17170 S_SET_SEGMENT (last_label_seen, now_seg);
17173 memset (&inst, '\0', sizeof (inst));
17174 inst.reloc.type = BFD_RELOC_UNUSED;
17176 opcode = opcode_lookup (&p);
17179 /* It wasn't an instruction, but it might be a register alias of
17180 the form alias .req reg, or a Neon .dn/.qn directive. */
17181 if (! create_register_alias (str, p)
17182 && ! create_neon_reg_alias (str, p))
17183 as_bad (_("bad instruction `%s'"), str);
17188 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17189 as_warn (_("s suffix on comparison instruction is deprecated"));
17191 /* The value which unconditional instructions should have in place of the
17192 condition field. */
17193 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
17197 arm_feature_set variant;
17199 variant = cpu_variant;
17200 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17201 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17202 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17203 /* Check that this instruction is supported for this CPU. */
17204 if (!opcode->tvariant
17205 || (thumb_mode == 1
17206 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17208 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
17211 if (inst.cond != COND_ALWAYS && !unified_syntax
17212 && opcode->tencode != do_t_branch)
17214 as_bad (_("Thumb does not support conditional execution"));
17218 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17220 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17221 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17222 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17224 /* Two things are addressed here.
17225 1) Implicit require narrow instructions on Thumb-1.
17226 This avoids relaxation accidentally introducing Thumb-2
17228 2) Reject wide instructions in non Thumb-2 cores. */
17229 if (inst.size_req == 0)
17231 else if (inst.size_req == 4)
17233 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
17239 inst.instruction = opcode->tvalue;
17241 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17243 /* Prepare the it_insn_type for those encodings that don't set
17245 it_fsm_pre_encode ();
17247 opcode->tencode ();
17249 it_fsm_post_encode ();
17252 if (!(inst.error || inst.relax))
17254 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17255 inst.size = (inst.instruction > 0xffff ? 4 : 2);
17256 if (inst.size_req && inst.size_req != inst.size)
17258 as_bad (_("cannot honor width suffix -- `%s'"), str);
17263 /* Something has gone badly wrong if we try to relax a fixed size
17265 gas_assert (inst.size_req == 0 || !inst.relax);
17267 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17268 *opcode->tvariant);
17269 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17270 set those bits when Thumb-2 32-bit instructions are seen. ie.
17271 anything other than bl/blx and v6-M instructions.
17272 This is overly pessimistic for relaxable instructions. */
17273 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17275 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17276 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17277 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17280 check_neon_suffixes;
17284 mapping_state (MAP_THUMB);
17287 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17291 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
17292 is_bx = (opcode->aencode == do_bx);
17294 /* Check that this instruction is supported for this CPU. */
17295 if (!(is_bx && fix_v4bx)
17296 && !(opcode->avariant &&
17297 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17299 as_bad (_("selected processor does not support ARM mode `%s'"), str);
17304 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17308 inst.instruction = opcode->avalue;
17309 if (opcode->tag == OT_unconditionalF)
17310 inst.instruction |= 0xF << 28;
17312 inst.instruction |= inst.cond << 28;
17313 inst.size = INSN_SIZE;
17314 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17316 it_fsm_pre_encode ();
17317 opcode->aencode ();
17318 it_fsm_post_encode ();
17320 /* Arm mode bx is marked as both v4T and v5 because it's still required
17321 on a hypothetical non-thumb v5 core. */
17323 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17325 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17326 *opcode->avariant);
17328 check_neon_suffixes;
17332 mapping_state (MAP_ARM);
17337 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
17345 check_it_blocks_finished (void)
17350 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17351 if (seg_info (sect)->tc_segment_info_data.current_it.state
17352 == MANUAL_IT_BLOCK)
17354 as_warn (_("section '%s' finished with an open IT block."),
17358 if (now_it.state == MANUAL_IT_BLOCK)
17359 as_warn (_("file finished with an open IT block."));
17363 /* Various frobbings of labels and their addresses. */
17366 arm_start_line_hook (void)
17368 last_label_seen = NULL;
17372 arm_frob_label (symbolS * sym)
17374 last_label_seen = sym;
17376 ARM_SET_THUMB (sym, thumb_mode);
17378 #if defined OBJ_COFF || defined OBJ_ELF
17379 ARM_SET_INTERWORK (sym, support_interwork);
17382 force_automatic_it_block_close ();
17384 /* Note - do not allow local symbols (.Lxxx) to be labelled
17385 as Thumb functions. This is because these labels, whilst
17386 they exist inside Thumb code, are not the entry points for
17387 possible ARM->Thumb calls. Also, these labels can be used
17388 as part of a computed goto or switch statement. eg gcc
17389 can generate code that looks like this:
17391 ldr r2, [pc, .Laaa]
17401 The first instruction loads the address of the jump table.
17402 The second instruction converts a table index into a byte offset.
17403 The third instruction gets the jump address out of the table.
17404 The fourth instruction performs the jump.
17406 If the address stored at .Laaa is that of a symbol which has the
17407 Thumb_Func bit set, then the linker will arrange for this address
17408 to have the bottom bit set, which in turn would mean that the
17409 address computation performed by the third instruction would end
17410 up with the bottom bit set. Since the ARM is capable of unaligned
17411 word loads, the instruction would then load the incorrect address
17412 out of the jump table, and chaos would ensue. */
17413 if (label_is_thumb_function_name
17414 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
17415 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
17417 /* When the address of a Thumb function is taken the bottom
17418 bit of that address should be set. This will allow
17419 interworking between Arm and Thumb functions to work
17422 THUMB_SET_FUNC (sym, 1);
17424 label_is_thumb_function_name = FALSE;
17427 dwarf2_emit_label (sym);
17431 arm_data_in_code (void)
17433 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17435 *input_line_pointer = '/';
17436 input_line_pointer += 5;
17437 *input_line_pointer = 0;
17445 arm_canonicalize_symbol_name (char * name)
17449 if (thumb_mode && (len = strlen (name)) > 5
17450 && streq (name + len - 5, "/data"))
17451 *(name + len - 5) = 0;
17456 /* Table of all register names defined by default. The user can
17457 define additional names with .req. Note that all register names
17458 should appear in both upper and lowercase variants. Some registers
17459 also have mixed-case names. */
17461 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
17462 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
17463 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
17464 #define REGSET(p,t) \
17465 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
17466 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
17467 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
17468 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
17469 #define REGSETH(p,t) \
17470 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
17471 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
17472 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
17473 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
17474 #define REGSET2(p,t) \
17475 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
17476 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
17477 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
17478 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
17479 #define SPLRBANK(base,bank,t) \
17480 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
17481 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
17482 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
17483 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
17484 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
17485 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
17487 static const struct reg_entry reg_names[] =
17489 /* ARM integer registers. */
17490 REGSET(r, RN), REGSET(R, RN),
17492 /* ATPCS synonyms. */
17493 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
17494 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
17495 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
17497 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
17498 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
17499 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
17501 /* Well-known aliases. */
17502 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
17503 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
17505 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
17506 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
17508 /* Coprocessor numbers. */
17509 REGSET(p, CP), REGSET(P, CP),
17511 /* Coprocessor register numbers. The "cr" variants are for backward
17513 REGSET(c, CN), REGSET(C, CN),
17514 REGSET(cr, CN), REGSET(CR, CN),
17516 /* ARM banked registers. */
17517 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
17518 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
17519 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
17520 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
17521 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
17522 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
17523 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
17525 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
17526 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
17527 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
17528 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
17529 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
17530 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
17531 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
17532 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
17534 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
17535 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
17536 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
17537 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
17538 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
17539 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
17540 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
17541 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
17542 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
17544 /* FPA registers. */
17545 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
17546 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
17548 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
17549 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
17551 /* VFP SP registers. */
17552 REGSET(s,VFS), REGSET(S,VFS),
17553 REGSETH(s,VFS), REGSETH(S,VFS),
17555 /* VFP DP Registers. */
17556 REGSET(d,VFD), REGSET(D,VFD),
17557 /* Extra Neon DP registers. */
17558 REGSETH(d,VFD), REGSETH(D,VFD),
17560 /* Neon QP registers. */
17561 REGSET2(q,NQ), REGSET2(Q,NQ),
17563 /* VFP control registers. */
17564 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
17565 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
17566 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
17567 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
17568 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
17569 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
17571 /* Maverick DSP coprocessor registers. */
17572 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
17573 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
17575 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
17576 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
17577 REGDEF(dspsc,0,DSPSC),
17579 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
17580 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
17581 REGDEF(DSPSC,0,DSPSC),
17583 /* iWMMXt data registers - p0, c0-15. */
17584 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
17586 /* iWMMXt control registers - p1, c0-3. */
17587 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
17588 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
17589 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
17590 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
17592 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
17593 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
17594 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
17595 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
17596 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
17598 /* XScale accumulator registers. */
17599 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
17605 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
17606 within psr_required_here. */
17607 static const struct asm_psr psrs[] =
17609 /* Backward compatibility notation. Note that "all" is no longer
17610 truly all possible PSR bits. */
17611 {"all", PSR_c | PSR_f},
17615 /* Individual flags. */
17621 /* Combinations of flags. */
17622 {"fs", PSR_f | PSR_s},
17623 {"fx", PSR_f | PSR_x},
17624 {"fc", PSR_f | PSR_c},
17625 {"sf", PSR_s | PSR_f},
17626 {"sx", PSR_s | PSR_x},
17627 {"sc", PSR_s | PSR_c},
17628 {"xf", PSR_x | PSR_f},
17629 {"xs", PSR_x | PSR_s},
17630 {"xc", PSR_x | PSR_c},
17631 {"cf", PSR_c | PSR_f},
17632 {"cs", PSR_c | PSR_s},
17633 {"cx", PSR_c | PSR_x},
17634 {"fsx", PSR_f | PSR_s | PSR_x},
17635 {"fsc", PSR_f | PSR_s | PSR_c},
17636 {"fxs", PSR_f | PSR_x | PSR_s},
17637 {"fxc", PSR_f | PSR_x | PSR_c},
17638 {"fcs", PSR_f | PSR_c | PSR_s},
17639 {"fcx", PSR_f | PSR_c | PSR_x},
17640 {"sfx", PSR_s | PSR_f | PSR_x},
17641 {"sfc", PSR_s | PSR_f | PSR_c},
17642 {"sxf", PSR_s | PSR_x | PSR_f},
17643 {"sxc", PSR_s | PSR_x | PSR_c},
17644 {"scf", PSR_s | PSR_c | PSR_f},
17645 {"scx", PSR_s | PSR_c | PSR_x},
17646 {"xfs", PSR_x | PSR_f | PSR_s},
17647 {"xfc", PSR_x | PSR_f | PSR_c},
17648 {"xsf", PSR_x | PSR_s | PSR_f},
17649 {"xsc", PSR_x | PSR_s | PSR_c},
17650 {"xcf", PSR_x | PSR_c | PSR_f},
17651 {"xcs", PSR_x | PSR_c | PSR_s},
17652 {"cfs", PSR_c | PSR_f | PSR_s},
17653 {"cfx", PSR_c | PSR_f | PSR_x},
17654 {"csf", PSR_c | PSR_s | PSR_f},
17655 {"csx", PSR_c | PSR_s | PSR_x},
17656 {"cxf", PSR_c | PSR_x | PSR_f},
17657 {"cxs", PSR_c | PSR_x | PSR_s},
17658 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
17659 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
17660 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
17661 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
17662 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
17663 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
17664 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
17665 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
17666 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
17667 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
17668 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
17669 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
17670 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
17671 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
17672 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
17673 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
17674 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
17675 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
17676 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
17677 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
17678 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
17679 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
17680 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
17681 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
17684 /* Table of V7M psr names. */
17685 static const struct asm_psr v7m_psrs[] =
17687 {"apsr", 0 }, {"APSR", 0 },
17688 {"iapsr", 1 }, {"IAPSR", 1 },
17689 {"eapsr", 2 }, {"EAPSR", 2 },
17690 {"psr", 3 }, {"PSR", 3 },
17691 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
17692 {"ipsr", 5 }, {"IPSR", 5 },
17693 {"epsr", 6 }, {"EPSR", 6 },
17694 {"iepsr", 7 }, {"IEPSR", 7 },
17695 {"msp", 8 }, {"MSP", 8 },
17696 {"psp", 9 }, {"PSP", 9 },
17697 {"primask", 16}, {"PRIMASK", 16},
17698 {"basepri", 17}, {"BASEPRI", 17},
17699 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
17700 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
17701 {"faultmask", 19}, {"FAULTMASK", 19},
17702 {"control", 20}, {"CONTROL", 20}
17705 /* Table of all shift-in-operand names. */
17706 static const struct asm_shift_name shift_names [] =
17708 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
17709 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
17710 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
17711 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
17712 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
17713 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
17716 /* Table of all explicit relocation names. */
17718 static struct reloc_entry reloc_names[] =
17720 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
17721 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
17722 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
17723 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
17724 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
17725 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
17726 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
17727 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
17728 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
17729 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
17730 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
17731 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
17732 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
17733 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
17734 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
17735 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
17736 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
17737 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
17741 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
17742 static const struct asm_cond conds[] =
17746 {"cs", 0x2}, {"hs", 0x2},
17747 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
17761 #define UL_BARRIER(L,U,CODE,FEAT) \
17762 { L, CODE, ARM_FEATURE (FEAT, 0) }, \
17763 { U, CODE, ARM_FEATURE (FEAT, 0) }
17765 static struct asm_barrier_opt barrier_opt_names[] =
17767 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
17768 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
17769 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
17770 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
17771 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
17772 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
17773 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
17774 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
17775 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
17776 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
17777 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
17778 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
17779 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
17780 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
17781 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
17782 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
17787 /* Table of ARM-format instructions. */
17789 /* Macros for gluing together operand strings. N.B. In all cases
17790 other than OPS0, the trailing OP_stop comes from default
17791 zero-initialization of the unspecified elements of the array. */
17792 #define OPS0() { OP_stop, }
17793 #define OPS1(a) { OP_##a, }
17794 #define OPS2(a,b) { OP_##a,OP_##b, }
17795 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
17796 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
17797 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
17798 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
17800 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
17801 This is useful when mixing operands for ARM and THUMB, i.e. using the
17802 MIX_ARM_THUMB_OPERANDS macro.
17803 In order to use these macros, prefix the number of operands with _
17805 #define OPS_1(a) { a, }
17806 #define OPS_2(a,b) { a,b, }
17807 #define OPS_3(a,b,c) { a,b,c, }
17808 #define OPS_4(a,b,c,d) { a,b,c,d, }
17809 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
17810 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
17812 /* These macros abstract out the exact format of the mnemonic table and
17813 save some repeated characters. */
17815 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
17816 #define TxCE(mnem, op, top, nops, ops, ae, te) \
17817 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
17818 THUMB_VARIANT, do_##ae, do_##te }
17820 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
17821 a T_MNEM_xyz enumerator. */
17822 #define TCE(mnem, aop, top, nops, ops, ae, te) \
17823 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
17824 #define tCE(mnem, aop, top, nops, ops, ae, te) \
17825 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17827 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
17828 infix after the third character. */
17829 #define TxC3(mnem, op, top, nops, ops, ae, te) \
17830 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
17831 THUMB_VARIANT, do_##ae, do_##te }
17832 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
17833 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
17834 THUMB_VARIANT, do_##ae, do_##te }
17835 #define TC3(mnem, aop, top, nops, ops, ae, te) \
17836 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
17837 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
17838 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
17839 #define tC3(mnem, aop, top, nops, ops, ae, te) \
17840 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17841 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
17842 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17844 /* Mnemonic that cannot be conditionalized. The ARM condition-code
17845 field is still 0xE. Many of the Thumb variants can be executed
17846 conditionally, so this is checked separately. */
17847 #define TUE(mnem, op, top, nops, ops, ae, te) \
17848 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17849 THUMB_VARIANT, do_##ae, do_##te }
17851 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
17852 Used by mnemonics that have very minimal differences in the encoding for
17853 ARM and Thumb variants and can be handled in a common function. */
17854 #define TUEc(mnem, op, top, nops, ops, en) \
17855 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17856 THUMB_VARIANT, do_##en, do_##en }
17858 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17859 condition code field. */
17860 #define TUF(mnem, op, top, nops, ops, ae, te) \
17861 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17862 THUMB_VARIANT, do_##ae, do_##te }
17864 /* ARM-only variants of all the above. */
17865 #define CE(mnem, op, nops, ops, ae) \
17866 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17868 #define C3(mnem, op, nops, ops, ae) \
17869 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17871 /* Legacy mnemonics that always have conditional infix after the third
17873 #define CL(mnem, op, nops, ops, ae) \
17874 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17875 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17877 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
17878 #define cCE(mnem, op, nops, ops, ae) \
17879 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17881 /* Legacy coprocessor instructions where conditional infix and conditional
17882 suffix are ambiguous. For consistency this includes all FPA instructions,
17883 not just the potentially ambiguous ones. */
17884 #define cCL(mnem, op, nops, ops, ae) \
17885 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17886 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17888 /* Coprocessor, takes either a suffix or a position-3 infix
17889 (for an FPA corner case). */
17890 #define C3E(mnem, op, nops, ops, ae) \
17891 { mnem, OPS##nops ops, OT_csuf_or_in3, \
17892 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17894 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
17895 { m1 #m2 m3, OPS##nops ops, \
17896 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17897 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17899 #define CM(m1, m2, op, nops, ops, ae) \
17900 xCM_ (m1, , m2, op, nops, ops, ae), \
17901 xCM_ (m1, eq, m2, op, nops, ops, ae), \
17902 xCM_ (m1, ne, m2, op, nops, ops, ae), \
17903 xCM_ (m1, cs, m2, op, nops, ops, ae), \
17904 xCM_ (m1, hs, m2, op, nops, ops, ae), \
17905 xCM_ (m1, cc, m2, op, nops, ops, ae), \
17906 xCM_ (m1, ul, m2, op, nops, ops, ae), \
17907 xCM_ (m1, lo, m2, op, nops, ops, ae), \
17908 xCM_ (m1, mi, m2, op, nops, ops, ae), \
17909 xCM_ (m1, pl, m2, op, nops, ops, ae), \
17910 xCM_ (m1, vs, m2, op, nops, ops, ae), \
17911 xCM_ (m1, vc, m2, op, nops, ops, ae), \
17912 xCM_ (m1, hi, m2, op, nops, ops, ae), \
17913 xCM_ (m1, ls, m2, op, nops, ops, ae), \
17914 xCM_ (m1, ge, m2, op, nops, ops, ae), \
17915 xCM_ (m1, lt, m2, op, nops, ops, ae), \
17916 xCM_ (m1, gt, m2, op, nops, ops, ae), \
17917 xCM_ (m1, le, m2, op, nops, ops, ae), \
17918 xCM_ (m1, al, m2, op, nops, ops, ae)
17920 #define UE(mnem, op, nops, ops, ae) \
17921 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17923 #define UF(mnem, op, nops, ops, ae) \
17924 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17926 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
17927 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17928 use the same encoding function for each. */
17929 #define NUF(mnem, op, nops, ops, enc) \
17930 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
17931 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17933 /* Neon data processing, version which indirects through neon_enc_tab for
17934 the various overloaded versions of opcodes. */
17935 #define nUF(mnem, op, nops, ops, enc) \
17936 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
17937 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17939 /* Neon insn with conditional suffix for the ARM version, non-overloaded
17941 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
17942 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
17943 THUMB_VARIANT, do_##enc, do_##enc }
17945 #define NCE(mnem, op, nops, ops, enc) \
17946 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17948 #define NCEF(mnem, op, nops, ops, enc) \
17949 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17951 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
17952 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
17953 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
17954 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17956 #define nCE(mnem, op, nops, ops, enc) \
17957 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17959 #define nCEF(mnem, op, nops, ops, enc) \
17960 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17964 static const struct asm_opcode insns[] =
17966 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
17967 #define THUMB_VARIANT &arm_ext_v4t
17968 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
17969 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
17970 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
17971 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
17972 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
17973 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
17974 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
17975 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
17976 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
17977 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
17978 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
17979 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
17980 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
17981 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
17982 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
17983 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
17985 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17986 for setting PSR flag bits. They are obsolete in V6 and do not
17987 have Thumb equivalents. */
17988 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17989 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17990 CL("tstp", 110f000, 2, (RR, SH), cmp),
17991 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17992 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17993 CL("cmpp", 150f000, 2, (RR, SH), cmp),
17994 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17995 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17996 CL("cmnp", 170f000, 2, (RR, SH), cmp),
17998 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
17999 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18000 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18001 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18003 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18004 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18005 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18007 OP_ADDRGLDR),ldst, t_ldst),
18008 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18010 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18011 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18012 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18013 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18014 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18015 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18017 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18018 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18019 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18020 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18023 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18024 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18025 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18027 /* Thumb-compatibility pseudo ops. */
18028 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18029 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18030 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18031 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18032 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18033 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18034 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18035 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18036 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18037 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18038 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18039 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18041 /* These may simplify to neg. */
18042 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18043 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18045 #undef THUMB_VARIANT
18046 #define THUMB_VARIANT & arm_ext_v6
18048 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18050 /* V1 instructions with no Thumb analogue prior to V6T2. */
18051 #undef THUMB_VARIANT
18052 #define THUMB_VARIANT & arm_ext_v6t2
18054 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18055 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18056 CL("teqp", 130f000, 2, (RR, SH), cmp),
18058 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18059 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18060 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18061 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18063 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18064 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18066 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18067 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18069 /* V1 instructions with no Thumb analogue at all. */
18070 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18071 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18073 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18074 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18075 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18076 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18077 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18078 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18079 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18080 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18083 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18084 #undef THUMB_VARIANT
18085 #define THUMB_VARIANT & arm_ext_v4t
18087 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18088 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18090 #undef THUMB_VARIANT
18091 #define THUMB_VARIANT & arm_ext_v6t2
18093 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18094 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18096 /* Generic coprocessor instructions. */
18097 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18098 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18099 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18100 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18101 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18102 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18103 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18106 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18108 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18109 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18112 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18113 #undef THUMB_VARIANT
18114 #define THUMB_VARIANT & arm_ext_msr
18116 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18117 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18120 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18121 #undef THUMB_VARIANT
18122 #define THUMB_VARIANT & arm_ext_v6t2
18124 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18125 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18126 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18127 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18128 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18129 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18130 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18131 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18134 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18135 #undef THUMB_VARIANT
18136 #define THUMB_VARIANT & arm_ext_v4t
18138 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18139 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18140 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18141 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18142 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18143 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18146 #define ARM_VARIANT & arm_ext_v4t_5
18148 /* ARM Architecture 4T. */
18149 /* Note: bx (and blx) are required on V5, even if the processor does
18150 not support Thumb. */
18151 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18154 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18155 #undef THUMB_VARIANT
18156 #define THUMB_VARIANT & arm_ext_v5t
18158 /* Note: blx has 2 variants; the .value coded here is for
18159 BLX(2). Only this variant has conditional execution. */
18160 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18161 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18163 #undef THUMB_VARIANT
18164 #define THUMB_VARIANT & arm_ext_v6t2
18166 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18167 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18168 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18169 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18170 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18171 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18172 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18173 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18176 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18177 #undef THUMB_VARIANT
18178 #define THUMB_VARIANT &arm_ext_v5exp
18180 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18181 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18182 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18183 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18185 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18186 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18188 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18189 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18190 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18191 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18193 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18194 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18195 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18196 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18198 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18199 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18201 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18202 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18203 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18204 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18207 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18208 #undef THUMB_VARIANT
18209 #define THUMB_VARIANT &arm_ext_v6t2
18211 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18212 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18214 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18215 ADDRGLDRS), ldrd, t_ldstd),
18217 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18218 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18221 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18223 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18226 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18227 #undef THUMB_VARIANT
18228 #define THUMB_VARIANT & arm_ext_v6
18230 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18231 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18232 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18233 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18234 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18235 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18236 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18237 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18238 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18239 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18241 #undef THUMB_VARIANT
18242 #define THUMB_VARIANT & arm_ext_v6t2
18244 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18245 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18247 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18248 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18250 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18251 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18253 /* ARM V6 not included in V7M. */
18254 #undef THUMB_VARIANT
18255 #define THUMB_VARIANT & arm_ext_v6_notm
18256 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18257 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18258 UF(rfeib, 9900a00, 1, (RRw), rfe),
18259 UF(rfeda, 8100a00, 1, (RRw), rfe),
18260 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18261 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18262 UF(rfefa, 8100a00, 1, (RRw), rfe),
18263 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18264 UF(rfeed, 9900a00, 1, (RRw), rfe),
18265 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18266 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18267 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18268 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18269 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18270 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18271 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18272 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18273 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18275 /* ARM V6 not included in V7M (eg. integer SIMD). */
18276 #undef THUMB_VARIANT
18277 #define THUMB_VARIANT & arm_ext_v6_dsp
18278 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18279 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18280 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18281 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18282 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18283 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18284 /* Old name for QASX. */
18285 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18286 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18287 /* Old name for QSAX. */
18288 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18289 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18290 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18291 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18292 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18293 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18294 /* Old name for SASX. */
18295 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18296 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18297 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18298 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18299 /* Old name for SHASX. */
18300 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18301 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18302 /* Old name for SHSAX. */
18303 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18304 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18305 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18306 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18307 /* Old name for SSAX. */
18308 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18309 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18310 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18311 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18312 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18313 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18314 /* Old name for UASX. */
18315 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18316 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18317 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18318 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18319 /* Old name for UHASX. */
18320 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18321 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18322 /* Old name for UHSAX. */
18323 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18324 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18325 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18326 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18327 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18328 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18329 /* Old name for UQASX. */
18330 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18331 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18332 /* Old name for UQSAX. */
18333 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18334 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18335 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18336 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18337 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18338 /* Old name for USAX. */
18339 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18340 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18341 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18342 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18343 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18344 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18345 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18346 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18347 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18348 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18349 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18350 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18351 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18352 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18353 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18354 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18355 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18356 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18357 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18358 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18359 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18360 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18361 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18362 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18363 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18364 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18365 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18366 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18367 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18368 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18369 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18370 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18371 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18372 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18375 #define ARM_VARIANT & arm_ext_v6k
18376 #undef THUMB_VARIANT
18377 #define THUMB_VARIANT & arm_ext_v6k
18379 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18380 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18381 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18382 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18384 #undef THUMB_VARIANT
18385 #define THUMB_VARIANT & arm_ext_v6_notm
18386 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18388 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18389 RRnpcb), strexd, t_strexd),
18391 #undef THUMB_VARIANT
18392 #define THUMB_VARIANT & arm_ext_v6t2
18393 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18395 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18397 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18399 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18401 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18404 #define ARM_VARIANT & arm_ext_sec
18405 #undef THUMB_VARIANT
18406 #define THUMB_VARIANT & arm_ext_sec
18408 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18411 #define ARM_VARIANT & arm_ext_virt
18412 #undef THUMB_VARIANT
18413 #define THUMB_VARIANT & arm_ext_virt
18415 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18416 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18419 #define ARM_VARIANT & arm_ext_v6t2
18420 #undef THUMB_VARIANT
18421 #define THUMB_VARIANT & arm_ext_v6t2
18423 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18424 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18425 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18426 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18428 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18429 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18430 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18431 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18433 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18434 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18435 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18436 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18438 /* Thumb-only instructions. */
18440 #define ARM_VARIANT NULL
18441 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18442 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18444 /* ARM does not really have an IT instruction, so always allow it.
18445 The opcode is copied from Thumb in order to allow warnings in
18446 -mimplicit-it=[never | arm] modes. */
18448 #define ARM_VARIANT & arm_ext_v1
18450 TUE("it", bf08, bf08, 1, (COND), it, t_it),
18451 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
18452 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
18453 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
18454 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
18455 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
18456 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
18457 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
18458 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
18459 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
18460 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
18461 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
18462 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
18463 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
18464 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18465 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18466 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18467 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18469 /* Thumb2 only instructions. */
18471 #define ARM_VARIANT NULL
18473 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18474 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18475 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18476 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18477 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18478 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18480 /* Hardware division instructions. */
18482 #define ARM_VARIANT & arm_ext_adiv
18483 #undef THUMB_VARIANT
18484 #define THUMB_VARIANT & arm_ext_div
18486 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18487 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18489 /* ARM V6M/V7 instructions. */
18491 #define ARM_VARIANT & arm_ext_barrier
18492 #undef THUMB_VARIANT
18493 #define THUMB_VARIANT & arm_ext_barrier
18495 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18496 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18497 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18499 /* ARM V7 instructions. */
18501 #define ARM_VARIANT & arm_ext_v7
18502 #undef THUMB_VARIANT
18503 #define THUMB_VARIANT & arm_ext_v7
18505 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18506 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18509 #define ARM_VARIANT & arm_ext_mp
18510 #undef THUMB_VARIANT
18511 #define THUMB_VARIANT & arm_ext_mp
18513 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18515 /* AArchv8 instructions. */
18517 #define ARM_VARIANT & arm_ext_v8
18518 #undef THUMB_VARIANT
18519 #define THUMB_VARIANT & arm_ext_v8
18521 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18522 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18523 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18524 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18526 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18527 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18528 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18530 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18532 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18534 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18536 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18537 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18538 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18539 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18540 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18541 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18543 /* ARMv8 T32 only. */
18545 #define ARM_VARIANT NULL
18546 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
18547 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
18548 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
18550 /* FP for ARMv8. */
18552 #define ARM_VARIANT & fpu_vfp_ext_armv8
18553 #undef THUMB_VARIANT
18554 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18556 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
18557 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
18558 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
18559 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
18560 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18561 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18562 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
18563 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
18564 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
18565 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
18566 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
18567 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
18568 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
18569 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
18570 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
18571 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
18572 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
18574 /* Crypto v1 extensions. */
18576 #define ARM_VARIANT & fpu_crypto_ext_armv8
18577 #undef THUMB_VARIANT
18578 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18580 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18581 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18582 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18583 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18584 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18585 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18586 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18587 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18588 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18589 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18590 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18591 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18592 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18593 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
18596 #define ARM_VARIANT & crc_ext_armv8
18597 #undef THUMB_VARIANT
18598 #define THUMB_VARIANT & crc_ext_armv8
18599 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
18600 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
18601 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
18602 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
18603 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
18604 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
18607 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
18608 #undef THUMB_VARIANT
18609 #define THUMB_VARIANT NULL
18611 cCE("wfs", e200110, 1, (RR), rd),
18612 cCE("rfs", e300110, 1, (RR), rd),
18613 cCE("wfc", e400110, 1, (RR), rd),
18614 cCE("rfc", e500110, 1, (RR), rd),
18616 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
18617 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
18618 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
18619 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
18621 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
18622 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
18623 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
18624 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
18626 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
18627 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
18628 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
18629 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
18630 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
18631 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
18632 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
18633 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
18634 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
18635 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
18636 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
18637 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
18639 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
18640 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
18641 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
18642 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
18643 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
18644 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
18645 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
18646 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
18647 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
18648 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
18649 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
18650 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
18652 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
18653 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
18654 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
18655 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
18656 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
18657 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
18658 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
18659 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
18660 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
18661 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
18662 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
18663 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
18665 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
18666 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
18667 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
18668 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
18669 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
18670 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
18671 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
18672 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
18673 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
18674 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
18675 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
18676 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
18678 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
18679 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
18680 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
18681 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
18682 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
18683 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
18684 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
18685 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
18686 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
18687 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
18688 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
18689 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
18691 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
18692 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
18693 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
18694 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
18695 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
18696 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
18697 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
18698 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
18699 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
18700 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
18701 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
18702 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
18704 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
18705 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
18706 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
18707 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
18708 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
18709 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
18710 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
18711 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
18712 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
18713 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
18714 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
18715 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
18717 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
18718 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
18719 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
18720 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
18721 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
18722 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
18723 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
18724 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
18725 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
18726 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
18727 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
18728 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
18730 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
18731 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
18732 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
18733 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
18734 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
18735 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
18736 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
18737 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
18738 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
18739 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
18740 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
18741 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
18743 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
18744 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
18745 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
18746 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
18747 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
18748 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
18749 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
18750 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
18751 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
18752 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
18753 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
18754 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
18756 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
18757 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
18758 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
18759 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
18760 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
18761 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
18762 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
18763 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
18764 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
18765 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
18766 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
18767 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
18769 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
18770 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
18771 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
18772 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
18773 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
18774 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
18775 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
18776 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
18777 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
18778 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
18779 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
18780 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
18782 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
18783 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
18784 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
18785 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
18786 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
18787 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
18788 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
18789 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
18790 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
18791 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
18792 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
18793 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
18795 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
18796 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
18797 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
18798 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
18799 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
18800 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
18801 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
18802 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
18803 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
18804 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
18805 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
18806 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
18808 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
18809 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
18810 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
18811 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
18812 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
18813 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
18814 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
18815 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
18816 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
18817 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
18818 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
18819 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
18821 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
18822 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
18823 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
18824 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
18825 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
18826 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
18827 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
18828 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
18829 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
18830 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
18831 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
18832 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
18834 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
18835 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
18836 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
18837 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
18838 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
18839 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18840 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18841 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18842 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
18843 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
18844 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
18845 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
18847 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
18848 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
18849 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
18850 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
18851 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
18852 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18853 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18854 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18855 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
18856 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
18857 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
18858 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
18860 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
18861 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
18862 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
18863 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
18864 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
18865 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18866 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18867 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18868 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
18869 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
18870 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
18871 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
18873 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
18874 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
18875 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
18876 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
18877 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
18878 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18879 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18880 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18881 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
18882 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
18883 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
18884 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
18886 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
18887 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
18888 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
18889 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
18890 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
18891 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18892 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18893 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18894 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
18895 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
18896 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
18897 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
18899 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
18900 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
18901 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
18902 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
18903 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
18904 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18905 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18906 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18907 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
18908 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
18909 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
18910 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
18912 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
18913 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
18914 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
18915 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
18916 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
18917 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18918 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18919 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18920 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
18921 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
18922 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
18923 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
18925 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
18926 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
18927 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
18928 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
18929 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
18930 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18931 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18932 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18933 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
18934 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
18935 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18936 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18938 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18939 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18940 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18941 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18942 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18943 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18944 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18945 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18946 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18947 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18948 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18949 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18951 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18952 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18953 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18954 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18955 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18956 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18957 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18958 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18959 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18960 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18961 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18962 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18964 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18965 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18966 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18967 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18968 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18969 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18970 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18971 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18972 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18973 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18974 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18975 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18977 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18978 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18979 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18980 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18981 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18982 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18983 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18984 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18985 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18986 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18987 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18988 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18990 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18991 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18992 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18993 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18994 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18995 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18996 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18997 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18998 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18999 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19000 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19001 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19003 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19004 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19005 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19006 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19008 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19009 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19010 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19011 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19012 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19013 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19014 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19015 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19016 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19017 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19018 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19019 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19021 /* The implementation of the FIX instruction is broken on some
19022 assemblers, in that it accepts a precision specifier as well as a
19023 rounding specifier, despite the fact that this is meaningless.
19024 To be more compatible, we accept it as well, though of course it
19025 does not set any bits. */
19026 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19027 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19028 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19029 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19030 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19031 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19032 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19033 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19034 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19035 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19036 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19037 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19038 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19040 /* Instructions that were new with the real FPA, call them V2. */
19042 #define ARM_VARIANT & fpu_fpa_ext_v2
19044 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19045 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19046 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19047 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19048 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19049 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19052 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19054 /* Moves and type conversions. */
19055 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19056 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19057 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19058 cCE("fmstat", ef1fa10, 0, (), noargs),
19059 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19060 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19061 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19062 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19063 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19064 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19065 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19066 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19067 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19068 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19070 /* Memory operations. */
19071 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19072 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19073 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19074 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19075 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19076 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19077 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19078 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19079 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19080 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19081 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19082 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19083 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19084 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19085 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19086 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19087 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19088 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19090 /* Monadic operations. */
19091 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19092 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19093 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19095 /* Dyadic operations. */
19096 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19097 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19098 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19099 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19100 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19101 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19102 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19103 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19104 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19107 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19108 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19109 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19110 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19112 /* Double precision load/store are still present on single precision
19113 implementations. */
19114 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19115 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19116 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19117 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19118 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19119 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19120 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19121 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19122 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19123 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19126 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19128 /* Moves and type conversions. */
19129 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19130 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19131 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19132 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19133 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19134 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19135 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19136 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19137 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19138 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19139 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19140 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19141 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19143 /* Monadic operations. */
19144 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19145 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19146 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19148 /* Dyadic operations. */
19149 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19150 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19151 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19152 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19153 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19154 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19155 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19156 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19157 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19160 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19161 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19162 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19163 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19166 #define ARM_VARIANT & fpu_vfp_ext_v2
19168 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19169 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19170 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19171 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19173 /* Instructions which may belong to either the Neon or VFP instruction sets.
19174 Individual encoder functions perform additional architecture checks. */
19176 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19177 #undef THUMB_VARIANT
19178 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19180 /* These mnemonics are unique to VFP. */
19181 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19182 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19183 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19184 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19185 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19186 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19187 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
19188 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19189 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19190 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19192 /* Mnemonics shared by Neon and VFP. */
19193 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19194 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19195 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19197 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19198 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19200 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19201 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19203 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19204 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19205 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19206 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19207 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19208 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19209 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19210 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19212 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19213 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19214 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19215 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19218 /* NOTE: All VMOV encoding is special-cased! */
19219 NCE(vmov, 0, 1, (VMOV), neon_mov),
19220 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19222 #undef THUMB_VARIANT
19223 #define THUMB_VARIANT & fpu_neon_ext_v1
19225 #define ARM_VARIANT & fpu_neon_ext_v1
19227 /* Data processing with three registers of the same length. */
19228 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19229 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19230 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19231 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19232 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19233 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19234 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19235 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19236 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19237 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19238 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19239 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19240 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19241 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19242 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19243 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19244 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19245 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19246 /* If not immediate, fall back to neon_dyadic_i64_su.
19247 shl_imm should accept I8 I16 I32 I64,
19248 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19249 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19250 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19251 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19252 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19253 /* Logic ops, types optional & ignored. */
19254 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19255 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19256 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19257 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19258 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19259 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19260 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19261 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19262 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19263 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19264 /* Bitfield ops, untyped. */
19265 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19266 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19267 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19268 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19269 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19270 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19271 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19272 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19273 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19274 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19275 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19276 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19277 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19278 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19279 back to neon_dyadic_if_su. */
19280 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19281 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19282 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19283 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19284 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19285 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19286 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19287 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19288 /* Comparison. Type I8 I16 I32 F32. */
19289 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19290 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19291 /* As above, D registers only. */
19292 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19293 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19294 /* Int and float variants, signedness unimportant. */
19295 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19296 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19297 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19298 /* Add/sub take types I8 I16 I32 I64 F32. */
19299 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19300 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19301 /* vtst takes sizes 8, 16, 32. */
19302 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19303 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19304 /* VMUL takes I8 I16 I32 F32 P8. */
19305 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19306 /* VQD{R}MULH takes S16 S32. */
19307 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19308 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19309 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19310 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19311 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19312 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19313 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19314 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19315 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19316 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19317 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19318 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19319 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19320 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19321 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19322 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19324 /* Two address, int/float. Types S8 S16 S32 F32. */
19325 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19326 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19328 /* Data processing with two registers and a shift amount. */
19329 /* Right shifts, and variants with rounding.
19330 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19331 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19332 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19333 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19334 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19335 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19336 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19337 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19338 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19339 /* Shift and insert. Sizes accepted 8 16 32 64. */
19340 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19341 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19342 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19343 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19344 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19345 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19346 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19347 /* Right shift immediate, saturating & narrowing, with rounding variants.
19348 Types accepted S16 S32 S64 U16 U32 U64. */
19349 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19350 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19351 /* As above, unsigned. Types accepted S16 S32 S64. */
19352 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19353 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19354 /* Right shift narrowing. Types accepted I16 I32 I64. */
19355 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19356 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19357 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19358 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19359 /* CVT with optional immediate for fixed-point variant. */
19360 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19362 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19363 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19365 /* Data processing, three registers of different lengths. */
19366 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19367 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19368 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19369 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19370 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19371 /* If not scalar, fall back to neon_dyadic_long.
19372 Vector types as above, scalar types S16 S32 U16 U32. */
19373 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19374 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19375 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19376 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19377 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19378 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19379 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19380 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19381 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19382 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19383 /* Saturating doubling multiplies. Types S16 S32. */
19384 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19385 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19386 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19387 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19388 S16 S32 U16 U32. */
19389 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19391 /* Extract. Size 8. */
19392 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19393 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19395 /* Two registers, miscellaneous. */
19396 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19397 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19398 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19399 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19400 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19401 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19402 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19403 /* Vector replicate. Sizes 8 16 32. */
19404 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19405 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19406 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19407 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19408 /* VMOVN. Types I16 I32 I64. */
19409 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19410 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19411 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19412 /* VQMOVUN. Types S16 S32 S64. */
19413 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19414 /* VZIP / VUZP. Sizes 8 16 32. */
19415 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19416 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19417 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19418 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19419 /* VQABS / VQNEG. Types S8 S16 S32. */
19420 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19421 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19422 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19423 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19424 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19425 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19426 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19427 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19428 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19429 /* Reciprocal estimates. Types U32 F32. */
19430 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19431 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19432 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19433 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19434 /* VCLS. Types S8 S16 S32. */
19435 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19436 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19437 /* VCLZ. Types I8 I16 I32. */
19438 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19439 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19440 /* VCNT. Size 8. */
19441 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19442 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19443 /* Two address, untyped. */
19444 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19445 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19446 /* VTRN. Sizes 8 16 32. */
19447 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19448 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19450 /* Table lookup. Size 8. */
19451 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19452 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19454 #undef THUMB_VARIANT
19455 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19457 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19459 /* Neon element/structure load/store. */
19460 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19461 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19462 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19463 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19464 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19465 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19466 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19467 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19469 #undef THUMB_VARIANT
19470 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
19472 #define ARM_VARIANT &fpu_vfp_ext_v3xd
19473 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19474 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19475 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19476 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19477 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19478 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19479 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19480 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19481 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19483 #undef THUMB_VARIANT
19484 #define THUMB_VARIANT & fpu_vfp_ext_v3
19486 #define ARM_VARIANT & fpu_vfp_ext_v3
19488 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19489 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19490 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19491 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19492 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19493 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19494 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19495 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19496 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19499 #define ARM_VARIANT &fpu_vfp_ext_fma
19500 #undef THUMB_VARIANT
19501 #define THUMB_VARIANT &fpu_vfp_ext_fma
19502 /* Mnemonics shared by Neon and VFP. These are included in the
19503 VFP FMA variant; NEON and VFP FMA always includes the NEON
19504 FMA instructions. */
19505 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19506 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19507 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19508 the v form should always be used. */
19509 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19510 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19511 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19512 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19513 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19514 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19516 #undef THUMB_VARIANT
19518 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19520 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19521 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19522 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19523 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19524 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19525 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19526 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19527 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19530 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19532 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19533 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19534 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19535 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
19536 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
19537 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
19538 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
19539 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
19540 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
19541 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19542 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19543 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19544 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19545 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19546 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19547 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19548 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19549 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19550 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
19551 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
19552 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19553 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19554 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19555 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19556 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19557 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19558 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
19559 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
19560 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
19561 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
19562 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
19563 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
19564 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
19565 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
19566 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
19567 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
19568 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
19569 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19570 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19571 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19572 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19573 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19574 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19575 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19576 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19577 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19578 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19579 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19580 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19581 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19582 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19583 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19584 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19585 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19586 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19587 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19588 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19589 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19590 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19591 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19592 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19593 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19594 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19595 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19596 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19597 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19598 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19599 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19600 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
19601 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
19602 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19603 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19604 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19605 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19606 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19607 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19608 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19609 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19610 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19611 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19612 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19613 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19614 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19615 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19616 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19617 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19618 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19619 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19620 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
19621 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19622 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19623 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19624 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19625 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19626 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19627 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19628 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19629 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19630 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19631 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19632 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19633 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19634 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19635 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19636 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19637 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19638 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19639 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19640 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19641 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19642 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
19643 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19644 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19645 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19646 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19647 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19648 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19649 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19650 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19651 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19652 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19653 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19654 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19655 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19656 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19657 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19658 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19659 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
19660 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
19661 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19662 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
19663 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
19664 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
19665 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19666 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19667 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19668 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19669 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19670 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19671 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19672 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19673 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19674 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
19675 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
19676 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
19677 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
19678 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
19679 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
19680 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19681 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19682 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19683 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
19684 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
19685 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
19686 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
19687 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
19688 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
19689 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19690 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19691 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19692 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19693 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
19696 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
19698 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
19699 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
19700 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
19701 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
19702 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
19703 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
19704 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19705 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19706 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19707 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19708 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19709 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19710 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19711 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19712 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19713 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19714 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19715 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19716 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19717 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19718 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
19719 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19720 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19721 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19722 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19723 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19724 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19725 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19726 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19727 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19728 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19729 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19730 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19731 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19732 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19733 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19734 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19735 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19736 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19737 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19738 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19739 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19740 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19741 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19742 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19743 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19744 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19745 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19746 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19747 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19748 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19749 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19750 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19751 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19752 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19753 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19754 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19757 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
19759 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
19760 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
19761 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
19762 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
19763 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
19764 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
19765 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
19766 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
19767 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
19768 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
19769 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
19770 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
19771 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
19772 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
19773 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
19774 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
19775 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
19776 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
19777 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
19778 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
19779 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
19780 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
19781 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
19782 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
19783 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
19784 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
19785 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
19786 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
19787 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
19788 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
19789 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
19790 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
19791 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
19792 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
19793 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
19794 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
19795 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
19796 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
19797 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
19798 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
19799 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
19800 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
19801 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
19802 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
19803 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
19804 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
19805 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
19806 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
19807 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
19808 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
19809 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
19810 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
19811 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
19812 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
19813 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
19814 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
19815 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
19816 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
19817 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
19818 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
19819 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
19820 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
19821 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
19822 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
19823 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19824 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19825 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19826 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19827 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19828 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19829 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19830 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19831 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19832 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19833 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19834 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19837 #undef THUMB_VARIANT
19863 /* MD interface: bits in the object file. */
19865 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
19866 for use in the a.out file, and stores them in the array pointed to by buf.
19867 This knows about the endian-ness of the target machine and does
19868 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
19869 2 (short) and 4 (long) Floating numbers are put out as a series of
19870 LITTLENUMS (shorts, here at least). */
19873 md_number_to_chars (char * buf, valueT val, int n)
19875 if (target_big_endian)
19876 number_to_chars_bigendian (buf, val, n);
19878 number_to_chars_littleendian (buf, val, n);
19882 md_chars_to_number (char * buf, int n)
19885 unsigned char * where = (unsigned char *) buf;
19887 if (target_big_endian)
19892 result |= (*where++ & 255);
19900 result |= (where[n] & 255);
19907 /* MD interface: Sections. */
19909 /* Calculate the maximum variable size (i.e., excluding fr_fix)
19910 that an rs_machine_dependent frag may reach. */
19913 arm_frag_max_var (fragS *fragp)
19915 /* We only use rs_machine_dependent for variable-size Thumb instructions,
19916 which are either THUMB_SIZE (2) or INSN_SIZE (4).
19918 Note that we generate relaxable instructions even for cases that don't
19919 really need it, like an immediate that's a trivial constant. So we're
19920 overestimating the instruction size for some of those cases. Rather
19921 than putting more intelligence here, it would probably be better to
19922 avoid generating a relaxation frag in the first place when it can be
19923 determined up front that a short instruction will suffice. */
19925 gas_assert (fragp->fr_type == rs_machine_dependent);
19929 /* Estimate the size of a frag before relaxing. Assume everything fits in
19933 md_estimate_size_before_relax (fragS * fragp,
19934 segT segtype ATTRIBUTE_UNUSED)
19940 /* Convert a machine dependent frag. */
19943 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
19945 unsigned long insn;
19946 unsigned long old_op;
19954 buf = fragp->fr_literal + fragp->fr_fix;
19956 old_op = bfd_get_16(abfd, buf);
19957 if (fragp->fr_symbol)
19959 exp.X_op = O_symbol;
19960 exp.X_add_symbol = fragp->fr_symbol;
19964 exp.X_op = O_constant;
19966 exp.X_add_number = fragp->fr_offset;
19967 opcode = fragp->fr_subtype;
19970 case T_MNEM_ldr_pc:
19971 case T_MNEM_ldr_pc2:
19972 case T_MNEM_ldr_sp:
19973 case T_MNEM_str_sp:
19980 if (fragp->fr_var == 4)
19982 insn = THUMB_OP32 (opcode);
19983 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19985 insn |= (old_op & 0x700) << 4;
19989 insn |= (old_op & 7) << 12;
19990 insn |= (old_op & 0x38) << 13;
19992 insn |= 0x00000c00;
19993 put_thumb32_insn (buf, insn);
19994 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19998 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
20000 pc_rel = (opcode == T_MNEM_ldr_pc2);
20003 if (fragp->fr_var == 4)
20005 insn = THUMB_OP32 (opcode);
20006 insn |= (old_op & 0xf0) << 4;
20007 put_thumb32_insn (buf, insn);
20008 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
20012 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20013 exp.X_add_number -= 4;
20021 if (fragp->fr_var == 4)
20023 int r0off = (opcode == T_MNEM_mov
20024 || opcode == T_MNEM_movs) ? 0 : 8;
20025 insn = THUMB_OP32 (opcode);
20026 insn = (insn & 0xe1ffffff) | 0x10000000;
20027 insn |= (old_op & 0x700) << r0off;
20028 put_thumb32_insn (buf, insn);
20029 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20033 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
20038 if (fragp->fr_var == 4)
20040 insn = THUMB_OP32(opcode);
20041 put_thumb32_insn (buf, insn);
20042 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
20045 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
20049 if (fragp->fr_var == 4)
20051 insn = THUMB_OP32(opcode);
20052 insn |= (old_op & 0xf00) << 14;
20053 put_thumb32_insn (buf, insn);
20054 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
20057 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
20060 case T_MNEM_add_sp:
20061 case T_MNEM_add_pc:
20062 case T_MNEM_inc_sp:
20063 case T_MNEM_dec_sp:
20064 if (fragp->fr_var == 4)
20066 /* ??? Choose between add and addw. */
20067 insn = THUMB_OP32 (opcode);
20068 insn |= (old_op & 0xf0) << 4;
20069 put_thumb32_insn (buf, insn);
20070 if (opcode == T_MNEM_add_pc)
20071 reloc_type = BFD_RELOC_ARM_T32_IMM12;
20073 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20076 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20084 if (fragp->fr_var == 4)
20086 insn = THUMB_OP32 (opcode);
20087 insn |= (old_op & 0xf0) << 4;
20088 insn |= (old_op & 0xf) << 16;
20089 put_thumb32_insn (buf, insn);
20090 if (insn & (1 << 20))
20091 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20093 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20096 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
20102 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
20103 (enum bfd_reloc_code_real) reloc_type);
20104 fixp->fx_file = fragp->fr_file;
20105 fixp->fx_line = fragp->fr_line;
20106 fragp->fr_fix += fragp->fr_var;
20109 /* Return the size of a relaxable immediate operand instruction.
20110 SHIFT and SIZE specify the form of the allowable immediate. */
20112 relax_immediate (fragS *fragp, int size, int shift)
20118 /* ??? Should be able to do better than this. */
20119 if (fragp->fr_symbol)
20122 low = (1 << shift) - 1;
20123 mask = (1 << (shift + size)) - (1 << shift);
20124 offset = fragp->fr_offset;
20125 /* Force misaligned offsets to 32-bit variant. */
20128 if (offset & ~mask)
20133 /* Get the address of a symbol during relaxation. */
20135 relaxed_symbol_addr (fragS *fragp, long stretch)
20141 sym = fragp->fr_symbol;
20142 sym_frag = symbol_get_frag (sym);
20143 know (S_GET_SEGMENT (sym) != absolute_section
20144 || sym_frag == &zero_address_frag);
20145 addr = S_GET_VALUE (sym) + fragp->fr_offset;
20147 /* If frag has yet to be reached on this pass, assume it will
20148 move by STRETCH just as we did. If this is not so, it will
20149 be because some frag between grows, and that will force
20153 && sym_frag->relax_marker != fragp->relax_marker)
20157 /* Adjust stretch for any alignment frag. Note that if have
20158 been expanding the earlier code, the symbol may be
20159 defined in what appears to be an earlier frag. FIXME:
20160 This doesn't handle the fr_subtype field, which specifies
20161 a maximum number of bytes to skip when doing an
20163 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
20165 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
20168 stretch = - ((- stretch)
20169 & ~ ((1 << (int) f->fr_offset) - 1));
20171 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
20183 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20186 relax_adr (fragS *fragp, asection *sec, long stretch)
20191 /* Assume worst case for symbols not known to be in the same section. */
20192 if (fragp->fr_symbol == NULL
20193 || !S_IS_DEFINED (fragp->fr_symbol)
20194 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20195 || S_IS_WEAK (fragp->fr_symbol))
20198 val = relaxed_symbol_addr (fragp, stretch);
20199 addr = fragp->fr_address + fragp->fr_fix;
20200 addr = (addr + 4) & ~3;
20201 /* Force misaligned targets to 32-bit variant. */
20205 if (val < 0 || val > 1020)
20210 /* Return the size of a relaxable add/sub immediate instruction. */
20212 relax_addsub (fragS *fragp, asection *sec)
20217 buf = fragp->fr_literal + fragp->fr_fix;
20218 op = bfd_get_16(sec->owner, buf);
20219 if ((op & 0xf) == ((op >> 4) & 0xf))
20220 return relax_immediate (fragp, 8, 0);
20222 return relax_immediate (fragp, 3, 0);
20225 /* Return TRUE iff the definition of symbol S could be pre-empted
20226 (overridden) at link or load time. */
20228 symbol_preemptible (symbolS *s)
20230 /* Weak symbols can always be pre-empted. */
20234 /* Non-global symbols cannot be pre-empted. */
20235 if (! S_IS_EXTERNAL (s))
20239 /* In ELF, a global symbol can be marked protected, or private. In that
20240 case it can't be pre-empted (other definitions in the same link unit
20241 would violate the ODR). */
20242 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20246 /* Other global symbols might be pre-empted. */
20250 /* Return the size of a relaxable branch instruction. BITS is the
20251 size of the offset field in the narrow instruction. */
20254 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20260 /* Assume worst case for symbols not known to be in the same section. */
20261 if (!S_IS_DEFINED (fragp->fr_symbol)
20262 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20263 || S_IS_WEAK (fragp->fr_symbol))
20267 /* A branch to a function in ARM state will require interworking. */
20268 if (S_IS_DEFINED (fragp->fr_symbol)
20269 && ARM_IS_FUNC (fragp->fr_symbol))
20273 if (symbol_preemptible (fragp->fr_symbol))
20276 val = relaxed_symbol_addr (fragp, stretch);
20277 addr = fragp->fr_address + fragp->fr_fix + 4;
20280 /* Offset is a signed value *2 */
20282 if (val >= limit || val < -limit)
20288 /* Relax a machine dependent frag. This returns the amount by which
20289 the current size of the frag should change. */
20292 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
20297 oldsize = fragp->fr_var;
20298 switch (fragp->fr_subtype)
20300 case T_MNEM_ldr_pc2:
20301 newsize = relax_adr (fragp, sec, stretch);
20303 case T_MNEM_ldr_pc:
20304 case T_MNEM_ldr_sp:
20305 case T_MNEM_str_sp:
20306 newsize = relax_immediate (fragp, 8, 2);
20310 newsize = relax_immediate (fragp, 5, 2);
20314 newsize = relax_immediate (fragp, 5, 1);
20318 newsize = relax_immediate (fragp, 5, 0);
20321 newsize = relax_adr (fragp, sec, stretch);
20327 newsize = relax_immediate (fragp, 8, 0);
20330 newsize = relax_branch (fragp, sec, 11, stretch);
20333 newsize = relax_branch (fragp, sec, 8, stretch);
20335 case T_MNEM_add_sp:
20336 case T_MNEM_add_pc:
20337 newsize = relax_immediate (fragp, 8, 2);
20339 case T_MNEM_inc_sp:
20340 case T_MNEM_dec_sp:
20341 newsize = relax_immediate (fragp, 7, 2);
20347 newsize = relax_addsub (fragp, sec);
20353 fragp->fr_var = newsize;
20354 /* Freeze wide instructions that are at or before the same location as
20355 in the previous pass. This avoids infinite loops.
20356 Don't freeze them unconditionally because targets may be artificially
20357 misaligned by the expansion of preceding frags. */
20358 if (stretch <= 0 && newsize > 2)
20360 md_convert_frag (sec->owner, sec, fragp);
20364 return newsize - oldsize;
20367 /* Round up a section size to the appropriate boundary. */
20370 md_section_align (segT segment ATTRIBUTE_UNUSED,
20373 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20374 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
20376 /* For a.out, force the section size to be aligned. If we don't do
20377 this, BFD will align it for us, but it will not write out the
20378 final bytes of the section. This may be a bug in BFD, but it is
20379 easier to fix it here since that is how the other a.out targets
20383 align = bfd_get_section_alignment (stdoutput, segment);
20384 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
20391 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
20392 of an rs_align_code fragment. */
20395 arm_handle_align (fragS * fragP)
20397 static char const arm_noop[2][2][4] =
20400 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
20401 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
20404 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
20405 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
20408 static char const thumb_noop[2][2][2] =
20411 {0xc0, 0x46}, /* LE */
20412 {0x46, 0xc0}, /* BE */
20415 {0x00, 0xbf}, /* LE */
20416 {0xbf, 0x00} /* BE */
20419 static char const wide_thumb_noop[2][4] =
20420 { /* Wide Thumb-2 */
20421 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
20422 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
20425 unsigned bytes, fix, noop_size;
20428 const char *narrow_noop = NULL;
20433 if (fragP->fr_type != rs_align_code)
20436 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
20437 p = fragP->fr_literal + fragP->fr_fix;
20440 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
20441 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
20443 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
20445 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
20447 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
20449 narrow_noop = thumb_noop[1][target_big_endian];
20450 noop = wide_thumb_noop[target_big_endian];
20453 noop = thumb_noop[0][target_big_endian];
20461 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
20462 [target_big_endian];
20469 fragP->fr_var = noop_size;
20471 if (bytes & (noop_size - 1))
20473 fix = bytes & (noop_size - 1);
20475 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
20477 memset (p, 0, fix);
20484 if (bytes & noop_size)
20486 /* Insert a narrow noop. */
20487 memcpy (p, narrow_noop, noop_size);
20489 bytes -= noop_size;
20493 /* Use wide noops for the remainder */
20497 while (bytes >= noop_size)
20499 memcpy (p, noop, noop_size);
20501 bytes -= noop_size;
20505 fragP->fr_fix += fix;
20508 /* Called from md_do_align. Used to create an alignment
20509 frag in a code section. */
20512 arm_frag_align_code (int n, int max)
20516 /* We assume that there will never be a requirement
20517 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20518 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20523 _("alignments greater than %d bytes not supported in .text sections."),
20524 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20525 as_fatal ("%s", err_msg);
20528 p = frag_var (rs_align_code,
20529 MAX_MEM_FOR_RS_ALIGN_CODE,
20531 (relax_substateT) max,
20538 /* Perform target specific initialisation of a frag.
20539 Note - despite the name this initialisation is not done when the frag
20540 is created, but only when its type is assigned. A frag can be created
20541 and used a long time before its type is set, so beware of assuming that
20542 this initialisationis performed first. */
20546 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
20548 /* Record whether this frag is in an ARM or a THUMB area. */
20549 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20552 #else /* OBJ_ELF is defined. */
20554 arm_init_frag (fragS * fragP, int max_chars)
20556 /* If the current ARM vs THUMB mode has not already
20557 been recorded into this frag then do so now. */
20558 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
20560 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20562 /* Record a mapping symbol for alignment frags. We will delete this
20563 later if the alignment ends up empty. */
20564 switch (fragP->fr_type)
20567 case rs_align_test:
20569 mapping_state_2 (MAP_DATA, max_chars);
20571 case rs_align_code:
20572 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
20580 /* When we change sections we need to issue a new mapping symbol. */
20583 arm_elf_change_section (void)
20585 /* Link an unlinked unwind index table section to the .text section. */
20586 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
20587 && elf_linked_to_section (now_seg) == NULL)
20588 elf_linked_to_section (now_seg) = text_section;
20592 arm_elf_section_type (const char * str, size_t len)
20594 if (len == 5 && strncmp (str, "exidx", 5) == 0)
20595 return SHT_ARM_EXIDX;
20600 /* Code to deal with unwinding tables. */
20602 static void add_unwind_adjustsp (offsetT);
20604 /* Generate any deferred unwind frame offset. */
20607 flush_pending_unwind (void)
20611 offset = unwind.pending_offset;
20612 unwind.pending_offset = 0;
20614 add_unwind_adjustsp (offset);
20617 /* Add an opcode to this list for this function. Two-byte opcodes should
20618 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
20622 add_unwind_opcode (valueT op, int length)
20624 /* Add any deferred stack adjustment. */
20625 if (unwind.pending_offset)
20626 flush_pending_unwind ();
20628 unwind.sp_restored = 0;
20630 if (unwind.opcode_count + length > unwind.opcode_alloc)
20632 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
20633 if (unwind.opcodes)
20634 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
20635 unwind.opcode_alloc);
20637 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
20642 unwind.opcodes[unwind.opcode_count] = op & 0xff;
20644 unwind.opcode_count++;
20648 /* Add unwind opcodes to adjust the stack pointer. */
20651 add_unwind_adjustsp (offsetT offset)
20655 if (offset > 0x200)
20657 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
20662 /* Long form: 0xb2, uleb128. */
20663 /* This might not fit in a word so add the individual bytes,
20664 remembering the list is built in reverse order. */
20665 o = (valueT) ((offset - 0x204) >> 2);
20667 add_unwind_opcode (0, 1);
20669 /* Calculate the uleb128 encoding of the offset. */
20673 bytes[n] = o & 0x7f;
20679 /* Add the insn. */
20681 add_unwind_opcode (bytes[n - 1], 1);
20682 add_unwind_opcode (0xb2, 1);
20684 else if (offset > 0x100)
20686 /* Two short opcodes. */
20687 add_unwind_opcode (0x3f, 1);
20688 op = (offset - 0x104) >> 2;
20689 add_unwind_opcode (op, 1);
20691 else if (offset > 0)
20693 /* Short opcode. */
20694 op = (offset - 4) >> 2;
20695 add_unwind_opcode (op, 1);
20697 else if (offset < 0)
20700 while (offset > 0x100)
20702 add_unwind_opcode (0x7f, 1);
20705 op = ((offset - 4) >> 2) | 0x40;
20706 add_unwind_opcode (op, 1);
20710 /* Finish the list of unwind opcodes for this function. */
20712 finish_unwind_opcodes (void)
20716 if (unwind.fp_used)
20718 /* Adjust sp as necessary. */
20719 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
20720 flush_pending_unwind ();
20722 /* After restoring sp from the frame pointer. */
20723 op = 0x90 | unwind.fp_reg;
20724 add_unwind_opcode (op, 1);
20727 flush_pending_unwind ();
20731 /* Start an exception table entry. If idx is nonzero this is an index table
20735 start_unwind_section (const segT text_seg, int idx)
20737 const char * text_name;
20738 const char * prefix;
20739 const char * prefix_once;
20740 const char * group_name;
20744 size_t sec_name_len;
20751 prefix = ELF_STRING_ARM_unwind;
20752 prefix_once = ELF_STRING_ARM_unwind_once;
20753 type = SHT_ARM_EXIDX;
20757 prefix = ELF_STRING_ARM_unwind_info;
20758 prefix_once = ELF_STRING_ARM_unwind_info_once;
20759 type = SHT_PROGBITS;
20762 text_name = segment_name (text_seg);
20763 if (streq (text_name, ".text"))
20766 if (strncmp (text_name, ".gnu.linkonce.t.",
20767 strlen (".gnu.linkonce.t.")) == 0)
20769 prefix = prefix_once;
20770 text_name += strlen (".gnu.linkonce.t.");
20773 prefix_len = strlen (prefix);
20774 text_len = strlen (text_name);
20775 sec_name_len = prefix_len + text_len;
20776 sec_name = (char *) xmalloc (sec_name_len + 1);
20777 memcpy (sec_name, prefix, prefix_len);
20778 memcpy (sec_name + prefix_len, text_name, text_len);
20779 sec_name[prefix_len + text_len] = '\0';
20785 /* Handle COMDAT group. */
20786 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
20788 group_name = elf_group_name (text_seg);
20789 if (group_name == NULL)
20791 as_bad (_("Group section `%s' has no group signature"),
20792 segment_name (text_seg));
20793 ignore_rest_of_line ();
20796 flags |= SHF_GROUP;
20800 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
20802 /* Set the section link for index tables. */
20804 elf_linked_to_section (now_seg) = text_seg;
20808 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
20809 personality routine data. Returns zero, or the index table value for
20810 and inline entry. */
20813 create_unwind_entry (int have_data)
20818 /* The current word of data. */
20820 /* The number of bytes left in this word. */
20823 finish_unwind_opcodes ();
20825 /* Remember the current text section. */
20826 unwind.saved_seg = now_seg;
20827 unwind.saved_subseg = now_subseg;
20829 start_unwind_section (now_seg, 0);
20831 if (unwind.personality_routine == NULL)
20833 if (unwind.personality_index == -2)
20836 as_bad (_("handlerdata in cantunwind frame"));
20837 return 1; /* EXIDX_CANTUNWIND. */
20840 /* Use a default personality routine if none is specified. */
20841 if (unwind.personality_index == -1)
20843 if (unwind.opcode_count > 3)
20844 unwind.personality_index = 1;
20846 unwind.personality_index = 0;
20849 /* Space for the personality routine entry. */
20850 if (unwind.personality_index == 0)
20852 if (unwind.opcode_count > 3)
20853 as_bad (_("too many unwind opcodes for personality routine 0"));
20857 /* All the data is inline in the index table. */
20860 while (unwind.opcode_count > 0)
20862 unwind.opcode_count--;
20863 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20867 /* Pad with "finish" opcodes. */
20869 data = (data << 8) | 0xb0;
20876 /* We get two opcodes "free" in the first word. */
20877 size = unwind.opcode_count - 2;
20881 gas_assert (unwind.personality_index == -1);
20883 /* An extra byte is required for the opcode count. */
20884 size = unwind.opcode_count + 1;
20887 size = (size + 3) >> 2;
20889 as_bad (_("too many unwind opcodes"));
20891 frag_align (2, 0, 0);
20892 record_alignment (now_seg, 2);
20893 unwind.table_entry = expr_build_dot ();
20895 /* Allocate the table entry. */
20896 ptr = frag_more ((size << 2) + 4);
20897 /* PR 13449: Zero the table entries in case some of them are not used. */
20898 memset (ptr, 0, (size << 2) + 4);
20899 where = frag_now_fix () - ((size << 2) + 4);
20901 switch (unwind.personality_index)
20904 /* ??? Should this be a PLT generating relocation? */
20905 /* Custom personality routine. */
20906 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
20907 BFD_RELOC_ARM_PREL31);
20912 /* Set the first byte to the number of additional words. */
20913 data = size > 0 ? size - 1 : 0;
20917 /* ABI defined personality routines. */
20919 /* Three opcodes bytes are packed into the first word. */
20926 /* The size and first two opcode bytes go in the first word. */
20927 data = ((0x80 + unwind.personality_index) << 8) | size;
20932 /* Should never happen. */
20936 /* Pack the opcodes into words (MSB first), reversing the list at the same
20938 while (unwind.opcode_count > 0)
20942 md_number_to_chars (ptr, data, 4);
20947 unwind.opcode_count--;
20949 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20952 /* Finish off the last word. */
20955 /* Pad with "finish" opcodes. */
20957 data = (data << 8) | 0xb0;
20959 md_number_to_chars (ptr, data, 4);
20964 /* Add an empty descriptor if there is no user-specified data. */
20965 ptr = frag_more (4);
20966 md_number_to_chars (ptr, 0, 4);
20973 /* Initialize the DWARF-2 unwind information for this procedure. */
20976 tc_arm_frame_initial_instructions (void)
20978 cfi_add_CFA_def_cfa (REG_SP, 0);
20980 #endif /* OBJ_ELF */
20982 /* Convert REGNAME to a DWARF-2 register number. */
20985 tc_arm_regname_to_dw2regnum (char *regname)
20987 int reg = arm_reg_parse (®name, REG_TYPE_RN);
20997 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21001 exp.X_op = O_secrel;
21002 exp.X_add_symbol = symbol;
21003 exp.X_add_number = 0;
21004 emit_expr (&exp, size);
21008 /* MD interface: Symbol and relocation handling. */
21010 /* Return the address within the segment that a PC-relative fixup is
21011 relative to. For ARM, PC-relative fixups applied to instructions
21012 are generally relative to the location of the fixup plus 8 bytes.
21013 Thumb branches are offset by 4, and Thumb loads relative to PC
21014 require special handling. */
21017 md_pcrel_from_section (fixS * fixP, segT seg)
21019 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
21021 /* If this is pc-relative and we are going to emit a relocation
21022 then we just want to put out any pipeline compensation that the linker
21023 will need. Otherwise we want to use the calculated base.
21024 For WinCE we skip the bias for externals as well, since this
21025 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21027 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
21028 || (arm_force_relocation (fixP)
21030 && !S_IS_EXTERNAL (fixP->fx_addsy)
21036 switch (fixP->fx_r_type)
21038 /* PC relative addressing on the Thumb is slightly odd as the
21039 bottom two bits of the PC are forced to zero for the
21040 calculation. This happens *after* application of the
21041 pipeline offset. However, Thumb adrl already adjusts for
21042 this, so we need not do it again. */
21043 case BFD_RELOC_ARM_THUMB_ADD:
21046 case BFD_RELOC_ARM_THUMB_OFFSET:
21047 case BFD_RELOC_ARM_T32_OFFSET_IMM:
21048 case BFD_RELOC_ARM_T32_ADD_PC12:
21049 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21050 return (base + 4) & ~3;
21052 /* Thumb branches are simply offset by +4. */
21053 case BFD_RELOC_THUMB_PCREL_BRANCH7:
21054 case BFD_RELOC_THUMB_PCREL_BRANCH9:
21055 case BFD_RELOC_THUMB_PCREL_BRANCH12:
21056 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21057 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21060 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21062 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21063 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21064 && ARM_IS_FUNC (fixP->fx_addsy)
21065 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21066 base = fixP->fx_where + fixP->fx_frag->fr_address;
21069 /* BLX is like branches above, but forces the low two bits of PC to
21071 case BFD_RELOC_THUMB_PCREL_BLX:
21073 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21074 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21075 && THUMB_IS_FUNC (fixP->fx_addsy)
21076 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21077 base = fixP->fx_where + fixP->fx_frag->fr_address;
21078 return (base + 4) & ~3;
21080 /* ARM mode branches are offset by +8. However, the Windows CE
21081 loader expects the relocation not to take this into account. */
21082 case BFD_RELOC_ARM_PCREL_BLX:
21084 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21085 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21086 && ARM_IS_FUNC (fixP->fx_addsy)
21087 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21088 base = fixP->fx_where + fixP->fx_frag->fr_address;
21091 case BFD_RELOC_ARM_PCREL_CALL:
21093 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21094 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21095 && THUMB_IS_FUNC (fixP->fx_addsy)
21096 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21097 base = fixP->fx_where + fixP->fx_frag->fr_address;
21100 case BFD_RELOC_ARM_PCREL_BRANCH:
21101 case BFD_RELOC_ARM_PCREL_JUMP:
21102 case BFD_RELOC_ARM_PLT32:
21104 /* When handling fixups immediately, because we have already
21105 discovered the value of a symbol, or the address of the frag involved
21106 we must account for the offset by +8, as the OS loader will never see the reloc.
21107 see fixup_segment() in write.c
21108 The S_IS_EXTERNAL test handles the case of global symbols.
21109 Those need the calculated base, not just the pipe compensation the linker will need. */
21111 && fixP->fx_addsy != NULL
21112 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21113 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
21121 /* ARM mode loads relative to PC are also offset by +8. Unlike
21122 branches, the Windows CE loader *does* expect the relocation
21123 to take this into account. */
21124 case BFD_RELOC_ARM_OFFSET_IMM:
21125 case BFD_RELOC_ARM_OFFSET_IMM8:
21126 case BFD_RELOC_ARM_HWLITERAL:
21127 case BFD_RELOC_ARM_LITERAL:
21128 case BFD_RELOC_ARM_CP_OFF_IMM:
21132 /* Other PC-relative relocations are un-offset. */
21138 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21139 Otherwise we have no need to default values of symbols. */
21142 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21145 if (name[0] == '_' && name[1] == 'G'
21146 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21150 if (symbol_find (name))
21151 as_bad (_("GOT already in the symbol table"));
21153 GOT_symbol = symbol_new (name, undefined_section,
21154 (valueT) 0, & zero_address_frag);
21164 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21165 computed as two separate immediate values, added together. We
21166 already know that this value cannot be computed by just one ARM
21169 static unsigned int
21170 validate_immediate_twopart (unsigned int val,
21171 unsigned int * highpart)
21176 for (i = 0; i < 32; i += 2)
21177 if (((a = rotate_left (val, i)) & 0xff) != 0)
21183 * highpart = (a >> 8) | ((i + 24) << 7);
21185 else if (a & 0xff0000)
21187 if (a & 0xff000000)
21189 * highpart = (a >> 16) | ((i + 16) << 7);
21193 gas_assert (a & 0xff000000);
21194 * highpart = (a >> 24) | ((i + 8) << 7);
21197 return (a & 0xff) | (i << 7);
21204 validate_offset_imm (unsigned int val, int hwse)
21206 if ((hwse && val > 255) || val > 4095)
21211 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21212 negative immediate constant by altering the instruction. A bit of
21217 by inverting the second operand, and
21220 by negating the second operand. */
21223 negate_data_op (unsigned long * instruction,
21224 unsigned long value)
21227 unsigned long negated, inverted;
21229 negated = encode_arm_immediate (-value);
21230 inverted = encode_arm_immediate (~value);
21232 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21235 /* First negates. */
21236 case OPCODE_SUB: /* ADD <-> SUB */
21237 new_inst = OPCODE_ADD;
21242 new_inst = OPCODE_SUB;
21246 case OPCODE_CMP: /* CMP <-> CMN */
21247 new_inst = OPCODE_CMN;
21252 new_inst = OPCODE_CMP;
21256 /* Now Inverted ops. */
21257 case OPCODE_MOV: /* MOV <-> MVN */
21258 new_inst = OPCODE_MVN;
21263 new_inst = OPCODE_MOV;
21267 case OPCODE_AND: /* AND <-> BIC */
21268 new_inst = OPCODE_BIC;
21273 new_inst = OPCODE_AND;
21277 case OPCODE_ADC: /* ADC <-> SBC */
21278 new_inst = OPCODE_SBC;
21283 new_inst = OPCODE_ADC;
21287 /* We cannot do anything. */
21292 if (value == (unsigned) FAIL)
21295 *instruction &= OPCODE_MASK;
21296 *instruction |= new_inst << DATA_OP_SHIFT;
21300 /* Like negate_data_op, but for Thumb-2. */
21302 static unsigned int
21303 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21307 unsigned int negated, inverted;
21309 negated = encode_thumb32_immediate (-value);
21310 inverted = encode_thumb32_immediate (~value);
21312 rd = (*instruction >> 8) & 0xf;
21313 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21316 /* ADD <-> SUB. Includes CMP <-> CMN. */
21317 case T2_OPCODE_SUB:
21318 new_inst = T2_OPCODE_ADD;
21322 case T2_OPCODE_ADD:
21323 new_inst = T2_OPCODE_SUB;
21327 /* ORR <-> ORN. Includes MOV <-> MVN. */
21328 case T2_OPCODE_ORR:
21329 new_inst = T2_OPCODE_ORN;
21333 case T2_OPCODE_ORN:
21334 new_inst = T2_OPCODE_ORR;
21338 /* AND <-> BIC. TST has no inverted equivalent. */
21339 case T2_OPCODE_AND:
21340 new_inst = T2_OPCODE_BIC;
21347 case T2_OPCODE_BIC:
21348 new_inst = T2_OPCODE_AND;
21353 case T2_OPCODE_ADC:
21354 new_inst = T2_OPCODE_SBC;
21358 case T2_OPCODE_SBC:
21359 new_inst = T2_OPCODE_ADC;
21363 /* We cannot do anything. */
21368 if (value == (unsigned int)FAIL)
21371 *instruction &= T2_OPCODE_MASK;
21372 *instruction |= new_inst << T2_DATA_OP_SHIFT;
21376 /* Read a 32-bit thumb instruction from buf. */
21377 static unsigned long
21378 get_thumb32_insn (char * buf)
21380 unsigned long insn;
21381 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21382 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21388 /* We usually want to set the low bit on the address of thumb function
21389 symbols. In particular .word foo - . should have the low bit set.
21390 Generic code tries to fold the difference of two symbols to
21391 a constant. Prevent this and force a relocation when the first symbols
21392 is a thumb function. */
21395 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21397 if (op == O_subtract
21398 && l->X_op == O_symbol
21399 && r->X_op == O_symbol
21400 && THUMB_IS_FUNC (l->X_add_symbol))
21402 l->X_op = O_subtract;
21403 l->X_op_symbol = r->X_add_symbol;
21404 l->X_add_number -= r->X_add_number;
21408 /* Process as normal. */
21412 /* Encode Thumb2 unconditional branches and calls. The encoding
21413 for the 2 are identical for the immediate values. */
21416 encode_thumb2_b_bl_offset (char * buf, offsetT value)
21418 #define T2I1I2MASK ((1 << 13) | (1 << 11))
21421 addressT S, I1, I2, lo, hi;
21423 S = (value >> 24) & 0x01;
21424 I1 = (value >> 23) & 0x01;
21425 I2 = (value >> 22) & 0x01;
21426 hi = (value >> 12) & 0x3ff;
21427 lo = (value >> 1) & 0x7ff;
21428 newval = md_chars_to_number (buf, THUMB_SIZE);
21429 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21430 newval |= (S << 10) | hi;
21431 newval2 &= ~T2I1I2MASK;
21432 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
21433 md_number_to_chars (buf, newval, THUMB_SIZE);
21434 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21438 md_apply_fix (fixS * fixP,
21442 offsetT value = * valP;
21444 unsigned int newimm;
21445 unsigned long temp;
21447 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21449 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21451 /* Note whether this will delete the relocation. */
21453 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21456 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21457 consistency with the behaviour on 32-bit hosts. Remember value
21459 value &= 0xffffffff;
21460 value ^= 0x80000000;
21461 value -= 0x80000000;
21464 fixP->fx_addnumber = value;
21466 /* Same treatment for fixP->fx_offset. */
21467 fixP->fx_offset &= 0xffffffff;
21468 fixP->fx_offset ^= 0x80000000;
21469 fixP->fx_offset -= 0x80000000;
21471 switch (fixP->fx_r_type)
21473 case BFD_RELOC_NONE:
21474 /* This will need to go in the object file. */
21478 case BFD_RELOC_ARM_IMMEDIATE:
21479 /* We claim that this fixup has been processed here,
21480 even if in fact we generate an error because we do
21481 not have a reloc for it, so tc_gen_reloc will reject it. */
21484 if (fixP->fx_addsy)
21486 const char *msg = 0;
21488 if (! S_IS_DEFINED (fixP->fx_addsy))
21489 msg = _("undefined symbol %s used as an immediate value");
21490 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21491 msg = _("symbol %s is in a different section");
21492 else if (S_IS_WEAK (fixP->fx_addsy))
21493 msg = _("symbol %s is weak and may be overridden later");
21497 as_bad_where (fixP->fx_file, fixP->fx_line,
21498 msg, S_GET_NAME (fixP->fx_addsy));
21503 temp = md_chars_to_number (buf, INSN_SIZE);
21505 /* If the offset is negative, we should use encoding A2 for ADR. */
21506 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21507 newimm = negate_data_op (&temp, value);
21510 newimm = encode_arm_immediate (value);
21512 /* If the instruction will fail, see if we can fix things up by
21513 changing the opcode. */
21514 if (newimm == (unsigned int) FAIL)
21515 newimm = negate_data_op (&temp, value);
21518 if (newimm == (unsigned int) FAIL)
21520 as_bad_where (fixP->fx_file, fixP->fx_line,
21521 _("invalid constant (%lx) after fixup"),
21522 (unsigned long) value);
21526 newimm |= (temp & 0xfffff000);
21527 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21530 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21532 unsigned int highpart = 0;
21533 unsigned int newinsn = 0xe1a00000; /* nop. */
21535 if (fixP->fx_addsy)
21537 const char *msg = 0;
21539 if (! S_IS_DEFINED (fixP->fx_addsy))
21540 msg = _("undefined symbol %s used as an immediate value");
21541 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21542 msg = _("symbol %s is in a different section");
21543 else if (S_IS_WEAK (fixP->fx_addsy))
21544 msg = _("symbol %s is weak and may be overridden later");
21548 as_bad_where (fixP->fx_file, fixP->fx_line,
21549 msg, S_GET_NAME (fixP->fx_addsy));
21554 newimm = encode_arm_immediate (value);
21555 temp = md_chars_to_number (buf, INSN_SIZE);
21557 /* If the instruction will fail, see if we can fix things up by
21558 changing the opcode. */
21559 if (newimm == (unsigned int) FAIL
21560 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21562 /* No ? OK - try using two ADD instructions to generate
21564 newimm = validate_immediate_twopart (value, & highpart);
21566 /* Yes - then make sure that the second instruction is
21568 if (newimm != (unsigned int) FAIL)
21570 /* Still No ? Try using a negated value. */
21571 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21572 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21573 /* Otherwise - give up. */
21576 as_bad_where (fixP->fx_file, fixP->fx_line,
21577 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21582 /* Replace the first operand in the 2nd instruction (which
21583 is the PC) with the destination register. We have
21584 already added in the PC in the first instruction and we
21585 do not want to do it again. */
21586 newinsn &= ~ 0xf0000;
21587 newinsn |= ((newinsn & 0x0f000) << 4);
21590 newimm |= (temp & 0xfffff000);
21591 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21593 highpart |= (newinsn & 0xfffff000);
21594 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
21598 case BFD_RELOC_ARM_OFFSET_IMM:
21599 if (!fixP->fx_done && seg->use_rela_p)
21602 case BFD_RELOC_ARM_LITERAL:
21608 if (validate_offset_imm (value, 0) == FAIL)
21610 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
21611 as_bad_where (fixP->fx_file, fixP->fx_line,
21612 _("invalid literal constant: pool needs to be closer"));
21614 as_bad_where (fixP->fx_file, fixP->fx_line,
21615 _("bad immediate value for offset (%ld)"),
21620 newval = md_chars_to_number (buf, INSN_SIZE);
21622 newval &= 0xfffff000;
21625 newval &= 0xff7ff000;
21626 newval |= value | (sign ? INDEX_UP : 0);
21628 md_number_to_chars (buf, newval, INSN_SIZE);
21631 case BFD_RELOC_ARM_OFFSET_IMM8:
21632 case BFD_RELOC_ARM_HWLITERAL:
21638 if (validate_offset_imm (value, 1) == FAIL)
21640 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
21641 as_bad_where (fixP->fx_file, fixP->fx_line,
21642 _("invalid literal constant: pool needs to be closer"));
21644 as_bad_where (fixP->fx_file, fixP->fx_line,
21645 _("bad immediate value for 8-bit offset (%ld)"),
21650 newval = md_chars_to_number (buf, INSN_SIZE);
21652 newval &= 0xfffff0f0;
21655 newval &= 0xff7ff0f0;
21656 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
21658 md_number_to_chars (buf, newval, INSN_SIZE);
21661 case BFD_RELOC_ARM_T32_OFFSET_U8:
21662 if (value < 0 || value > 1020 || value % 4 != 0)
21663 as_bad_where (fixP->fx_file, fixP->fx_line,
21664 _("bad immediate value for offset (%ld)"), (long) value);
21667 newval = md_chars_to_number (buf+2, THUMB_SIZE);
21669 md_number_to_chars (buf+2, newval, THUMB_SIZE);
21672 case BFD_RELOC_ARM_T32_OFFSET_IMM:
21673 /* This is a complicated relocation used for all varieties of Thumb32
21674 load/store instruction with immediate offset:
21676 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
21677 *4, optional writeback(W)
21678 (doubleword load/store)
21680 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
21681 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
21682 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
21683 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
21684 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
21686 Uppercase letters indicate bits that are already encoded at
21687 this point. Lowercase letters are our problem. For the
21688 second block of instructions, the secondary opcode nybble
21689 (bits 8..11) is present, and bit 23 is zero, even if this is
21690 a PC-relative operation. */
21691 newval = md_chars_to_number (buf, THUMB_SIZE);
21693 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
21695 if ((newval & 0xf0000000) == 0xe0000000)
21697 /* Doubleword load/store: 8-bit offset, scaled by 4. */
21699 newval |= (1 << 23);
21702 if (value % 4 != 0)
21704 as_bad_where (fixP->fx_file, fixP->fx_line,
21705 _("offset not a multiple of 4"));
21711 as_bad_where (fixP->fx_file, fixP->fx_line,
21712 _("offset out of range"));
21717 else if ((newval & 0x000f0000) == 0x000f0000)
21719 /* PC-relative, 12-bit offset. */
21721 newval |= (1 << 23);
21726 as_bad_where (fixP->fx_file, fixP->fx_line,
21727 _("offset out of range"));
21732 else if ((newval & 0x00000100) == 0x00000100)
21734 /* Writeback: 8-bit, +/- offset. */
21736 newval |= (1 << 9);
21741 as_bad_where (fixP->fx_file, fixP->fx_line,
21742 _("offset out of range"));
21747 else if ((newval & 0x00000f00) == 0x00000e00)
21749 /* T-instruction: positive 8-bit offset. */
21750 if (value < 0 || value > 0xff)
21752 as_bad_where (fixP->fx_file, fixP->fx_line,
21753 _("offset out of range"));
21761 /* Positive 12-bit or negative 8-bit offset. */
21765 newval |= (1 << 23);
21775 as_bad_where (fixP->fx_file, fixP->fx_line,
21776 _("offset out of range"));
21783 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
21784 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
21787 case BFD_RELOC_ARM_SHIFT_IMM:
21788 newval = md_chars_to_number (buf, INSN_SIZE);
21789 if (((unsigned long) value) > 32
21791 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
21793 as_bad_where (fixP->fx_file, fixP->fx_line,
21794 _("shift expression is too large"));
21799 /* Shifts of zero must be done as lsl. */
21801 else if (value == 32)
21803 newval &= 0xfffff07f;
21804 newval |= (value & 0x1f) << 7;
21805 md_number_to_chars (buf, newval, INSN_SIZE);
21808 case BFD_RELOC_ARM_T32_IMMEDIATE:
21809 case BFD_RELOC_ARM_T32_ADD_IMM:
21810 case BFD_RELOC_ARM_T32_IMM12:
21811 case BFD_RELOC_ARM_T32_ADD_PC12:
21812 /* We claim that this fixup has been processed here,
21813 even if in fact we generate an error because we do
21814 not have a reloc for it, so tc_gen_reloc will reject it. */
21818 && ! S_IS_DEFINED (fixP->fx_addsy))
21820 as_bad_where (fixP->fx_file, fixP->fx_line,
21821 _("undefined symbol %s used as an immediate value"),
21822 S_GET_NAME (fixP->fx_addsy));
21826 newval = md_chars_to_number (buf, THUMB_SIZE);
21828 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
21831 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
21832 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21834 newimm = encode_thumb32_immediate (value);
21835 if (newimm == (unsigned int) FAIL)
21836 newimm = thumb32_negate_data_op (&newval, value);
21838 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
21839 && newimm == (unsigned int) FAIL)
21841 /* Turn add/sum into addw/subw. */
21842 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21843 newval = (newval & 0xfeffffff) | 0x02000000;
21844 /* No flat 12-bit imm encoding for addsw/subsw. */
21845 if ((newval & 0x00100000) == 0)
21847 /* 12 bit immediate for addw/subw. */
21851 newval ^= 0x00a00000;
21854 newimm = (unsigned int) FAIL;
21860 if (newimm == (unsigned int)FAIL)
21862 as_bad_where (fixP->fx_file, fixP->fx_line,
21863 _("invalid constant (%lx) after fixup"),
21864 (unsigned long) value);
21868 newval |= (newimm & 0x800) << 15;
21869 newval |= (newimm & 0x700) << 4;
21870 newval |= (newimm & 0x0ff);
21872 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
21873 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
21876 case BFD_RELOC_ARM_SMC:
21877 if (((unsigned long) value) > 0xffff)
21878 as_bad_where (fixP->fx_file, fixP->fx_line,
21879 _("invalid smc expression"));
21880 newval = md_chars_to_number (buf, INSN_SIZE);
21881 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21882 md_number_to_chars (buf, newval, INSN_SIZE);
21885 case BFD_RELOC_ARM_HVC:
21886 if (((unsigned long) value) > 0xffff)
21887 as_bad_where (fixP->fx_file, fixP->fx_line,
21888 _("invalid hvc expression"));
21889 newval = md_chars_to_number (buf, INSN_SIZE);
21890 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21891 md_number_to_chars (buf, newval, INSN_SIZE);
21894 case BFD_RELOC_ARM_SWI:
21895 if (fixP->tc_fix_data != 0)
21897 if (((unsigned long) value) > 0xff)
21898 as_bad_where (fixP->fx_file, fixP->fx_line,
21899 _("invalid swi expression"));
21900 newval = md_chars_to_number (buf, THUMB_SIZE);
21902 md_number_to_chars (buf, newval, THUMB_SIZE);
21906 if (((unsigned long) value) > 0x00ffffff)
21907 as_bad_where (fixP->fx_file, fixP->fx_line,
21908 _("invalid swi expression"));
21909 newval = md_chars_to_number (buf, INSN_SIZE);
21911 md_number_to_chars (buf, newval, INSN_SIZE);
21915 case BFD_RELOC_ARM_MULTI:
21916 if (((unsigned long) value) > 0xffff)
21917 as_bad_where (fixP->fx_file, fixP->fx_line,
21918 _("invalid expression in load/store multiple"));
21919 newval = value | md_chars_to_number (buf, INSN_SIZE);
21920 md_number_to_chars (buf, newval, INSN_SIZE);
21924 case BFD_RELOC_ARM_PCREL_CALL:
21926 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21928 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21929 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21930 && THUMB_IS_FUNC (fixP->fx_addsy))
21931 /* Flip the bl to blx. This is a simple flip
21932 bit here because we generate PCREL_CALL for
21933 unconditional bls. */
21935 newval = md_chars_to_number (buf, INSN_SIZE);
21936 newval = newval | 0x10000000;
21937 md_number_to_chars (buf, newval, INSN_SIZE);
21943 goto arm_branch_common;
21945 case BFD_RELOC_ARM_PCREL_JUMP:
21946 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21948 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21949 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21950 && THUMB_IS_FUNC (fixP->fx_addsy))
21952 /* This would map to a bl<cond>, b<cond>,
21953 b<always> to a Thumb function. We
21954 need to force a relocation for this particular
21956 newval = md_chars_to_number (buf, INSN_SIZE);
21960 case BFD_RELOC_ARM_PLT32:
21962 case BFD_RELOC_ARM_PCREL_BRANCH:
21964 goto arm_branch_common;
21966 case BFD_RELOC_ARM_PCREL_BLX:
21969 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21971 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21972 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21973 && ARM_IS_FUNC (fixP->fx_addsy))
21975 /* Flip the blx to a bl and warn. */
21976 const char *name = S_GET_NAME (fixP->fx_addsy);
21977 newval = 0xeb000000;
21978 as_warn_where (fixP->fx_file, fixP->fx_line,
21979 _("blx to '%s' an ARM ISA state function changed to bl"),
21981 md_number_to_chars (buf, newval, INSN_SIZE);
21987 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21988 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
21992 /* We are going to store value (shifted right by two) in the
21993 instruction, in a 24 bit, signed field. Bits 26 through 32 either
21994 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
21995 also be be clear. */
21997 as_bad_where (fixP->fx_file, fixP->fx_line,
21998 _("misaligned branch destination"));
21999 if ((value & (offsetT)0xfe000000) != (offsetT)0
22000 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22001 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22003 if (fixP->fx_done || !seg->use_rela_p)
22005 newval = md_chars_to_number (buf, INSN_SIZE);
22006 newval |= (value >> 2) & 0x00ffffff;
22007 /* Set the H bit on BLX instructions. */
22011 newval |= 0x01000000;
22013 newval &= ~0x01000000;
22015 md_number_to_chars (buf, newval, INSN_SIZE);
22019 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22020 /* CBZ can only branch forward. */
22022 /* Attempts to use CBZ to branch to the next instruction
22023 (which, strictly speaking, are prohibited) will be turned into
22026 FIXME: It may be better to remove the instruction completely and
22027 perform relaxation. */
22030 newval = md_chars_to_number (buf, THUMB_SIZE);
22031 newval = 0xbf00; /* NOP encoding T1 */
22032 md_number_to_chars (buf, newval, THUMB_SIZE);
22037 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22039 if (fixP->fx_done || !seg->use_rela_p)
22041 newval = md_chars_to_number (buf, THUMB_SIZE);
22042 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22043 md_number_to_chars (buf, newval, THUMB_SIZE);
22048 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22049 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22050 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22052 if (fixP->fx_done || !seg->use_rela_p)
22054 newval = md_chars_to_number (buf, THUMB_SIZE);
22055 newval |= (value & 0x1ff) >> 1;
22056 md_number_to_chars (buf, newval, THUMB_SIZE);
22060 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22061 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22062 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22064 if (fixP->fx_done || !seg->use_rela_p)
22066 newval = md_chars_to_number (buf, THUMB_SIZE);
22067 newval |= (value & 0xfff) >> 1;
22068 md_number_to_chars (buf, newval, THUMB_SIZE);
22072 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22074 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22075 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22076 && ARM_IS_FUNC (fixP->fx_addsy)
22077 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22079 /* Force a relocation for a branch 20 bits wide. */
22082 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22083 as_bad_where (fixP->fx_file, fixP->fx_line,
22084 _("conditional branch out of range"));
22086 if (fixP->fx_done || !seg->use_rela_p)
22089 addressT S, J1, J2, lo, hi;
22091 S = (value & 0x00100000) >> 20;
22092 J2 = (value & 0x00080000) >> 19;
22093 J1 = (value & 0x00040000) >> 18;
22094 hi = (value & 0x0003f000) >> 12;
22095 lo = (value & 0x00000ffe) >> 1;
22097 newval = md_chars_to_number (buf, THUMB_SIZE);
22098 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22099 newval |= (S << 10) | hi;
22100 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22101 md_number_to_chars (buf, newval, THUMB_SIZE);
22102 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22106 case BFD_RELOC_THUMB_PCREL_BLX:
22107 /* If there is a blx from a thumb state function to
22108 another thumb function flip this to a bl and warn
22112 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22113 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22114 && THUMB_IS_FUNC (fixP->fx_addsy))
22116 const char *name = S_GET_NAME (fixP->fx_addsy);
22117 as_warn_where (fixP->fx_file, fixP->fx_line,
22118 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22120 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22121 newval = newval | 0x1000;
22122 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22123 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22128 goto thumb_bl_common;
22130 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22131 /* A bl from Thumb state ISA to an internal ARM state function
22132 is converted to a blx. */
22134 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22135 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22136 && ARM_IS_FUNC (fixP->fx_addsy)
22137 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22139 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22140 newval = newval & ~0x1000;
22141 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22142 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22148 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22149 /* For a BLX instruction, make sure that the relocation is rounded up
22150 to a word boundary. This follows the semantics of the instruction
22151 which specifies that bit 1 of the target address will come from bit
22152 1 of the base address. */
22153 value = (value + 3) & ~ 3;
22156 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22157 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22158 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22161 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22163 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22164 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22165 else if ((value & ~0x1ffffff)
22166 && ((value & ~0x1ffffff) != ~0x1ffffff))
22167 as_bad_where (fixP->fx_file, fixP->fx_line,
22168 _("Thumb2 branch out of range"));
22171 if (fixP->fx_done || !seg->use_rela_p)
22172 encode_thumb2_b_bl_offset (buf, value);
22176 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22177 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22178 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22180 if (fixP->fx_done || !seg->use_rela_p)
22181 encode_thumb2_b_bl_offset (buf, value);
22186 if (fixP->fx_done || !seg->use_rela_p)
22187 md_number_to_chars (buf, value, 1);
22191 if (fixP->fx_done || !seg->use_rela_p)
22192 md_number_to_chars (buf, value, 2);
22196 case BFD_RELOC_ARM_TLS_CALL:
22197 case BFD_RELOC_ARM_THM_TLS_CALL:
22198 case BFD_RELOC_ARM_TLS_DESCSEQ:
22199 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22200 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22203 case BFD_RELOC_ARM_TLS_GOTDESC:
22204 case BFD_RELOC_ARM_TLS_GD32:
22205 case BFD_RELOC_ARM_TLS_LE32:
22206 case BFD_RELOC_ARM_TLS_IE32:
22207 case BFD_RELOC_ARM_TLS_LDM32:
22208 case BFD_RELOC_ARM_TLS_LDO32:
22209 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22212 case BFD_RELOC_ARM_GOT32:
22213 case BFD_RELOC_ARM_GOTOFF:
22214 if (fixP->fx_done || !seg->use_rela_p)
22215 md_number_to_chars (buf, 0, 4);
22218 case BFD_RELOC_ARM_GOT_PREL:
22219 if (fixP->fx_done || !seg->use_rela_p)
22220 md_number_to_chars (buf, value, 4);
22223 case BFD_RELOC_ARM_TARGET2:
22224 /* TARGET2 is not partial-inplace, so we need to write the
22225 addend here for REL targets, because it won't be written out
22226 during reloc processing later. */
22227 if (fixP->fx_done || !seg->use_rela_p)
22228 md_number_to_chars (buf, fixP->fx_offset, 4);
22232 case BFD_RELOC_RVA:
22234 case BFD_RELOC_ARM_TARGET1:
22235 case BFD_RELOC_ARM_ROSEGREL32:
22236 case BFD_RELOC_ARM_SBREL32:
22237 case BFD_RELOC_32_PCREL:
22239 case BFD_RELOC_32_SECREL:
22241 if (fixP->fx_done || !seg->use_rela_p)
22243 /* For WinCE we only do this for pcrel fixups. */
22244 if (fixP->fx_done || fixP->fx_pcrel)
22246 md_number_to_chars (buf, value, 4);
22250 case BFD_RELOC_ARM_PREL31:
22251 if (fixP->fx_done || !seg->use_rela_p)
22253 newval = md_chars_to_number (buf, 4) & 0x80000000;
22254 if ((value ^ (value >> 1)) & 0x40000000)
22256 as_bad_where (fixP->fx_file, fixP->fx_line,
22257 _("rel31 relocation overflow"));
22259 newval |= value & 0x7fffffff;
22260 md_number_to_chars (buf, newval, 4);
22265 case BFD_RELOC_ARM_CP_OFF_IMM:
22266 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22267 if (value < -1023 || value > 1023 || (value & 3))
22268 as_bad_where (fixP->fx_file, fixP->fx_line,
22269 _("co-processor offset out of range"));
22274 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22275 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22276 newval = md_chars_to_number (buf, INSN_SIZE);
22278 newval = get_thumb32_insn (buf);
22280 newval &= 0xffffff00;
22283 newval &= 0xff7fff00;
22284 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22286 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22287 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22288 md_number_to_chars (buf, newval, INSN_SIZE);
22290 put_thumb32_insn (buf, newval);
22293 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22294 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22295 if (value < -255 || value > 255)
22296 as_bad_where (fixP->fx_file, fixP->fx_line,
22297 _("co-processor offset out of range"));
22299 goto cp_off_common;
22301 case BFD_RELOC_ARM_THUMB_OFFSET:
22302 newval = md_chars_to_number (buf, THUMB_SIZE);
22303 /* Exactly what ranges, and where the offset is inserted depends
22304 on the type of instruction, we can establish this from the
22306 switch (newval >> 12)
22308 case 4: /* PC load. */
22309 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22310 forced to zero for these loads; md_pcrel_from has already
22311 compensated for this. */
22313 as_bad_where (fixP->fx_file, fixP->fx_line,
22314 _("invalid offset, target not word aligned (0x%08lX)"),
22315 (((unsigned long) fixP->fx_frag->fr_address
22316 + (unsigned long) fixP->fx_where) & ~3)
22317 + (unsigned long) value);
22319 if (value & ~0x3fc)
22320 as_bad_where (fixP->fx_file, fixP->fx_line,
22321 _("invalid offset, value too big (0x%08lX)"),
22324 newval |= value >> 2;
22327 case 9: /* SP load/store. */
22328 if (value & ~0x3fc)
22329 as_bad_where (fixP->fx_file, fixP->fx_line,
22330 _("invalid offset, value too big (0x%08lX)"),
22332 newval |= value >> 2;
22335 case 6: /* Word load/store. */
22337 as_bad_where (fixP->fx_file, fixP->fx_line,
22338 _("invalid offset, value too big (0x%08lX)"),
22340 newval |= value << 4; /* 6 - 2. */
22343 case 7: /* Byte load/store. */
22345 as_bad_where (fixP->fx_file, fixP->fx_line,
22346 _("invalid offset, value too big (0x%08lX)"),
22348 newval |= value << 6;
22351 case 8: /* Halfword load/store. */
22353 as_bad_where (fixP->fx_file, fixP->fx_line,
22354 _("invalid offset, value too big (0x%08lX)"),
22356 newval |= value << 5; /* 6 - 1. */
22360 as_bad_where (fixP->fx_file, fixP->fx_line,
22361 "Unable to process relocation for thumb opcode: %lx",
22362 (unsigned long) newval);
22365 md_number_to_chars (buf, newval, THUMB_SIZE);
22368 case BFD_RELOC_ARM_THUMB_ADD:
22369 /* This is a complicated relocation, since we use it for all of
22370 the following immediate relocations:
22374 9bit ADD/SUB SP word-aligned
22375 10bit ADD PC/SP word-aligned
22377 The type of instruction being processed is encoded in the
22384 newval = md_chars_to_number (buf, THUMB_SIZE);
22386 int rd = (newval >> 4) & 0xf;
22387 int rs = newval & 0xf;
22388 int subtract = !!(newval & 0x8000);
22390 /* Check for HI regs, only very restricted cases allowed:
22391 Adjusting SP, and using PC or SP to get an address. */
22392 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22393 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22394 as_bad_where (fixP->fx_file, fixP->fx_line,
22395 _("invalid Hi register with immediate"));
22397 /* If value is negative, choose the opposite instruction. */
22401 subtract = !subtract;
22403 as_bad_where (fixP->fx_file, fixP->fx_line,
22404 _("immediate value out of range"));
22409 if (value & ~0x1fc)
22410 as_bad_where (fixP->fx_file, fixP->fx_line,
22411 _("invalid immediate for stack address calculation"));
22412 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22413 newval |= value >> 2;
22415 else if (rs == REG_PC || rs == REG_SP)
22417 if (subtract || value & ~0x3fc)
22418 as_bad_where (fixP->fx_file, fixP->fx_line,
22419 _("invalid immediate for address calculation (value = 0x%08lX)"),
22420 (unsigned long) value);
22421 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22423 newval |= value >> 2;
22428 as_bad_where (fixP->fx_file, fixP->fx_line,
22429 _("immediate value out of range"));
22430 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22431 newval |= (rd << 8) | value;
22436 as_bad_where (fixP->fx_file, fixP->fx_line,
22437 _("immediate value out of range"));
22438 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22439 newval |= rd | (rs << 3) | (value << 6);
22442 md_number_to_chars (buf, newval, THUMB_SIZE);
22445 case BFD_RELOC_ARM_THUMB_IMM:
22446 newval = md_chars_to_number (buf, THUMB_SIZE);
22447 if (value < 0 || value > 255)
22448 as_bad_where (fixP->fx_file, fixP->fx_line,
22449 _("invalid immediate: %ld is out of range"),
22452 md_number_to_chars (buf, newval, THUMB_SIZE);
22455 case BFD_RELOC_ARM_THUMB_SHIFT:
22456 /* 5bit shift value (0..32). LSL cannot take 32. */
22457 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22458 temp = newval & 0xf800;
22459 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22460 as_bad_where (fixP->fx_file, fixP->fx_line,
22461 _("invalid shift value: %ld"), (long) value);
22462 /* Shifts of zero must be encoded as LSL. */
22464 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22465 /* Shifts of 32 are encoded as zero. */
22466 else if (value == 32)
22468 newval |= value << 6;
22469 md_number_to_chars (buf, newval, THUMB_SIZE);
22472 case BFD_RELOC_VTABLE_INHERIT:
22473 case BFD_RELOC_VTABLE_ENTRY:
22477 case BFD_RELOC_ARM_MOVW:
22478 case BFD_RELOC_ARM_MOVT:
22479 case BFD_RELOC_ARM_THUMB_MOVW:
22480 case BFD_RELOC_ARM_THUMB_MOVT:
22481 if (fixP->fx_done || !seg->use_rela_p)
22483 /* REL format relocations are limited to a 16-bit addend. */
22484 if (!fixP->fx_done)
22486 if (value < -0x8000 || value > 0x7fff)
22487 as_bad_where (fixP->fx_file, fixP->fx_line,
22488 _("offset out of range"));
22490 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22491 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22496 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22497 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22499 newval = get_thumb32_insn (buf);
22500 newval &= 0xfbf08f00;
22501 newval |= (value & 0xf000) << 4;
22502 newval |= (value & 0x0800) << 15;
22503 newval |= (value & 0x0700) << 4;
22504 newval |= (value & 0x00ff);
22505 put_thumb32_insn (buf, newval);
22509 newval = md_chars_to_number (buf, 4);
22510 newval &= 0xfff0f000;
22511 newval |= value & 0x0fff;
22512 newval |= (value & 0xf000) << 4;
22513 md_number_to_chars (buf, newval, 4);
22518 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22519 case BFD_RELOC_ARM_ALU_PC_G0:
22520 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22521 case BFD_RELOC_ARM_ALU_PC_G1:
22522 case BFD_RELOC_ARM_ALU_PC_G2:
22523 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22524 case BFD_RELOC_ARM_ALU_SB_G0:
22525 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22526 case BFD_RELOC_ARM_ALU_SB_G1:
22527 case BFD_RELOC_ARM_ALU_SB_G2:
22528 gas_assert (!fixP->fx_done);
22529 if (!seg->use_rela_p)
22532 bfd_vma encoded_addend;
22533 bfd_vma addend_abs = abs (value);
22535 /* Check that the absolute value of the addend can be
22536 expressed as an 8-bit constant plus a rotation. */
22537 encoded_addend = encode_arm_immediate (addend_abs);
22538 if (encoded_addend == (unsigned int) FAIL)
22539 as_bad_where (fixP->fx_file, fixP->fx_line,
22540 _("the offset 0x%08lX is not representable"),
22541 (unsigned long) addend_abs);
22543 /* Extract the instruction. */
22544 insn = md_chars_to_number (buf, INSN_SIZE);
22546 /* If the addend is positive, use an ADD instruction.
22547 Otherwise use a SUB. Take care not to destroy the S bit. */
22548 insn &= 0xff1fffff;
22554 /* Place the encoded addend into the first 12 bits of the
22556 insn &= 0xfffff000;
22557 insn |= encoded_addend;
22559 /* Update the instruction. */
22560 md_number_to_chars (buf, insn, INSN_SIZE);
22564 case BFD_RELOC_ARM_LDR_PC_G0:
22565 case BFD_RELOC_ARM_LDR_PC_G1:
22566 case BFD_RELOC_ARM_LDR_PC_G2:
22567 case BFD_RELOC_ARM_LDR_SB_G0:
22568 case BFD_RELOC_ARM_LDR_SB_G1:
22569 case BFD_RELOC_ARM_LDR_SB_G2:
22570 gas_assert (!fixP->fx_done);
22571 if (!seg->use_rela_p)
22574 bfd_vma addend_abs = abs (value);
22576 /* Check that the absolute value of the addend can be
22577 encoded in 12 bits. */
22578 if (addend_abs >= 0x1000)
22579 as_bad_where (fixP->fx_file, fixP->fx_line,
22580 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22581 (unsigned long) addend_abs);
22583 /* Extract the instruction. */
22584 insn = md_chars_to_number (buf, INSN_SIZE);
22586 /* If the addend is negative, clear bit 23 of the instruction.
22587 Otherwise set it. */
22589 insn &= ~(1 << 23);
22593 /* Place the absolute value of the addend into the first 12 bits
22594 of the instruction. */
22595 insn &= 0xfffff000;
22596 insn |= addend_abs;
22598 /* Update the instruction. */
22599 md_number_to_chars (buf, insn, INSN_SIZE);
22603 case BFD_RELOC_ARM_LDRS_PC_G0:
22604 case BFD_RELOC_ARM_LDRS_PC_G1:
22605 case BFD_RELOC_ARM_LDRS_PC_G2:
22606 case BFD_RELOC_ARM_LDRS_SB_G0:
22607 case BFD_RELOC_ARM_LDRS_SB_G1:
22608 case BFD_RELOC_ARM_LDRS_SB_G2:
22609 gas_assert (!fixP->fx_done);
22610 if (!seg->use_rela_p)
22613 bfd_vma addend_abs = abs (value);
22615 /* Check that the absolute value of the addend can be
22616 encoded in 8 bits. */
22617 if (addend_abs >= 0x100)
22618 as_bad_where (fixP->fx_file, fixP->fx_line,
22619 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
22620 (unsigned long) addend_abs);
22622 /* Extract the instruction. */
22623 insn = md_chars_to_number (buf, INSN_SIZE);
22625 /* If the addend is negative, clear bit 23 of the instruction.
22626 Otherwise set it. */
22628 insn &= ~(1 << 23);
22632 /* Place the first four bits of the absolute value of the addend
22633 into the first 4 bits of the instruction, and the remaining
22634 four into bits 8 .. 11. */
22635 insn &= 0xfffff0f0;
22636 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
22638 /* Update the instruction. */
22639 md_number_to_chars (buf, insn, INSN_SIZE);
22643 case BFD_RELOC_ARM_LDC_PC_G0:
22644 case BFD_RELOC_ARM_LDC_PC_G1:
22645 case BFD_RELOC_ARM_LDC_PC_G2:
22646 case BFD_RELOC_ARM_LDC_SB_G0:
22647 case BFD_RELOC_ARM_LDC_SB_G1:
22648 case BFD_RELOC_ARM_LDC_SB_G2:
22649 gas_assert (!fixP->fx_done);
22650 if (!seg->use_rela_p)
22653 bfd_vma addend_abs = abs (value);
22655 /* Check that the absolute value of the addend is a multiple of
22656 four and, when divided by four, fits in 8 bits. */
22657 if (addend_abs & 0x3)
22658 as_bad_where (fixP->fx_file, fixP->fx_line,
22659 _("bad offset 0x%08lX (must be word-aligned)"),
22660 (unsigned long) addend_abs);
22662 if ((addend_abs >> 2) > 0xff)
22663 as_bad_where (fixP->fx_file, fixP->fx_line,
22664 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
22665 (unsigned long) addend_abs);
22667 /* Extract the instruction. */
22668 insn = md_chars_to_number (buf, INSN_SIZE);
22670 /* If the addend is negative, clear bit 23 of the instruction.
22671 Otherwise set it. */
22673 insn &= ~(1 << 23);
22677 /* Place the addend (divided by four) into the first eight
22678 bits of the instruction. */
22679 insn &= 0xfffffff0;
22680 insn |= addend_abs >> 2;
22682 /* Update the instruction. */
22683 md_number_to_chars (buf, insn, INSN_SIZE);
22687 case BFD_RELOC_ARM_V4BX:
22688 /* This will need to go in the object file. */
22692 case BFD_RELOC_UNUSED:
22694 as_bad_where (fixP->fx_file, fixP->fx_line,
22695 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
22699 /* Translate internal representation of relocation info to BFD target
22703 tc_gen_reloc (asection *section, fixS *fixp)
22706 bfd_reloc_code_real_type code;
22708 reloc = (arelent *) xmalloc (sizeof (arelent));
22710 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
22711 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
22712 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
22714 if (fixp->fx_pcrel)
22716 if (section->use_rela_p)
22717 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
22719 fixp->fx_offset = reloc->address;
22721 reloc->addend = fixp->fx_offset;
22723 switch (fixp->fx_r_type)
22726 if (fixp->fx_pcrel)
22728 code = BFD_RELOC_8_PCREL;
22733 if (fixp->fx_pcrel)
22735 code = BFD_RELOC_16_PCREL;
22740 if (fixp->fx_pcrel)
22742 code = BFD_RELOC_32_PCREL;
22746 case BFD_RELOC_ARM_MOVW:
22747 if (fixp->fx_pcrel)
22749 code = BFD_RELOC_ARM_MOVW_PCREL;
22753 case BFD_RELOC_ARM_MOVT:
22754 if (fixp->fx_pcrel)
22756 code = BFD_RELOC_ARM_MOVT_PCREL;
22760 case BFD_RELOC_ARM_THUMB_MOVW:
22761 if (fixp->fx_pcrel)
22763 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
22767 case BFD_RELOC_ARM_THUMB_MOVT:
22768 if (fixp->fx_pcrel)
22770 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
22774 case BFD_RELOC_NONE:
22775 case BFD_RELOC_ARM_PCREL_BRANCH:
22776 case BFD_RELOC_ARM_PCREL_BLX:
22777 case BFD_RELOC_RVA:
22778 case BFD_RELOC_THUMB_PCREL_BRANCH7:
22779 case BFD_RELOC_THUMB_PCREL_BRANCH9:
22780 case BFD_RELOC_THUMB_PCREL_BRANCH12:
22781 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22782 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22783 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22784 case BFD_RELOC_VTABLE_ENTRY:
22785 case BFD_RELOC_VTABLE_INHERIT:
22787 case BFD_RELOC_32_SECREL:
22789 code = fixp->fx_r_type;
22792 case BFD_RELOC_THUMB_PCREL_BLX:
22794 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22795 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
22798 code = BFD_RELOC_THUMB_PCREL_BLX;
22801 case BFD_RELOC_ARM_LITERAL:
22802 case BFD_RELOC_ARM_HWLITERAL:
22803 /* If this is called then the a literal has
22804 been referenced across a section boundary. */
22805 as_bad_where (fixp->fx_file, fixp->fx_line,
22806 _("literal referenced across section boundary"));
22810 case BFD_RELOC_ARM_TLS_CALL:
22811 case BFD_RELOC_ARM_THM_TLS_CALL:
22812 case BFD_RELOC_ARM_TLS_DESCSEQ:
22813 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22814 case BFD_RELOC_ARM_GOT32:
22815 case BFD_RELOC_ARM_GOTOFF:
22816 case BFD_RELOC_ARM_GOT_PREL:
22817 case BFD_RELOC_ARM_PLT32:
22818 case BFD_RELOC_ARM_TARGET1:
22819 case BFD_RELOC_ARM_ROSEGREL32:
22820 case BFD_RELOC_ARM_SBREL32:
22821 case BFD_RELOC_ARM_PREL31:
22822 case BFD_RELOC_ARM_TARGET2:
22823 case BFD_RELOC_ARM_TLS_LE32:
22824 case BFD_RELOC_ARM_TLS_LDO32:
22825 case BFD_RELOC_ARM_PCREL_CALL:
22826 case BFD_RELOC_ARM_PCREL_JUMP:
22827 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22828 case BFD_RELOC_ARM_ALU_PC_G0:
22829 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22830 case BFD_RELOC_ARM_ALU_PC_G1:
22831 case BFD_RELOC_ARM_ALU_PC_G2:
22832 case BFD_RELOC_ARM_LDR_PC_G0:
22833 case BFD_RELOC_ARM_LDR_PC_G1:
22834 case BFD_RELOC_ARM_LDR_PC_G2:
22835 case BFD_RELOC_ARM_LDRS_PC_G0:
22836 case BFD_RELOC_ARM_LDRS_PC_G1:
22837 case BFD_RELOC_ARM_LDRS_PC_G2:
22838 case BFD_RELOC_ARM_LDC_PC_G0:
22839 case BFD_RELOC_ARM_LDC_PC_G1:
22840 case BFD_RELOC_ARM_LDC_PC_G2:
22841 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22842 case BFD_RELOC_ARM_ALU_SB_G0:
22843 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22844 case BFD_RELOC_ARM_ALU_SB_G1:
22845 case BFD_RELOC_ARM_ALU_SB_G2:
22846 case BFD_RELOC_ARM_LDR_SB_G0:
22847 case BFD_RELOC_ARM_LDR_SB_G1:
22848 case BFD_RELOC_ARM_LDR_SB_G2:
22849 case BFD_RELOC_ARM_LDRS_SB_G0:
22850 case BFD_RELOC_ARM_LDRS_SB_G1:
22851 case BFD_RELOC_ARM_LDRS_SB_G2:
22852 case BFD_RELOC_ARM_LDC_SB_G0:
22853 case BFD_RELOC_ARM_LDC_SB_G1:
22854 case BFD_RELOC_ARM_LDC_SB_G2:
22855 case BFD_RELOC_ARM_V4BX:
22856 code = fixp->fx_r_type;
22859 case BFD_RELOC_ARM_TLS_GOTDESC:
22860 case BFD_RELOC_ARM_TLS_GD32:
22861 case BFD_RELOC_ARM_TLS_IE32:
22862 case BFD_RELOC_ARM_TLS_LDM32:
22863 /* BFD will include the symbol's address in the addend.
22864 But we don't want that, so subtract it out again here. */
22865 if (!S_IS_COMMON (fixp->fx_addsy))
22866 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
22867 code = fixp->fx_r_type;
22871 case BFD_RELOC_ARM_IMMEDIATE:
22872 as_bad_where (fixp->fx_file, fixp->fx_line,
22873 _("internal relocation (type: IMMEDIATE) not fixed up"));
22876 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22877 as_bad_where (fixp->fx_file, fixp->fx_line,
22878 _("ADRL used for a symbol not defined in the same file"));
22881 case BFD_RELOC_ARM_OFFSET_IMM:
22882 if (section->use_rela_p)
22884 code = fixp->fx_r_type;
22888 if (fixp->fx_addsy != NULL
22889 && !S_IS_DEFINED (fixp->fx_addsy)
22890 && S_IS_LOCAL (fixp->fx_addsy))
22892 as_bad_where (fixp->fx_file, fixp->fx_line,
22893 _("undefined local label `%s'"),
22894 S_GET_NAME (fixp->fx_addsy));
22898 as_bad_where (fixp->fx_file, fixp->fx_line,
22899 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
22906 switch (fixp->fx_r_type)
22908 case BFD_RELOC_NONE: type = "NONE"; break;
22909 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
22910 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
22911 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
22912 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
22913 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
22914 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
22915 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
22916 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
22917 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
22918 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
22919 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
22920 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
22921 default: type = _("<unknown>"); break;
22923 as_bad_where (fixp->fx_file, fixp->fx_line,
22924 _("cannot represent %s relocation in this object file format"),
22931 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
22933 && fixp->fx_addsy == GOT_symbol)
22935 code = BFD_RELOC_ARM_GOTPC;
22936 reloc->addend = fixp->fx_offset = reloc->address;
22940 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
22942 if (reloc->howto == NULL)
22944 as_bad_where (fixp->fx_file, fixp->fx_line,
22945 _("cannot represent %s relocation in this object file format"),
22946 bfd_get_reloc_code_name (code));
22950 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
22951 vtable entry to be used in the relocation's section offset. */
22952 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22953 reloc->address = fixp->fx_offset;
22958 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
22961 cons_fix_new_arm (fragS * frag,
22966 bfd_reloc_code_real_type type;
22970 FIXME: @@ Should look at CPU word size. */
22974 type = BFD_RELOC_8;
22977 type = BFD_RELOC_16;
22981 type = BFD_RELOC_32;
22984 type = BFD_RELOC_64;
22989 if (exp->X_op == O_secrel)
22991 exp->X_op = O_symbol;
22992 type = BFD_RELOC_32_SECREL;
22996 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22999 #if defined (OBJ_COFF)
23001 arm_validate_fix (fixS * fixP)
23003 /* If the destination of the branch is a defined symbol which does not have
23004 the THUMB_FUNC attribute, then we must be calling a function which has
23005 the (interfacearm) attribute. We look for the Thumb entry point to that
23006 function and change the branch to refer to that function instead. */
23007 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23008 && fixP->fx_addsy != NULL
23009 && S_IS_DEFINED (fixP->fx_addsy)
23010 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23012 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23019 arm_force_relocation (struct fix * fixp)
23021 #if defined (OBJ_COFF) && defined (TE_PE)
23022 if (fixp->fx_r_type == BFD_RELOC_RVA)
23026 /* In case we have a call or a branch to a function in ARM ISA mode from
23027 a thumb function or vice-versa force the relocation. These relocations
23028 are cleared off for some cores that might have blx and simple transformations
23032 switch (fixp->fx_r_type)
23034 case BFD_RELOC_ARM_PCREL_JUMP:
23035 case BFD_RELOC_ARM_PCREL_CALL:
23036 case BFD_RELOC_THUMB_PCREL_BLX:
23037 if (THUMB_IS_FUNC (fixp->fx_addsy))
23041 case BFD_RELOC_ARM_PCREL_BLX:
23042 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23043 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23044 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23045 if (ARM_IS_FUNC (fixp->fx_addsy))
23054 /* Resolve these relocations even if the symbol is extern or weak.
23055 Technically this is probably wrong due to symbol preemption.
23056 In practice these relocations do not have enough range to be useful
23057 at dynamic link time, and some code (e.g. in the Linux kernel)
23058 expects these references to be resolved. */
23059 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23060 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23061 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23062 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23063 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23064 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23065 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23066 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23067 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23068 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23069 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23070 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23071 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23072 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23075 /* Always leave these relocations for the linker. */
23076 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23077 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23078 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23081 /* Always generate relocations against function symbols. */
23082 if (fixp->fx_r_type == BFD_RELOC_32
23084 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23087 return generic_force_reloc (fixp);
23090 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23091 /* Relocations against function names must be left unadjusted,
23092 so that the linker can use this information to generate interworking
23093 stubs. The MIPS version of this function
23094 also prevents relocations that are mips-16 specific, but I do not
23095 know why it does this.
23098 There is one other problem that ought to be addressed here, but
23099 which currently is not: Taking the address of a label (rather
23100 than a function) and then later jumping to that address. Such
23101 addresses also ought to have their bottom bit set (assuming that
23102 they reside in Thumb code), but at the moment they will not. */
23105 arm_fix_adjustable (fixS * fixP)
23107 if (fixP->fx_addsy == NULL)
23110 /* Preserve relocations against symbols with function type. */
23111 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23114 if (THUMB_IS_FUNC (fixP->fx_addsy)
23115 && fixP->fx_subsy == NULL)
23118 /* We need the symbol name for the VTABLE entries. */
23119 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23120 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23123 /* Don't allow symbols to be discarded on GOT related relocs. */
23124 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23125 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23126 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23127 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23128 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23129 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23130 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23131 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23132 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23133 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23134 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23135 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23136 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23137 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23140 /* Similarly for group relocations. */
23141 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23142 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23143 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23146 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
23147 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23148 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23149 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23150 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23151 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23152 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23153 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23154 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23159 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23164 elf32_arm_target_format (void)
23167 return (target_big_endian
23168 ? "elf32-bigarm-symbian"
23169 : "elf32-littlearm-symbian");
23170 #elif defined (TE_VXWORKS)
23171 return (target_big_endian
23172 ? "elf32-bigarm-vxworks"
23173 : "elf32-littlearm-vxworks");
23174 #elif defined (TE_NACL)
23175 return (target_big_endian
23176 ? "elf32-bigarm-nacl"
23177 : "elf32-littlearm-nacl");
23179 if (target_big_endian)
23180 return "elf32-bigarm";
23182 return "elf32-littlearm";
23187 armelf_frob_symbol (symbolS * symp,
23190 elf_frob_symbol (symp, puntp);
23194 /* MD interface: Finalization. */
23199 literal_pool * pool;
23201 /* Ensure that all the IT blocks are properly closed. */
23202 check_it_blocks_finished ();
23204 for (pool = list_of_pools; pool; pool = pool->next)
23206 /* Put it at the end of the relevant section. */
23207 subseg_set (pool->section, pool->sub_section);
23209 arm_elf_change_section ();
23216 /* Remove any excess mapping symbols generated for alignment frags in
23217 SEC. We may have created a mapping symbol before a zero byte
23218 alignment; remove it if there's a mapping symbol after the
23221 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
23222 void *dummy ATTRIBUTE_UNUSED)
23224 segment_info_type *seginfo = seg_info (sec);
23227 if (seginfo == NULL || seginfo->frchainP == NULL)
23230 for (fragp = seginfo->frchainP->frch_root;
23232 fragp = fragp->fr_next)
23234 symbolS *sym = fragp->tc_frag_data.last_map;
23235 fragS *next = fragp->fr_next;
23237 /* Variable-sized frags have been converted to fixed size by
23238 this point. But if this was variable-sized to start with,
23239 there will be a fixed-size frag after it. So don't handle
23241 if (sym == NULL || next == NULL)
23244 if (S_GET_VALUE (sym) < next->fr_address)
23245 /* Not at the end of this frag. */
23247 know (S_GET_VALUE (sym) == next->fr_address);
23251 if (next->tc_frag_data.first_map != NULL)
23253 /* Next frag starts with a mapping symbol. Discard this
23255 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23259 if (next->fr_next == NULL)
23261 /* This mapping symbol is at the end of the section. Discard
23263 know (next->fr_fix == 0 && next->fr_var == 0);
23264 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23268 /* As long as we have empty frags without any mapping symbols,
23270 /* If the next frag is non-empty and does not start with a
23271 mapping symbol, then this mapping symbol is required. */
23272 if (next->fr_address != next->fr_next->fr_address)
23275 next = next->fr_next;
23277 while (next != NULL);
23282 /* Adjust the symbol table. This marks Thumb symbols as distinct from
23286 arm_adjust_symtab (void)
23291 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23293 if (ARM_IS_THUMB (sym))
23295 if (THUMB_IS_FUNC (sym))
23297 /* Mark the symbol as a Thumb function. */
23298 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
23299 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
23300 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
23302 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
23303 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
23305 as_bad (_("%s: unexpected function type: %d"),
23306 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
23308 else switch (S_GET_STORAGE_CLASS (sym))
23311 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
23314 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
23317 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
23325 if (ARM_IS_INTERWORK (sym))
23326 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
23333 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23335 if (ARM_IS_THUMB (sym))
23337 elf_symbol_type * elf_sym;
23339 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
23340 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
23342 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
23343 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
23345 /* If it's a .thumb_func, declare it as so,
23346 otherwise tag label as .code 16. */
23347 if (THUMB_IS_FUNC (sym))
23348 elf_sym->internal_elf_sym.st_target_internal
23349 = ST_BRANCH_TO_THUMB;
23350 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23351 elf_sym->internal_elf_sym.st_info =
23352 ELF_ST_INFO (bind, STT_ARM_16BIT);
23357 /* Remove any overlapping mapping symbols generated by alignment frags. */
23358 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
23359 /* Now do generic ELF adjustments. */
23360 elf_adjust_symtab ();
23364 /* MD interface: Initialization. */
23367 set_constant_flonums (void)
23371 for (i = 0; i < NUM_FLOAT_VALS; i++)
23372 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23376 /* Auto-select Thumb mode if it's the only available instruction set for the
23377 given architecture. */
23380 autoselect_thumb_from_cpu_variant (void)
23382 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23383 opcode_select (16);
23392 if ( (arm_ops_hsh = hash_new ()) == NULL
23393 || (arm_cond_hsh = hash_new ()) == NULL
23394 || (arm_shift_hsh = hash_new ()) == NULL
23395 || (arm_psr_hsh = hash_new ()) == NULL
23396 || (arm_v7m_psr_hsh = hash_new ()) == NULL
23397 || (arm_reg_hsh = hash_new ()) == NULL
23398 || (arm_reloc_hsh = hash_new ()) == NULL
23399 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
23400 as_fatal (_("virtual memory exhausted"));
23402 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
23403 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
23404 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
23405 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
23406 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
23407 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
23408 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
23409 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
23410 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
23411 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
23412 (void *) (v7m_psrs + i));
23413 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
23414 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
23416 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
23418 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
23419 (void *) (barrier_opt_names + i));
23421 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
23423 struct reloc_entry * entry = reloc_names + i;
23425 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
23426 /* This makes encode_branch() use the EABI versions of this relocation. */
23427 entry->reloc = BFD_RELOC_UNUSED;
23429 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
23433 set_constant_flonums ();
23435 /* Set the cpu variant based on the command-line options. We prefer
23436 -mcpu= over -march= if both are set (as for GCC); and we prefer
23437 -mfpu= over any other way of setting the floating point unit.
23438 Use of legacy options with new options are faulted. */
23441 if (mcpu_cpu_opt || march_cpu_opt)
23442 as_bad (_("use of old and new-style options to set CPU type"));
23444 mcpu_cpu_opt = legacy_cpu;
23446 else if (!mcpu_cpu_opt)
23447 mcpu_cpu_opt = march_cpu_opt;
23452 as_bad (_("use of old and new-style options to set FPU type"));
23454 mfpu_opt = legacy_fpu;
23456 else if (!mfpu_opt)
23458 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
23459 || defined (TE_NetBSD) || defined (TE_VXWORKS))
23460 /* Some environments specify a default FPU. If they don't, infer it
23461 from the processor. */
23463 mfpu_opt = mcpu_fpu_opt;
23465 mfpu_opt = march_fpu_opt;
23467 mfpu_opt = &fpu_default;
23473 if (mcpu_cpu_opt != NULL)
23474 mfpu_opt = &fpu_default;
23475 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
23476 mfpu_opt = &fpu_arch_vfp_v2;
23478 mfpu_opt = &fpu_arch_fpa;
23484 mcpu_cpu_opt = &cpu_default;
23485 selected_cpu = cpu_default;
23489 selected_cpu = *mcpu_cpu_opt;
23491 mcpu_cpu_opt = &arm_arch_any;
23494 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23496 autoselect_thumb_from_cpu_variant ();
23498 arm_arch_used = thumb_arch_used = arm_arch_none;
23500 #if defined OBJ_COFF || defined OBJ_ELF
23502 unsigned int flags = 0;
23504 #if defined OBJ_ELF
23505 flags = meabi_flags;
23507 switch (meabi_flags)
23509 case EF_ARM_EABI_UNKNOWN:
23511 /* Set the flags in the private structure. */
23512 if (uses_apcs_26) flags |= F_APCS26;
23513 if (support_interwork) flags |= F_INTERWORK;
23514 if (uses_apcs_float) flags |= F_APCS_FLOAT;
23515 if (pic_code) flags |= F_PIC;
23516 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
23517 flags |= F_SOFT_FLOAT;
23519 switch (mfloat_abi_opt)
23521 case ARM_FLOAT_ABI_SOFT:
23522 case ARM_FLOAT_ABI_SOFTFP:
23523 flags |= F_SOFT_FLOAT;
23526 case ARM_FLOAT_ABI_HARD:
23527 if (flags & F_SOFT_FLOAT)
23528 as_bad (_("hard-float conflicts with specified fpu"));
23532 /* Using pure-endian doubles (even if soft-float). */
23533 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
23534 flags |= F_VFP_FLOAT;
23536 #if defined OBJ_ELF
23537 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
23538 flags |= EF_ARM_MAVERICK_FLOAT;
23541 case EF_ARM_EABI_VER4:
23542 case EF_ARM_EABI_VER5:
23543 /* No additional flags to set. */
23550 bfd_set_private_flags (stdoutput, flags);
23552 /* We have run out flags in the COFF header to encode the
23553 status of ATPCS support, so instead we create a dummy,
23554 empty, debug section called .arm.atpcs. */
23559 sec = bfd_make_section (stdoutput, ".arm.atpcs");
23563 bfd_set_section_flags
23564 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
23565 bfd_set_section_size (stdoutput, sec, 0);
23566 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
23572 /* Record the CPU type as well. */
23573 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
23574 mach = bfd_mach_arm_iWMMXt2;
23575 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
23576 mach = bfd_mach_arm_iWMMXt;
23577 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
23578 mach = bfd_mach_arm_XScale;
23579 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
23580 mach = bfd_mach_arm_ep9312;
23581 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
23582 mach = bfd_mach_arm_5TE;
23583 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
23585 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23586 mach = bfd_mach_arm_5T;
23588 mach = bfd_mach_arm_5;
23590 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
23592 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23593 mach = bfd_mach_arm_4T;
23595 mach = bfd_mach_arm_4;
23597 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
23598 mach = bfd_mach_arm_3M;
23599 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
23600 mach = bfd_mach_arm_3;
23601 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
23602 mach = bfd_mach_arm_2a;
23603 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
23604 mach = bfd_mach_arm_2;
23606 mach = bfd_mach_arm_unknown;
23608 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
23611 /* Command line processing. */
23614 Invocation line includes a switch not recognized by the base assembler.
23615 See if it's a processor-specific option.
23617 This routine is somewhat complicated by the need for backwards
23618 compatibility (since older releases of gcc can't be changed).
23619 The new options try to make the interface as compatible as
23622 New options (supported) are:
23624 -mcpu=<cpu name> Assemble for selected processor
23625 -march=<architecture name> Assemble for selected architecture
23626 -mfpu=<fpu architecture> Assemble for selected FPU.
23627 -EB/-mbig-endian Big-endian
23628 -EL/-mlittle-endian Little-endian
23629 -k Generate PIC code
23630 -mthumb Start in Thumb mode
23631 -mthumb-interwork Code supports ARM/Thumb interworking
23633 -m[no-]warn-deprecated Warn about deprecated features
23635 For now we will also provide support for:
23637 -mapcs-32 32-bit Program counter
23638 -mapcs-26 26-bit Program counter
23639 -macps-float Floats passed in FP registers
23640 -mapcs-reentrant Reentrant code
23642 (sometime these will probably be replaced with -mapcs=<list of options>
23643 and -matpcs=<list of options>)
23645 The remaining options are only supported for back-wards compatibility.
23646 Cpu variants, the arm part is optional:
23647 -m[arm]1 Currently not supported.
23648 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
23649 -m[arm]3 Arm 3 processor
23650 -m[arm]6[xx], Arm 6 processors
23651 -m[arm]7[xx][t][[d]m] Arm 7 processors
23652 -m[arm]8[10] Arm 8 processors
23653 -m[arm]9[20][tdmi] Arm 9 processors
23654 -mstrongarm[110[0]] StrongARM processors
23655 -mxscale XScale processors
23656 -m[arm]v[2345[t[e]]] Arm architectures
23657 -mall All (except the ARM1)
23659 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
23660 -mfpe-old (No float load/store multiples)
23661 -mvfpxd VFP Single precision
23663 -mno-fpu Disable all floating point instructions
23665 The following CPU names are recognized:
23666 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
23667 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
23668 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
23669 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
23670 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
23671 arm10t arm10e, arm1020t, arm1020e, arm10200e,
23672 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
23676 const char * md_shortopts = "m:k";
23678 #ifdef ARM_BI_ENDIAN
23679 #define OPTION_EB (OPTION_MD_BASE + 0)
23680 #define OPTION_EL (OPTION_MD_BASE + 1)
23682 #if TARGET_BYTES_BIG_ENDIAN
23683 #define OPTION_EB (OPTION_MD_BASE + 0)
23685 #define OPTION_EL (OPTION_MD_BASE + 1)
23688 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
23690 struct option md_longopts[] =
23693 {"EB", no_argument, NULL, OPTION_EB},
23696 {"EL", no_argument, NULL, OPTION_EL},
23698 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
23699 {NULL, no_argument, NULL, 0}
23702 size_t md_longopts_size = sizeof (md_longopts);
23704 struct arm_option_table
23706 char *option; /* Option name to match. */
23707 char *help; /* Help information. */
23708 int *var; /* Variable to change. */
23709 int value; /* What to change it to. */
23710 char *deprecated; /* If non-null, print this message. */
23713 struct arm_option_table arm_opts[] =
23715 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
23716 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
23717 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
23718 &support_interwork, 1, NULL},
23719 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
23720 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
23721 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
23723 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
23724 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
23725 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
23726 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
23729 /* These are recognized by the assembler, but have no affect on code. */
23730 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
23731 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
23733 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
23734 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
23735 &warn_on_deprecated, 0, NULL},
23736 {NULL, NULL, NULL, 0, NULL}
23739 struct arm_legacy_option_table
23741 char *option; /* Option name to match. */
23742 const arm_feature_set **var; /* Variable to change. */
23743 const arm_feature_set value; /* What to change it to. */
23744 char *deprecated; /* If non-null, print this message. */
23747 const struct arm_legacy_option_table arm_legacy_opts[] =
23749 /* DON'T add any new processors to this list -- we want the whole list
23750 to go away... Add them to the processors table instead. */
23751 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
23752 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
23753 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
23754 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
23755 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
23756 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
23757 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
23758 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
23759 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
23760 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
23761 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
23762 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
23763 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
23764 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
23765 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
23766 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
23767 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
23768 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
23769 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
23770 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
23771 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
23772 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
23773 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
23774 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
23775 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
23776 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
23777 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
23778 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
23779 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
23780 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
23781 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
23782 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
23783 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
23784 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
23785 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
23786 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
23787 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
23788 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
23789 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
23790 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
23791 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
23792 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
23793 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
23794 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
23795 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
23796 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
23797 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23798 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23799 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23800 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23801 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
23802 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
23803 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
23804 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
23805 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
23806 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
23807 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
23808 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
23809 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
23810 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
23811 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
23812 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
23813 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
23814 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
23815 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
23816 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
23817 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
23818 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
23819 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
23820 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
23821 N_("use -mcpu=strongarm110")},
23822 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
23823 N_("use -mcpu=strongarm1100")},
23824 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
23825 N_("use -mcpu=strongarm1110")},
23826 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
23827 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
23828 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
23830 /* Architecture variants -- don't add any more to this list either. */
23831 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
23832 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
23833 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
23834 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
23835 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
23836 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
23837 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
23838 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
23839 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
23840 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
23841 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
23842 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
23843 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
23844 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
23845 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
23846 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
23847 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
23848 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
23850 /* Floating point variants -- don't add any more to this list either. */
23851 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
23852 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
23853 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
23854 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
23855 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
23857 {NULL, NULL, ARM_ARCH_NONE, NULL}
23860 struct arm_cpu_option_table
23864 const arm_feature_set value;
23865 /* For some CPUs we assume an FPU unless the user explicitly sets
23867 const arm_feature_set default_fpu;
23868 /* The canonical name of the CPU, or NULL to use NAME converted to upper
23870 const char *canonical_name;
23873 /* This list should, at a minimum, contain all the cpu names
23874 recognized by GCC. */
23875 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
23876 static const struct arm_cpu_option_table arm_cpus[] =
23878 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
23879 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
23880 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
23881 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
23882 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
23883 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23884 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23885 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23886 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23887 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23888 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23889 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23890 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23891 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23892 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23893 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23894 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23895 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23896 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23897 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23898 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23899 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23900 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23901 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23902 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23903 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23904 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23905 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23906 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23907 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23908 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23909 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23910 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23911 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23912 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23913 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23914 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23915 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23916 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23917 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
23918 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23919 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23920 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23921 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23922 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23923 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23924 /* For V5 or later processors we default to using VFP; but the user
23925 should really set the FPU type explicitly. */
23926 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23927 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23928 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23929 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23930 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
23931 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23932 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
23933 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23934 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23935 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
23936 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23937 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23938 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23939 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23940 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23941 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
23942 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23943 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23944 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23945 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
23947 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
23948 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23949 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23950 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23951 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23952 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23953 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
23954 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
23955 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
23957 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
23958 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
23959 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
23960 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
23961 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
23962 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
23963 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
23964 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
23965 FPU_NONE, "Cortex-A5"),
23966 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23967 FPU_ARCH_NEON_VFP_V4,
23969 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
23970 ARM_FEATURE (0, FPU_VFP_V3
23971 | FPU_NEON_EXT_V1),
23973 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
23974 ARM_FEATURE (0, FPU_VFP_V3
23975 | FPU_NEON_EXT_V1),
23977 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23978 FPU_ARCH_NEON_VFP_V4,
23980 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23981 FPU_ARCH_NEON_VFP_V4,
23983 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
23985 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
23987 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
23988 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
23990 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
23991 FPU_NONE, "Cortex-R5"),
23992 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
23993 FPU_ARCH_VFP_V3D16,
23995 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
23996 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
23997 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
23998 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
23999 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
24000 /* ??? XSCALE is really an architecture. */
24001 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24002 /* ??? iwmmxt is not a processor. */
24003 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
24004 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
24005 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24007 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
24008 FPU_ARCH_MAVERICK, "ARM920T"),
24009 /* Marvell processors. */
24010 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
24011 FPU_ARCH_VFP_V3D16, NULL),
24013 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
24017 struct arm_arch_option_table
24021 const arm_feature_set value;
24022 const arm_feature_set default_fpu;
24025 /* This list should, at a minimum, contain all the architecture names
24026 recognized by GCC. */
24027 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24028 static const struct arm_arch_option_table arm_archs[] =
24030 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
24031 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
24032 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
24033 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
24034 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
24035 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
24036 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
24037 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
24038 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
24039 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
24040 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
24041 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
24042 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
24043 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
24044 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
24045 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
24046 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
24047 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
24048 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
24049 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
24050 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
24051 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP),
24052 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
24053 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
24054 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
24055 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
24056 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
24057 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
24058 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
24059 /* The official spelling of the ARMv7 profile variants is the dashed form.
24060 Accept the non-dashed form for compatibility with old toolchains. */
24061 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
24062 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
24063 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
24064 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
24065 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
24066 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
24067 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
24068 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
24069 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
24070 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
24071 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
24072 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24074 #undef ARM_ARCH_OPT
24076 /* ISA extensions in the co-processor and main instruction set space. */
24077 struct arm_option_extension_value_table
24081 const arm_feature_set value;
24082 const arm_feature_set allowed_archs;
24085 /* The following table must be in alphabetical order with a NULL last entry.
24087 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
24088 static const struct arm_option_extension_value_table arm_extensions[] =
24090 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
24091 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24092 ARM_FEATURE (ARM_EXT_V8, 0)),
24093 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
24094 ARM_FEATURE (ARM_EXT_V8, 0)),
24095 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
24096 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24097 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
24098 ARM_EXT_OPT ("iwmmxt2",
24099 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
24100 ARM_EXT_OPT ("maverick",
24101 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
24102 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
24103 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24104 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
24105 ARM_FEATURE (ARM_EXT_V8, 0)),
24106 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
24107 ARM_FEATURE (ARM_EXT_V6M, 0)),
24108 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
24109 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
24110 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
24112 ARM_FEATURE (ARM_EXT_V7A, 0)),
24113 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
24114 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24118 /* ISA floating-point and Advanced SIMD extensions. */
24119 struct arm_option_fpu_value_table
24122 const arm_feature_set value;
24125 /* This list should, at a minimum, contain all the fpu names
24126 recognized by GCC. */
24127 static const struct arm_option_fpu_value_table arm_fpus[] =
24129 {"softfpa", FPU_NONE},
24130 {"fpe", FPU_ARCH_FPE},
24131 {"fpe2", FPU_ARCH_FPE},
24132 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
24133 {"fpa", FPU_ARCH_FPA},
24134 {"fpa10", FPU_ARCH_FPA},
24135 {"fpa11", FPU_ARCH_FPA},
24136 {"arm7500fe", FPU_ARCH_FPA},
24137 {"softvfp", FPU_ARCH_VFP},
24138 {"softvfp+vfp", FPU_ARCH_VFP_V2},
24139 {"vfp", FPU_ARCH_VFP_V2},
24140 {"vfp9", FPU_ARCH_VFP_V2},
24141 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatbility. */
24142 {"vfp10", FPU_ARCH_VFP_V2},
24143 {"vfp10-r0", FPU_ARCH_VFP_V1},
24144 {"vfpxd", FPU_ARCH_VFP_V1xD},
24145 {"vfpv2", FPU_ARCH_VFP_V2},
24146 {"vfpv3", FPU_ARCH_VFP_V3},
24147 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
24148 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
24149 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
24150 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
24151 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
24152 {"arm1020t", FPU_ARCH_VFP_V1},
24153 {"arm1020e", FPU_ARCH_VFP_V2},
24154 {"arm1136jfs", FPU_ARCH_VFP_V2},
24155 {"arm1136jf-s", FPU_ARCH_VFP_V2},
24156 {"maverick", FPU_ARCH_MAVERICK},
24157 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
24158 {"neon-fp16", FPU_ARCH_NEON_FP16},
24159 {"vfpv4", FPU_ARCH_VFP_V4},
24160 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
24161 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
24162 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
24163 {"fp-armv8", FPU_ARCH_VFP_ARMV8},
24164 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
24165 {"crypto-neon-fp-armv8",
24166 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
24167 {NULL, ARM_ARCH_NONE}
24170 struct arm_option_value_table
24176 static const struct arm_option_value_table arm_float_abis[] =
24178 {"hard", ARM_FLOAT_ABI_HARD},
24179 {"softfp", ARM_FLOAT_ABI_SOFTFP},
24180 {"soft", ARM_FLOAT_ABI_SOFT},
24185 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
24186 static const struct arm_option_value_table arm_eabis[] =
24188 {"gnu", EF_ARM_EABI_UNKNOWN},
24189 {"4", EF_ARM_EABI_VER4},
24190 {"5", EF_ARM_EABI_VER5},
24195 struct arm_long_option_table
24197 char * option; /* Substring to match. */
24198 char * help; /* Help information. */
24199 int (* func) (char * subopt); /* Function to decode sub-option. */
24200 char * deprecated; /* If non-null, print this message. */
24204 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24206 arm_feature_set *ext_set = (arm_feature_set *)
24207 xmalloc (sizeof (arm_feature_set));
24209 /* We insist on extensions being specified in alphabetical order, and with
24210 extensions being added before being removed. We achieve this by having
24211 the global ARM_EXTENSIONS table in alphabetical order, and using the
24212 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24213 or removing it (0) and only allowing it to change in the order
24215 const struct arm_option_extension_value_table * opt = NULL;
24216 int adding_value = -1;
24218 /* Copy the feature set, so that we can modify it. */
24219 *ext_set = **opt_p;
24222 while (str != NULL && *str != 0)
24229 as_bad (_("invalid architectural extension"));
24234 ext = strchr (str, '+');
24239 len = strlen (str);
24241 if (len >= 2 && strncmp (str, "no", 2) == 0)
24243 if (adding_value != 0)
24246 opt = arm_extensions;
24254 if (adding_value == -1)
24257 opt = arm_extensions;
24259 else if (adding_value != 1)
24261 as_bad (_("must specify extensions to add before specifying "
24262 "those to remove"));
24269 as_bad (_("missing architectural extension"));
24273 gas_assert (adding_value != -1);
24274 gas_assert (opt != NULL);
24276 /* Scan over the options table trying to find an exact match. */
24277 for (; opt->name != NULL; opt++)
24278 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24280 /* Check we can apply the extension to this architecture. */
24281 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24283 as_bad (_("extension does not apply to the base architecture"));
24287 /* Add or remove the extension. */
24289 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
24291 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
24296 if (opt->name == NULL)
24298 /* Did we fail to find an extension because it wasn't specified in
24299 alphabetical order, or because it does not exist? */
24301 for (opt = arm_extensions; opt->name != NULL; opt++)
24302 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24305 if (opt->name == NULL)
24306 as_bad (_("unknown architectural extension `%s'"), str);
24308 as_bad (_("architectural extensions must be specified in "
24309 "alphabetical order"));
24315 /* We should skip the extension we've just matched the next time
24327 arm_parse_cpu (char *str)
24329 const struct arm_cpu_option_table *opt;
24330 char *ext = strchr (str, '+');
24336 len = strlen (str);
24340 as_bad (_("missing cpu name `%s'"), str);
24344 for (opt = arm_cpus; opt->name != NULL; opt++)
24345 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24347 mcpu_cpu_opt = &opt->value;
24348 mcpu_fpu_opt = &opt->default_fpu;
24349 if (opt->canonical_name)
24350 strcpy (selected_cpu_name, opt->canonical_name);
24355 for (i = 0; i < len; i++)
24356 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24357 selected_cpu_name[i] = 0;
24361 return arm_parse_extension (ext, &mcpu_cpu_opt);
24366 as_bad (_("unknown cpu `%s'"), str);
24371 arm_parse_arch (char *str)
24373 const struct arm_arch_option_table *opt;
24374 char *ext = strchr (str, '+');
24380 len = strlen (str);
24384 as_bad (_("missing architecture name `%s'"), str);
24388 for (opt = arm_archs; opt->name != NULL; opt++)
24389 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24391 march_cpu_opt = &opt->value;
24392 march_fpu_opt = &opt->default_fpu;
24393 strcpy (selected_cpu_name, opt->name);
24396 return arm_parse_extension (ext, &march_cpu_opt);
24401 as_bad (_("unknown architecture `%s'\n"), str);
24406 arm_parse_fpu (char * str)
24408 const struct arm_option_fpu_value_table * opt;
24410 for (opt = arm_fpus; opt->name != NULL; opt++)
24411 if (streq (opt->name, str))
24413 mfpu_opt = &opt->value;
24417 as_bad (_("unknown floating point format `%s'\n"), str);
24422 arm_parse_float_abi (char * str)
24424 const struct arm_option_value_table * opt;
24426 for (opt = arm_float_abis; opt->name != NULL; opt++)
24427 if (streq (opt->name, str))
24429 mfloat_abi_opt = opt->value;
24433 as_bad (_("unknown floating point abi `%s'\n"), str);
24439 arm_parse_eabi (char * str)
24441 const struct arm_option_value_table *opt;
24443 for (opt = arm_eabis; opt->name != NULL; opt++)
24444 if (streq (opt->name, str))
24446 meabi_flags = opt->value;
24449 as_bad (_("unknown EABI `%s'\n"), str);
24455 arm_parse_it_mode (char * str)
24457 bfd_boolean ret = TRUE;
24459 if (streq ("arm", str))
24460 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24461 else if (streq ("thumb", str))
24462 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24463 else if (streq ("always", str))
24464 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24465 else if (streq ("never", str))
24466 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24469 as_bad (_("unknown implicit IT mode `%s', should be "\
24470 "arm, thumb, always, or never."), str);
24477 struct arm_long_option_table arm_long_opts[] =
24479 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
24480 arm_parse_cpu, NULL},
24481 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
24482 arm_parse_arch, NULL},
24483 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
24484 arm_parse_fpu, NULL},
24485 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
24486 arm_parse_float_abi, NULL},
24488 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
24489 arm_parse_eabi, NULL},
24491 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
24492 arm_parse_it_mode, NULL},
24493 {NULL, NULL, 0, NULL}
24497 md_parse_option (int c, char * arg)
24499 struct arm_option_table *opt;
24500 const struct arm_legacy_option_table *fopt;
24501 struct arm_long_option_table *lopt;
24507 target_big_endian = 1;
24513 target_big_endian = 0;
24517 case OPTION_FIX_V4BX:
24522 /* Listing option. Just ignore these, we don't support additional
24527 for (opt = arm_opts; opt->option != NULL; opt++)
24529 if (c == opt->option[0]
24530 && ((arg == NULL && opt->option[1] == 0)
24531 || streq (arg, opt->option + 1)))
24533 /* If the option is deprecated, tell the user. */
24534 if (warn_on_deprecated && opt->deprecated != NULL)
24535 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24536 arg ? arg : "", _(opt->deprecated));
24538 if (opt->var != NULL)
24539 *opt->var = opt->value;
24545 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
24547 if (c == fopt->option[0]
24548 && ((arg == NULL && fopt->option[1] == 0)
24549 || streq (arg, fopt->option + 1)))
24551 /* If the option is deprecated, tell the user. */
24552 if (warn_on_deprecated && fopt->deprecated != NULL)
24553 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24554 arg ? arg : "", _(fopt->deprecated));
24556 if (fopt->var != NULL)
24557 *fopt->var = &fopt->value;
24563 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24565 /* These options are expected to have an argument. */
24566 if (c == lopt->option[0]
24568 && strncmp (arg, lopt->option + 1,
24569 strlen (lopt->option + 1)) == 0)
24571 /* If the option is deprecated, tell the user. */
24572 if (warn_on_deprecated && lopt->deprecated != NULL)
24573 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
24574 _(lopt->deprecated));
24576 /* Call the sup-option parser. */
24577 return lopt->func (arg + strlen (lopt->option) - 1);
24588 md_show_usage (FILE * fp)
24590 struct arm_option_table *opt;
24591 struct arm_long_option_table *lopt;
24593 fprintf (fp, _(" ARM-specific assembler options:\n"));
24595 for (opt = arm_opts; opt->option != NULL; opt++)
24596 if (opt->help != NULL)
24597 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
24599 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24600 if (lopt->help != NULL)
24601 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
24605 -EB assemble code for a big-endian cpu\n"));
24610 -EL assemble code for a little-endian cpu\n"));
24614 --fix-v4bx Allow BX in ARMv4 code\n"));
24622 arm_feature_set flags;
24623 } cpu_arch_ver_table;
24625 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
24626 least features first. */
24627 static const cpu_arch_ver_table cpu_arch_ver[] =
24633 {4, ARM_ARCH_V5TE},
24634 {5, ARM_ARCH_V5TEJ},
24638 {11, ARM_ARCH_V6M},
24639 {12, ARM_ARCH_V6SM},
24640 {8, ARM_ARCH_V6T2},
24641 {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT},
24642 {10, ARM_ARCH_V7R},
24643 {10, ARM_ARCH_V7M},
24644 {14, ARM_ARCH_V8A},
24648 /* Set an attribute if it has not already been set by the user. */
24650 aeabi_set_attribute_int (int tag, int value)
24653 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
24654 || !attributes_set_explicitly[tag])
24655 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
24659 aeabi_set_attribute_string (int tag, const char *value)
24662 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
24663 || !attributes_set_explicitly[tag])
24664 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
24667 /* Set the public EABI object attributes. */
24669 aeabi_set_public_attributes (void)
24674 int fp16_optional = 0;
24675 arm_feature_set flags;
24676 arm_feature_set tmp;
24677 const cpu_arch_ver_table *p;
24679 /* Choose the architecture based on the capabilities of the requested cpu
24680 (if any) and/or the instructions actually used. */
24681 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
24682 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
24683 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
24685 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
24686 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
24688 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
24689 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
24691 /* Allow the user to override the reported architecture. */
24694 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
24695 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
24698 /* We need to make sure that the attributes do not identify us as v6S-M
24699 when the only v6S-M feature in use is the Operating System Extensions. */
24700 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
24701 if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
24702 ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
24706 for (p = cpu_arch_ver; p->val; p++)
24708 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
24711 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
24715 /* The table lookup above finds the last architecture to contribute
24716 a new feature. Unfortunately, Tag13 is a subset of the union of
24717 v6T2 and v7-M, so it is never seen as contributing a new feature.
24718 We can not search for the last entry which is entirely used,
24719 because if no CPU is specified we build up only those flags
24720 actually used. Perhaps we should separate out the specified
24721 and implicit cases. Avoid taking this path for -march=all by
24722 checking for contradictory v7-A / v7-M features. */
24724 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
24725 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
24726 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
24729 /* Tag_CPU_name. */
24730 if (selected_cpu_name[0])
24734 q = selected_cpu_name;
24735 if (strncmp (q, "armv", 4) == 0)
24740 for (i = 0; q[i]; i++)
24741 q[i] = TOUPPER (q[i]);
24743 aeabi_set_attribute_string (Tag_CPU_name, q);
24746 /* Tag_CPU_arch. */
24747 aeabi_set_attribute_int (Tag_CPU_arch, arch);
24749 /* Tag_CPU_arch_profile. */
24750 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
24752 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
24754 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
24759 if (profile != '\0')
24760 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
24762 /* Tag_ARM_ISA_use. */
24763 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
24765 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
24767 /* Tag_THUMB_ISA_use. */
24768 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
24770 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
24771 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
24773 /* Tag_VFP_arch. */
24774 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
24775 aeabi_set_attribute_int (Tag_VFP_arch, 7);
24776 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
24777 aeabi_set_attribute_int (Tag_VFP_arch,
24778 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
24780 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
24783 aeabi_set_attribute_int (Tag_VFP_arch, 3);
24785 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
24787 aeabi_set_attribute_int (Tag_VFP_arch, 4);
24790 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
24791 aeabi_set_attribute_int (Tag_VFP_arch, 2);
24792 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
24793 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
24794 aeabi_set_attribute_int (Tag_VFP_arch, 1);
24796 /* Tag_ABI_HardFP_use. */
24797 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
24798 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
24799 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
24801 /* Tag_WMMX_arch. */
24802 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
24803 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
24804 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
24805 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
24807 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
24808 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
24809 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
24810 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
24812 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
24814 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
24818 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
24823 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
24824 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
24825 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
24829 We set Tag_DIV_use to two when integer divide instructions have been used
24830 in ARM state, or when Thumb integer divide instructions have been used,
24831 but we have no architecture profile set, nor have we any ARM instructions.
24833 For ARMv8 we set the tag to 0 as integer divide is implied by the base
24836 For new architectures we will have to check these tests. */
24837 gas_assert (arch <= TAG_CPU_ARCH_V8);
24838 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
24839 aeabi_set_attribute_int (Tag_DIV_use, 0);
24840 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
24841 || (profile == '\0'
24842 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
24843 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
24844 aeabi_set_attribute_int (Tag_DIV_use, 2);
24846 /* Tag_MP_extension_use. */
24847 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
24848 aeabi_set_attribute_int (Tag_MPextension_use, 1);
24850 /* Tag Virtualization_use. */
24851 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
24853 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
24856 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
24859 /* Add the default contents for the .ARM.attributes section. */
24863 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24866 aeabi_set_public_attributes ();
24868 #endif /* OBJ_ELF */
24871 /* Parse a .cpu directive. */
24874 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
24876 const struct arm_cpu_option_table *opt;
24880 name = input_line_pointer;
24881 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24882 input_line_pointer++;
24883 saved_char = *input_line_pointer;
24884 *input_line_pointer = 0;
24886 /* Skip the first "all" entry. */
24887 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
24888 if (streq (opt->name, name))
24890 mcpu_cpu_opt = &opt->value;
24891 selected_cpu = opt->value;
24892 if (opt->canonical_name)
24893 strcpy (selected_cpu_name, opt->canonical_name);
24897 for (i = 0; opt->name[i]; i++)
24898 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24900 selected_cpu_name[i] = 0;
24902 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24903 *input_line_pointer = saved_char;
24904 demand_empty_rest_of_line ();
24907 as_bad (_("unknown cpu `%s'"), name);
24908 *input_line_pointer = saved_char;
24909 ignore_rest_of_line ();
24913 /* Parse a .arch directive. */
24916 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
24918 const struct arm_arch_option_table *opt;
24922 name = input_line_pointer;
24923 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24924 input_line_pointer++;
24925 saved_char = *input_line_pointer;
24926 *input_line_pointer = 0;
24928 /* Skip the first "all" entry. */
24929 for (opt = arm_archs + 1; opt->name != NULL; opt++)
24930 if (streq (opt->name, name))
24932 mcpu_cpu_opt = &opt->value;
24933 selected_cpu = opt->value;
24934 strcpy (selected_cpu_name, opt->name);
24935 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24936 *input_line_pointer = saved_char;
24937 demand_empty_rest_of_line ();
24941 as_bad (_("unknown architecture `%s'\n"), name);
24942 *input_line_pointer = saved_char;
24943 ignore_rest_of_line ();
24947 /* Parse a .object_arch directive. */
24950 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
24952 const struct arm_arch_option_table *opt;
24956 name = input_line_pointer;
24957 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24958 input_line_pointer++;
24959 saved_char = *input_line_pointer;
24960 *input_line_pointer = 0;
24962 /* Skip the first "all" entry. */
24963 for (opt = arm_archs + 1; opt->name != NULL; opt++)
24964 if (streq (opt->name, name))
24966 object_arch = &opt->value;
24967 *input_line_pointer = saved_char;
24968 demand_empty_rest_of_line ();
24972 as_bad (_("unknown architecture `%s'\n"), name);
24973 *input_line_pointer = saved_char;
24974 ignore_rest_of_line ();
24977 /* Parse a .arch_extension directive. */
24980 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
24982 const struct arm_option_extension_value_table *opt;
24985 int adding_value = 1;
24987 name = input_line_pointer;
24988 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24989 input_line_pointer++;
24990 saved_char = *input_line_pointer;
24991 *input_line_pointer = 0;
24993 if (strlen (name) >= 2
24994 && strncmp (name, "no", 2) == 0)
25000 for (opt = arm_extensions; opt->name != NULL; opt++)
25001 if (streq (opt->name, name))
25003 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25005 as_bad (_("architectural extension `%s' is not allowed for the "
25006 "current base architecture"), name);
25011 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
25013 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
25015 mcpu_cpu_opt = &selected_cpu;
25016 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25017 *input_line_pointer = saved_char;
25018 demand_empty_rest_of_line ();
25022 if (opt->name == NULL)
25023 as_bad (_("unknown architecture `%s'\n"), name);
25025 *input_line_pointer = saved_char;
25026 ignore_rest_of_line ();
25029 /* Parse a .fpu directive. */
25032 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25034 const struct arm_option_fpu_value_table *opt;
25038 name = input_line_pointer;
25039 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25040 input_line_pointer++;
25041 saved_char = *input_line_pointer;
25042 *input_line_pointer = 0;
25044 for (opt = arm_fpus; opt->name != NULL; opt++)
25045 if (streq (opt->name, name))
25047 mfpu_opt = &opt->value;
25048 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25049 *input_line_pointer = saved_char;
25050 demand_empty_rest_of_line ();
25054 as_bad (_("unknown floating point format `%s'\n"), name);
25055 *input_line_pointer = saved_char;
25056 ignore_rest_of_line ();
25059 /* Copy symbol information. */
25062 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
25064 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
25068 /* Given a symbolic attribute NAME, return the proper integer value.
25069 Returns -1 if the attribute is not known. */
25072 arm_convert_symbolic_attribute (const char *name)
25074 static const struct
25079 attribute_table[] =
25081 /* When you modify this table you should
25082 also modify the list in doc/c-arm.texi. */
25083 #define T(tag) {#tag, tag}
25084 T (Tag_CPU_raw_name),
25087 T (Tag_CPU_arch_profile),
25088 T (Tag_ARM_ISA_use),
25089 T (Tag_THUMB_ISA_use),
25093 T (Tag_Advanced_SIMD_arch),
25094 T (Tag_PCS_config),
25095 T (Tag_ABI_PCS_R9_use),
25096 T (Tag_ABI_PCS_RW_data),
25097 T (Tag_ABI_PCS_RO_data),
25098 T (Tag_ABI_PCS_GOT_use),
25099 T (Tag_ABI_PCS_wchar_t),
25100 T (Tag_ABI_FP_rounding),
25101 T (Tag_ABI_FP_denormal),
25102 T (Tag_ABI_FP_exceptions),
25103 T (Tag_ABI_FP_user_exceptions),
25104 T (Tag_ABI_FP_number_model),
25105 T (Tag_ABI_align_needed),
25106 T (Tag_ABI_align8_needed),
25107 T (Tag_ABI_align_preserved),
25108 T (Tag_ABI_align8_preserved),
25109 T (Tag_ABI_enum_size),
25110 T (Tag_ABI_HardFP_use),
25111 T (Tag_ABI_VFP_args),
25112 T (Tag_ABI_WMMX_args),
25113 T (Tag_ABI_optimization_goals),
25114 T (Tag_ABI_FP_optimization_goals),
25115 T (Tag_compatibility),
25116 T (Tag_CPU_unaligned_access),
25117 T (Tag_FP_HP_extension),
25118 T (Tag_VFP_HP_extension),
25119 T (Tag_ABI_FP_16bit_format),
25120 T (Tag_MPextension_use),
25122 T (Tag_nodefaults),
25123 T (Tag_also_compatible_with),
25124 T (Tag_conformance),
25126 T (Tag_Virtualization_use),
25127 /* We deliberately do not include Tag_MPextension_use_legacy. */
25135 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25136 if (streq (name, attribute_table[i].name))
25137 return attribute_table[i].tag;
25143 /* Apply sym value for relocations only in the case that
25144 they are for local symbols and you have the respective
25145 architectural feature for blx and simple switches. */
25147 arm_apply_sym_value (struct fix * fixP)
25150 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25151 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
25153 switch (fixP->fx_r_type)
25155 case BFD_RELOC_ARM_PCREL_BLX:
25156 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25157 if (ARM_IS_FUNC (fixP->fx_addsy))
25161 case BFD_RELOC_ARM_PCREL_CALL:
25162 case BFD_RELOC_THUMB_PCREL_BLX:
25163 if (THUMB_IS_FUNC (fixP->fx_addsy))
25174 #endif /* OBJ_ELF */